language | func_code_string
---|---|
java | private String extractPathFromUrl(String url) {
String path = url.replaceAll(".+://.+?(?=/)", "");
if (!path.startsWith("/")) {
throw new RuntimeException("Path must start with '/': " + path);
}
return path;
} |
python | def request_and_check(self, url, method='get',
expected_content_type=None, **kwargs):
"""Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
RuntimeError if status_code does not match.
"""
assert method in ['get', 'post']
result = self.driver.request(method, url, **kwargs)
if result.status_code != requests.codes.ok:
raise RuntimeError('Error requesting %r, status = %d' %
(url, result.status_code))
if expected_content_type is not None:
content_type = result.headers.get('content-type', '')
if not re.match(expected_content_type, content_type):
raise RuntimeError(
'Error requesting %r, content type %r does not match %r' %
(url, content_type, expected_content_type))
return result |
python | def _make_image_description(self, datasets, **kwargs):
"""
generate image description for mitiff.
Satellite: NOAA 18
Date and Time: 06:58 31/05-2016
SatDir: 0
Channels: 6 In this file: 1-VIS0.63 2-VIS0.86 3(3B)-IR3.7
4-IR10.8 5-IR11.5 6(3A)-VIS1.6
Xsize: 4720
Ysize: 5544
Map projection: Stereographic
Proj string: +proj=stere +lon_0=0 +lat_0=90 +lat_ts=60
+ellps=WGS84 +towgs84=0,0,0 +units=km
+x_0=2526000.000000 +y_0=5806000.000000
TrueLat: 60 N
GridRot: 0
Xunit:1000 m Yunit: 1000 m
NPX: 0.000000 NPY: 0.000000
Ax: 1.000000 Ay: 1.000000 Bx: -2526.000000 By: -262.000000
Satellite: <satellite name>
Date and Time: <HH:MM dd/mm-yyyy>
SatDir: 0
Channels: <number of channels> In this file: <channel names in order>
Xsize: <number of pixels x>
Ysize: <number of pixels y>
Map projection: Stereographic
Proj string: <proj4 string with +x_0 and +y_0 which is the positive
distance from the projection origin
to the lower left corner of the image data>
TrueLat: 60 N
GridRot: 0
Xunit:1000 m Yunit: 1000 m
NPX: 0.000000 NPY: 0.000000
Ax: <pixels size x in km> Ay: <pixel size y in km> Bx: <left corner of
upper right pixel in km>
By: <upper corner of upper right pixel in km>
if palette image write special palette
if normal channel write table calibration:
Table_calibration: <channel name>, <calibration type>, [<unit>],
<no of bits of data>,
[<calibration values space separated>]\n\n
"""
translate_platform_name = {'metop01': 'Metop-B',
'metop02': 'Metop-A',
'metop03': 'Metop-C',
'noaa15': 'NOAA-15',
'noaa16': 'NOAA-16',
'noaa17': 'NOAA-17',
'noaa18': 'NOAA-18',
'noaa19': 'NOAA-19'}
first_dataset = datasets
if isinstance(datasets, list):
LOG.debug("Datasets is a list of dataset")
first_dataset = datasets[0]
if 'platform_name' in first_dataset.attrs:
_platform_name = translate_platform_name.get(
first_dataset.attrs['platform_name'],
first_dataset.attrs['platform_name'])
elif 'platform_name' in kwargs:
_platform_name = translate_platform_name.get(
kwargs['platform_name'], kwargs['platform_name'])
else:
_platform_name = None
_image_description = ''
_image_description.encode('utf-8')
_image_description += ' Satellite: '
if _platform_name is not None:
_image_description += _platform_name
_image_description += '\n'
_image_description += ' Date and Time: '
# Select earliest start_time
first = True
earliest = 0
for dataset in datasets:
if first:
earliest = dataset.attrs['start_time']
else:
if dataset.attrs['start_time'] < earliest:
earliest = dataset.attrs['start_time']
first = False
LOG.debug("earliest start_time: %s", earliest)
_image_description += earliest.strftime("%H:%M %d/%m-%Y\n")
_image_description += ' SatDir: 0\n'
_image_description += ' Channels: '
if isinstance(datasets, list):
LOG.debug("len datasets: %s", len(datasets))
_image_description += str(len(datasets))
elif 'bands' in datasets.sizes:
LOG.debug("len datasets: %s", datasets.sizes['bands'])
_image_description += str(datasets.sizes['bands'])
elif len(datasets.sizes) == 2:
LOG.debug("len datasets: 1")
_image_description += '1'
_image_description += ' In this file: '
channels = self._make_channel_list(datasets, **kwargs)
cns = {}
try:
cns = self.translate_channel_name.get(kwargs['sensor'], {})
except KeyError:
pass
_image_description += self._channel_names(channels, cns, **kwargs)
_image_description += self._add_sizes(datasets, first_dataset)
_image_description += ' Map projection: Stereographic\n'
_image_description += self._add_proj4_string(datasets, first_dataset)
_image_description += ' TrueLat: 60N\n'
_image_description += ' GridRot: 0\n'
_image_description += ' Xunit:1000 m Yunit: 1000 m\n'
_image_description += ' NPX: %.6f' % (0)
_image_description += ' NPY: %.6f' % (0) + '\n'
_image_description += self._add_pixel_sizes(datasets, first_dataset)
_image_description += self._add_corners(datasets, first_dataset)
if isinstance(datasets, list):
LOG.debug("Area extent: %s", first_dataset.attrs['area'].area_extent)
else:
LOG.debug("Area extent: %s", datasets.attrs['area'].area_extent)
_image_description += self._add_calibration(channels, cns, datasets, **kwargs)
return _image_description |
python | def get(self, path, params=None):
"""Make a GET request, optionally including a parameters, to a path.
The path of the request is the full URL.
Parameters
----------
path : str
The URL to request
params : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
Raises
------
HTTPError
If the server returns anything other than a 200 (OK) code
See Also
--------
get_query, get
"""
resp = self._session.get(path, params=params)
if resp.status_code != 200:
if resp.headers.get('Content-Type', '').startswith('text/html'):
text = resp.reason
else:
text = resp.text
raise requests.HTTPError('Error accessing {0}\n'
'Server Error ({1:d}: {2})'.format(resp.request.url,
resp.status_code,
text))
return resp |
java | private void obtainPositiveButtonText(@NonNull final TypedArray typedArray) {
setPositiveButtonText(
typedArray.getText(R.styleable.DialogPreference_android_positiveButtonText));
} |
python | def cto(self):
"""
The final character position in the surface string.
Defaults to -1 if there is no valid cto value.
"""
cto = -1
try:
if self.lnk.type == Lnk.CHARSPAN:
cto = self.lnk.data[1]
except AttributeError:
pass # use default cto of -1
return cto |
java | private static String normalizeClusterUrl(String clusterIdentifier) {
try {
URI uri = new URI(clusterIdentifier.trim());
// URIs without protocol prefix
if (!uri.isOpaque() && null != uri.getHost()) {
clusterIdentifier = uri.getHost();
} else {
clusterIdentifier = uri.toString().replaceAll("[/:]"," ").trim().replaceAll(" ", "_");
}
} catch (URISyntaxException e) {
//leave ID as is
}
return clusterIdentifier;
} |
java | void setPageIndexAsString(String pageIndex) {
if (CmsStringUtil.isEmpty(pageIndex)) {
return;
}
try {
m_pageIndex = Integer.parseInt(pageIndex);
} catch (NumberFormatException e) {
// intentionally left blank
}
} |
python | def columns(self):
"""
Returns the list of column names that this index will be expecting as \
inputs when it is called.
:return [<str>, ..]
"""
schema = self.schema()
return [schema.column(col) for col in self.__columns] |
python | def should_skip(app, what, name, obj, skip, options):
"""
Callback object chooser function for docstring documentation.
"""
if name in ["__doc__", "__module__", "__dict__", "__weakref__",
"__abstractmethods__"
] or name.startswith("_abc_"):
return True
return False |
java | public List<IWord> getWordList()
{
List<IWord> wordList = new LinkedList<IWord>();
for (Sentence sentence : sentenceList)
{
wordList.addAll(sentence.wordList);
}
return wordList;
} |
java | private static int createDFSPaths()
{
String basePath = new String(TEST_BASE_DIR) + "/" + hostName_ + "_" + processName_;
try {
long startTime = System.nanoTime();
Boolean ret = dfsClient_.mkdirs(basePath);
timingMkdirs_.add(new Double((System.nanoTime() - startTime)/(1E9)));
if (!ret) {
System.out.printf("Error: failed to create test base dir [%s]\n", basePath);
return -1;
}
} catch( IOException e) {
e.printStackTrace();
throw new RuntimeException();
}
Date alpha = new Date();
if (CreateDFSPaths(0, basePath) < 0) {
return -1;
}
Date zigma = new Date();
System.out.printf("Client: %d paths created in %d msec\n",
totalCreateCount, timeDiffMilliSec(alpha, zigma));
return 0;
} |
java | public static MonitorAndManagementSettings newInstance(URL settingsXml) throws IOException, JAXBException {
InputStream istream = settingsXml.openStream();
JAXBContext ctx = JAXBContext.newInstance(MonitorAndManagementSettings.class);
return (MonitorAndManagementSettings) ctx.createUnmarshaller().unmarshal(istream);
} |
java | public static void startPollingForMessages(final String queueURL) {
if (!StringUtils.isBlank(queueURL) && !POLLING_THREADS.containsKey(queueURL)) {
logger.info("Starting SQS river using queue {} (polling interval: {}s)", queueURL, POLLING_INTERVAL);
POLLING_THREADS.putIfAbsent(queueURL, Para.getExecutorService().submit(new SQSRiver(queueURL)));
Para.addDestroyListener(new DestroyListener() {
public void onDestroy() {
stopPollingForMessages(queueURL);
}
});
}
} |
python | def convert_to_push(ir, node):
"""
Convert a call to a PUSH operaiton
The funciton assume to receive a correct IR
The checks must be done by the caller
May necessitate to create an intermediate operation (InitArray)
Necessitate to return the lenght (see push documentation)
As a result, the function return may return a list
"""
lvalue = ir.lvalue
if isinstance(ir.arguments[0], list):
ret = []
val = TemporaryVariable(node)
operation = InitArray(ir.arguments[0], val)
ret.append(operation)
ir = Push(ir.destination, val)
length = Literal(len(operation.init_values))
t = operation.init_values[0].type
ir.lvalue.set_type(ArrayType(t, length))
ret.append(ir)
if lvalue:
length = Length(ir.array, lvalue)
length.lvalue.points_to = ir.lvalue
ret.append(length)
return ret
ir = Push(ir.destination, ir.arguments[0])
if lvalue:
ret = []
ret.append(ir)
length = Length(ir.array, lvalue)
length.lvalue.points_to = ir.lvalue
ret.append(length)
return ret
return ir |
python | def find_nested_models(self, model, definitions):
'''
Prepare a dictionary with references to other definitions; create one dictionary
that contains the full information about the model, with all nested references resolved
:param model: --dictionary that contains information about the model
:type model: dict
:param definitions: --dictionary that contains a copy of all definitions
:type definitions: dict
:return: dictionary with all nested references resolved
:rtype: dict
'''
for key, value in model.items():
if isinstance(value, dict):
model[key] = self.find_nested_models(value, definitions)
elif key == '$ref':
def_name = value.split('/')[-1]
def_property = definitions[def_name]['properties']
return self.find_nested_models(def_property, definitions)
return model |
java | @SuppressWarnings("all")
public void validateFalse(boolean value, String name, String message) {
if (value) {
addError(name, Optional.ofNullable(message).orElse(messages.get(Validation.FALSE_KEY.name(), name)));
}
} |
python | def get_etf_records(self, etf_name, offset, limit, _async=False):
"""
Query ETF swap-in/swap-out (creation/redemption) records
:param etf_name: ETF fund name
:param offset: starting position; 0 is the most recent record
:param limit: number of records to return, in (0, 100]
:param _async:
:return:
"""
params = {}
path = '/etf/list'
params['etf_name'] = etf_name
params['offset'] = offset
params['limit'] = limit
return api_key_get(params, path, _async=_async) |
python | def get_scaled_f_scores(self,
category,
scaler_algo=DEFAULT_SCALER_ALGO,
beta=DEFAULT_BETA):
''' Computes scaled-fscores
Parameters
----------
category : str
category name to score
scaler_algo : str
Function that scales an array to the range [0, 1]. Use 'percentile' or 'normcdf'. Default.
beta : float
Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default.
Returns
-------
np.array of harmonic means of scaled P(word|category) and scaled P(category|word)
'''
assert beta > 0
cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)
scores = self._get_scaled_f_score_from_counts(cat_word_counts, not_cat_word_counts, scaler_algo, beta)
return np.array(scores) |
python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'event_rate') and self.event_rate is not None:
_dict['event_rate'] = self.event_rate
return _dict |
python | def verify_token(self, token, requested_access):
"""
Check the token bearer is permitted to access the resource
:param token: Access token
:param requested_access: the access level the client has requested
:returns: boolean
"""
client = API(options.url_auth,
auth_username=options.service_id,
auth_password=options.client_secret,
ssl_options=ssl_server_options())
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
body = urllib.urlencode({'token': token, 'requested_access': requested_access})
client.auth.verify.prepare_request(headers=headers, request_timeout=180)
try:
result = yield client.auth.verify.post(body=body)
except tornado.httpclient.HTTPError as ex:
# Must be converted to a tornado.web.HTTPError for the server
# to handle it correctly
logging.exception(ex.message)
raise HTTPError(500, 'Internal Server Error')
raise Return(result['has_access']) |
python | def _numbers_units(N):
"""
>>> _numbers_units(45)
'123456789012345678901234567890123456789012345'
"""
lst = range(1, N + 1)
return "".join(list(map(lambda i: str(i % 10), lst))) |
java | private static Expression buildAccessChain(
Expression base, CodeChunk.Generator generator, Iterator<ChainAccess> chain) {
if (!chain.hasNext()) {
return base; // base case
}
ChainAccess link = chain.next();
if (link.nullSafe) {
if (!base.isCheap()) {
base = generator.declarationBuilder().setRhs(base).build().ref();
}
return ifExpression(base.doubleEqualsNull(), LITERAL_NULL)
.setElse(buildAccessChain(link.extend(base), generator, chain))
.build(generator);
}
return buildAccessChain(link.extend(base), generator, chain);
} |
python | def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
CompressedStreamFileEntry: a file entry or None if not available.
"""
return compressed_stream_file_entry.CompressedStreamFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True) |
python | def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
def _int2hex(number):
return "{0:0{1}x}".format(number, 2)
try:
unpacked_data = struct.unpack('!6B', buff[offset:offset+6])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
transformed_data = ':'.join([_int2hex(x) for x in unpacked_data])
self._value = transformed_data |
python | def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler (Deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
warnings.warn('profiler.profiler_set_config() is deprecated. '
'Please use profiler.set_config() instead')
keys = c_str_array([key for key in ["profile_" + mode, "filename"]])
values = c_str_array([str(val) for val in [True, filename]])
assert len(keys) == len(values)
check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle)) |
python | def reset(self):
"""Resets simulation."""
# TODO(yukez): investigate black screen of death
# if there is an active viewer window, destroy it
self._destroy_viewer()
self._reset_internal()
self.sim.forward()
return self._get_observation() |
python | def is_zero_user(self):
"""返回当前用户是否为三零用户,其实是四零: 赞同0,感谢0,提问0,回答0.
:return: 是否是三零用户
:rtype: bool
"""
return self.upvote_num + self.thank_num + \
self.question_num + self.answer_num == 0 |
python | def match_many(self, models, results, relation):
"""
Match the eagerly loaded results to their parents.
:param models: The parents
:type models: list
:param results: The results collection
:type results: Collection
:param relation: The relation
:type relation: str
:rtype: list
"""
return self._match_one_or_many(models, results, relation, 'many') |
python | def power_up(self):
"""
power up the HX711
:return: always True
:rtype bool
"""
GPIO.output(self._pd_sck, False)
time.sleep(0.01)
return True |
python | def dict_to_example(dictionary):
"""Converts a dictionary of string->int to a tf.Example."""
features = {}
for k, v in six.iteritems(dictionary):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
return tf.train.Example(features=tf.train.Features(feature=features)) |
java | @Override
public PathImpl schemeWalk(String userPath,
Map<String,Object> attributes,
String filePath,
int offset)
{
int length = filePath.length();
if (length <= offset || filePath.charAt(offset) != '(')
return super.schemeWalk(userPath, attributes, filePath, offset);
MergePath mergePath = createMergePath();
mergePath.setUserPath(userPath);
int head = ++offset;
int tail = head;
while (tail < length) {
int ch = filePath.charAt(tail);
if (ch == ')') {
if (head + 1 != tail) {
String subPath = filePath.substring(head, tail);
if (subPath.startsWith("(") && subPath.endsWith(")"))
subPath = subPath.substring(1, subPath.length() - 1);
mergePath.addMergePath(VfsOld.lookup(subPath));
}
if (tail + 1 == length)
return mergePath;
else
return mergePath.fsWalk(userPath, attributes, filePath.substring(tail + 1));
}
else if (ch == ';') {
String subPath = filePath.substring(head, tail);
if (subPath.startsWith("(") && subPath.endsWith(")"))
subPath = subPath.substring(1, subPath.length() - 1);
mergePath.addMergePath(VfsOld.lookup(subPath));
head = ++tail;
}
else if (ch == '(') {
int depth = 1;
for (tail++; tail < length; tail++) {
if (filePath.charAt(tail) == '(')
depth++;
else if (filePath.charAt(tail) == ')') {
tail++;
depth--;
if (depth == 0)
break;
}
}
if (depth != 0)
return new NotFoundPath(getSchemeMap(), filePath);
}
else
tail++;
}
return new NotFoundPath(getSchemeMap(), filePath);
} |
java | @Override
public CPDefinitionVirtualSetting fetchByUUID_G(String uuid, long groupId) {
return fetchByUUID_G(uuid, groupId, true);
} |
java | public void setTickMarkColor(final Color COLOR) {
if (null == tickMarkColor) {
_tickMarkColor = COLOR;
fireUpdateEvent(REDRAW_EVENT);
} else {
tickMarkColor.set(COLOR);
}
} |
python | def prox_hard_plus(X, step, thresh=0):
"""Hard thresholding with projection onto non-negative numbers
"""
return prox_plus(prox_hard(X, step, thresh=thresh), step) |
python | def _mm_top1(n_items, data, params):
"""Inner loop of MM algorithm for top1 data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, losers in data:
wins[winner] += 1
val = 1 / (weights.take(losers).sum() + weights[winner])
for item in itertools.chain([winner], losers):
denoms[item] += val
return wins, denoms |
java | public CreateNotificationResponse createNotification(CreateNotificationRequest request) {
checkNotNull(request, "The parameter request should NOT be null.");
checkStringNotEmpty(request.getName(),
"The parameter name should NOT be null or empty string.");
checkStringNotEmpty(request.getEndpoint(),
"The parameter endpoint should NOT be null or empty string.");
InternalRequest internalRequest = createRequest(HttpMethodName.POST, request, LIVE_NOTIFICATION);
return invokeHttpClient(internalRequest, CreateNotificationResponse.class);
} |
java | public static <K> boolean containsAny(Set<K> aSet, K[] arr) {
for (K obj : arr) {
if (aSet.contains(obj)) {
return true;
}
}
return false;
} |
python | def create_chebyshev_samples(order, dim=1):
"""
Chebyshev sampling function.
Args:
order (int):
The number of samples to create along each axis.
dim (int):
The number of dimensions to create samples for.
Returns:
samples following Chebyshev sampling scheme mapped to the
``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
"""
x_data = .5*numpy.cos(numpy.arange(order, 0, -1)*numpy.pi/(order+1)) + .5
x_data = chaospy.quad.combine([x_data]*dim)
return x_data.T |
python | def transform(self, jam, query=None):
'''Transform jam object to make data for this task
Parameters
----------
jam : jams.JAMS
The jams container object
query : string, dict, or callable [optional]
An optional query to narrow the elements of `jam.annotations`
to be considered.
If not provided, all annotations are considered.
Returns
-------
data : dict
A dictionary of transformed annotations.
All annotations which can be converted to the target namespace
will be converted.
'''
anns = []
if query:
results = jam.search(**query)
else:
results = jam.annotations
# Find annotations that can be coerced to our target namespace
for ann in results:
try:
anns.append(jams.nsconvert.convert(ann, self.namespace))
except jams.NamespaceError:
pass
duration = jam.file_metadata.duration
# If none, make a fake one
if not anns:
anns = [self.empty(duration)]
# Apply transformations
results = []
for ann in anns:
results.append(self.transform_annotation(ann, duration))
# If the annotation range is None, it spans the entire track
if ann.time is None or ann.duration is None:
valid = [0, duration]
else:
valid = [ann.time, ann.time + ann.duration]
results[-1]['_valid'] = time_to_frames(valid, sr=self.sr,
hop_length=self.hop_length)
# Prefix and collect
return self.merge(results) |
python | def combining_successors(state, last_action=()):
"""
Successors function for finding path of combining F2L pair.
"""
((corner, edge), (L, U, F, D, R, B)) = state
U_turns = [Formula("U"), Formula("U'"), Formula("U2")] if len(last_action) != 1 else []
R_turns = [Formula("R U R'"), Formula("R U' R'"), Formula("R U2 R'")] if "R" not in last_action else []
F_turns = [Formula("F' U F"), Formula("F' U' F"), Formula("F' U2 F")] if "F" not in last_action else []
for act in (U_turns + R_turns + F_turns):
new = (corner, edge)
for q in act:
new = F2LPairSolver._rotate(new, q)
yield act, (new, (L, U, F, D, R, B)) |
java | private Map<String, Resource> getClassPathResources(String mimetype, Collection<String> paths) {
// If no paths are provided, just return an empty map
if (paths == null)
return Collections.<String, Resource>emptyMap();
// Add classpath resource for each path provided
Map<String, Resource> resources = new HashMap<String, Resource>(paths.size());
for (String path : paths)
resources.put(path, new ClassPathResource(classLoader, mimetype, path));
// Callers should not rely on modifying the result
return Collections.unmodifiableMap(resources);
} |
python | def iri_to_uri(value, normalize=False):
"""
Encodes a unicode IRI into an ASCII byte string URI
:param value:
A unicode string of an IRI
:param normalize:
A bool that controls URI normalization
:return:
A byte string of the ASCII-encoded URI
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
value must be a unicode string, not %s
''',
type_name(value)
))
scheme = None
# Python 2.6 doesn't split properly if the URL doesn't start with http:// or https://
if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'):
real_prefix = None
prefix_match = re.match('^[^:]*://', value)
if prefix_match:
real_prefix = prefix_match.group(0)
value = 'http://' + value[len(real_prefix):]
parsed = urlsplit(value)
if real_prefix:
value = real_prefix + value[7:]
scheme = _urlquote(real_prefix[:-3])
else:
parsed = urlsplit(value)
if scheme is None:
scheme = _urlquote(parsed.scheme)
hostname = parsed.hostname
if hostname is not None:
hostname = hostname.encode('idna')
# RFC 3986 allows userinfo to contain sub-delims
username = _urlquote(parsed.username, safe='!$&\'()*+,;=')
password = _urlquote(parsed.password, safe='!$&\'()*+,;=')
port = parsed.port
if port is not None:
port = str_cls(port).encode('ascii')
netloc = b''
if username is not None:
netloc += username
if password:
netloc += b':' + password
netloc += b'@'
if hostname is not None:
netloc += hostname
if port is not None:
default_http = scheme == b'http' and port == b'80'
default_https = scheme == b'https' and port == b'443'
if not normalize or (not default_http and not default_https):
netloc += b':' + port
# RFC 3986 allows a path to contain sub-delims, plus "@" and ":"
path = _urlquote(parsed.path, safe='/!$&\'()*+,;=@:')
# RFC 3986 allows the query to contain sub-delims, plus "@", ":" , "/" and "?"
query = _urlquote(parsed.query, safe='/?!$&\'()*+,;=@:')
# RFC 3986 allows the fragment to contain sub-delims, plus "@", ":" , "/" and "?"
fragment = _urlquote(parsed.fragment, safe='/?!$&\'()*+,;=@:')
if normalize and query is None and fragment is None and path == b'/':
path = None
# Python 2.7 compat
if path is None:
path = ''
output = urlunsplit((scheme, netloc, path, query, fragment))
if isinstance(output, str_cls):
output = output.encode('latin1')
return output |
java | protected static int binarySearchGuess(@NotNull final BasePage page, @NotNull final ByteIterable key) {
int index = binarySearchGuessUnsafe(page, key);
if (index < 0) index = 0;
return index;
} |
java | @NonNull @UiThread
public static Router attachRouter(@NonNull Activity activity, @NonNull ViewGroup container, @Nullable Bundle savedInstanceState) {
ThreadUtils.ensureMainThread();
LifecycleHandler lifecycleHandler = LifecycleHandler.install(activity);
Router router = lifecycleHandler.getRouter(container, savedInstanceState);
router.rebindIfNeeded();
return router;
} |
java | protected EncodedImage getByteBufferBackedEncodedImage(
InputStream inputStream,
int length) throws IOException {
CloseableReference<PooledByteBuffer> ref = null;
try {
if (length <= 0) {
ref = CloseableReference.of(mPooledByteBufferFactory.newByteBuffer(inputStream));
} else {
ref = CloseableReference.of(mPooledByteBufferFactory.newByteBuffer(inputStream, length));
}
return new EncodedImage(ref);
} finally {
Closeables.closeQuietly(inputStream);
CloseableReference.closeSafely(ref);
}
} |
python | def verify(x, t, y, pi, errorOnFail=True):
"""
Verifies a zero-knowledge proof.
@errorOnFail: Raise an exception if the proof does not hold.
"""
# Unpack the proof
p,_,_ = pi
# Verify types
assertType(x, str)
assertType(t, str)
assertType(y, G1Element)
assertType(p, G2Element)
# TODO: beta can be pre-computed while waiting for a server response.
beta = hashG1(t, x)
# Compute q = e( H(t,m), P)**kw two ways
q1 = pair(beta, p)
q2 = pair(y, generatorG2())
# The BLS signature is valid when q1 == q2
if q1 == q2:
return True
if errorOnFail:
raise Exception("BLS signature failed verification")
else:
return False |
python | def poll(self, id):
"""Poll with a given id.
Parameters
----------
id : int
Poll id.
Returns
-------
an :class:`ApiQuery` of :class:`Poll`
Raises
------
:class:`NotFound`
If a poll with the requested id doesn't exist.
"""
@api_query('poll', pollid=str(id))
async def result(_, root):
elem = root.find('POLL')
if not elem:
raise NotFound(f'No poll found with id {id}')
return Poll(elem)
return result(self) |
java | public static mps_upgrade upgrade(nitro_service client, mps_upgrade resource) throws Exception
{
return ((mps_upgrade[]) resource.perform_operation(client, "upgrade"))[0];
} |
python | def RebootInstance(r, instance, reboot_type=None, ignore_secondaries=False,
dry_run=False):
"""
Reboots an instance.
@type instance: str
@param instance: instance to reboot
@type reboot_type: str
@param reboot_type: one of: hard, soft, full
@type ignore_secondaries: bool
@param ignore_secondaries: if True, ignores errors for the secondary node
while re-assembling disks (in hard-reboot mode only)
@type dry_run: bool
@param dry_run: whether to perform a dry run
"""
query = {
"ignore_secondaries": ignore_secondaries,
"dry-run": dry_run,
}
if reboot_type:
if reboot_type not in ("hard", "soft", "full"):
raise GanetiApiError("reboot_type must be one of 'hard',"
" 'soft', or 'full'")
query["type"] = reboot_type
return r.request("post", "/2/instances/%s/reboot" % instance, query=query) |
python | def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
logs
/containers/<id>/logs
power
/containers/<id>/power
``super`` is called otherwise.
"""
if which in ('logs', 'power'):
return '{0}/{1}'.format(
super(AbstractDockerContainer, self).path(which='self'),
which
)
return super(AbstractDockerContainer, self).path(which) |
python | def stats(args):
"""
%prog stats infile.gff
Collect gene statistics based on a gff file. There are some terminology issues
here: what we normally call "gene" is actually mRNA, and what we sometimes call "exon"
is actually CDS, but they are configurable.
The numbers are written to text files in four separate folders,
corresponding to the four metrics:
Exon length, Intron length, Gene length, Exon count
With data written to disk then you can run %prog histogram
"""
p = OptionParser(stats.__doc__)
p.add_option("--gene", default="mRNA",
help="The gene type [default: %default]")
p.add_option("--exon", default="CDS",
help="The exon type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gff_file, = args
g = make_index(gff_file)
exon_lengths = []
intron_lengths = []
gene_lengths = []
exon_counts = []
for feat in g.features_of_type(opts.gene):
exons = []
for c in g.children(feat.id, 1):
if c.featuretype != opts.exon:
continue
exons.append((c.chrom, c.start, c.stop))
introns = range_interleave(exons)
feat_exon_lengths = [(stop - start + 1) for (chrom, start, stop) in exons]
feat_intron_lengths = [(stop - start + 1) for (chrom, start, stop) in introns]
exon_lengths += feat_exon_lengths
intron_lengths += feat_intron_lengths
gene_lengths.append(sum(feat_exon_lengths))
exon_counts.append(len(feat_exon_lengths))
a = SummaryStats(exon_lengths)
b = SummaryStats(intron_lengths)
c = SummaryStats(gene_lengths)
d = SummaryStats(exon_counts)
for x, title in zip((a, b, c, d), metrics):
x.title = title
print(x, file=sys.stderr)
prefix = gff_file.split(".")[0]
for x in (a, b, c, d):
dirname = x.title
mkdir(dirname)
txtfile = op.join(dirname, prefix + ".txt")
x.tofile(txtfile) |
java | public DescribeComplianceByResourceRequest withComplianceTypes(String... complianceTypes) {
if (this.complianceTypes == null) {
setComplianceTypes(new com.amazonaws.internal.SdkInternalList<String>(complianceTypes.length));
}
for (String ele : complianceTypes) {
this.complianceTypes.add(ele);
}
return this;
} |
python | def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(_linkcode_resolve, revision=revision, package=package,
url_fmt=url_fmt) |
java | public FessMessages addErrorsDesignFileIsUnsupportedType(String property) {
assertPropertyNotNull(property);
add(property, new UserMessage(ERRORS_design_file_is_unsupported_type));
return this;
} |
python | def token(self):
'''
The token used for the request
'''
# find the token (cookie or headers)
if AUTH_TOKEN_HEADER in self.request.headers:
return self.request.headers[AUTH_TOKEN_HEADER]
else:
return self.get_cookie(AUTH_COOKIE_NAME) |
python | def check_unknown_attachment_in_space(confluence, space_key):
"""
Detect errors in space
:param confluence:
:param space_key:
:return:
"""
page_ids = get_all_pages_ids(confluence, space_key)
print("Start review pages {} in {}".format(len(page_ids), space_key))
for page_id in page_ids:
link = confluence.has_unknown_attachment_error(page_id)
if len(link) > 0:
print(link) |
python | def plot_total(self, colorbar=True, cb_orientation='vertical',
cb_label=None, ax=None, show=True, fname=None, **kwargs):
"""
Plot the total gravity disturbance.
Usage
-----
x.plot_total([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = 'gravity disturbance'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
Notes
-----
If the normal gravity is removed from the total gravitational
acceleration, the output will be displayed in mGals.
"""
if self.normal_gravity is True:
if cb_label is None:
cb_label = 'Gravity disturbance, mGal'
else:
if cb_label is None:
cb_label = 'Gravity disturbance, m s$^{-2}$'
if ax is None:
if self.normal_gravity is True:
fig, axes = (self.total*1.e5).plot(
colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, show=False, **kwargs)
else:
fig, axes = self.total.plot(
colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, show=False, **kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
if self.normal_gravity is True:
(self.total*1.e5).plot(
colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs)
else:
self.total.plot(
colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs) |
java | @RequestMapping("/person")
public final String person(
@RequestParam(value = "id",
required = false,
defaultValue = "I0") final String idString,
@RequestParam(value = "db",
required = false,
defaultValue = "schoeller") final String dbName,
final Model model) {
logger.debug("Entering person");
final RenderingContext context = createRenderingContext();
final Person person = fetchPerson(dbName, idString, context);
final List<PlaceInfo> places = fetchPlaces(person, context);
final Boolean showMap = !places.isEmpty();
final String filename = gedbrowserHome + "/" + dbName + ".ged";
model.addAttribute("filename", filename);
model.addAttribute("name", nameHtml(context, person));
model.addAttribute("model", personRenderer(context, person));
model.addAttribute("places", places);
model.addAttribute("key", getMapsKey());
model.addAttribute("showMap", showMap);
model.addAttribute("appInfo", appInfo);
logger.debug("Exiting person");
return "person";
} |
java | public void reset() {
while (holes != null) {
holes = freePointBag(holes);
}
contour.clear();
holes = null;
} |
python | def handle_truncated_response(callback, params, entities):
"""
Handle truncated responses
:param callback:
:param params:
:param entities:
:return:
"""
results = {}
for entity in entities:
results[entity] = []
while True:
try:
marker_found = False
response = callback(**params)
for entity in entities:
if entity in response:
results[entity] = results[entity] + response[entity]
for marker_name in ['NextToken', 'Marker', 'PaginationToken']:
if marker_name in response and response[marker_name]:
params[marker_name] = response[marker_name]
marker_found = True
if not marker_found:
break
except Exception as e:
if is_throttled(e):
time.sleep(1)
else:
raise e
return results |
java | @Override
@SuppressWarnings("unchecked")
@Scope(DocScope.IO)
public <T> T projectDOMNode(final Node documentOrElement, final Class<T> projectionInterface) {
ensureIsValidProjectionInterface(projectionInterface);
if (documentOrElement == null) {
throw new IllegalArgumentException("Parameter node must not be null");
}
final Map<Class<?>, Object> mixinsForProjection = mixins.containsKey(projectionInterface) ? Collections.unmodifiableMap(mixins.get(projectionInterface)) : Collections.<Class<?>, Object> emptyMap();
final ProjectionInvocationHandler projectionInvocationHandler = new ProjectionInvocationHandler(XBProjector.this, documentOrElement, projectionInterface, mixinsForProjection, flags.contains(Flags.TO_STRING_RENDERS_XML), flags.contains(Flags.ABSENT_IS_EMPTY));
final Set<Class<?>> interfaces = new HashSet<Class<?>>();
interfaces.add(projectionInterface);
interfaces.add(DOMAccess.class);
interfaces.add(Serializable.class);
if (flags.contains(Flags.SYNCHRONIZE_ON_DOCUMENTS)) {
final Document document = DOMHelper.getOwnerDocumentFor(documentOrElement);
InvocationHandler synchronizedInvocationHandler = new InvocationHandler() {
@Override
public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
synchronized (document) {
return projectionInvocationHandler.invoke(proxy, method, args);
}
}
};
return ((T) Proxy.newProxyInstance(projectionInterface.getClassLoader(), interfaces.toArray(new Class<?>[interfaces.size()]), synchronizedInvocationHandler));
}
return ((T) Proxy.newProxyInstance(projectionInterface.getClassLoader(), interfaces.toArray(new Class<?>[interfaces.size()]), projectionInvocationHandler));
} |
python | def do_labels_update(self, info, labels):
"""Updates a dictionary of labels using the assigned update_op_func
Args:
info (:class:`endpoints_management.control.report_request.Info`): the
info instance to update
labels (dict[string[string]]): the labels dictionary
Return:
`True` if desc is supported, otherwise `False`
"""
if self.update_label_func:
self.update_label_func(self.label_name, info, labels) |
python | def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback) |
java | public GetLifecyclePolicyPreviewResult withPreviewResults(LifecyclePolicyPreviewResult... previewResults) {
if (this.previewResults == null) {
setPreviewResults(new java.util.ArrayList<LifecyclePolicyPreviewResult>(previewResults.length));
}
for (LifecyclePolicyPreviewResult ele : previewResults) {
this.previewResults.add(ele);
}
return this;
} |
python | def dump_pk(obj, abspath,
pk_protocol=pk_protocol, replace=False, compress=False,
enable_verbose=True):
"""Dump Picklable Python Object to file.
Provides multiple choice to customize the behavior.
:param obj: Picklable Python Object.
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or
``.gz`` (for compressed Pickle).
:type abspath: string
:param pk_protocol: (default your python version) use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param replace: (default False) If ``True``, when you dump Pickle to a
existing path, it silently overwrite it. If False, an exception will be
raised. Default False setting is to prevent overwrite file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the Pickle file. Disk usage can be greatly reduced. But you
have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.pk import dump_pk
>>> pk = {"a": 1, "b": 2}
>>> dump_pk(pk, "test.pickle", replace=True)
Dumping to test.pickle...
Complete! Elapse 0.001763 sec
**Chinese documentation (translated)**
Serialize a Python object with Pickle and save it to a local file. (Some custom classes cannot be serialized.)
Parameter list
:param obj: a picklable Python object
:param abspath: path of the file to write. The extension must be ``.pickle`` or ``.gz``,
where gz is used for a compressed Pickle
:type abspath: ``string``
:param pk_protocol: (default: your Python major version) use 2 to make the saved file
readable by both py2.x and py3.x; protocol 3 is faster, smaller and performs better.
:type pk_protocol: ``int``
:param replace: (default False) if ``True``, an existing file at the target path is
silently overwritten; if ``False``, an exception is raised, to prevent accidentally overwriting the source file.
:type replace: ``boolean``
:param compress: (default False) if ``True``, compress the Pickle file with the open gzip standard.
This usually shrinks the file by roughly 10-20x. To read the file back, use
:func:`load_pk(abspath, compress=True)<load_pk>`.
:type compress: ``boolean``
:param enable_verbose: (default True) whether to print progress messages; recommended to turn off for batch processing.
:type enable_verbose: ``boolean``
"""
abspath = str(abspath) # try stringlize
msg = Messenger(enable_verbose=enable_verbose)
if compress: # check extension name
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception(
"compressed pickle has to use extension '.gz'!")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception(
"compressed pickle has to use extension '.gz'!")
else:
root, ext = os.path.splitext(abspath)
if ext != ".pickle":
if ext != ".tmp":
raise Exception("file extension are not '.pickle'!")
else:
_, ext = os.path.splitext(root)
if ext != ".pickle":
raise Exception("file extension are not '.pickle'!")
msg.show("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # if exists, check replace option
if replace: # replace existing file
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
else: # stop, print error message
raise Exception("\tCANNOT WRITE to %s, "
"it's already exists" % abspath)
else: # if not exists, just write to it
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
msg.show(" Complete! Elapse %.6f sec" % (time.clock() - st)) |
java | public boolean compare(Object object) {
if (!(object instanceof IChemObject)) {
return false;
}
ChemObject chemObj = (ChemObject) object;
return Objects.equal(identifier, chemObj.identifier);
} |
python | def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE) |
python | def find_dimension_by_name(self, dim_name):
"""the method searching dimension with a given name"""
for dim in self.dimensions:
if is_equal_strings_ignore_case(dim.name, dim_name):
return dim
return None |
java | static ResourceLocator.Request buildChallengeRequest(ResourceLocator.Session session, String symbol) {
// The "options" part causes the cookie to be set.
// Other path endings may also work,
// but there has to be something after the symbol
return session.request().host(FINANCE_YAHOO_COM).path("/quote/" + symbol + "/options");
} |
python | def load(*args, **kwargs):
"""Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
"""
kwargs.update(dict(object_hook=json_numpy_obj_hook))
return _json.load(*args, **kwargs) |
python | def find_all(self, locator, search_object=None, force_find=False):
'''
Find all elements matching locator
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing the element(s) to locate
@rtype: list[WebElementWrapper]
@return: list of WebElementWrappers
'''
return self.find(locator=locator, find_all=True, search_object=search_object, force_find=force_find) |
java | public void save(final SecretKey key) throws IOException {
final File keyFile = getKeyPath(key);
keyFile.getParentFile().mkdirs(); // make directories if they do not exist
try (OutputStream fos = Files.newOutputStream(keyFile.toPath());
ObjectOutputStream oout = new ObjectOutputStream(fos)) {
oout.writeObject(key);
}
} |
java | public ImageSource toFastBitmap() {
OneBandSource l = new OneBandSource(width, height);
PowerSpectrum();
double max = Math.log(PowerMax + 1.0);
double scale = 1.0;
if (scaleValue > 0) scale = scaleValue / max;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
double p = Power[i][j];
double plog = Math.log(p + 1.0);
l.setRGB(i, j, (int) (plog * scale * 255));
}
}
return l;
} |
python | def backup(path, name=None):
"""Start a Backup run"""
from PyHardLinkBackup.phlb.phlb_main import backup
backup(path, name) |
python | def cifar10_patches(data_set='cifar-10'):
"""The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
# This code is from Boris Babenko's blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0,5,5,3))
for x in range(0,32-5,5):
for y in range(0,32-5,5):
patches = np.concatenate((patches, images[:,x:x+5,y:y+5,:]), axis=0)
patches = patches.reshape((patches.shape[0],-1))
return data_details_return({'Y': patches, "info" : "32x32 pixel patches extracted from the CIFAR-10 data by Boris Babenko to demonstrate k-means features."}, data_set) |
java | public void addClassConstant(Content summariesTree, Content classConstantTree) {
if (configuration.allowTag(HtmlTag.SECTION)) {
summaryTree.addContent(classConstantTree);
} else {
summariesTree.addContent(classConstantTree);
}
} |
java | public String toJSON() {
StringBuffer result = new StringBuffer();
result.append("{\n");
for (Entry<String, List<String>> simpleEntry : m_simpleAttributes.entrySet()) {
result.append("\"").append(simpleEntry.getKey()).append("\"").append(": [\n");
boolean firstValue = true;
for (String value : simpleEntry.getValue()) {
if (firstValue) {
firstValue = false;
} else {
result.append(",\n");
}
result.append("\"").append(value).append("\"");
}
result.append("],\n");
}
for (Entry<String, List<CmsEntity>> entityEntry : m_entityAttributes.entrySet()) {
result.append("\"").append(entityEntry.getKey()).append("\"").append(": [\n");
boolean firstValue = true;
for (CmsEntity value : entityEntry.getValue()) {
if (firstValue) {
firstValue = false;
} else {
result.append(",\n");
}
result.append(value.toJSON());
}
result.append("],\n");
}
result.append("\"id\": \"").append(m_id).append("\"");
result.append("}");
return result.toString();
} |
java | public <T> List<T> querySingleColumnTypedResults(String sql, String[] args) {
@SuppressWarnings("unchecked")
List<T> result = (List<T>) querySingleColumnResults(sql, args);
return result;
} |
java | public Boolean getWicketDebugToolbar() {
if(getDevelopment()) {
return wicketDebugToolbar != null ? wicketDebugToolbar : true;
} else {
return wicketDebugToolbar != null ? wicketDebugToolbar : false;
}
} |
python | def _get_value(first, second):
"""
Coerce the second value to be compatible with the first
:param first:
:param second:
:return:
>>> _get_value(1,'2')
2
>>> _get_value([1,2],[2,3])
[1, 2, 3]
"""
if isinstance(first, list) and isinstance(second, list):
return list(set(first).union(set(second)))
elif isinstance(first, dict) and isinstance(second, dict):
first.update(second)
return first
elif first is not None and second is not None and not isinstance(first, type(second)):
return type(first)(second)
else:
return second |
python | def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec |
python | def _interpolate_p(p, r, v):
"""
interpolates p based on the values in the A table for the
scalar value of r and the scalar value of v
"""
# interpolate p (v should be in table)
# if .5 < p < .75 use linear interpolation in q
# if p > .75 use quadratic interpolation in log(y + r/v)
# by -1. / (1. + 1.5 * _phi((1. + p)/2.))
# find the 3 closest v values
p0, p1, p2 = _select_ps(p)
try:
y0 = _func(A[(p0, v)], p0, r, v) + 1.
except:
print(p,r,v)
y1 = _func(A[(p1, v)], p1, r, v) + 1.
y2 = _func(A[(p2, v)], p2, r, v) + 1.
y_log0 = math.log(y0 + float(r)/float(v))
y_log1 = math.log(y1 + float(r)/float(v))
y_log2 = math.log(y2 + float(r)/float(v))
# If p < .85 apply only the ordinate transformation
# if p > .85 apply the ordinate and the abscissa transformation
# In both cases apply quadratic interpolation
if p > .85:
p_t = _ptransform(p)
p0_t = _ptransform(p0)
p1_t = _ptransform(p1)
p2_t = _ptransform(p2)
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \
(y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)
else:
d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)
d0 = y_log1
# interpolate value
y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
elif p > .5:
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2-p1) - \
(y_log1-y_log0)/(p1-p0))/(p2-p0)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)
else:
d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)
d0 = y_log1
# interpolate values
y_log = (d2/2.) * (p-p1)**2. + d1 * (p-p1) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
else:
# linear interpolation in q and p
q0 = math.sqrt(2) * -y0 * \
scipy.stats.t.isf((1.+p0)/2., max(v, 1e38))
q1 = math.sqrt(2) * -y1 * \
scipy.stats.t.isf((1.+p1)/2., max(v, 1e38))
d1 = (q1-q0)/(p1-p0)
d0 = q0
# interpolate values
q = d1 * (p-p0) + d0
# transform back to y
y = -q / (math.sqrt(2) * \
scipy.stats.t.isf((1.+p)/2., max(v, 1e38)))
return y |
java | public PrivateKey get(KeyStoreChooser keyStoreChooser, PrivateKeyChooserByAlias privateKeyChooserByAlias) {
CacheKey cacheKey = new CacheKey(keyStoreChooser.getKeyStoreName(), privateKeyChooserByAlias.getAlias());
PrivateKey retrievedPrivateKey = cache.get(cacheKey);
if (retrievedPrivateKey != null) {
return retrievedPrivateKey;
}
KeyStore keyStore = keyStoreRegistry.get(keyStoreChooser);
if (keyStore != null) {
PrivateKeyFactoryBean factory = new PrivateKeyFactoryBean();
factory.setKeystore(keyStore);
factory.setAlias(privateKeyChooserByAlias.getAlias());
factory.setPassword(privateKeyChooserByAlias.getPassword());
try {
factory.afterPropertiesSet();
PrivateKey privateKey = (PrivateKey) factory.getObject();
if (privateKey != null) {
cache.put(cacheKey, privateKey);
}
return privateKey;
} catch (Exception e) {
throw new PrivateKeyException("error initializing private key factory bean", e);
}
}
return null;
} |
python | def upload_token(
self,
bucket,
key=None,
expires=3600,
policy=None,
strict_policy=True):
"""生成上传凭证
Args:
bucket: 上传的空间名
key: 上传的文件名,默认为空
expires: 上传凭证的过期时间,默认为3600s
policy: 上传策略,默认为空
Returns:
上传凭证
"""
if bucket is None or bucket == '':
raise ValueError('invalid bucket name')
scope = bucket
if key is not None:
scope = '{0}:{1}'.format(bucket, key)
args = dict(
scope=scope,
deadline=int(time.time()) + expires,
)
if policy is not None:
self.__copy_policy(policy, args, strict_policy)
return self.__upload_token(args) |
python | def _verify_dict(self, conf):
"""
Check that the configuration contains all necessary keys.
:type conf: dict
:rtype: None
:raise SATOSAConfigurationError: if the configuration is incorrect
:param conf: config to verify
:return: None
"""
if not conf:
raise SATOSAConfigurationError("Missing configuration or unknown format")
for key in SATOSAConfig.mandatory_dict_keys:
if key not in conf:
raise SATOSAConfigurationError("Missing key '%s' in config" % key)
for key in SATOSAConfig.sensitive_dict_keys:
if key not in conf and "SATOSA_{key}".format(key=key) not in os.environ:
raise SATOSAConfigurationError("Missing key '%s' from config and ENVIRONMENT" % key) |
python | def with_(self, replacement):
"""Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given
"""
ensure_string(replacement)
if is_mapping(self._replacements):
raise ReplacementError("string replacements already provided")
self._replacements = dict.fromkeys(self._replacements, replacement)
return self |
python | def get_more(self, show=True, proxy=None, timeout=0):
"""
Calls get_querymore(); provided for convenience.
"""
return self.get_querymore(show, proxy, timeout) |
java | public void writeTo(Writer target, boolean flushTarget, boolean emptyAfter)
throws IOException
{
//if (target instanceof GrailsWrappedWriter) {
// target = ((GrailsWrappedWriter)target).unwrap();
//}
if (target instanceof StreamCharBufferWriter)
{
if (target == writer)
{
throw new IllegalArgumentException(
"Cannot write buffer to itself.");
}
((StreamCharBufferWriter) target).write(this);
return;
}
writeToImpl(target, flushTarget, emptyAfter);
} |
python | def save_image(image, destination=None, filename=None, **options):
"""
Save a PIL image.
"""
if destination is None:
destination = BytesIO()
filename = filename or ''
# Ensure plugins are fully loaded so that Image.EXTENSION is populated.
Image.init()
format = Image.EXTENSION.get(os.path.splitext(filename)[1].lower(), 'JPEG')
if format in ('JPEG', 'WEBP'):
options.setdefault('quality', 85)
saved = False
if format == 'JPEG':
if image.mode.endswith('A'):
# From PIL 4.2, saving an image with a transparency layer raises an
# IOError, so explicitly remove it.
image = image.convert(image.mode[:-1])
if settings.THUMBNAIL_PROGRESSIVE and (
max(image.size) >= settings.THUMBNAIL_PROGRESSIVE):
options['progressive'] = True
try:
image.save(destination, format=format, optimize=1, **options)
saved = True
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default). This
# shouldn't be triggered very often these days, as recent versions
# of pillow avoid the MAXBLOCK limitation.
pass
if not saved:
image.save(destination, format=format, **options)
if hasattr(destination, 'seek'):
destination.seek(0)
return destination |
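A usage sketch, assuming the Django settings referenced above (e.g. THUMBNAIL_PROGRESSIVE) are configured, with an in-memory RGBA image:

    from io import BytesIO
    from PIL import Image

    img = Image.new("RGBA", (320, 200), (255, 0, 0, 128))
    buf = save_image(img, destination=BytesIO(), filename="thumb.jpg", quality=70)
    data = buf.read()  # JPEG bytes; the alpha channel was stripped before saving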
java | @XmlElementDecl(namespace = "http://schema.intuit.com/finance/v3", name = "Term", substitutionHeadNamespace = "http://schema.intuit.com/finance/v3", substitutionHeadName = "IntuitObject")
public JAXBElement<Term> createTerm(Term value) {
return new JAXBElement<Term>(_Term_QNAME, Term.class, null, value);
} |
python | def execution_order(self, refcounts):
"""
Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed.
"""
return iter(nx.topological_sort(
self.graph.subgraph(
{term for term, refcount in refcounts.items() if refcount > 0},
),
)) |
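The pattern is networkx's topological sort restricted to the subgraph of terms that are still referenced; the same idea in isolation:

    import networkx as nx

    graph = nx.DiGraph([("loader", "factor"), ("factor", "filter")])
    refcounts = {"loader": 1, "factor": 2, "filter": 0}
    wanted = {term for term, rc in refcounts.items() if rc > 0}
    print(list(nx.topological_sort(graph.subgraph(wanted))))  # ['loader', 'factor']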
java | public static long getWeekStartTime(final long time) {
final Calendar start = Calendar.getInstance();
start.setFirstDayOfWeek(Calendar.MONDAY);
start.setTimeInMillis(time);
start.set(Calendar.DAY_OF_WEEK, Calendar.MONDAY);
        start.set(Calendar.HOUR_OF_DAY, 0); // HOUR_OF_DAY (24h clock), not HOUR, which only sets the hour within AM/PM
start.set(Calendar.MINUTE, 0);
start.set(Calendar.SECOND, 0);
start.set(Calendar.MILLISECOND, 0);
return start.getTimeInMillis();
} |
java | private String getRowLineBuf(int colCount, List<Integer> colMaxLenList, String[][] data) {
StringBuilder rowBuilder = new StringBuilder();
int colWidth = 0;
for (int i = 0; i < colCount; i++) {
colWidth = colMaxLenList.get(i) + 3;
for (int j = 0; j < colWidth; j++) {
if (j == 0) {
rowBuilder.append("+");
} else if ((i + 1 == colCount && j + 1 == colWidth)) {//for last column close the border
rowBuilder.append("-+");
} else {
rowBuilder.append("-");
}
}
}
return rowBuilder.append("\n").toString();
} |
java | public void deleteObjects(Collection objects)
{
for (Iterator iterator = objects.iterator(); iterator.hasNext();)
{
getDatabase().deletePersistent(iterator.next());
}
} |
python | def _prepRESearchStr(matchStr, wordInitial='ok', wordFinal='ok',
spanSyllable='ok', stressedSyllable='ok'):
'''
Prepares a user's RE string for a search
'''
# Protect sounds that are two characters
# After this we can assume that each character represents a sound
# (We'll revert back when we're done processing the RE)
replList = [(u'ei', u'9'), (u'tʃ', u'='), (u'oʊ', u'~'),
(u'dʒ', u'@'), (u'aʊ', u'%'), (u'ɑɪ', u'&'),
(u'ɔi', u'$')]
# Add to the replList
currentReplNum = 0
startI = 0
for left, right in (('(', ')'), ('[', ']')):
while True:
try:
i = matchStr.index(left, startI)
except ValueError:
break
j = matchStr.index(right, i) + 1
replList.append((matchStr[i:j], str(currentReplNum)))
currentReplNum += 1
startI = j
for charA, charB in replList:
matchStr = matchStr.replace(charA, charB)
    # Characters to check between all other characters
    # Don't check between all other characters if the character is already
    # in the search string or if the corresponding option was disabled
interleaveStr = None
stressOpt = (stressedSyllable == 'ok' or stressedSyllable == 'only')
spanOpt = (spanSyllable == 'ok' or spanSyllable == 'only')
if stressOpt and spanOpt:
interleaveStr = u"\.?ˈ?"
elif stressOpt:
interleaveStr = u"ˈ?"
elif spanOpt:
interleaveStr = u"\.?"
if interleaveStr is not None:
matchStr = interleaveStr.join(matchStr)
# Setting search boundaries
# We search on '[^\.#]' and not '.' so that the search doesn't span
# multiple syllables or words
if wordInitial == 'only':
matchStr = u'#' + matchStr
elif wordInitial == 'no':
        # Match the closest preceding syllable. If there is none, look
# for word boundary plus at least one other character
matchStr = u'(?:\.[^\.#]*?|#[^\.#]+?)' + matchStr
else:
matchStr = u'[#\.][^\.#]*?' + matchStr
if wordFinal == 'only':
matchStr = matchStr + u'#'
elif wordFinal == 'no':
matchStr = matchStr + u"(?:[^\.#]*?\.|[^\.#]+?#)"
else:
matchStr = matchStr + u'[^\.#]*?[#\.]'
# For sounds that are designated two characters, prevent
# detecting those sounds if the user wanted a sound
# designated by one of the contained characters
# Forward search ('a' and not 'ab')
insertList = []
for charA, charB in [(u'e', u'i'), (u't', u'ʃ'), (u'd', u'ʒ'),
(u'o', u'ʊ'), (u'a', u'ʊ|ɪ'), (u'ɔ', u'i'), ]:
startI = 0
while True:
try:
i = matchStr.index(charA, startI)
except ValueError:
break
if matchStr[i + 1] != charB:
forwardStr = u'(?!%s)' % charB
# matchStr = matchStr[:i + 1] + forwardStr + matchStr[i + 1:]
startI = i + 1 + len(forwardStr)
insertList.append((i + 1, forwardStr))
# Backward search ('b' and not 'ab')
for charA, charB in [(u't', u'ʃ'), (u'd', u'ʒ'),
                         (u'a|o', u'ʊ'), (u'e|ɔ', u'i'), (u'ɑ', u'ɪ'), ]:
startI = 0
while True:
try:
i = matchStr.index(charB, startI)
except ValueError:
break
if matchStr[i - 1] != charA:
backStr = u'(?<!%s)' % charA
# matchStr = matchStr[:i] + backStr + matchStr[i:]
startI = i + 1 + len(backStr)
insertList.append((i, backStr))
insertList.sort()
for i, insertStr in insertList[::-1]:
matchStr = matchStr[:i] + insertStr + matchStr[i:]
# Revert the special sounds back from 1 character to 2 characters
for charA, charB in replList:
matchStr = matchStr.replace(charB, charA)
# Replace special characters
replDict = {"D": u"(?:t(?!ʃ)|d(?!ʒ)|[sz])", # dentals
"F": u"[ʃʒfvszɵðh]", # fricatives
"S": u"(?:t(?!ʃ)|d(?!ʒ)|[pbkg])", # stops
"N": u"[nmŋ]", # nasals
"R": u"[rɝɚ]", # rhotics
"V": u"(?:aʊ|ei|oʊ|ɑɪ|ɔi|[iuæɑɔəɛɪʊʌ]):?", # vowels
"B": u"\.", # syllable boundary
}
for char, replStr in replDict.items():
matchStr = matchStr.replace(char, replStr)
return matchStr |
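The central trick, protecting two-character sounds behind single placeholder characters, processing, then reverting, can be shown on its own (the placeholder characters are arbitrary, as in the function above):

    repl_list = [(u'tʃ', u'='), (u'dʒ', u'@')]
    s = u'tʃadʒ'
    for two_char, placeholder in repl_list:
        s = s.replace(two_char, placeholder)       # s == '=a@': one character per sound
    s = u'.?'.join(s)                              # interleave optional syllable boundaries
    for two_char, placeholder in repl_list:
        s = s.replace(placeholder, two_char)       # revert the placeholders
    print(s)  # 'tʃ.?a.?dʒ'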
java | public List<String> getPossiblePaths(int maxDepth) {
if (allDissectors.isEmpty()) {
return Collections.emptyList(); // nothing to do.
}
try {
assembleDissectors();
} catch (MissingDissectorsException | InvalidDissectorException e) {
// Simply swallow this one
}
List<String> paths = new ArrayList<>();
Map<String, List<String>> pathNodes = new HashMap<>();
for (Dissector dissector : allDissectors) {
final String inputType = dissector.getInputType();
if (inputType == null) {
LOG.error("Dissector returns null on getInputType(): [{}]", dissector.getClass().getCanonicalName());
return Collections.emptyList();
}
final List<String> outputs = dissector.getPossibleOutput();
if (LOG.isDebugEnabled()) {
LOG.debug("------------------------------------");
LOG.debug("Possible: Dissector IN {}", inputType);
for (String output: outputs) {
LOG.debug("Possible: --> {}", output);
}
}
List<String> existingOutputs = pathNodes.get(inputType);
if (existingOutputs != null) {
outputs.addAll(existingOutputs);
}
pathNodes.put(inputType, outputs);
}
findAdditionalPossiblePaths(pathNodes, paths, "", rootType, maxDepth, "");
for (Entry<String, Set<String>> typeRemappingSet: typeRemappings.entrySet()) {
for (String typeRemapping: typeRemappingSet.getValue()) {
String remappedPath = typeRemapping + ':' + typeRemappingSet.getKey();
LOG.debug("Adding remapped path: {}", remappedPath);
paths.add(remappedPath);
findAdditionalPossiblePaths(pathNodes, paths, typeRemappingSet.getKey(), typeRemapping, maxDepth - 1, "");
}
}
return paths;
} |
python | def register_regex_entity(self, regex_str, domain=0):
"""
A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
Args:
regex_str(str): a string representing a regular expression as defined above
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_regex_entity(regex_str=regex_str) |
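The named group in the regex is what becomes the entity name; the Python mechanics, independent of the engine, look like this (registering the same pattern via register_regex_entity would tag the matched span as an Artist entity):

    import re

    m = re.search(r"play something by (?P<Artist>.*)", "play something by Miles Davis")
    print(m.group("Artist"))  # 'Miles Davis'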
java | public int getCurrentMethodOrdinal(int overrideId, int pathId, String clientUUID, String[] filters) throws Exception {
int currentOrdinal = 0;
List<EnabledEndpoint> enabledEndpoints = getEnabledEndpoints(pathId, clientUUID, filters);
for (EnabledEndpoint enabledEndpoint : enabledEndpoints) {
if (enabledEndpoint.getOverrideId() == overrideId) {
currentOrdinal++;
}
}
return currentOrdinal;
} |
python | def update_pypsa_generator_import(network):
"""
Translate graph based grid representation to PyPSA Network
For details from a user perspective see API documentation of
:meth:`~.grid.network.EDisGo.analyze` of the API class
:class:`~.grid.network.EDisGo`.
Translating eDisGo's grid topology to PyPSA representation is structured
into translating the topology and adding time series for components of the
    grid. In both steps, the translations for MV grid only (`mode='mv'`), LV grid
    only (`mode='lv'`), and MV plus LV (`mode=None`) share some code. The
code is organized as follows:
* Medium-voltage only (`mode='mv'`): All medium-voltage grid components are
exported by :func:`mv_to_pypsa` including the LV station. LV grid load
and generation is considered using :func:`add_aggregated_lv_components`.
    Time series are collected by `_pypsa_load_timeseries` (shown here for
    loads; generators and buses are analogous), specifying `mode='mv'`. Timeseries
for aggregated load/generation at substations are determined individually.
* Low-voltage only (`mode='lv'`): LV grid topology including the MV-LV
    transformer is exported. The slack is defined at the primary side of the MV-LV
transformer.
* Both level MV+LV (`mode=None`): The entire grid topology is translated to
PyPSA in order to perform a complete power flow analysis in both levels
    together. First, both grid levels are translated separately using
    :func:`mv_to_pypsa` and :func:`lv_to_pypsa`. Those are merged by
:func:`combine_mv_and_lv`. Time series are obtained at once for both grid
levels.
    This PyPSA interface is aware of translation errors and performs checks
    on the integrity of the data converted to the PyPSA grid representation:
* Sub-graphs/ Sub-networks: It is ensured the grid has no islanded parts
* Completeness of time series: It is ensured each component has a time
series
* Buses available: Each component (load, generator, line, transformer) is
    connected to a bus. The PyPSA representation is checked for completeness of
buses.
* Duplicate labels in components DataFrames and components' time series
DataFrames
Parameters
----------
network : :class:`~.grid.network.Network`
eDisGo grid container
mode : str
Determines grid levels that are translated to
`PyPSA grid representation
<https://www.pypsa.org/doc/components.html#network>`_. Specify
* None to export MV and LV grid levels. None is the default.
* ('mv' to export MV grid level only. This includes cumulative load and
generation from underlying LV grid aggregated at respective LV
station. This option is implemented, though the rest of edisgo does
not handle it yet.)
* ('lv' to export LV grid level only. This option is not yet
implemented)
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or \
:pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps to export to pypsa representation
and use in power flow analysis.
Returns
-------
:pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container.
"""
# get topology and time series data
if network.pypsa.edisgo_mode is None:
mv_components = mv_to_pypsa(network)
lv_components = lv_to_pypsa(network)
components = combine_mv_and_lv(mv_components, lv_components)
    elif network.pypsa.edisgo_mode == 'mv':
raise NotImplementedError
    elif network.pypsa.edisgo_mode == 'lv':
raise NotImplementedError
else:
raise ValueError("Provide proper mode or leave it empty to export "
"entire grid topology.")
# check topology
_check_topology(components)
# create power flow problem
pypsa_network = PyPSANetwork()
pypsa_network.edisgo_mode = network.pypsa.edisgo_mode
pypsa_network.set_snapshots(network.pypsa.snapshots)
# import grid topology to PyPSA network
# buses are created first to avoid warnings
pypsa_network.import_components_from_dataframe(components['Bus'], 'Bus')
for k, comps in components.items():
        if k != 'Bus' and not comps.empty:
pypsa_network.import_components_from_dataframe(comps, k)
# import time series to PyPSA network
pypsa_network.generators_t.p_set = network.pypsa.generators_t.p_set
pypsa_network.generators_t.q_set = network.pypsa.generators_t.q_set
pypsa_network.loads_t.p_set = network.pypsa.loads_t.p_set
pypsa_network.loads_t.q_set = network.pypsa.loads_t.q_set
pypsa_network.storage_units_t.p_set = network.pypsa.storage_units_t.p_set
pypsa_network.storage_units_t.q_set = network.pypsa.storage_units_t.q_set
pypsa_network.buses_t.v_mag_pu_set = network.pypsa.buses_t.v_mag_pu_set
network.pypsa = pypsa_network
if len(list(components['Generator'].index.values)) > 1:
update_pypsa_generator_timeseries(network)
if list(components['Bus'].index.values):
update_pypsa_bus_timeseries(network)
if len(list(components['StorageUnit'].index.values)) > 0:
update_pypsa_storage_timeseries(network)
_check_integrity_of_pypsa(pypsa_network) |