language | func_code_string |
---|---|
java | static private int findPlaceholderEndIndex(CharSequence buf, int startIndex) {
int index = startIndex + placeholderPrefix.length();
int withinNestedPlaceholder = 0;
while (index < buf.length()) {
if (StringUtils.substringMatch(buf, index, placeholderSuffix)) {
if (withinNestedPlaceholder > 0) {
withinNestedPlaceholder--;
index = index + placeholderSuffix.length(); // skip the full suffix, not just one char
}
else {
return index;
}
}
else if (StringUtils.substringMatch(buf, index, placeholderPrefix)) {
withinNestedPlaceholder++;
index = index + placeholderPrefix.length();
}
else {
index++;
}
}
return -1;
} |
java | public static CommerceUserSegmentCriterion fetchByCommerceUserSegmentEntryId_First(
long commerceUserSegmentEntryId,
OrderByComparator<CommerceUserSegmentCriterion> orderByComparator) {
return getPersistence()
.fetchByCommerceUserSegmentEntryId_First(commerceUserSegmentEntryId,
orderByComparator);
} |
python | def parse(self, text, layers=None):
"""Parsing passed text to json.
Args:
text: Text to parse.
layers (optional): Special fields. Only one string
or iterable object (e.g "Data", ("Data", "Fio")).
Only these fields will be returned.
Returns:
The parsed text into a json object.
"""
params = {
"text": text,
"key": self.key,
}
if layers is not None:
# if it's a single string
if isinstance(layers, six.string_types):
params["layers"] = layers
# if it's any other iterable object
elif isinstance(layers, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
params["layers"] = ",".join(layers)
req = requests.get(self.NLU_URL, params=params)
return req.json() |
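To make the `layers` handling above concrete, here is a minimal, self-contained sketch of just that normalization step; `normalize_layers` is our name for it, not part of the client library:

```python
import collections.abc

import six  # the same py2/py3 compat shim the snippet above relies on


def normalize_layers(layers):
    """Reproduce the `layers` normalization from parse() above: pass a
    single string through unchanged, join any other iterable into the
    comma-separated list the API expects."""
    if isinstance(layers, six.string_types):
        return layers
    if isinstance(layers, collections.abc.Iterable):
        return ",".join(layers)
    return None


assert normalize_layers("Fio") == "Fio"
assert normalize_layers(("Data", "Fio")) == "Data,Fio"
```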
java | public String cookieDomain(String expectedCookieName) {
Cookie cookie = cookie(expectedCookieName);
if (cookie != null) {
return cookie.getDomain();
}
return null;
} |
java | public String generateSqlSelectAll(String tableName) {
try {
return cacheSQLs.get("SELECT-ALL:" + tableName, () -> {
return MessageFormat.format("SELECT {2} FROM {0} ORDER BY {1}", tableName,
strPkColumns, strAllColumns);
});
} catch (ExecutionException e) {
throw new DaoException(e);
}
} |
python | def p_field(self, p):
'''field : field_id field_req field_type IDENTIFIER annotations
| field_id field_req field_type IDENTIFIER '=' const_value \
annotations'''
if len(p) == 8:
default = p[6]
annotations = p[7]
else:
default = None
annotations = p[5]
p[0] = ast.Field(
id=p[1],
name=p[4],
field_type=p[3],
requiredness=p[2],
default=default,
annotations=annotations,
lineno=p.lineno(4),
) |
python | def handle(self):
"""Process an incoming package."""
data = self.request[0]
sock = self.request[1]
frame = KNXIPFrame.from_frame(data)
if frame.service_type_id == KNXIPFrame.TUNNELING_REQUEST:
req = KNXTunnelingRequest.from_body(frame.body)
msg = CEMIMessage.from_body(req.cemi)
send_ack = False
tunnel = self.server.tunnel
if msg.code == 0x29:
# LData.req
send_ack = True
elif msg.code == 0x2e:
# LData.con
send_ack = True
else:
problem = "Unimplemented cEMI message code {}".format(msg.code)
logging.error(problem)
raise KNXException(problem)
# Cache data
if (msg.cmd == CEMIMessage.CMD_GROUP_WRITE) or (
msg.cmd == CEMIMessage.CMD_GROUP_RESPONSE):
# saw a value for a group address on the bus
tunnel.received_message(msg.dst_addr, msg.data)
# Put RESPONSES into the result queue
if msg.cmd == CEMIMessage.CMD_GROUP_RESPONSE:
tunnel.result_queue.put(msg.data)
if send_ack:
bodyack = [0x04, req.channel, req.seq, E_NO_ERROR]
ack = KNXIPFrame(KNXIPFrame.TUNNELLING_ACK)
ack.body = bodyack
sock.sendto(ack.to_frame(), self.client_address)
elif frame.service_type_id == KNXIPFrame.TUNNELLING_ACK:
logging.debug("Received tunneling ACK")
self.server.tunnel.ack_semaphore.release()
elif frame.service_type_id == KNXIPFrame.DISCONNECT_RESPONSE:
logging.debug("Disconnected")
self.channel = None
tunnel = self.server.tunnel
tunnel.data_server.shutdown()
tunnel.data_server = None
elif frame.service_type_id == KNXIPFrame.CONNECTIONSTATE_RESPONSE:
logging.debug("Connection state response")
tunnel = self.server.tunnel
tunnel.connection_state = frame.body[2]
else:
logging.info(
"Message type %s not yet implemented", frame.service_type_id) |
java | public Observable<ServiceResponse<PolicyDefinitionInner>> getBuiltInWithServiceResponseAsync(String policyDefinitionName) {
if (policyDefinitionName == null) {
throw new IllegalArgumentException("Parameter policyDefinitionName is required and cannot be null.");
}
if (this.client.apiVersion() == null) {
throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
}
return service.getBuiltIn(policyDefinitionName, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<PolicyDefinitionInner>>>() {
@Override
public Observable<ServiceResponse<PolicyDefinitionInner>> call(Response<ResponseBody> response) {
try {
ServiceResponse<PolicyDefinitionInner> clientResponse = getBuiltInDelegate(response);
return Observable.just(clientResponse);
} catch (Throwable t) {
return Observable.error(t);
}
}
});
} |
python | def disable_search_updates():
"""
Context manager used to temporarily disable auto_sync.
This is useful when performing bulk updates on objects - when
you may not want to flood the indexing process.
>>> with disable_search_updates():
... for obj in model.objects.all():
... obj.save()
The function works by temporarily removing the apps._on_model_save
signal handler from the model.post_save signal receivers, and then
restoring them after.
"""
_receivers = signals.post_save.receivers.copy()
signals.post_save.receivers = _strip_on_model_save()
yield
signals.post_save.receivers = _receivers |
python | def index(self):
""" Display NIPAP version info
"""
c.pynipap_version = pynipap.__version__
try:
c.nipapd_version = pynipap.nipapd_version()
except Exception:
c.nipapd_version = 'unknown'
c.nipap_db_version = pynipap.nipap_db_version()
return render('/version.html') |
python | def flat_git_tree_to_nested(flat_tree, prefix=''):
'''
Given an array in format:
[
["100644", "blob", "ab3ce...", "748", ".gitignore" ],
["100644", "blob", "ab3ce...", "748", "path/to/thing" ],
...
]
Outputs in a nested format:
{
"path": "/",
"type": "directory",
"children": [
{
"type": "blob",
"size": 748,
"sha": "ab3ce...",
"mode": "100644",
},
...
],
...
}
'''
root = _make_empty_dir_dict(prefix if prefix else '/')
# Filter all descendents of this prefix
descendent_files = [
info for info in flat_tree
if os.path.dirname(info[PATH]).startswith(prefix)
]
# Figure out strictly leaf nodes of this tree (can be immediately added as
# children)
children_files = [
info for info in descendent_files
if os.path.dirname(info[PATH]) == prefix
]
# Figure out all descendent directories
descendent_dirs = set(
os.path.dirname(info[PATH]) for info in descendent_files
if os.path.dirname(info[PATH]).startswith(prefix)
and not os.path.dirname(info[PATH]) == prefix
)
# Figure out which descendent directories are direct children
children_dirs = set(
dir_path for dir_path in descendent_dirs
if os.path.dirname(dir_path) == prefix
)
# Recurse into children dirs, constructing file trees for each of them,
# then appending those
for dir_path in children_dirs:
info = flat_git_tree_to_nested(descendent_files, prefix=dir_path)
root['children'].append(info)
# Append direct children files
for info in children_files:
root['children'].append(_make_child(info))
return root |
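The function above depends on a `PATH` column index and two helpers that are not shown. Below is a plausible sketch of those pieces, reconstructed only from how they are called; the column layout and field names are assumptions, not the project's actual definitions.

```python
# Column indices for a `git ls-tree -r -l`-style row:
# [mode, type, sha, size, path] -- matches the docstring's example rows.
MODE, TYPE, SHA, SIZE, PATH = range(5)


def _make_empty_dir_dict(path):
    # A directory node with an empty children list, matching the
    # nested format shown in the docstring.
    return {'path': path, 'type': 'directory', 'children': []}


def _make_child(info):
    # A leaf (blob) node built from one flat row.
    return {
        'path': info[PATH],
        'type': info[TYPE],
        'mode': info[MODE],
        'sha': info[SHA],
        'size': int(info[SIZE]),
    }
```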
java | public void marshall(SMSChannelResponse sMSChannelResponse, ProtocolMarshaller protocolMarshaller) {
if (sMSChannelResponse == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(sMSChannelResponse.getApplicationId(), APPLICATIONID_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getCreationDate(), CREATIONDATE_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getEnabled(), ENABLED_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getHasCredential(), HASCREDENTIAL_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getId(), ID_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getIsArchived(), ISARCHIVED_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getLastModifiedBy(), LASTMODIFIEDBY_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getLastModifiedDate(), LASTMODIFIEDDATE_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getPlatform(), PLATFORM_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getPromotionalMessagesPerSecond(), PROMOTIONALMESSAGESPERSECOND_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getSenderId(), SENDERID_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getShortCode(), SHORTCODE_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getTransactionalMessagesPerSecond(), TRANSACTIONALMESSAGESPERSECOND_BINDING);
protocolMarshaller.marshall(sMSChannelResponse.getVersion(), VERSION_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public void postDelete(String blogName, Long postId) {
Map<String, String> map = new HashMap<String, String>();
map.put("id", postId.toString());
requestBuilder.post(JumblrClient.blogPath(blogName, "/post/delete"), map);
} |
python | def _persist(source, path, component=None, storage_options=None,
**kwargs):
"""Save array to local persistent store
Makes a zarr dataset out of the data.
This then becomes a data entry in the persisted datasets catalog.
Only works locally for the moment.
Parameters
----------
source: a DataSource instance to save
path: str
Location to write the zarr dataset to; used to refer to this
persisted dataset in the catalog.
kwargs: passed on to zarr array creation
"""
from dask.array import to_zarr, from_array
from ..source.zarr import ZarrArraySource
try:
arr = source.to_dask()
except NotImplementedError:
arr = from_array(source.read(), chunks=-1).rechunk('auto')
to_zarr(arr, path, component=None,
storage_options=storage_options, **kwargs)
source = ZarrArraySource(path, storage_options, component)
return source |
python | def show_ext(self, path, id, **_params):
"""Client extension hook for show."""
return self.get(path % id, params=_params) |
java | public SearchGiphy searchByID(String id) throws GiphyException {
SearchGiphy giphy = null;
HashMap<String, String> params = new HashMap<String, String>();
params.put("api_key", apiKey);
Request request = new Request(UrlUtil.buildUrlQuery(IDEndpoint + id, params));
try {
Response response = sender.sendRequest(request);
giphy = gson.fromJson(response.getBody(), SearchGiphy.class);
} catch (JsonSyntaxException | IOException e) {
log.error(e.getMessage(), e);
throw new GiphyException(e);
}
return giphy;
} |
python | def milestone(self, column=None, value=None, **kwargs):
"""
Status codes and related dates of certain grants.
>>> GICS().milestone('milestone_date', '16-MAR-01')
"""
return self._resolve_call('GIC_MILESTONE', column, value, **kwargs) |
python | def resolve_push_to(push_to, default_url, default_namespace):
'''
Given a push-to value, return the registry and namespace.
:param push_to: string: User supplied --push-to value.
:param default_url: string: Container engine's default_index value (e.g. docker.io).
:param default_namespace: string: Namespace to fall back to when none is given.
:return: tuple: registry_url, namespace
'''
protocol = 'http://' if push_to.startswith('http://') else 'https://'
url = push_to = REMOVE_HTTP.sub('', push_to)
namespace = default_namespace
parts = url.split('/', 1)
special_set = {'.', ':'}
char_set = set([c for c in parts[0]])
if len(parts) == 1:
if not special_set.intersection(char_set) and parts[0] != 'localhost':
registry_url = default_url
namespace = push_to
else:
registry_url = protocol + parts[0]
else:
registry_url = protocol + parts[0]
namespace = parts[1]
return registry_url, namespace |
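A usage sketch for the resolution above. `REMOVE_HTTP` is referenced but not shown in the snippet, so the regex below is an assumption; the example inputs and outputs are illustrative, traced through the branches of the function:

```python
import re

# Assumed definition of the REMOVE_HTTP pattern referenced above.
REMOVE_HTTP = re.compile(r'^https?://')

# Illustrative behaviour with default_url='docker.io', default_namespace='library':
print(resolve_push_to('myuser', 'docker.io', 'library'))
# -> ('docker.io', 'myuser')         bare name: treated as the namespace
print(resolve_push_to('registry.example.com:5000/team', 'docker.io', 'library'))
# -> ('https://registry.example.com:5000', 'team')
print(resolve_push_to('http://localhost/team', 'docker.io', 'library'))
# -> ('http://localhost', 'team')    explicit http:// is preserved
```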
python | def items(self, desc=None, start_value=None, shift_by=None):
'''Returns a python ``generator`` which can be used to iterate over
:func:`dynts.TimeSeries.dates` and :func:`dynts.TimeSeries.values`
returning a two-element
tuple ``(date, value)`` in each iteration.
Similar to the python dictionary items
function.
:parameter desc: if ``True`` the iteration starts from the most
recent data and proceeds backwards.
:parameter shift_by: optional parallel shift in values.
:parameter start_value: optional start value of timeseries.
'''
if self:
if shift_by is None and start_value is not None:
for cross in self.values():
missings = 0
if shift_by is None:
shift_by = []
for v in cross:
shift_by.append(start_value - v)
if v != v:
missings += 1
else:
for j in range(len(shift_by)):
s = shift_by[j]
v = cross[j]
if s != s:
if v == v:
shift_by[j] = start_value - v
else:
missings += 1
if not missings:
break
if shift_by:
for d, v in zip(self.dates(desc=desc), self.values(desc=desc)):
yield d, v + shift_by
else:
for d, v in zip(self.dates(desc=desc), self.values(desc=desc)):
yield d, v |
java | @Override
public void setCountForDoc(String word, long count) {
T element = extendedVocabulary.get(word);
if (element != null) {
element.setSequencesCount(count);
}
} |
java | public void addBlock(String tierAlias, long blockId, long blockSize, long blockLastAccessTimeMs) {
UIFileBlockInfo block =
new UIFileBlockInfo(blockId, blockSize, blockLastAccessTimeMs, tierAlias,
mAlluxioConfiguration);
List<UIFileBlockInfo> blocksOnTier = mBlocksOnTier.get(tierAlias);
if (blocksOnTier == null) {
blocksOnTier = new ArrayList<>();
mBlocksOnTier.put(tierAlias, blocksOnTier);
}
blocksOnTier.add(block);
Long sizeOnTier = mSizeOnTier.get(tierAlias);
mSizeOnTier.put(tierAlias, (sizeOnTier == null ? 0L : sizeOnTier) + blockSize);
} |
python | def image(self):
"""
The image of the container.
"""
image_id = self.attrs.get('ImageID', self.attrs['Image'])
if image_id is None:
return None
return self.client.images.get(image_id.split(':')[1]) |
python | def download(self,
url,
dest_path=None):
"""
:param url: URL to download the file from.
:type url: str
:param dest_path: Local path to write the downloaded file to.
:type dest_path: str
"""
if os.path.exists(dest_path):
os.remove(dest_path)
resp = get(url, stream=True)
size = int(resp.headers.get("content-length"))
label = "Downloading {filename} ({size:.2f}MB)".format(
filename=os.path.basename(dest_path),
size=size / float(self.chunk_size) / self.chunk_size
)
with open_file(dest_path, 'wb') as file:
content_iter = resp.iter_content(chunk_size=self.chunk_size)
with progressbar(content_iter,
length=size / self.chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
file.write(chunk)
file.flush() |
java | @Override
public void pausePlayback()
{
if (isNotPausingNorPaused() && isNotStoppingNorStopped())
{
setIsPausing();
while (!isPaused() && !isStopped())
{
try { Thread.sleep(1); } catch (InterruptedException ex) { /*noop*/ }
}
stopLine();
}
else
if (isPaused())
{
startLine();
setIsPlaying();
}
} |
java | @SuppressWarnings("checkstyle:nestedifdepth")
protected synchronized String replaceProp(File sourceFile, int sourceLine, String text,
MavenProject project, ReplacementType replacementType) throws MojoExecutionException {
String result = text;
final Pattern p = buildMacroPatternWithGroup(Macros.MACRO_PROP);
final Matcher m = p.matcher(text);
boolean hasResult = m.find();
Properties props = null;
if (project != null) {
props = project.getProperties();
}
if (hasResult) {
final StringBuffer sb = new StringBuffer();
final StringBuilder replacement = new StringBuilder();
String propName;
do {
propName = m.group(1);
if (propName != null) {
propName = propName.trim();
if (propName.length() > 0) {
replacement.setLength(0);
if (props != null) {
final String value = props.getProperty(propName);
if (value != null && !value.isEmpty()) {
replacement.append(value);
}
}
if (replacement.length() != 0) {
m.appendReplacement(sb, Matcher.quoteReplacement(replacement.toString()));
}
} else {
getBuildContext().addMessage(
sourceFile,
sourceLine, 1,
"no property name for Prop tag: " + m.group(0), //$NON-NLS-1$
BuildContext.SEVERITY_WARNING, null);
}
} else {
getBuildContext().addMessage(
sourceFile,
sourceLine, 1,
"no property name for Prop tag: " + m.group(0), //$NON-NLS-1$
BuildContext.SEVERITY_WARNING, null);
}
hasResult = m.find();
}
while (hasResult);
m.appendTail(sb);
result = sb.toString();
}
return result;
} |
python | def get_results(self):
"""
:returns: an :class:`IterResult` instance
"""
return IterResult(self._loop(), self.name, self.argnames,
self.sent, self.monitor.hdf5) |
java | public void setAll(int[] samples, int count, int start, int skip,
long frameNumber, EncodedElement result) {
// assert(start == 0);
this.samples = samples;
this.count = count;
this.start = start;
this.skip = skip;
this.frameNumber = frameNumber;
this.result = result;
valid = false;
this.encodedSamples = 0;
} |
python | def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
path_to_lsm_files = path.join(self.lsm_input_folder_path,
self.lsm_search_card)
self._xd = pa.open_mfdataset(path_to_lsm_files,
lat_var=self.lsm_lat_var,
lon_var=self.lsm_lon_var,
time_var=self.lsm_time_var,
lat_dim=self.lsm_lat_dim,
lon_dim=self.lsm_lon_dim,
time_dim=self.lsm_time_dim,
loader=self.pangaea_loader)
self.lsm_time_dim = 'time'
self.lsm_time_var = 'time'
return self._xd |
java | public Observable<List<EventSubscriptionInner>> listByResourceAsync(String resourceGroupName, String providerNamespace, String resourceTypeName, String resourceName) {
return listByResourceWithServiceResponseAsync(resourceGroupName, providerNamespace, resourceTypeName, resourceName).map(new Func1<ServiceResponse<List<EventSubscriptionInner>>, List<EventSubscriptionInner>>() {
@Override
public List<EventSubscriptionInner> call(ServiceResponse<List<EventSubscriptionInner>> response) {
return response.body();
}
});
} |
java | public static double[][] transposeDiagonalTimes(double[][] m1, double[] d2, double[][] m3) {
final int innerdim = d2.length;
assert m1.length == innerdim : ERR_MATRIX_INNERDIM;
assert m3.length == innerdim : ERR_MATRIX_INNERDIM;
final int coldim1 = getColumnDimensionality(m1);
final int coldim2 = getColumnDimensionality(m3);
final double[][] r = new double[coldim1][coldim2];
final double[] Acoli = new double[innerdim]; // Buffer
// multiply it with each row from A
for(int i = 0; i < coldim1; i++) {
final double[] r_i = r[i]; // Output row
// Make a linear copy of column i from A
for(int k = 0; k < innerdim; k++) {
Acoli[k] = m1[k][i] * d2[k];
}
for(int j = 0; j < coldim2; j++) {
double s = 0;
for(int k = 0; k < innerdim; k++) {
s += Acoli[k] * m3[k][j];
}
r_i[j] = s;
}
}
return r;
} |
java | public String nonPipelineRead(String key) throws Exception {
String res = jedisClient.get().get(key);
if (res != null) {
if (res.isEmpty()) {
throw new Exception("Data retrieved is not ok ");
}
} else {
return CacheMiss;
}
return ResultOK;
} |
java | @Action(semantics = SemanticsOf.SAFE)
public List<ApplicationUser> findMatching(final String search) {
if (search != null && search.length() > 0) {
return find(search);
}
return Lists.newArrayList();
} |
java | public Collection<?> toCollection(Object val) {
if (val == null) {
return Collections.emptyList();
}
else if (val instanceof Collection<?>) {
return (Collection<?>) val;
}
else if (val.getClass().isArray()) {
return newArrayList((Object[]) val);
}
else if (val instanceof Map<?,?>) {
return ((Map<?,?>)val).entrySet();
}
else {
return newArrayList(val);
}
} |
java | public void addAdditionalElement(Component component) {
m_additionalElements.add(new InfoElementBean(m_additionalElements.size() + 1, component));
removeClickListener(m_clickListener);
m_clickListener = getClickListener(m_htmlLines, m_additionalElements);
addClickListener(m_clickListener);
} |
python | def get_media(self, id, **data):
"""
GET /media/:id/
Return an :format:`image` for a given id.
"""
return self.get("/media/{0}/".format(id), data=data) |
java | @Override public void filter(Filter filter) throws NoTestsRemainException {
List<FrameworkMethod> filteredChildren = ParentRunnerSpy.getFilteredChildren(this);
// Iterate over a clone so that we can safely mutate the original.
for (FrameworkMethod child : new ArrayList<>(filteredChildren)) {
if (!filter.shouldRun(describeChildPlain(child))) {
filteredChildren.remove(child);
}
}
if (filteredChildren.isEmpty()) {
throw new NoTestsRemainException();
}
} |
java | @Override
public boolean validate(boolean show) {
Boolean oldValid = valid;
valid = true;
if (errorHandler != null && !validators.isEmpty()) {
List<EditorError> errors = new ArrayList<>();
for (ValidatorWrapper<V> wrapper : validators) {
Validator<V> validator = wrapper.getValidator();
List<EditorError> result = validator.validate(inputWidget, inputWidget.getValue());
if (result != null && !result.isEmpty()) {
errors.addAll(result);
valid = false;
}
}
if (show) {
if (errors.size() > 0) {
errorHandler.showErrors(errors);
} else {
errorHandler.clearErrors();
}
}
}
if (valid != oldValid) {
eventBus.fireEvent(new ValidationChangedEvent(valid));
}
return valid;
} |
java | public End<Flow<T>> getOrCreateEnd()
{
List<Node> nodeList = childNode.get("end");
if (nodeList != null && nodeList.size() > 0)
{
return new EndImpl<Flow<T>>(this, "end", childNode, nodeList.get(0));
}
return createEnd();
} |
python | def mail(ui, repo, *pats, **opts):
"""mail a change for review
Uploads a patch to the code review server and then sends mail
to the reviewer and CC list asking for a review.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
if err != "":
raise hg_util.Abort(err)
cl.Upload(ui, repo, gofmt_just_warn=True)
if not cl.reviewer:
# If no reviewer is listed, assign the review to defaultcc.
# This makes sure that it appears in the
# codereview.appspot.com/user/defaultcc
# page, so that it doesn't get dropped on the floor.
if not defaultcc:
raise hg_util.Abort("no reviewers listed in CL")
cl.cc = Sub(cl.cc, defaultcc)
cl.reviewer = defaultcc
cl.Flush(ui, repo)
if cl.files == []:
raise hg_util.Abort("no changed files, not sending mail")
cl.Mail(ui, repo) |
java | public double readLux() throws IOException {
int ambient = readU16LittleEndian(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CHAN0_LOW);
int ir = readU16LittleEndian(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CHAN1_LOW);
if (ambient >= 0xffff || ir >= 0xffff) // value(s) exceed(s) data range
throw new RuntimeException("Gain too high. Values exceed range.");
if (false && this.gain == TSL2561Gain.GAIN_1X) {
ambient *= 16; // scale 1x to 16x
ir *= 16; // scale 1x to 16x
}
double ratio = (ir / (float) ambient);
LOG.debug("IR Result:" + ir);
LOG.debug("Ambient Result:" + ambient);
/*
* For the values below, see
* https://github.com/adafruit/Adafruit_TSL2561
* /blob/master/Adafruit_TSL2561_U.h
*/
double lux = 0d;
if ((ratio >= 0) && (ratio <= TSL2561_LUX_K4C))
lux = (TSL2561_LUX_B1C * ambient) - (0.0593 * ambient * (Math.pow(ratio, 1.4)));
else if (ratio <= TSL2561_LUX_K5C)
lux = (TSL2561_LUX_B5C * ambient) - (TSL2561_LUX_M5C * ir);
else if (ratio <= TSL2561_LUX_K6C)
lux = (TSL2561_LUX_B6C * ambient) - (TSL2561_LUX_M6C * ir);
else if (ratio <= TSL2561_LUX_K7C)
lux = (TSL2561_LUX_B7C * ambient) - (TSL2561_LUX_M7C * ir);
else if (ratio > TSL2561_LUX_K8C)
lux = 0;
return lux;
} |
java | @SuppressWarnings("unchecked")
private Vector<Object> getList(TreeMap<String, Object> map, String key) {
Object obj = null;
synchronized (map) {
obj = map.get(key);
}
if (obj != null && obj instanceof Vector) {
// ZAP: Added the type argument.
return (Vector<Object>) obj;
}
return null;
} |
java | private void writeGroup(Group group, Node node) throws OrganizationServiceException
{
try
{
node.setProperty(GroupProperties.JOS_LABEL, group.getLabel());
node.setProperty(GroupProperties.JOS_DESCRIPTION, group.getDescription());
}
catch (RepositoryException e)
{
throw new OrganizationServiceException("Can not write group properties", e);
}
} |
java | private static Bond toBeamEdgeLabel(IBond b, int flavour) throws CDKException {
if (SmiFlavor.isSet(flavour, SmiFlavor.UseAromaticSymbols) && b.isAromatic()) {
if (!b.getBegin().isAromatic() || !b.getEnd().isAromatic())
throw new IllegalStateException("Aromatic bond connects non-aromatic atomic atoms");
return Bond.AROMATIC;
}
if (b.getOrder() == null) throw new CDKException("A bond had undefined order, possible query bond?");
IBond.Order order = b.getOrder();
switch (order) {
case SINGLE:
return Bond.SINGLE;
case DOUBLE:
return Bond.DOUBLE;
case TRIPLE:
return Bond.TRIPLE;
case QUADRUPLE:
return Bond.QUADRUPLE;
default:
if (!SmiFlavor.isSet(flavour, SmiFlavor.UseAromaticSymbols) && b.isAromatic())
throw new CDKException("Cannot write Kekulé SMILES output due to aromatic bond with unset bond order - molecule should be Kekulized");
throw new CDKException("Unsupported bond order: " + order);
}
} |
java | @Override
synchronized public Map<String, Serializable> generateDDL() {
Map<String, Serializable> returnMap = new HashMap<String, Serializable>();
WsResource ddlOutputDirectory = locationService.get().resolveResource(OUTPUT_DIR);
if (ddlOutputDirectory.exists() == false) {
ddlOutputDirectory.create();
}
// Try to put the canonical path to the DDL output directory in the results.
// If we can't, then put the symbolic name.
try {
returnMap.put(OUTPUT_DIRECTORY, ddlOutputDirectory.asFile().getCanonicalPath());
} catch (IOException ioe) {
returnMap.put(OUTPUT_DIRECTORY, OUTPUT_DIR);
}
boolean success = true;
int fileCount = 0;
Map<String, DDLGenerationParticipant> participants = new HashMap<String, DDLGenerationParticipant>();
Iterator<ServiceAndServiceReferencePair<DDLGenerationParticipant>> i = generators.getServicesWithReferences();
while (i.hasNext()) {
// We'll request the DDL be written to a file whose name is chosen by the component providing the service.
ServiceAndServiceReferencePair<DDLGenerationParticipant> generatorPair = i.next();
DDLGenerationParticipant generator = generatorPair.getService();
String rawId = generator.getDDLFileName();
// Remove any restricted characters from the file name, and make sure
// that the resulting string is not empty. If it's empty, supply a
// default name.
String id = (rawId != null) ? PathUtils.replaceRestrictedCharactersInFileName(rawId) : null;
if ((id == null) || (id.length() == 0)) {
throw new IllegalArgumentException("Service " + generator.toString() + " DDL file name: " + rawId);
}
participants.put(id, generator);
}
for (Map.Entry<String, DDLGenerationParticipant> entry : participants.entrySet()) {
String id = entry.getKey();
DDLGenerationParticipant participant = entry.getValue();
// The path to the file is in the server's output directory.
WsResource ddlOutputResource = locationService.get().resolveResource(OUTPUT_DIR + id + ".ddl");
if (ddlOutputResource.exists() == false) {
ddlOutputResource.create();
}
// Use the text file output stream factory to create the file so that
// it is readable on distributed and z/OS platforms. Overwrite the
// file if it already exists. We have to specify the encoding explicitly
// on the OutputStreamWriter or Findbugs gets upset. We always specify
// UTF-8 because the output DDL might have UNICODE characters. We use
// a TextFileOutputStreamFactory in an attempt to make the file readable
// on z/OS. The file will be tagged as 'ISO8859-1' on z/OS, allowing at
// least some of the characters to be printable. The z/OS chtag command
// does not appear to honor 'UTF-8' as an encoding, even though iconv
// supports it. The data on the disk will be correct in any case, the
// customer may need to FTP it to a distributed machine, or use iconv,
// to be able to view the data.
try {
TextFileOutputStreamFactory f = TrConfigurator.getFileOutputStreamFactory();
OutputStream os = f.createOutputStream(ddlOutputResource.asFile(), false);
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"));
participant.generate(bw);
// The JPA code may close the stream for us. Just make sure it's
// closed so that we flush any data out.
bw.close();
fileCount++;
} catch (Throwable t) {
// We'll get an FFDC here... indicate that we had trouble.
success = false;
}
}
returnMap.put(SUCCESS, Boolean.valueOf(success));
returnMap.put(FILE_COUNT, Integer.valueOf(fileCount));
return returnMap;
} |
python | def which(cmd, path="PATH"):
"""Find cmd on PATH."""
if os.path.exists(cmd):
return cmd
if cmd[0] == '/':
return None
for segment in os.getenv(path).split(":"):
program = os.path.normpath(os.path.join(segment, cmd))
if os.path.exists(program):
return program
return None |
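A quick usage sketch for the `which` helper above; the printed paths are illustrative and `MYPATH` is just a demonstration variable:

```python
import os

# Resolve a command against the standard PATH.
print(which("ls"))                    # e.g. '/bin/ls' on most Unix systems

# An absolute path that does not exist returns None immediately.
assert which("/no/such/binary") is None

# A different environment variable can supply the search path.
os.environ["MYPATH"] = "/usr/local/bin:/usr/bin"
print(which("python3", path="MYPATH"))
```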
python | def assign_funcs(modname, service, module=None, pack=None):
'''
Assign _get_conn and _cache_id functions to the named module.
.. code-block:: python
__utils__['boto.assign_funcs'](__name__, 'ec2')
'''
if pack:
global __salt__ # pylint: disable=W0601
__salt__ = pack
mod = sys.modules[modname]
setattr(mod, '_get_conn', get_connection_func(service, module=module))
setattr(mod, '_cache_id', cache_id_func(service))
# TODO: Remove this and import salt.utils.data.exactly_one into boto_* modules instead
# Leaving this way for now so boto modules can be back ported
setattr(mod, '_exactly_one', exactly_one) |
java | public Observable<FirewallRuleInner> getAsync(String resourceGroupName, String accountName, String firewallRuleName) {
return getWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName).map(new Func1<ServiceResponse<FirewallRuleInner>, FirewallRuleInner>() {
@Override
public FirewallRuleInner call(ServiceResponse<FirewallRuleInner> response) {
return response.body();
}
});
} |
java | public static Instant ofEpochSecond(long epochSecond, long nanoAdjustment) {
long secs = Math.addExact(epochSecond, Math.floorDiv(nanoAdjustment, NANOS_PER_SECOND));
int nos = (int)Math.floorMod(nanoAdjustment, NANOS_PER_SECOND);
return create(secs, nos);
} |
python | def getInstance(cls, *args):
'''
Returns a singleton instance of the class
'''
if not cls.__singleton:
cls.__singleton = Heroku(*args)
return cls.__singleton |
python | def _write(self, s):
"""Write a string out to the SSL socket fully."""
try:
write = self.sock.write
except AttributeError:
# Works around a bug in python socket library
raise IOError('Socket closed')
else:
while s:
n = write(s)
if not n:
raise IOError('Socket closed')
s = s[n:] |
python | def append(self, key, value=None, dir=False, ttl=None, timeout=None):
"""Creates a new automatically increasing key in the given directory
key.
"""
return self.adapter.append(key, value, dir=dir, ttl=ttl,
timeout=timeout) |
java | public static MutableLongTuple of(long x, long y, long z, long w)
{
return new DefaultLongTuple(new long[]{ x, y, z, w });
} |
python | def count(index,h):
'''
Gives count of the documents stored in Elasticsearch. If index option is
provided, it will provide document count of that index.
'''
try:
response = base.es.cat.count(index,h=h)
table = base.draw_table(response)
except Exception as e:
click.echo(e)
else:
click.echo(table) |
python | def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R |
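The formula above is the spherical law of cosines with an Earth radius of 3,963 miles. As a rough sanity check (coordinates and the expected figure are approximate):

```python
# (lat, lon) in degrees for New York City and Los Angeles.
nyc = (40.7128, -74.0060)
la = (34.0522, -118.2437)

# Great-circle distance; roughly 2,450 miles for this pair.
print(round(distance(nyc, la)))
```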
python | def default_listener(col_attr, default):
"""Establish a default-setting listener."""
@event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
def init_scalar(target, value, dict_):
if default.is_callable:
# the callable of ColumnDefault always accepts a context argument
value = default.arg(None)
elif default.is_scalar:
value = default.arg
else:
raise NotImplementedError(
"Can't invoke pre-default for a SQL-level column default")
dict_[col_attr.key] = value
return value |
python | async def get_data(self, resource):
"""Get detail for a resource from the data endpoint."""
url = '{}{}'.format(
self.base_url, self.endpoint.format(resource=resource))
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url)
_LOGGER.info(
"Response from Netdata: %s", response.status)
data = await response.json()
_LOGGER.debug(data)
self.values = {k: v for k, v in zip(
data['labels'], data['data'][0])}
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error("Can not load data from Netdata")
raise exceptions.NetdataConnectionError() |
python | def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [
(key, value) for key, value in headers if not is_hop_by_hop_header(key)
] |
python | def solidityKeccak(cls, abi_types, values):
"""
Executes keccak256 exactly as Solidity does.
Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
and list of corresponding values -- `[20, [-1, 5, 0], True]`
"""
if len(abi_types) != len(values):
raise ValueError(
"Length mismatch between provided abi types and values. Got "
"{0} types and {1} values.".format(len(abi_types), len(values))
)
if isinstance(cls, type):
w3 = None
else:
w3 = cls
normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)
hex_string = add_0x_prefix(''.join(
remove_0x_prefix(hex_encode_abi_type(abi_type, value))
for abi_type, value
in zip(abi_types, normalized_values)
))
return cls.keccak(hexstr=hex_string) |
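If this is web3.py's `Web3.solidityKeccak` (the signature and docstring match), the docstring's own example can be run as below; note that newer web3.py releases rename the method to `solidity_keccak`:

```python
from web3 import Web3

# Packed-encoding hash, equivalent to Solidity's
# keccak256(abi.encodePacked(...)) over the same arguments.
digest = Web3.solidityKeccak(['uint24', 'int8[]', 'bool'],
                             [20, [-1, 5, 0], True])
print(digest.hex())
```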
python | def prepare_sparse_params(self, param_rowids):
'''Prepares the module for processing a data batch by pulling row_sparse
parameters from kvstore to all devices based on rowids.
Parameters
----------
param_rowids : dict of str to NDArray or list of NDArrays
'''
if not self._kvstore:
return
assert(isinstance(param_rowids, dict))
for param_name, rowids in param_rowids.items():
if isinstance(rowids, (tuple, list)):
rowids_1d = []
for r in rowids:
rowids_1d.append(r.reshape((-1,)).astype(np.int64))
rowid = mx.nd.concat(*rowids_1d, dim=0)
else:
rowid = rowids
param_idx = self._exec_group.param_names.index(param_name)
param_val = self._exec_group.param_arrays[param_idx]
self._kvstore.row_sparse_pull(param_name, param_val, row_ids=rowid,
priority=-param_idx) |
python | def expand_target_selector(target_selector: str, conf: Config):
"""Return a normalized target name (where `**:*` is the normalized form of
itself).
Target specifier can be:
- `**:*` - means to recursively build all targets under current
working dir.
- relative path from current working directory to another directory -
means to build all targets defined in that build module.
- a name of a target - means to build this named target in the build module
in the current working directory.
- a named target in another build module, with the build module given as a
relative path from the current working directory (e.g. `../foo:bar`) -
means to build the specified named target in the specified build
module.
- in cases where a relative path can be specified, it should be given using
standard POSIX relative path construction.
"""
if target_selector == '**:*':
return target_selector
if ':' not in target_selector:
target_selector += ':*'
build_module, target_name = split(target_selector)
build_module = normpath(join(conf.get_rel_work_dir(), build_module))
return '{}:{}'.format(PurePath(build_module).as_posix().strip('.'),
validate_name(target_name)) |
java | @Override
public DescribeBuildResult describeBuild(DescribeBuildRequest request) {
request = beforeClientExecution(request);
return executeDescribeBuild(request);
} |
python | def close(self):
"""Close the pooled shared connection."""
# Instead of actually closing the connection,
# unshare it and/or return it to the pool.
if self._con:
self._pool.unshare(self._shared_con)
self._shared_con = self._con = None |
python | def enriched(self, thresh=0.05, idx=True):
"""
Enriched features.
{threshdoc}
"""
return self.upregulated(thresh=thresh, idx=idx) |
python | def _find_file(self, load):
'''
Convenience function for calls made using the RemoteClient
'''
path = load.get('path')
if not path:
return {'path': '',
'rel': ''}
tgt_env = load.get('saltenv', 'base')
return self.find_file(path, tgt_env) |
python | def percent_bandwidth(data, period, std=2.0):
"""
Percent Bandwidth.
Formula:
%_bw = (data() - l_bb()) / bb_range()
"""
catch_errors.check_for_period_error(data, period)
period = int(period)
percent_bandwidth = ((np.array(data) -
lower_bollinger_band(data, period, std)) /
bb_range(data, period, std)
)
return percent_bandwidth |
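A worked number for the formula above: with a price of 105, a lower band of 100, and a band range of 10, %B = (105 - 100) / 10 = 0.5, i.e. the price sits halfway between the bands. A dependency-free sketch of the same arithmetic (the real function derives the band arrays from `data`):

```python
import numpy as np

# Stand-ins for the outputs of lower_bollinger_band() and bb_range().
data = np.array([105.0, 102.0, 98.0])
lower_band = np.array([100.0, 100.0, 100.0])
band_range = np.array([10.0, 10.0, 10.0])

percent_b = (data - lower_band) / band_range
print(percent_b)  # [ 0.5  0.2 -0.2]
```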
java | public com.google.protobuf.ByteString
getPivotNameBytes() {
java.lang.Object ref = pivotName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
pivotName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
} |
java | @Override
public void process(@NotNull final Bytes in, @NotNull final Bytes out, final N nc) {
if (!handshakeComplete) {
try {
doHandshake(nc);
} catch (Throwable t) {
LOGGER.error("Failed to complete SSL handshake at " + Instant.now(), t);
throw new IllegalStateException("Unable to perform handshake", t);
}
handshakeComplete = true;
}
bufferHandler.set(delegate, in, out, nc);
stateMachine.action();
} |
python | def set_output_volume(volume):
'''
Set the volume of sound.
volume
The level of volume. Can range from 0 to 100.
CLI Example:
.. code-block:: bash
salt '*' desktop.set_output_volume <volume>
'''
cmd = 'osascript -e "set volume output volume {0}"'.format(volume)
call = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
python_shell=False
)
_check_cmd(call)
return get_output_volume() |
java | @Unstable
public long indexOf(Block child)
{
CounterBlockMatcher counter = new CounterBlockMatcher(child);
Block found = getFirstBlock(counter, Axes.ANCESTOR_OR_SELF);
return found != null ? counter.getCount() : -1;
} |
java | public synchronized boolean containsId(int requestId)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "containsId", ""+requestId);
testReqIdTableEntry.requestId = requestId;
boolean result = table.containsKey(testReqIdTableEntry);
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "containsId", ""+result);
return result;
} |
java | public void parse() {
if (!lexerEngine.skipIfEqual(DefaultKeyword.FOR)) {
return;
}
lexerEngine.skipIfEqual(DefaultKeyword.UPDATE, PostgreSQLKeyword.SHARE);
lexerEngine.unsupportedIfEqual(DefaultKeyword.OF);
lexerEngine.skipIfEqual(PostgreSQLKeyword.NOWAIT);
} |
python | def build_dump_order(orm_class, orm_classes):
"""pass in an array, when you encounter a ref, call this method again with the array
when something has no more refs, then it gets appended to the array and returns, each
time something gets through the list they are added, but before they are added to the
list it is checked to see if it is already in the listt"""
if orm_class in orm_classes: return
for field_name, field_val in orm_class.schema.fields.items():
if field_val.is_ref():
build_dump_order(field_val.schema.orm_class, orm_classes)
if orm_class not in orm_classes:
orm_classes.append(orm_class) |
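A toy run of the ordering above, with stub classes that mimic only the attributes the function reads (`schema.fields`, `is_ref()`, `schema.orm_class`); the stubs are ours, not the library's:

```python
class _Field:
    """Stub exposing just what build_dump_order reads."""
    def __init__(self, ref_schema=None):
        self.schema = ref_schema
    def is_ref(self):
        return self.schema is not None


class _Schema:
    def __init__(self, orm_class):
        self.orm_class = orm_class
        self.fields = {}


class _Orm:
    def __init__(self, name):
        self.name = name
        self.schema = _Schema(self)
    def __repr__(self):
        return self.name


# Author references Publisher; Book references Author.
publisher, author, book = _Orm('Publisher'), _Orm('Author'), _Orm('Book')
author.schema.fields['publisher_id'] = _Field(publisher.schema)
book.schema.fields['author_id'] = _Field(author.schema)

order = []
build_dump_order(book, order)
print(order)  # [Publisher, Author, Book] -- referenced tables dump first
```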
java | public ServiceFuture<RunCommandDocumentInner> getAsync(String location, String commandId, final ServiceCallback<RunCommandDocumentInner> serviceCallback) {
return ServiceFuture.fromResponse(getWithServiceResponseAsync(location, commandId), serviceCallback);
} |
java | public BitcoinAuxPOWBranch parseAuxPOWBranch(ByteBuffer rawByteBuffer) {
byte[] noOfLinksVarInt=BitcoinUtil.convertVarIntByteBufferToByteArray(rawByteBuffer);
long currentNoOfLinks=BitcoinUtil.getVarInt(noOfLinksVarInt);
ArrayList<byte[]> links = new ArrayList<>((int) currentNoOfLinks);
for (int i=0;i<currentNoOfLinks;i++) {
byte[] currentLink = new byte[32];
rawByteBuffer.get(currentLink,0,32);
links.add(currentLink);
}
byte[] branchSideBitmask=new byte[4];
rawByteBuffer.get(branchSideBitmask,0,4);
return new BitcoinAuxPOWBranch(noOfLinksVarInt, links, branchSideBitmask);
} |
python | def parse_first_row(row, url_instance):
"""
Static method that parses a given table row element by executing `Parser.FIRST_ROW_XPATH` and scraping the torrent's
id, title, tracked-by status, category url and torrent url. Used specifically with a torrent's first table row.
:param lxml.HtmlElement row: row to parse
:param urls.Url url_instance: Url used to combine base urls with scraped links from the row
:return: scraped id, title, tracked-by status, category url and torrent url
:rtype: list
"""
tags = row.xpath(Parser.FIRST_ROW_XPATH)
category_url = url_instance.combine(tags[0].get('href'))
title = unicode(tags[1].text)
# work with the incomplete URL to get str_id
torrent_url = tags[1].get('href')
str_id = torrent_url.split('details/')[1]
str_id = str_id[:-1] if str_id.endswith('/') else str_id
# complete the torrent URL with BASE_URL
torrent_url = url_instance.combine(torrent_url)
# means that torrent has external property
if len(tags) == 3:
# monkey patch the missing external query param
category_url += '&external=1'
tracked_by = '(external)'
else:
tracked_by = 'Demonoid'
return [str_id, title, tracked_by, category_url, torrent_url] |
java | public Result addToSession(String key, String value) {
current(true).session().put(key, value);
return this;
} |
java | static private int readBytes(int c[], int len, InputStream is)
throws IOException {
byte buf[] = new byte[len];
if (is.read(buf, 0, len) < len) {
return -1;
}
// fill the passed in int array
for (int i = 0; i < len; i++) {
c[i] = buf[i] & 0xff;
}
return 0;
} |
python | def get(self, path, content=True, type=None, format=None, load_alternative_format=True):
""" Takes a path for an entity and returns its model"""
path = path.strip('/')
ext = os.path.splitext(path)[1]
# Not a notebook?
if not self.exists(path) or (type != 'notebook' if type else ext not in self.all_nb_extensions()):
return super(TextFileContentsManager, self).get(path, content, type, format)
fmt = preferred_format(ext, self.preferred_jupytext_formats_read)
if ext == '.ipynb':
model = self._notebook_model(path, content=content)
else:
self.set_default_format_options(fmt, read=True)
with mock.patch('nbformat.reads', _jupytext_reads(fmt)):
model = self._notebook_model(path, content=content)
if not load_alternative_format:
return model
if not content:
# Modification time of a paired notebook - Jupyter checks the timestamp
# before saving - is the most recent among all representations (see #118)
if path not in self.paired_notebooks:
return model
fmt, formats = self.paired_notebooks.get(path)
for alt_path, _ in paired_paths(path, fmt, formats):
if alt_path != path and self.exists(alt_path):
alt_model = self._notebook_model(alt_path, content=False)
if alt_model['last_modified'] > model['last_modified']:
model['last_modified'] = alt_model['last_modified']
return model
# We will now read a second file if this is a paired notebook.
nbk = model['content']
jupytext_formats = nbk.metadata.get('jupytext', {}).get('formats') or self.default_formats(path)
jupytext_formats = long_form_multiple_formats(jupytext_formats)
# Compute paired notebooks from formats
alt_paths = [(path, fmt)]
if jupytext_formats:
try:
_, fmt = find_base_path_and_format(path, jupytext_formats)
alt_paths = paired_paths(path, fmt, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
except InconsistentPath as err:
self.log.info("Unable to read paired notebook: %s", str(err))
else:
if path in self.paired_notebooks:
fmt, formats = self.paired_notebooks.get(path)
alt_paths = paired_paths(path, fmt, formats)
if len(alt_paths) > 1 and ext == '.ipynb':
# Apply default options (like saving and reloading would do)
jupytext_metadata = model['content']['metadata'].get('jupytext', {})
self.set_default_format_options(jupytext_metadata, read=True)
if jupytext_metadata:
model['content']['metadata']['jupytext'] = jupytext_metadata
org_model = model
fmt_inputs = fmt
path_inputs = path_outputs = path
model_outputs = None
# Source format is first non ipynb format found on disk
if path.endswith('.ipynb'):
for alt_path, alt_fmt in alt_paths:
if not alt_path.endswith('.ipynb') and self.exists(alt_path):
self.log.info(u'Reading SOURCE from {}'.format(alt_path))
path_inputs = alt_path
fmt_inputs = alt_fmt
model_outputs = model
model = self.get(alt_path, content=content, type=type, format=format,
load_alternative_format=False)
break
# Outputs taken from ipynb if in group, if file exists
else:
for alt_path, _ in alt_paths:
if alt_path.endswith('.ipynb') and self.exists(alt_path):
self.log.info(u'Reading OUTPUTS from {}'.format(alt_path))
path_outputs = alt_path
model_outputs = self.get(alt_path, content=content, type=type, format=format,
load_alternative_format=False)
break
try:
check_file_version(model['content'], path_inputs, path_outputs)
except Exception as err:
raise HTTPError(400, str(err))
# Before we combine the two files, we make sure we're not overwriting ipynb cells
# with an outdated text file
try:
if model_outputs and model_outputs['last_modified'] > model['last_modified'] + \
timedelta(seconds=self.outdated_text_notebook_margin):
raise HTTPError(
400,
'''{out} (last modified {out_last})
seems more recent than {src} (last modified {src_last})
Please either:
- open {src} in a text editor, make sure it is up to date, and save it,
- or delete {src} if not up to date,
- or increase check margin by adding, say,
c.ContentsManager.outdated_text_notebook_margin = 5 # in seconds # or float("inf")
to your .jupyter/jupyter_notebook_config.py file
'''.format(src=path_inputs, src_last=model['last_modified'],
out=path_outputs, out_last=model_outputs['last_modified']))
except OverflowError:
pass
if model_outputs:
combine_inputs_with_outputs(model['content'], model_outputs['content'], fmt_inputs)
elif not path.endswith('.ipynb'):
nbk = model['content']
language = nbk.metadata.get('jupytext', {}).get('main_language', 'python')
if 'kernelspec' not in nbk.metadata and language != 'python':
kernelspec = kernelspec_from_language(language)
if kernelspec:
nbk.metadata['kernelspec'] = kernelspec
# Trust code cells when they have no output
for cell in model['content'].cells:
if cell.cell_type == 'code' and not cell.outputs and cell.metadata.get('trusted') is False:
cell.metadata['trusted'] = True
# Path and name of the notebook is the one of the original path
model['path'] = org_model['path']
model['name'] = org_model['name']
return model |
python | def change_task_size(self, size):
"""Blocking request to change number of running tasks"""
self._pause.value = True
self.log.debug("About to change task size to {0}".format(size))
try:
size = int(size)
except ValueError:
self.log.error("Cannot change task size, non integer size provided")
return False
if size < 0:
self.log.error("Cannot change task size, less than 0 size provided")
return False
old_max = self.max_tasks  # capture the previous size before overwriting it
self.max_tasks = size
if size < old_max:
diff = old_max - size
self.log.debug("Reducing size offset by {0}".format(diff))
while True:
self._update_tasks()
if len(self.free_tasks) >= diff:
for i in range(diff):
task_id = self.free_tasks.pop(0)
del self.current_tasks[task_id]
break
time.sleep(0.5)
if not size:
self._reset_and_pause()
return True
elif size > old_max:
diff = size - old_max
for i in range(diff):
task_id = str(uuid.uuid4())
self.current_tasks[task_id] = {}
self.free_tasks.append(task_id)
self._pause.value = False
self.log.debug("Task size changed to {0}".format(size))
return True |
java | public static double[] rowMeans (RealMatrix matrix) {
// Get the row sums:
double[] retval = EMatrixUtils.rowSums(matrix);
// Iterate over return value and divide by the length:
for (int i = 0; i < retval.length; i++) {
retval[i] = retval[i] / matrix.getColumnDimension();
}
// Done, return row means:
return retval;
} |
java | @Nullable
public static <P extends Presenter> ReflectionPresenterFactory<P> fromViewClass(Class<?> viewClass) {
RequiresPresenter annotation = viewClass.getAnnotation(RequiresPresenter.class);
//noinspection unchecked
Class<P> presenterClass = annotation == null ? null : (Class<P>)annotation.value();
return presenterClass == null ? null : new ReflectionPresenterFactory<>(presenterClass);
} |
java | public AggregationBuilder buildAggregation(KunderaQuery query, EntityMetadata entityMetadata, QueryBuilder filter)
{
SelectStatement selectStatement = query.getSelectStatement();
// To apply filter for where clause
AggregationBuilder aggregationBuilder = buildWhereAggregations(entityMetadata, filter);
if (KunderaQueryUtils.hasGroupBy(query.getJpqlExpression()))
{
TermsBuilder termsBuilder = processGroupByClause(selectStatement.getGroupByClause(), entityMetadata, query);
aggregationBuilder.subAggregation(termsBuilder);
}
else
{
if (KunderaQueryUtils.hasHaving(query.getJpqlExpression()))
{
logger.error("Identified having clause without group by, Throwing not supported operation Exception");
throw new UnsupportedOperationException(
"Currently, Having clause without group by caluse is not supported.");
}
else
{
aggregationBuilder = (selectStatement != null) ? query.isAggregated() ? buildSelectAggregations(
aggregationBuilder, selectStatement, entityMetadata) : null : null;
}
}
return aggregationBuilder;
} |
java | public void updateObjectMemberFieldAccesses(String className, FieldEntry field) throws AparapiException {
final String accessedFieldName = field.getNameAndTypeEntry().getNameUTF8Entry().getUTF8();
// Quickly bail if it is a ref
if (field.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8().startsWith("L")
|| field.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8().startsWith("[L")) {
throw new ClassParseException(ClassParseException.TYPE.OBJECTARRAYFIELDREFERENCE);
}
if (logger.isLoggable(Level.FINEST)) {
logger.finest("Updating access: " + className + " field:" + accessedFieldName);
}
final ClassModel memberClassModel = getOrUpdateAllClassAccesses(className);
final Class<?> memberClass = memberClassModel.getClassWeAreModelling();
ClassModel superCandidate = null;
// We may add this field if no superclass match
boolean add = true;
// No exact match, look for a superclass
for (final ClassModel c : allFieldsClasses.values()) {
if (logger.isLoggable(Level.FINEST)) {
logger.finest(" super: " + c.getClassWeAreModelling().getName() + " for " + className);
}
if (c.isSuperClass(memberClass)) {
if (logger.isLoggable(Level.FINE)) {
logger.fine("selected super: " + c.getClassWeAreModelling().getName() + " for " + className);
}
superCandidate = c;
break;
}
if (logger.isLoggable(Level.FINEST)) {
logger.finest(" no super match for " + memberClass.getName());
}
}
// Look at super's fields for a match
if (superCandidate != null) {
final ArrayList<FieldEntry> structMemberSet = superCandidate.getStructMembers();
for (final FieldEntry f : structMemberSet) {
if (f.getNameAndTypeEntry().getNameUTF8Entry().getUTF8().equals(accessedFieldName)
&& f.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8()
.equals(field.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8())) {
if (logger.isLoggable(Level.FINE)) {
logger.fine("Found match: " + accessedFieldName + " class: " + field.getClassEntry().getNameUTF8Entry().getUTF8()
+ " to class: " + f.getClassEntry().getNameUTF8Entry().getUTF8());
}
if (!f.getClassEntry().getNameUTF8Entry().getUTF8().equals(field.getClassEntry().getNameUTF8Entry().getUTF8())) {
// Look up in class hierarchy to ensure it is the same field
final Field superField = getFieldFromClassHierarchy(superCandidate.getClassWeAreModelling(), f
.getNameAndTypeEntry().getNameUTF8Entry().getUTF8());
final Field classField = getFieldFromClassHierarchy(memberClass, f.getNameAndTypeEntry().getNameUTF8Entry()
.getUTF8());
if (!superField.equals(classField)) {
throw new ClassParseException(ClassParseException.TYPE.OVERRIDENFIELD);
}
}
add = false;
break;
}
}
}
// There was no matching field in the supers, add it to the memberClassModel
// if not already there
if (add) {
boolean found = false;
final ArrayList<FieldEntry> structMemberSet = memberClassModel.getStructMembers();
for (final FieldEntry f : structMemberSet) {
if (f.getNameAndTypeEntry().getNameUTF8Entry().getUTF8().equals(accessedFieldName)
&& f.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8()
.equals(field.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8())) {
found = true;
}
}
if (!found) {
structMemberSet.add(field);
if (logger.isLoggable(Level.FINE)) {
logger.fine("Adding assigned field " + field.getNameAndTypeEntry().getNameUTF8Entry().getUTF8() + " type: "
+ field.getNameAndTypeEntry().getDescriptorUTF8Entry().getUTF8() + " to "
+ memberClassModel.getClassWeAreModelling().getName());
}
}
}
} |
python | def _to_array(value):
"""When `value` is a plain Python sequence, return it as a NumPy array."""
if hasattr(value, 'shape'):
return value
elif hasattr(value, '__len__'):
return array(value)
else:
return float_(value) |
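Usage across the three branches of `_to_array`; note the helper expects NumPy's `array` and `float_` in its namespace, so it predates NumPy 2.0, where `float_` was removed:

```python
import numpy as np
from numpy import array, float_  # NumPy < 2.0: float_ is an alias for float64

a = np.arange(3)
assert _to_array(a) is a                       # has .shape: returned as-is
assert _to_array([1, 2, 3]).shape == (3,)      # has __len__: wrapped in an array
assert isinstance(_to_array(2.0), np.float64)  # scalar: promoted via float_
```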
python | def zval_dict_from_potcar(potcar):
"""
Creates a zval dictionary for calculating the ionic polarization from a
Potcar object.
potcar: Potcar object
"""
zval_dict = {}
for p in potcar:
zval_dict.update({p.element: p.ZVAL})
return zval_dict |
java | public void addListener(Listener listener, long listenerCheckMillis) {
queue.add(listener);
long newFrequency = Math.min(MINIMUM_CHECK_DELAY_MILLIS, listenerCheckMillis);
//first listener
if (currentScheduledFrequency.get() == -1) {
if (currentScheduledFrequency.compareAndSet(-1, newFrequency)) {
fixedSizedScheduler.schedule(checker, listenerCheckMillis, TimeUnit.MILLISECONDS);
}
} else {
long frequency = currentScheduledFrequency.get();
if (frequency > newFrequency) {
currentScheduledFrequency.compareAndSet(frequency, newFrequency);
}
}
} |
python | def fit(self, X):
""" Apply KMeans Clustering
X: dataset with feature vectors
"""
self.centers_, self.labels_, self.sse_arr_, self.n_iter_ = \
_kmeans(X, self.n_clusters, self.max_iter, self.n_trials, self.tol) |
java | public ServiceFuture<Void> beginFailoverPriorityChangeAsync(String resourceGroupName, String accountName, List<FailoverPolicy> failoverPolicies, final ServiceCallback<Void> serviceCallback) {
return ServiceFuture.fromResponse(beginFailoverPriorityChangeWithServiceResponseAsync(resourceGroupName, accountName, failoverPolicies), serviceCallback);
} |
java | protected String getElapsedTime(boolean factor, long... testMilliseconds) {
long elapsedTime;
String info_days = BootstrapConstants.messages.getString("info.days");
String info_hours = BootstrapConstants.messages.getString("info.hours");
String info_minutes = BootstrapConstants.messages.getString("info.minutes");
String info_seconds = BootstrapConstants.messages.getString("info.seconds");
if (testMilliseconds.length == 0) {
// Grab elapsed time in millis
elapsedTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
} else {
// This means this function is being used in a test case where the value of elapsed time
// is assigned by the testing class
elapsedTime = testMilliseconds[0];
}
StringBuilder timeString = new StringBuilder(30);
if (elapsedTime <= 0) {
long scndsN = 0L;
timeString.append(MessageFormat.format(info_seconds, scndsN));
return timeString.toString();
}
final double secMillis = 1000.0;
final long minuteMillis = 60 * (long) secMillis;
final long hourMillis = 60 * minuteMillis;
final long dayMillis = 24 * hourMillis;
long mod = elapsedTime;
if (factor) {
// Issue 01: Hard-coding the "," or "." results in bad-looking translated messages
// Example in pl language: CWWKE0036I: Serwer defaultServer został zatrzymany po 4 dn., 5 godz., 50 min., 34,231 sek..
// we see 2 issues: 1- Some languages might not recognize the ","
// 2- When having to write an abbreviation at the end of the statement you will end up with 2 dots
// Issue 02: Introducing the unit Milliseconds is a suggested solution to get more accurate numbers. Currently we are
// losing some accuracy when we convert between 'Long' & 'Double'.
long days = mod / dayMillis;
mod = mod % dayMillis;
if (days > 0)
timeString.append(MessageFormat.format(info_days, days)).append(", ");
long hours = mod / hourMillis;
mod = mod % hourMillis;
if (hours > 0)
timeString.append(MessageFormat.format(info_hours, hours)).append(", ");
long minutes = mod / minuteMillis;
mod = mod % minuteMillis;
if (minutes > 0)
timeString.append(MessageFormat.format(info_minutes, minutes)).append(", ");
double seconds = mod / secMillis;
mod = mod % (long) secMillis;
if (mod == 0)
timeString.append(MessageFormat.format(info_seconds, String.format("%,.0f", seconds)));
else if (Long.toString(mod).endsWith("00"))
timeString.append(MessageFormat.format(info_seconds, String.format("%,.1f", seconds)));
else if (Long.toString(mod).endsWith("0"))
timeString.append(MessageFormat.format(info_seconds, String.format("%,.2f", seconds)));
else
timeString.append(MessageFormat.format(info_seconds, String.format("%,.3f", seconds)));
return timeString.toString();
} else {
double seconds = elapsedTime / secMillis;
mod = elapsedTime % (long) secMillis;
// mod is not correct for really large numbers so just drop the fraction
if (seconds >= 0xFFFFFFFFL)
timeString.append(MessageFormat.format(info_seconds, String.format("%,.0f", seconds)));
else if (mod == 0)
timeString.append(MessageFormat.format(info_seconds, String.format("%,.0f", seconds)));
else if (Long.toString(mod).endsWith("00"))
timeString.append(MessageFormat.format(info_seconds, String.format("%,.1f", seconds)));
else if (Long.toString(mod).endsWith("0"))
timeString.append(MessageFormat.format(info_seconds, String.format("%,.2f", seconds)));
else
timeString.append(MessageFormat.format(info_seconds, String.format("%,.3f", seconds)));
return timeString.toString();
}
} |
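To sanity-check the factoring above, the same day/hour/minute/second decomposition can be sketched in Python (illustrative only; the Java code formats each unit through locale message strings):

# decompose an elapsed time in milliseconds exactly as the factor branch does
elapsed_ms = 93784231
days, rem = divmod(elapsed_ms, 24 * 60 * 60 * 1000)
hours, rem = divmod(rem, 60 * 60 * 1000)
minutes, rem = divmod(rem, 60 * 1000)
seconds = rem / 1000.0
print(days, hours, minutes, seconds)  # 1 2 3 4.231 -> "1 day, 2 hours, 3 minutes, 4.231 seconds"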
python | def addFixedEffect(self,F=None,A=None, REML=True, index=None):
"""
set sample and trait designs
F: NxK sample design
A: LxP sample design
REML: REML for this term?
index: index of which fixed effect to replace. If None, just append.
"""
if F is None: F = np.ones((self.N,1))
if A is None:
A = np.eye(self.P)
A_identity = True
# 'and' short-circuits so the element-wise compare only runs when shapes match
elif A.shape == (self.P, self.P) and (A == np.eye(self.P)).all():
A_identity = True
else:
A_identity = False
assert F.shape[0]==self.N, "F dimension mismatch"
assert A.shape[1]==self.P, "A dimension mismatch"
if index is None or index==self.n_terms:
self.F.append(F)
self.A.append(A)
self.A_identity.append(A_identity)
self.REML_term.append(REML)
# build B matrix and indicator
self.B.append(np.zeros((F.shape[1],A.shape[0])))
self._n_terms+=1
self._update_indicator(F.shape[1],A.shape[0])
elif index > self.n_terms:
raise Exception("index exceeds max index of terms")
else:
self._n_fixed_effs-=self.F[index].shape[1]*self.A[index].shape[0]
if self.REML_term[index]:
self._n_fixed_effs_REML-=self.F[index].shape[1]*self.A[index].shape[0]
self.F[index] = F
self.A[index] = A
self.A_identity[index] = A_identity
self.REML_term[index]=REML
self.B[index] = np.zeros((F.shape[1],A.shape[0]))
self._rebuild_indicator()
self._n_fixed_effs+=F.shape[1]*A.shape[0]
if REML:
self._n_fixed_effs_REML+=F.shape[1]*A.shape[0]
self.clear_cache('Fstar','Astar','Xstar','Xhat',
'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ') |
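A usage sketch for addFixedEffect; `model` stands in for an instance of the enclosing class (its construction is not shown in the snippet), and N and P are its sample and trait counts:

import numpy as np

N, P = model.N, model.P
# intercept plus one random covariate, shared across all P traits
F = np.hstack([np.ones((N, 1)), np.random.randn(N, 1)])   # N x 2
model.addFixedEffect(F=F)              # A defaults to the P x P identity
model.addFixedEffect(F=F, index=0)     # replace term 0 instead of appending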
python | def get_metric_data_points(self, metric, start, end, points=None,
resolution=None, stats=None):
"""
Returns the data points for a given metric for the given period. The
'start' and 'end' times must be specified; they can be either Python
date/datetime values or a Unix timestamp.
The 'points' parameter represents the number of points to return. The
'resolution' parameter represents the granularity of the data. You must
specify either 'points' or 'resolution'. The allowed values for
resolution are:
FULL
MIN5
MIN20
MIN60
MIN240
MIN1440
Finally, the 'stats' parameter specifies the stats you want returned.
By default only the 'average' is returned. You may omit this parameter,
pass in a single value, or pass in a list of values. The allowed values
are:
average
variance
min
max
"""
allowed_resolutions = ("FULL", "MIN5", "MIN20", "MIN60", "MIN240",
"MIN1440")
if not (points or resolution):
raise exc.MissingMonitoringCheckGranularity("You must specify "
"either the 'points' or 'resolution' parameter when "
"fetching metrics.")
if resolution:
if resolution.upper() not in allowed_resolutions:
raise exc.InvalidMonitoringMetricsResolution("The specified "
"resolution '%s' is not valid. The valid values are: "
"%s." % (resolution, str(allowed_resolutions)))
start_tm = utils.to_timestamp(start)
end_tm = utils.to_timestamp(end)
# NOTE: For some odd reason, the timestamps required for this must be
# in milliseconds, instead of the UNIX standard for timestamps, which
# is in seconds. So the values here are multiplied by 1000 to make it
# work. If the API is ever corrected, the next two lines should be
# removed. GitHub #176.
start_tm *= 1000
end_tm *= 1000
qparms = []
# Timestamps with fractional seconds currently cause a 408 (timeout)
qparms.append("from=%s" % int(start_tm))
qparms.append("to=%s" % int(end_tm))
if points:
qparms.append("points=%s" % points)
if resolution:
qparms.append("resolution=%s" % resolution.upper())
if stats:
stats = utils.coerce_to_list(stats)
for stat in stats:
qparms.append("select=%s" % stat)
qparm = "&".join(qparms)
uri = "/%s/%s/plot?%s" % (self.uri_base, metric, qparm)
try:
resp, resp_body = self.api.method_get(uri)
except exc.BadRequest as e:
msg = e.message
dtls = e.details
if msg.startswith("Validation error"):
raise exc.InvalidMonitoringMetricsRequest("Your request was "
"invalid: '%s'" % dtls)
else:
raise
return resp_body["values"] |
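A usage sketch, assuming `mgr` is an instance of the class this method belongs to; the metric name is a placeholder:

import datetime

end = datetime.datetime.utcnow()
start = end - datetime.timedelta(days=1)
# last 24 hours at 5-minute granularity, returning average and max
values = mgr.get_metric_data_points("example.metric", start, end,
                                    resolution="MIN5",
                                    stats=["average", "max"])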
java | public void setTile(int x, int y, String tile) {
// use >= so indices equal to the array bounds are rejected as well
if ((x >= getArrayWidth()) || (y >= getArrayHeight()) || (x < 0) || (y < 0)) {
throw new IllegalArgumentException("Tile position out of bounds: " + x + ", " + y);
}
image[x][y] = tile;
} |
java | public T Else(def<T> func) {
if (procceed)
if (initVal != null && !initVal.equals(false)) {
return body.apply(initVal);
} else {
return func.apply();
}
else
return this.val;
} |
java | void encode(
DEROutputStream out)
throws IOException
{
if (out instanceof ASN1OutputStream || out instanceof BEROutputStream)
{
// BER indefinite-length form: constructed SEQUENCE tag followed by 0x80
out.write(SEQUENCE | CONSTRUCTED);
out.write(0x80);
Enumeration e = getObjects();
while (e.hasMoreElements())
{
out.writeObject(e.nextElement());
}
// two zero octets mark end-of-contents for the indefinite-length form
out.write(0x00);
out.write(0x00);
}
else
{
super.encode(out);
}
} |
java | public void updateExternalSREButtonLabels() {
if (this.enableSystemWideSelector) {
final ISREInstall wideSystemSRE = SARLRuntime.getDefaultSREInstall();
final String wideSystemSRELabel;
if (wideSystemSRE == null) {
wideSystemSRELabel = Messages.SREConfigurationBlock_0;
} else {
wideSystemSRELabel = wideSystemSRE.getName();
}
this.systemSREButton.setText(MessageFormat.format(
Messages.SREConfigurationBlock_1, wideSystemSRELabel));
}
if (!this.projectProviderFactories.isEmpty()) {
final ISREInstall projectSRE = retreiveProjectSRE();
final String projectSRELabel;
if (projectSRE == null) {
projectSRELabel = Messages.SREConfigurationBlock_0;
} else {
projectSRELabel = projectSRE.getName();
}
this.projectSREButton.setText(MessageFormat.format(
Messages.SREConfigurationBlock_3, projectSRELabel));
}
} |
java | private static List<ActionRef> getRefs(Xml node)
{
final Collection<Xml> children = node.getChildren(NODE_ACTION);
final List<ActionRef> actions = new ArrayList<>(children.size());
for (final Xml action : children)
{
final String path = action.readString(ATT_PATH);
final boolean cancel = action.readBoolean(false, ATT_CANCEL);
actions.add(new ActionRef(path, cancel, getRefs(action)));
}
return actions;
} |
java | public UriMappingResolver addResources(String... resources) {
if (resources.length % 2 == 1) {
throw new IllegalArgumentException("Expected even number of arguments");
}
for (int i = 0; i < resources.length; i += 2) {
addSchema(resources[i], resources[i+1]);
}
return this;
} |
java | JCExpression annotationValue() {
int pos;
switch (token.kind) {
case MONKEYS_AT:
pos = token.pos;
nextToken();
return annotation(pos, Tag.ANNOTATION);
case LBRACE:
pos = token.pos;
accept(LBRACE);
ListBuffer<JCExpression> buf = new ListBuffer<>();
if (token.kind == COMMA) {
nextToken();
} else if (token.kind != RBRACE) {
buf.append(annotationValue());
while (token.kind == COMMA) {
nextToken();
if (token.kind == RBRACE) break;
buf.append(annotationValue());
}
}
accept(RBRACE);
return toP(F.at(pos).NewArray(null, List.nil(), buf.toList()));
default:
mode = EXPR;
return term1();
}
} |
java | public T removeFirst() {
if ( this.firstNode == null ) {
return null;
}
final T node = this.firstNode;
this.firstNode = node.getNext();
node.setNext( null );
if ( this.firstNode != null ) {
this.firstNode.setPrevious( null );
} else {
this.lastNode = null;
}
this.size--;
return node;
} |
python | def get_filename(self):
"""
Return ``self.filename`` if set otherwise return the template basename with a ``.pdf`` extension.
:rtype: str
"""
if self.filename is None:
name = splitext(basename(self.template_name))[0]
return '{}.pdf'.format(name)
return self.filename |
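A quick illustration of the fallback path, using only the stdlib helpers the method relies on:

from os.path import basename, splitext

# template 'reports/invoice.html' with filename unset -> 'invoice.pdf'
name = splitext(basename("reports/invoice.html"))[0]
print('{}.pdf'.format(name))   # invoice.pdf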