language | func_code_string |
---|---|
java | @XmlElementDecl(namespace = "http://docs.oasis-open.org/ns/cmis/messaging/200908/", name = "extension", scope = ApplyACL.class)
public JAXBElement<CmisExtensionType> createApplyACLExtension(
CmisExtensionType value) {
return new JAXBElement<CmisExtensionType>(
_GetPropertiesExtension_QNAME, CmisExtensionType.class,
ApplyACL.class, value);
} |
java | public static Object stringToValue(String string) {
if (string.equals("")) {
return string;
}
if (string.equalsIgnoreCase("true")) {
return Boolean.TRUE;
}
if (string.equalsIgnoreCase("false")) {
return Boolean.FALSE;
}
if (string.equalsIgnoreCase("null")) {
return JSONObject.NULL;
}
/*
* If it might be a number, try converting it. If a number cannot be
* produced, then the value will just be a string.
*/
char initial = string.charAt(0);
if ((initial >= '0' && initial <= '9') || initial == '-') {
try {
if (string.indexOf('.') > -1 || string.indexOf('e') > -1
|| string.indexOf('E') > -1
|| "-0".equals(string)) {
Double d = Double.valueOf(string);
if (!d.isInfinite() && !d.isNaN()) {
return d;
}
} else {
Long myLong = Long.valueOf(string);
if (string.equals(myLong.toString())) {
if (myLong.longValue() == myLong.intValue()) {
return Integer.valueOf(myLong.intValue());
}
return myLong;
}
}
} catch (Exception ignore) {}
}
return string;
} |
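For comparison, a minimal Python sketch of the same string-to-typed-value heuristic; the function name is illustrative, and mapping JSONObject.NULL to None is an assumption:
import math

def string_to_value(s):
    # Booleans and null first, mirroring the Java version above.
    if s == "":
        return s
    low = s.lower()
    if low == "true":
        return True
    if low == "false":
        return False
    if low == "null":
        return None  # stand-in for JSONObject.NULL
    # If it might be a number, try converting it; otherwise keep the string.
    if s[0].isdigit() or s[0] == '-':
        try:
            if any(c in s for c in '.eE') or s == '-0':
                d = float(s)
                if math.isfinite(d):
                    return d
            else:
                return int(s)
        except ValueError:
            pass
    return s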
python | def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) |
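Note that ruamel.yaml.load with RoundTripLoader is the older API; a sketch of the equivalent call with the newer ruamel.yaml.YAML object (an assumption based on ruamel.yaml >= 0.15, not taken from the source):
from ruamel.yaml import YAML

def load_yaml_rt(path):
    # Round-trip mode preserves comments and key order on re-dump.
    yaml = YAML(typ='rt')
    with open(path, 'r') as fh:
        return yaml.load(fh)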
java | public SICoreConnection getConnection() throws SISessionDroppedException,
SIConnectionDroppedException, SISessionUnavailableException,
SIConnectionUnavailableException {
final String methodName = "getConnection";
if (TRACE.isEntryEnabled()) {
SibTr.entry(this, TRACE, methodName);
}
checkValid();
// Delegate in order to get the correct exception behaviour...
_delegateSession.getConnection();
// ...then just return the parent connection
if (TRACE.isEntryEnabled()) {
SibTr.exit(this, TRACE, methodName, _parentConnection);
}
return _parentConnection;
} |
java | public void calculateGisModelDwgPolylines() {
for( int i = 0; i < dwgObjects.size(); i++ ) {
DwgObject pol = (DwgObject) dwgObjects.get(i);
if (pol instanceof DwgPolyline2D) {
int flags = ((DwgPolyline2D) pol).getFlags();
int firstHandle = ((DwgPolyline2D) pol).getFirstVertexHandle();
int lastHandle = ((DwgPolyline2D) pol).getLastVertexHandle();
Vector pts = new Vector();
Vector bulges = new Vector();
double[] pt = new double[3];
for( int j = 0; j < dwgObjects.size(); j++ ) {
DwgObject firstVertex = (DwgObject) dwgObjects.get(j);
if (firstVertex instanceof DwgVertex2D) {
int vertexHandle = firstVertex.getHandle();
if (vertexHandle == firstHandle) {
int k = 0;
while( true ) {
DwgObject vertex = (DwgObject) dwgObjects.get(j + k);
int vHandle = vertex.getHandle();
if (vertex instanceof DwgVertex2D) {
pt = ((DwgVertex2D) vertex).getPoint();
pts.add(new Point2D.Double(pt[0], pt[1]));
double bulge = ((DwgVertex2D) vertex).getBulge();
bulges.add(new Double(bulge));
k++;
if (vHandle == lastHandle) {
break;
}
} else if (vertex instanceof DwgSeqend) {
break;
}
}
}
}
}
if (pts.size() > 0) {
Point2D[] newPts = new Point2D[pts.size()];
if ((flags & 0x1) == 0x1) {
newPts = new Point2D[pts.size() + 1];
for( int j = 0; j < pts.size(); j++ ) {
newPts[j] = (Point2D) pts.get(j);
}
newPts[pts.size()] = (Point2D) pts.get(0);
bulges.add(new Double(0));
} else {
for( int j = 0; j < pts.size(); j++ ) {
newPts[j] = (Point2D) pts.get(j);
}
}
double[] bs = new double[bulges.size()];
for( int j = 0; j < bulges.size(); j++ ) {
bs[j] = ((Double) bulges.get(j)).doubleValue();
}
((DwgPolyline2D) pol).setBulges(bs);
Point2D[] points = GisModelCurveCalculator.calculateGisModelBulge(newPts, bs);
((DwgPolyline2D) pol).setPts(points);
} else {
// System.out.println("Encontrada polil�nea sin puntos ...");
// TODO: No se debe mandar nunca una polil�nea sin puntos, si esto
// ocurre es porque existe un error que hay que corregir ...
}
} else if (pol instanceof DwgPolyline3D) {
int closedFlags = ((DwgPolyline3D) pol).getClosedFlags();
int firstHandle = ((DwgPolyline3D) pol).getFirstVertexHandle();
int lastHandle = ((DwgPolyline3D) pol).getLastVertexHandle();
Vector pts = new Vector();
double[] pt = new double[3];
for( int j = 0; j < dwgObjects.size(); j++ ) {
DwgObject firstVertex = (DwgObject) dwgObjects.get(j);
if (firstVertex instanceof DwgVertex3D) {
int vertexHandle = firstVertex.getHandle();
if (vertexHandle == firstHandle) {
int k = 0;
while( true ) {
DwgObject vertex = (DwgObject) dwgObjects.get(j + k);
int vHandle = vertex.getHandle();
if (vertex instanceof DwgVertex3D) {
pt = ((DwgVertex3D) vertex).getPoint();
pts.add(new double[]{pt[0], pt[1], pt[2]});
k++;
if (vHandle == lastHandle) {
break;
}
} else if (vertex instanceof DwgSeqend) {
break;
}
}
}
}
}
if (pts.size() > 0) {
double[][] newPts = new double[pts.size()][3];
if ((closedFlags & 0x1) == 0x1) {
newPts = new double[pts.size() + 1][3];
for( int j = 0; j < pts.size(); j++ ) {
newPts[j][0] = ((double[]) pts.get(j))[0];
newPts[j][1] = ((double[]) pts.get(j))[1];
newPts[j][2] = ((double[]) pts.get(j))[2];
}
newPts[pts.size()][0] = ((double[]) pts.get(0))[0];
newPts[pts.size()][1] = ((double[]) pts.get(0))[1];
newPts[pts.size()][2] = ((double[]) pts.get(0))[2];
} else {
for( int j = 0; j < pts.size(); j++ ) {
newPts[j][0] = ((double[]) pts.get(j))[0];
newPts[j][1] = ((double[]) pts.get(j))[1];
newPts[j][2] = ((double[]) pts.get(j))[2];
}
}
((DwgPolyline3D) pol).setPts(newPts);
} else {
// System.out.println("Encontrada polil�nea sin puntos ...");
// TODO: No se debe mandar nunca una polil�nea sin puntos, si esto
// ocurre es porque existe un error que hay que corregir ...
}
} else if (pol instanceof DwgLwPolyline && ((DwgLwPolyline) pol).getVertices() != null) {
int flags = ((DwgLwPolyline) pol).getFlag();
Point2D[] pts = ((DwgLwPolyline) pol).getVertices();
double[] bulges = ((DwgLwPolyline) pol).getBulges();
Point2D[] newPts = new Point2D[pts.length];
double[] newBulges = new double[bulges.length];
// TODO: There may be unhandled cases here ...
// System.out.println("flags = " + flags);
if (flags == 512 || flags == 776 || flags == 768) {
newPts = new Point2D[pts.length + 1];
newBulges = new double[bulges.length + 1];
for( int j = 0; j < pts.length; j++ ) {
newPts[j] = (Point2D) pts[j];
newBulges[j] = bulges[j];
}
newPts[pts.length] = (Point2D) pts[0];
newBulges[pts.length] = 0;
} else {
for( int j = 0; j < pts.length; j++ ) {
newPts[j] = (Point2D) pts[j];
newBulges[j] = bulges[j];
}
}
if (pts.length > 0) {
((DwgLwPolyline) pol).setBulges(newBulges);
Point2D[] points = GisModelCurveCalculator.calculateGisModelBulge(newPts,
newBulges);
((DwgLwPolyline) pol).setVertices(points);
} else {
// System.out.println("Encontrada polil�nea sin puntos ...");
// TODO: No se debe mandar nunca una polil�nea sin puntos, si esto
// ocurre es porque existe un error que hay que corregir ...
}
}
}
} |
python | def _getLayers(self):
""" gets layers for the featuer service """
params = {"f": "json"}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._layers = []
if 'layers' in json_dict:
for l in json_dict["layers"]:
self._layers.append(
layer.FeatureLayer(url=self._url + "/%s" % l['id'],
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
) |
java | private List buildResourceTypeSelectWidgetList() {
List fileFormats = new ArrayList();
// get all OpenCms resource types
List resourceTypes = OpenCms.getResourceManager().getResourceTypes();
// put for every resource type type id and name into list object for select widget
Iterator iter = resourceTypes.iterator();
while (iter.hasNext()) {
I_CmsResourceType type = (I_CmsResourceType)iter.next();
// only xml resource types
if (type.isDirectEditable() && !type.getTypeName().equalsIgnoreCase("JSP")) {
CmsSelectWidgetOption option = new CmsSelectWidgetOption(
Integer.toString(type.getTypeId()),
false,
type.getTypeName());
fileFormats.add(option);
}
}
return fileFormats;
} |
java | public static int consumeCRLF(final Buffer buffer) throws SipParseException {
try {
buffer.markReaderIndex();
final byte cr = buffer.readByte();
final byte lf = buffer.readByte();
if (cr == CR && lf == LF) {
return 2;
}
} catch (final IndexOutOfBoundsException e) {
// fall through
} catch (final IOException e) {
throw new SipParseException(buffer.getReaderIndex(), UNABLE_TO_READ_FROM_STREAM, e);
}
buffer.resetReaderIndex();
return 0;
} |
python | def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value |
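The function assumes a module-level _by_text table mapping opcode mnemonics to integers; a hypothetical sketch of how such a table is typically built (the mnemonics follow the DNS opcode registry, but the exact names in the source are not shown):
_by_value = {0: 'QUERY', 1: 'IQUERY', 2: 'STATUS', 4: 'NOTIFY', 5: 'UPDATE'}
_by_text = {text: value for value, text in _by_value.items()}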
python | def truncate(self, distance):
"""
Return a truncated version of the path.
Only one vertex (at the endpoint) will be added.
"""
position = np.searchsorted(self._cum_norm, distance)
offset = distance - self._cum_norm[position - 1]
if offset < constants.tol_path.merge:
truncated = self._points[:position + 1]
else:
vector = unitize(np.diff(self._points[np.arange(2) + position],
axis=0).reshape(-1))
vector *= offset
endpoint = self._points[position] + vector
truncated = np.vstack((self._points[:position + 1],
endpoint))
assert (np.linalg.norm(np.diff(truncated, axis=0),
axis=1).sum() - distance) < constants.tol_path.merge
return truncated |
python | def formatDecimalMark(value, decimalmark='.'):
"""
Dummy method to replace decimal mark from an input string.
Assumes that 'value' uses '.' as decimal mark and ',' as
thousand mark.
::value:: is a string
::returns:: is a string with the decimal mark if needed
"""
# We have to consider the possibility of working with decimals such as
# X.000 where those decimals are important because of the precission
# and significant digits matters
# Using 'float' the system delete the extre desimals with 0 as a value
# Example: float(2.00) -> 2.0
# So we have to save the decimal length, this is one reason we are usnig
# strings for results
rawval = str(value)
try:
return decimalmark.join(rawval.split('.'))
except Exception:
return rawval |
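A few illustrative calls, assuming only the behavior described in the docstring:
formatDecimalMark('2.000')         # -> '2.000' (default mark is '.')
formatDecimalMark('2.000', ',')    # -> '2,000'
formatDecimalMark('1500', ',')     # -> '1500' (no decimal mark present)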
java | public void beforeNullSafeOperation(SharedSessionContractImplementor session) {
ConfigurationHelper.setCurrentSessionFactory(session.getFactory());
if (this instanceof IntegratorConfiguredType) {
((IntegratorConfiguredType)this).applyConfiguration(session.getFactory());
}
} |
python | def select_by_key(self, key):
"""Selects an item by its key.
Args:
key (str): The unique string identifier of the item that have to be selected.
"""
self._selected_key = None
self._selected_item = None
for item in self.children.values():
item.attributes['selected'] = False
if key in self.children:
self.children[key].attributes['selected'] = True
self._selected_key = key
self._selected_item = self.children[key] |
java | private List<org.jfrog.hudson.pipeline.types.File> getBuildFilesList(Stream<? extends BaseBuildFileBean> buildFilesStream) {
return buildFilesStream
.filter(buildFile -> StringUtils.isNotBlank(buildFile.getLocalPath()))
.filter(buildFile -> StringUtils.isNotBlank(buildFile.getRemotePath()))
.map(org.jfrog.hudson.pipeline.types.File::new)
.distinct()
.collect(Collectors.toList());
} |
java | public final String stringReplace(
String toBeReplaced,
String toReplace,
String replacement
) {
Pattern pattern = Pattern.compile(toReplace);
Matcher match = pattern.matcher(toBeReplaced);
while (match.find()) {
toBeReplaced = match.replaceAll(replacement);
match = pattern.matcher(toBeReplaced);
}
return toBeReplaced;
} |
java | public static <T> Single<Set<T>> values(String key, Class<T> vClazz) {
return values(CacheService.CACHE_CONFIG_BEAN, key, vClazz);
} |
python | def execute(db_name):
"""Execute any pending work in the database stored in `db_name`,
recording the results.
This looks for any work in `db_name` which has no results, schedules it to
be executed, and records any results that arrive.
"""
try:
with use_db(db_name, mode=WorkDB.Mode.open) as work_db:
_update_progress(work_db)
config = work_db.get_config()
engine = get_execution_engine(config.execution_engine_name)
def on_task_complete(job_id, work_result):
work_db.set_result(job_id, work_result)
_update_progress(work_db)
log.info("Job %s complete", job_id)
log.info("Beginning execution")
engine(
work_db.pending_work_items,
config,
on_task_complete=on_task_complete)
log.info("Execution finished")
except FileNotFoundError as exc:
raise FileNotFoundError(
str(exc).replace('Requested file', 'Corresponding database',
1)) from exc |
python | def get_template(file):
''' Lookup a template class for the given filename or file
extension. Return None when no implementation is found.
'''
pattern = str(file).lower()
while len(pattern) and not Lean.is_registered(pattern):
pattern = os.path.basename(pattern)
pattern = re.sub(r'^[^.]*\.?','',pattern)
# Try to find a preferred engine.
preferred_klass = Lean.preferred_mappings[pattern] if Lean.preferred_mappings.has_key(pattern) else None
if preferred_klass:
return preferred_klass
# Fall back to the general list of mappings
klasses = Lean.template_mappings[pattern]
# Try to find an engine which is already loaded
template = None
for klass in klasses:
if hasattr(klass,'is_engine_initialized') and callable(klass.is_engine_initialized):
if klass.is_engine_initialized():
template = klass
break
if template:
return template
# Try each of the classes until one succeeds. If all of them fails,
# we'll raise the error of the first class.
first_failure = None
for klass in klasses:
try:
return klass
except Exception, e:
if not first_failure:
first_failure = e
if first_failure:
raise Exception(first_failure) |
java | @Override
public T transformElement(Tuple2<Object, LinkedMapWritable> tuple,
DeepJobConfig<T, ? extends DeepJobConfig> config) {
try {
return (T) UtilES.getObjectFromJson(config.getEntityClass(), tuple._2());
} catch (Exception e) {
LOG.error("Cannot convert JSON: ", e);
throw new DeepTransformException("Could not transform from Json to Entity " + e.getMessage());
}
} |
java | protected void HandelStack()
{
// Find out what the operator does to the stack
int StackHandel = StackOpp();
if (StackHandel < 2)
{
// The operators that enlarge the stack by one
if (StackHandel==1)
PushStack();
// The operators that pop the stack
else
{
// Abs value for the for loop
StackHandel *= -1;
for (int i=0;i<StackHandel;i++)
PopStack();
}
}
// All other flush the stack
else
EmptyStack();
} |
java | private void complete(Symbol sym) throws CompletionFailure {
if (sym.kind == TYP) {
ClassSymbol c = (ClassSymbol)sym;
c.members_field = new Scope.ErrorScope(c); // make sure it's always defined
annotate.enterStart();
try {
completeOwners(c.owner);
completeEnclosing(c);
} finally {
// The flush needs to happen only after annotations
// are filled in.
annotate.enterDoneWithoutFlush();
}
fillIn(c);
} else if (sym.kind == PCK) {
PackageSymbol p = (PackageSymbol)sym;
try {
fillIn(p);
} catch (IOException ex) {
throw new CompletionFailure(sym, ex.getLocalizedMessage()).initCause(ex);
}
}
if (!filling)
annotate.flush(); // finish attaching annotations
} |
java | @Override
public boolean eIsSet(int featureID)
{
switch (featureID)
{
case TypesPackage.JVM_MEMBER__DECLARING_TYPE:
return getDeclaringType() != null;
case TypesPackage.JVM_MEMBER__VISIBILITY:
return visibility != VISIBILITY_EDEFAULT;
case TypesPackage.JVM_MEMBER__SIMPLE_NAME:
return SIMPLE_NAME_EDEFAULT == null ? simpleName != null : !SIMPLE_NAME_EDEFAULT.equals(simpleName);
case TypesPackage.JVM_MEMBER__IDENTIFIER:
return IDENTIFIER_EDEFAULT == null ? identifier != null : !IDENTIFIER_EDEFAULT.equals(identifier);
case TypesPackage.JVM_MEMBER__DEPRECATED:
return isSetDeprecated();
}
return super.eIsSet(featureID);
} |
python | def create_token_response(self, request, token_handler):
"""Return token or error embedded in the URI fragment.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format, per
`Appendix B`_:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by `Section 3.3`_.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
try:
self.validate_token_request(request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# https://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
fragment=True)}, None, 302
# In OIDC implicit flow it is possible to have a request_type that does not include the access_token!
# "id_token token" - return the access token and the id token
# "id_token" - don't return the access token
if "token" in request.response_type.split():
token = token_handler.create_token(request, refresh_token=False)
else:
token = {}
if request.state is not None:
token['state'] = request.state
for modifier in self._token_modifiers:
token = modifier(token, token_handler, request)
# In OIDC implicit flow it is possible to have a request_type that does
# not include the access_token! In this case there is no need to save a token.
if "token" in request.response_type.split():
self.request_validator.save_token(token, request)
return self.prepare_authorization_response(
request, token, {}, None, 302) |
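A standalone sketch of the Appendix B fragment encoding this method relies on, using only the standard library; add_token_to_fragment is a hypothetical helper, whereas the source delegates to oauthlib's common.add_params_to_uri:
from urllib.parse import urlencode

def add_token_to_fragment(redirect_uri, token):
    # application/x-www-form-urlencoded parameters go in the URI fragment.
    return '{}#{}'.format(redirect_uri, urlencode(token))

# add_token_to_fragment('https://client.example/cb',
#                       {'access_token': 'abc', 'token_type': 'Bearer', 'expires_in': 3600})
# -> 'https://client.example/cb#access_token=abc&token_type=Bearer&expires_in=3600'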
java | @Nonnegative
private static int _appendNextCharacterAndAdvanceLoop (@Nonnull final String sLine,
@Nonnull final StringBuilder aSB,
@Nonnegative final int nIndex)
{
aSB.append (sLine.charAt (nIndex + 1));
return nIndex + 1;
} |
python | def _process_figure(value, fmt):
"""Processes the figure. Returns a dict containing figure properties."""
# pylint: disable=global-statement
global Nreferences # Global references counter
global has_unnumbered_figures # Flags unnumbered figures were found
global cursec # Current section
# Parse the image
attrs, caption = value[0]['c'][:2]
# Initialize the return value
fig = {'is_unnumbered': False,
'is_unreferenceable': False,
'is_tagged': False,
'attrs': attrs}
# Bail out if the label does not conform
if not LABEL_PATTERN.match(attrs[0]):
has_unnumbered_figures = True
fig['is_unnumbered'] = True
fig['is_unreferenceable'] = True
return fig
# Process unreferenceable figures
if attrs[0] == 'fig:': # Make up a unique description
attrs[0] = attrs[0] + str(uuid.uuid4())
fig['is_unreferenceable'] = True
unreferenceable.append(attrs[0])
# For html, hard-code in the section numbers as tags
kvs = PandocAttributes(attrs, 'pandoc').kvs
if numbersections and fmt in ['html', 'html5'] and 'tag' not in kvs:
if kvs['secno'] != cursec:
cursec = kvs['secno']
Nreferences = 1
kvs['tag'] = cursec + '.' + str(Nreferences)
Nreferences += 1
# Save to the global references tracker
fig['is_tagged'] = 'tag' in kvs
if fig['is_tagged']:
# Remove any surrounding quotes
if kvs['tag'][0] == '"' and kvs['tag'][-1] == '"':
kvs['tag'] = kvs['tag'].strip('"')
elif kvs['tag'][0] == "'" and kvs['tag'][-1] == "'":
kvs['tag'] = kvs['tag'].strip("'")
references[attrs[0]] = kvs['tag']
else:
Nreferences += 1
references[attrs[0]] = Nreferences
# Adjust caption depending on the output format
if fmt in ['latex', 'beamer']: # Append a \label if this is referenceable
if not fig['is_unreferenceable']:
value[0]['c'][1] += [RawInline('tex', r'\label{%s}'%attrs[0])]
else: # Hard-code in the caption name and number/tag
if isinstance(references[attrs[0]], int): # Numbered reference
value[0]['c'][1] = [RawInline('html', r'<span>'),
Str(captionname), Space(),
Str('%d:'%references[attrs[0]]),
RawInline('html', r'</span>')] \
if fmt in ['html', 'html5'] else \
[Str(captionname), Space(), Str('%d:'%references[attrs[0]])]
value[0]['c'][1] += [Space()] + list(caption)
else: # Tagged reference
assert isinstance(references[attrs[0]], STRTYPES)
text = references[attrs[0]]
if text.startswith('$') and text.endswith('$'): # Math
math = text.replace(' ', r'\ ')[1:-1]
els = [Math({"t":"InlineMath", "c":[]}, math), Str(':')]
else: # Text
els = [Str(text+':')]
value[0]['c'][1] = \
[RawInline('html', r'<span>'), Str(captionname), Space()] + \
els + [RawInline('html', r'</span>')] \
if fmt in ['html', 'html5'] else \
[Str(captionname), Space()] + els
value[0]['c'][1] += [Space()] + list(caption)
return fig |
java | public List<Node<K, V>> searchAll(Interval1D<K> interval) {
LinkedList<Node<K, V>> list = new LinkedList<>();
searchAll(root, interval, list);
return list;
} |
python | def params_to_blr(self, trans_handle, params):
"Convert parameter array to BLR and values format."
ln = len(params) * 2
blr = bs([5, 2, 4, 0, ln & 255, ln >> 8])
if self.accept_version < PROTOCOL_VERSION13:
values = bs([])
else:
# start with null indicator bitmap
null_indicator = 0
for i, p in enumerate(params):
if p is None:
null_indicator |= (1 << i)
n = len(params) // 8
if len(params) % 8 != 0:
n += 1
if n % 4: # padding
n += 4 - n % 4
null_indicator_bytes = []
for i in range(n):
null_indicator_bytes.append(null_indicator & 255)
null_indicator >>= 8
values = bs(null_indicator_bytes)
for p in params:
if (
(PYTHON_MAJOR_VER == 2 and type(p) == unicode) or
(PYTHON_MAJOR_VER == 3 and type(p) == str)
):
p = self.str_to_bytes(p)
t = type(p)
if p is None:
v = bs([])
blr += bs([14, 0, 0])
elif (
(PYTHON_MAJOR_VER == 2 and t == str) or
(PYTHON_MAJOR_VER == 3 and t == bytes)
):
if len(p) > MAX_CHAR_LENGTH:
v = self._create_blob(trans_handle, p)
blr += bs([9, 0])
else:
v = p
nbytes = len(v)
pad_length = ((4-nbytes) & 3)
v += bs([0]) * pad_length
blr += bs([14, nbytes & 255, nbytes >> 8])
elif t == int:
v = bint_to_bytes(p, 4)
blr += bs([8, 0]) # blr_long
elif t == float and p == float("inf"):
v = b'\x7f\x80\x00\x00'
blr += bs([10])
elif t == decimal.Decimal or t == float:
if t == float:
p = decimal.Decimal(str(p))
(sign, digits, exponent) = p.as_tuple()
v = 0
ln = len(digits)
for i in range(ln):
v += digits[i] * (10 ** (ln - i - 1))
if sign:
v *= -1
v = bint_to_bytes(v, 8)
if exponent < 0:
exponent += 256
blr += bs([16, exponent])
elif t == datetime.date:
v = convert_date(p)
blr += bs([12])
elif t == datetime.time:
if p.tzinfo:
v = convert_time_tz(p)
blr += bs([28])
else:
v = convert_time(p)
blr += bs([13])
elif t == datetime.datetime:
if p.tzinfo:
v = convert_timestamp_tz(p)
blr += bs([29])
else:
v = convert_timestamp(p)
blr += bs([35])
elif t == bool:
v = bs([1, 0, 0, 0]) if p else bs([0, 0, 0, 0])
blr += bs([23])
else: # fallback, convert to string
p = p.__repr__()
if PYTHON_MAJOR_VER == 3 or (PYTHON_MAJOR_VER == 2 and type(p) == unicode):
p = self.str_to_bytes(p)
v = p
nbytes = len(v)
pad_length = ((4-nbytes) & 3)
v += bs([0]) * pad_length
blr += bs([14, nbytes & 255, nbytes >> 8])
blr += bs([7, 0])
values += v
if self.accept_version < PROTOCOL_VERSION13:
values += bs([0]) * 4 if p is not None else bs([0xff, 0xff, 0xff, 0xff])
blr += bs([255, 76]) # [blr_end, blr_eoc]
return blr, values |
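A standalone sketch of the protocol-13 null-indicator bitmap built at the top of this function (one bit per parameter, little-endian byte order, padded to a 4-byte boundary); the helper name is illustrative:
def null_bitmap(params):
    bits = 0
    for i, p in enumerate(params):
        if p is None:
            bits |= 1 << i
    n = (len(params) + 7) // 8   # one bit per parameter, rounded up
    if n % 4:                    # pad to a 4-byte boundary
        n += 4 - n % 4
    return bytes((bits >> (8 * i)) & 0xFF for i in range(n))

# null_bitmap([1, None, 'x', None]) -> b'\x0a\x00\x00\x00'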
java | private void init(int tLen, byte[] src, int offset, int len) {
if (tLen < 0) {
throw new IllegalArgumentException(
"Length argument is negative");
}
this.tLen = tLen;
// Input sanity check
if ((src == null) ||(len < 0) || (offset < 0)
|| ((len + offset) > src.length)) {
throw new IllegalArgumentException("Invalid buffer arguments");
}
iv = new byte[len];
System.arraycopy(src, offset, iv, 0, len);
} |
python | def milestones(self, extra_params=None):
"""
All Milestones in this Space
"""
# Default params
params = {
'per_page': settings.MAX_PER_PAGE,
}
if extra_params:
params.update(extra_params)
return self.api._get_json(
Milestone,
space=self,
rel_path=self._build_rel_path('milestones/all'),
extra_params=params,
get_all=True, # Retrieve all milestones in the space
) |
python | def _build_params(self, location, provider_key, **kwargs):
"""Will be overridden according to the targetted web service"""
base_kwargs = {
'q': location,
'fuzzy': kwargs.get('fuzzy', 1.0),
'username': provider_key,
'maxRows': kwargs.get('maxRows', 1),
}
# check out for bbox in kwargs
bbox = kwargs.pop('proximity', None)
if bbox is not None:
bbox = BBox.factory(bbox)
base_kwargs.update(
{'east': bbox.east, 'west': bbox.west,
'north': bbox.north, 'south': bbox.south})
# look out for valid extra kwargs
supported_kwargs = set((
'name', 'name_equals', 'name_startsWith', 'startRow',
'country', 'countryBias', 'continentCode',
'adminCode1', 'adminCode2', 'adminCode3', 'cities',
'featureClass', 'featureCode',
'lang', 'type', 'style',
'isNameRequired', 'tag', 'operator', 'charset',
'east', 'west', 'north', 'south',
'orderby', 'inclBbox',
))
found_kwargs = supported_kwargs & set(kwargs.keys())
LOGGER.debug("Adding extra kwargs %s", found_kwargs)
# update base kwargs with extra ones
base_kwargs.update(dict(
[(extra, kwargs[extra]) for extra in found_kwargs]
))
return base_kwargs |
java | @Override
public SIDestinationAddress checkMessagingRequired(SIDestinationAddress requestDestAddr,
SIDestinationAddress replyDestAddr,
DestinationType destinationType,
String alternateUser)
throws SIConnectionDroppedException,
SIConnectionUnavailableException,
SIIncorrectCallException,
SITemporaryDestinationNotFoundException,
SIResourceException,
SINotAuthorizedException,
SINotPossibleInCurrentConfigurationException
{
if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
SibTr.entry(this, TRACE, "checkMessagingRequired");
}
SIDestinationAddress retVal = _delegateConnection.checkMessagingRequired(requestDestAddr,
replyDestAddr,
destinationType,
alternateUser);
if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
SibTr.exit(this, TRACE, "checkMessagingRequired", retVal);
}
return retVal;
} |
python | def get_maya_envpath(self):
"""Return the PYTHONPATH neccessary for running mayapy
If you start native mayapy, it will setup these paths.
You might want to prepend this to your path if running from
an external intepreter.
:returns: the PYTHONPATH that is used for running mayapy
:rtype: str
:raises: None
"""
opj = os.path.join
ml = self.get_maya_location()
mb = self.get_maya_bin()
msp = self.get_maya_sitepackage_dir()
pyzip = opj(mb, "python27.zip")
pydir = opj(ml, "Python")
pydll = opj(pydir, "DLLs")
pylib = opj(pydir, "lib")
pyplat = opj(pylib, "plat-win")
pytk = opj(pylib, "lib-tk")
path = os.pathsep.join((pyzip, pydll, pylib, pyplat, pytk, mb, pydir, msp))
return path |
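A hedged usage sketch: prepending the result to PYTHONPATH for an external interpreter, assuming an object env_obj that exposes this method:
import os

def prepend_maya_paths(env_obj):
    # Put mayapy's search paths ahead of whatever is already configured.
    existing = os.environ.get('PYTHONPATH', '')
    os.environ['PYTHONPATH'] = os.pathsep.join(
        p for p in (env_obj.get_maya_envpath(), existing) if p)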
java | public static List<Precedence> newPrecedence(VM vmBefore, Collection<VM> vmsAfter) {
return newPrecedence(Collections.singleton(vmBefore), vmsAfter);
} |
python | def coal(return_X_y=True):
"""coal-mining accidents dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 150 bins has been computed describing the number of accidents per year.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html
"""
# y is counts
# recommend PoissonGAM
coal = pd.read_csv(PATH + '/coal.csv', index_col=0)
if return_X_y:
y, x = np.histogram(coal.values, bins=150)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return coal |
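The histogram/midpoint step in isolation, as a small numpy sketch with synthetic data (not the coal dataset itself):
import numpy as np

values = np.random.default_rng(0).exponential(size=200)
y, edges = np.histogram(values, bins=150)   # y: counts per bin
X = edges[:-1] + np.diff(edges) / 2         # bin midpoints, len(X) == len(y)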
python | def endpoint_check(
first, node_first, s, second, node_second, t, intersections
):
r"""Check if curve endpoints are identical.
.. note::
This is a helper for :func:`tangent_bbox_intersection`. These
functions are used (directly or indirectly) by
:func:`_all_intersections` exclusively, and that function has a
Fortran equivalent.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:`\mathbf{R}^2`).
node_first (numpy.ndarray): 1D ``2``-array, one of the endpoints
of ``first``.
s (float): The parameter corresponding to ``node_first``, so
expected to be one of ``0.0`` or ``1.0``.
second (SubdividedCurve): Second curve being intersected (assumed in
:math:`\mathbf{R}^2`).
node_second (numpy.ndarray): 1D ``2``-array, one of the endpoints
of ``second``.
t (float): The parameter corresponding to ``node_second``, so
expected to be one of ``0.0`` or ``1.0``.
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
"""
if _helpers.vector_close(node_first, node_second):
orig_s = (1 - s) * first.start + s * first.end
orig_t = (1 - t) * second.start + t * second.end
add_intersection(orig_s, orig_t, intersections) |
java | protected static void main(String args[]) {
int from = Integer.parseInt(args[0]);
int to = Integer.parseInt(args[1]);
statistics(from,to);
} |
python | def output_of(*cmd: Optional[str], **kwargs) -> str:
"""Invokes a subprocess and returns its output as a string.
Args:
cmd: Components of the command to execute, e.g. ["echo", "dog"].
**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
a cwd (current working directory) argument.
Returns:
The captured standard output, as a string with the final
newline stripped.
Raises:
subprocess.CalledProcessError: The process returned a non-zero error
code and raise_on_fail was set.
"""
result = cast(str, run_cmd(*cmd,
log_run_to_stderr=False,
out=TeeCapture(),
**kwargs).out)
# Strip final newline.
if result.endswith('\n'):
result = result[:-1]
return result |
java | public void exclusiveOr (Area area) {
Area a = (Area) clone();
a.intersect(area);
add(area);
subtract(a);
} |
java | public ChangeRequest applyChangeRequest(
ChangeRequestInfo changeRequest, Dns.ChangeRequestOption... options) {
checkNotNull(changeRequest);
return dns.applyChangeRequest(getName(), changeRequest, options);
} |
java | @Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
final String domain = generateJmxDomain(metricName, group);
final Hashtable<String, String> table = generateJmxTable(group.getAllVariables());
AbstractBean jmxMetric;
ObjectName jmxName;
try {
jmxName = new ObjectName(domain, table);
} catch (MalformedObjectNameException e) {
/**
* There is an implementation error on our side if this occurs. Either the domain was modified and no longer
* conforms to the JMX domain rules or the table wasn't properly generated.
*/
LOG.debug("Implementation error. The domain or table does not conform to JMX rules." , e);
return;
}
if (metric instanceof Gauge) {
jmxMetric = new JmxGauge((Gauge<?>) metric);
} else if (metric instanceof Counter) {
jmxMetric = new JmxCounter((Counter) metric);
} else if (metric instanceof Histogram) {
jmxMetric = new JmxHistogram((Histogram) metric);
} else if (metric instanceof Meter) {
jmxMetric = new JmxMeter((Meter) metric);
} else {
LOG.error("Cannot add unknown metric type: {}. This indicates that the metric type " +
"is not supported by this reporter.", metric.getClass().getName());
return;
}
try {
synchronized (this) {
mBeanServer.registerMBean(jmxMetric, jmxName);
registeredMetrics.put(metric, jmxName);
}
} catch (NotCompliantMBeanException e) {
// implementation error on our side
LOG.debug("Metric did not comply with JMX MBean rules.", e);
} catch (InstanceAlreadyExistsException e) {
LOG.warn("A metric with the name " + jmxName + " was already registered.", e);
} catch (Throwable t) {
LOG.warn("Failed to register metric", t);
}
} |
python | def compute(self, inputVector, learn, activeArray):
"""
This method resembles the primary public method of the SpatialPooler class.
"""
super(SpatialPoolerWrapper, self).compute(inputVector, learn, activeArray)
self._updateAvgActivityPairs(activeArray) |
python | def Session(access_token=None, env=None):
"""Create an HTTP session.
Parameters
----------
access_token : str
Mapbox access token string (optional).
env : dict, optional
A dict that subsitutes for os.environ.
Returns
-------
requests.Session
"""
if env is None:
env = os.environ.copy()
access_token = (
access_token or
env.get('MapboxAccessToken') or
env.get('MAPBOX_ACCESS_TOKEN'))
session = requests.Session()
session.params.update(access_token=access_token)
session.headers.update({
'User-Agent': 'mapbox-sdk-py/{0} {1}'.format(
__version__, requests.utils.default_user_agent())})
return session |
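An illustrative call; the token value is a placeholder, not a real credential:
session = Session(access_token='pk.example-placeholder')
# resp = session.get('https://api.mapbox.com/geocoding/v5/mapbox.places/Berlin.json')
# The access token and the mapbox-sdk-py User-Agent ride along on every request.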
python | async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
"""
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
"""
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<') |
java | private static void convertUnicodeToHex(String str) {
try {
display("Unicode to hex: " + Utils.toHexBytes(Utils.toBytes(str)));
} catch (Exception e) {
}
} |
python | def temporary_file_path(root_dir=None, cleanup=True, suffix='', permissions=None):
"""
A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name |
java | @SuppressWarnings("unchecked")
public synchronized <T> T load(final PluggableService<T> service, final String providerName)
throws ProviderLoaderException {
final ProviderIdent ident = new ProviderIdent(service.getName(), providerName);
debug("loadInstance for " + ident + ": " + pluginJar);
if (null == pluginProviderDefs.get(ident)) {
final String[] strings = getClassnames();
for (final String classname : strings) {
final Class<?> cls;
try {
cls = loadClass(classname);
if (matchesProviderDeclaration(ident, cls)) {
pluginProviderDefs.put(ident, cls);
}
} catch (PluginException e) {
log.error("Failed to load class from " + pluginJar + ": classname: " + classname + ": "
+ e.getMessage());
}
}
}
final Class<T> cls = pluginProviderDefs.get(ident);
if (null != cls) {
try {
return createProviderForClass(service, cls);
} catch (PluginException e) {
throw new ProviderLoaderException(e, service.getName(), providerName);
}
}
return null;
} |
java | public void addPropertiesFieldBehavior(BaseField fldDisplay, String strProperty)
{
FieldListener listener = new CopyConvertersHandler(new PropertiesConverter(this, strProperty));
listener.setRespondsToMode(DBConstants.INIT_MOVE, false);
listener.setRespondsToMode(DBConstants.READ_MOVE, false);
fldDisplay.addListener(listener);
listener = new CopyConvertersHandler(fldDisplay, new PropertiesConverter(this, strProperty));
this.addListener(listener);
} |
java | public alluxio.grpc.PAclEntryType getType() {
alluxio.grpc.PAclEntryType result = alluxio.grpc.PAclEntryType.valueOf(type_);
return result == null ? alluxio.grpc.PAclEntryType.Owner : result;
} |
python | def get(scope, source, index):
"""
Returns a copy of the list item with the given index.
It is an error if an item with teh given index does not exist.
:type source: string
:param source: A list of strings.
:type index: string
:param index: A list of strings.
:rtype: string
:return: The cleaned up list of strings.
"""
try:
index = int(index[0])
except IndexError:
raise ValueError('index variable is required')
except ValueError:
raise ValueError('index is not an integer')
try:
return [source[index]]
except IndexError:
raise ValueError('no such item in the list') |
java | private String getDialogId(VaadinRequest request) {
String path = request.getPathInfo();
// remove the leading slash
return path != null ? path.substring(1) : null;
} |
java | private static Set<Long> getFeedItemIdsFromArgument(Function function) {
if (function.getLhsOperand().length == 1
&& function.getLhsOperand(0) instanceof RequestContextOperand) {
RequestContextOperand requestContextOperand =
(RequestContextOperand) function.getLhsOperand(0);
if (RequestContextOperandContextType.FEED_ITEM_ID.equals(
requestContextOperand.getContextType())
&& FunctionOperator.IN.equals(function.getOperator())) {
return Arrays.stream(function.getRhsOperand())
.filter(ConstantOperand.class::isInstance)
.map(argument -> ((ConstantOperand) argument).getLongValue())
.collect(Collectors.toSet());
}
}
return new HashSet<>();
} |
java | public static String getNativeLibraryVersion()
{
URL versionFile = SnappyLoader.class.getResource("/org/xerial/snappy/VERSION");
String version = "unknown";
try {
if (versionFile != null) {
InputStream in = null;
try {
Properties versionData = new Properties();
in = versionFile.openStream();
versionData.load(in);
version = versionData.getProperty("version", version);
if (version.equals("unknown")) {
version = versionData.getProperty("SNAPPY_VERSION", version);
}
version = version.trim().replaceAll("[^0-9\\.]", "");
}
finally {
if(in != null) {
in.close();
}
}
}
}
catch (IOException e) {
e.printStackTrace();
}
return version;
} |
python | def getFileSecurity(
self,
fileName,
securityInformation,
securityDescriptor,
lengthSecurityDescriptorBuffer,
lengthNeeded,
dokanFileInfo,
):
"""Get security attributes of a file.
:param fileName: name of file to get security for
:type fileName: ctypes.c_wchar_p
:param securityInformation: buffer for security information
:type securityInformation: PSECURITY_INFORMATION
:param securityDescriptor: buffer for security descriptor
:type securityDescriptor: PSECURITY_DESCRIPTOR
:param lengthSecurityDescriptorBuffer: length of descriptor buffer
:type lengthSecurityDescriptorBuffer: ctypes.c_ulong
:param lengthNeeded: length needed for the buffer
:type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('getFileSecurity', fileName) |
python | def get_all(self, **options):
"""
Retrieves all records repetitively and returns a single list.
>>> airtable.get_all()
>>> airtable.get_all(view='MyView', fields=['ColA', '-ColB'])
>>> airtable.get_all(maxRecords=50)
[{'fields': ... }, ...]
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
formula (``str``, optional): Airtable formula.
See :any:`FormulaParam`.
Returns:
records (``list``): List of Records
>>> records = get_all(maxRecords=3, view='All')
"""
all_records = []
for records in self.get_iter(**options):
all_records.extend(records)
return all_records |
python | def grant_permission_to_users(self, permission, **kwargs): # noqa: E501
"""Grants a specific user permission to multiple users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.grant_permission_to_users(permission, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str permission: Permission to grant to the users. Please note that 'host_tag_management' is the equivalent of the 'Source Tag Management' permission (required)
:param list[str] body: list of users which should be revoked by specified permission
:return: UserModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.grant_permission_to_users_with_http_info(permission, **kwargs) # noqa: E501
else:
(data) = self.grant_permission_to_users_with_http_info(permission, **kwargs) # noqa: E501
return data |
python | def GetSessionId(self):
'''Retrieves the VMSessionID for the current session. Call this function after calling
VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called,
VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.'''
sid = c_void_p()
ret = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(sid))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return sid |
python | def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
FileEntry: parent file entry or None if not available.
"""
store_index = vshadow.VShadowPathSpecGetStoreIndex(self.path_spec)
if store_index is None:
return None
return self._file_system.GetRootFileEntry() |
python | def _GetSocket(self):
"""Establishes a connection to an nsrlsvr instance.
Returns:
socket._socketobject: socket connected to an nsrlsvr instance or None if
a connection cannot be established.
"""
try:
return socket.create_connection(
(self._host, self._port), self._SOCKET_TIMEOUT)
except socket.error as exception:
logger.error(
'Unable to connect to nsrlsvr with error: {0!s}.'.format(exception)) |
java | private Object executeExp(PageContext pc, SQL sql, Query qr, ZExp exp, int row) throws PageException {
if (exp instanceof ZConstant) return executeConstant(sql, qr, (ZConstant) exp, row);
else if (exp instanceof ZExpression) return executeExpression(pc, sql, qr, (ZExpression) exp, row);
throw new DatabaseException("unsupported sql statement [" + exp + "]", null, sql, null);
} |
java | public boolean isComplexQuery()
{
for (int i = 0; i < this.getRecordlistCount(); i++)
{
if (this.getRecordlistAt(i).getTable() instanceof org.jbundle.base.db.shared.MultiTable)
return true;
}
return false;
} |
java | public boolean remove()
{
if (_last1.key() == null)
throw new java.util.NoSuchElementException(
"remove() without calling next()");
boolean result = internalRemove();
return result;
} |
python | def execute(self, conn, logical_file_name, transaction=False):
"""
simple execute
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/DeleteDupicates. Expects db connection from upper layer.")
print(self.sql)
self.dbi.processData(self.sql, logical_file_name, conn, transaction) |
python | def compute_and_cache_missing_buckets(self, start_time, end_time,
untrusted_time, force_recompute=False):
"""
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
recompute and recaching of even previously cached data.
"""
if untrusted_time and not untrusted_time.tzinfo:
untrusted_time = untrusted_time.replace(tzinfo=tzutc())
events = self._compute_buckets(start_time, end_time, compute_missing=True,
cache=True, untrusted_time=untrusted_time,
force_recompute=force_recompute)
for event in events:
yield event |
java | public GrailsClass getArtefact(String artefactType, String name) {
ArtefactInfo info = getArtefactInfo(artefactType);
return info == null ? null : info.getGrailsClass(name);
} |
python | def string_to_tokentype(s):
"""
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node |
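Usage against the pygments token tree, where this function lives (pygments.token):
from pygments.token import Token, String, string_to_tokentype

string_to_tokentype('String.Double')   # Token.Literal.String.Double
string_to_tokentype(String)            # returned unchanged: Token.Literal.String
string_to_tokentype('')                # Token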
python | def unpackSeptets(septets, numberOfSeptets=None, prevOctet=None, shift=7):
""" Unpacks the specified septets into octets
:param septets: Iterator or iterable containing the septets packed into octets
:type septets: iter(bytearray), bytearray or str
:param numberOfSeptets: The number of septets to unpack (or None for all remaining in "septets")
:type numberOfSeptets: int or None
:return: The septets unpacked into octets
:rtype: bytearray
"""
result = bytearray()
if type(septets) == str:
septets = iter(rawStrToByteArray(septets))
elif type(septets) == bytearray:
septets = iter(septets)
if numberOfSeptets is None:
numberOfSeptets = MAX_INT # Loop until StopIteration
i = 0
for octet in septets:
i += 1
if shift == 7:
shift = 1
if prevOctet is not None:
result.append(prevOctet >> 1)
if i <= numberOfSeptets:
result.append(octet & 0x7F)
prevOctet = octet
if i == numberOfSeptets:
break
else:
continue
b = ((octet << shift) & 0x7F) | (prevOctet >> (8 - shift))
prevOctet = octet
result.append(b)
shift += 1
if i == numberOfSeptets:
break
if shift == 7:
b = prevOctet >> (8 - shift)
if b:
# The final septet value still needs to be unpacked
result.append(b)
return result |
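For reference, a minimal sketch of the inverse operation, packing 7-bit septets into octets per GSM 03.38; the function name is illustrative:
def pack_septets(septets):
    """Pack 7-bit values into octets, least-significant bits first."""
    result = bytearray()
    bits = 0
    nbits = 0
    for septet in septets:
        bits |= (septet & 0x7F) << nbits
        nbits += 7
        while nbits >= 8:
            result.append(bits & 0xFF)
            bits >>= 8
            nbits -= 8
    if nbits:
        result.append(bits & 0xFF)  # flush the final partial octet
    return result

# pack_septets(bytearray(b'hello')) -> bytearray(b'\xe8\x32\x9b\xfd\x06')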
python | def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs) |
python | def checkpoint(self):
"""
Update the database to reflect in-memory changes made to this item; for
example, to make it show up in store.query() calls where it is now
valid, but was not the last time it was persisted to the database.
This is called automatically when in 'autocommit mode' (i.e. not in a
transaction) and at the end of each transaction for every object that
has been changed.
"""
if self.store is None:
raise NotInStore("You can't checkpoint %r: not in a store" % (self,))
if self.__deleting:
if not self.__everInserted:
# don't issue duplicate SQL and crap; we were created, then
# destroyed immediately.
return
self.store.executeSQL(self._baseDeleteSQL(self.store), [self.storeID])
# re-using OIDs plays havoc with the cache, and with other things
# as well. We need to make sure that we leave a placeholder row at
# the end of the table.
if self.__deletingObject:
# Mark this object as dead.
self.store.executeSchemaSQL(_schema.CHANGE_TYPE,
[-1, self.storeID])
# Can't do this any more:
# self.store.executeSchemaSQL(_schema.DELETE_OBJECT, [self.storeID])
# TODO: need to measure the performance impact of this, then do
# it to make sure things are in fact deleted:
# self.store.executeSchemaSQL(_schema.APP_VACUUM)
else:
assert self.__legacy__
# we're done...
if self.store.autocommit:
self.committed()
return
if self.__everInserted:
# case 1: we've been inserted before, either previously in this
# transaction or we were loaded from the db
if not self.__dirty__:
# we might have been checkpointed twice within the same
# transaction; just don't do anything.
return
self.store.executeSQL(*self._updateSQL())
else:
# case 2: we are in the middle of creating the object, we've never
# been inserted into the db before
schemaAttrs = self.getSchema()
insertArgs = [self.storeID]
for (ignoredName, attrObj) in schemaAttrs:
attrObjDuplicate, attributeValue = self.__dirty__[attrObj.attrname]
# assert attrObjDuplicate is attrObj
insertArgs.append(attributeValue)
# XXX this isn't atomic, gross.
self.store.executeSQL(self._baseInsertSQL(self.store), insertArgs)
self.__everInserted = True
# In case 1, we're dirty but we did an update, synchronizing the
# database, in case 2, we haven't been created but we issue an insert.
# In either case, the code in attributes.py sets the attribute *as well
# as* populating __dirty__, so we clear out dirty and we keep the same
# value, knowing it's the same as what's in the db.
self.__dirty__.clear()
if self.store.autocommit:
self.committed() |
python | def do_resolve(self, line):
"""resolve <identifier> Find all locations from which the given Science Object
can be downloaded."""
pid, = self._split_args(line, 1, 0)
self._command_processor.resolve(pid) |
python | def isFloat(nstr, schema):
"""
!~~isFloat
"""
if isinstance(nstr, (float, int, long)):
return True
elif not isinstance(nstr, basestring):
return False
try:
float(nstr)
except ValueError:
return False
return True |
python | def import_subview(self, idx, subview):
"""
Add the given subview to the corpus.
Args:
idx (str): An idx that is unique in the corpus for identifying the subview.
If already a subview exists with the given id it will be overridden.
subview (Subview): The subview to add.
"""
subview.corpus = self
self._subviews[idx] = subview |
python | def pretty_description(description, wrap_at=None, indent=0):
"""
Return a pretty formatted string given some text.
Args:
description (str): string to format.
wrap_at (int): maximum length of a line.
indent (int): level of indentation.
Returns:
str: pretty formatted string.
"""
if wrap_at is None or wrap_at < 0:
width = console_width(default=79)
if wrap_at is None:
wrap_at = width
else:
wrap_at += width
indent = ' ' * indent
text_wrapper = textwrap.TextWrapper(
width=wrap_at, replace_whitespace=False,
initial_indent=indent, subsequent_indent=indent)
new_desc = []
for line in description.split('\n'):
new_desc.append(line.replace('\n', '').strip())
while not new_desc[0]:
del new_desc[0]
while not new_desc[-1]:
del new_desc[-1]
separators = [i for i, l in enumerate(new_desc) if not l]
paragraphs = []
if separators:
start, end = 0, separators[0]
paragraphs.append(new_desc[start:end])
for i in range(len(separators) - 1):
start = end + 1
end = separators[i + 1]
paragraphs.append(new_desc[start:end])
paragraphs.append(new_desc[end + 1:])
return '\n\n'.join(text_wrapper.fill(' '.join(p)) for p in paragraphs)
return text_wrapper.fill(' '.join(new_desc)) |
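An illustrative call (wrap_at given explicitly, so the console_width fallback is not exercised):
text = "A long paragraph that should be wrapped onto several lines.\n\nA second paragraph."
print(pretty_description(text, wrap_at=40, indent=4))
# Each paragraph is re-wrapped at 40 columns and indented by 4 spaces.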
python | def PrepareMatches(self, file_system):
"""Prepare find specification for matching.
Args:
file_system (FileSystem): file system.
"""
if self._location is not None:
self._location_segments = self._SplitPath(
self._location, file_system.PATH_SEPARATOR)
elif self._location_regex is not None:
path_separator = file_system.PATH_SEPARATOR
if path_separator == '\\':
# The backslash '\' is escaped within a regular expression.
path_separator = '\\\\'
self._location_segments = self._SplitPath(
self._location_regex, path_separator)
if self._location_segments is not None:
self._number_of_location_segments = len(self._location_segments) |
java | private Set<Field> getItemFields() {
Class<?> next = Item.class;
Set<Field> fields = new HashSet<>(getFields(next));
while (next.getSuperclass() != Object.class) {
next = next.getSuperclass();
fields.addAll(getFields(next));
}
return fields;
} |
python | async def send(self, data, id=None, event=None, retry=None):
"""Send data using EventSource protocol
:param str data: The data field for the message.
:param str id: The event ID to set the EventSource object's last
event ID value to.
:param str event: The event's type. If this is specified, an event will
be dispatched on the browser to the listener for the specified
event name; the web site would use addEventListener() to listen
for named events. The default event type is "message".
:param int retry: The reconnection time to use when attempting to send
the event. [What code handles this?] This must be an integer,
specifying the reconnection time in milliseconds. If a non-integer
value is specified, the field is ignored.
"""
buffer = io.StringIO()
if id is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'id: {}'.format(id)))
buffer.write(self._sep)
if event is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'event: {}'.format(event)))
buffer.write(self._sep)
for chunk in self.LINE_SEP_EXPR.split(data):
buffer.write('data: {}'.format(chunk))
buffer.write(self._sep)
if retry is not None:
if not isinstance(retry, int):
raise TypeError('retry argument must be int')
buffer.write('retry: {}'.format(retry))
buffer.write(self._sep)
buffer.write(self._sep)
await self.write(buffer.getvalue().encode('utf-8')) |
java | public static boolean check(Map<String, Object> m, Map<String, Object> r) {
if (r.get(m.get("cmd")).equals("OK")) {
return true;
}
return false;
} |
python | def score(self, y_true, y_pred):
"""Calculate f1 score.
Args:
y_true (list): true sequences.
y_pred (list): predicted sequences.
Returns:
score: f1 score.
"""
score = f1_score(y_true, y_pred)
print(' - f1: {:04.2f}'.format(score * 100))
print(classification_report(y_true, y_pred, digits=4))
return score |
python | def resample_vinci(
fref,
fflo,
faff,
intrp = 0,
fimout = '',
fcomment = '',
outpath = '',
pickname = 'ref',
vc = '',
con = '',
vincipy_path = '',
atlas_resample = False,
atlas_ref_make = False,
atlas_ref_del = False,
close_vinci = False,
close_buff = True,
):
''' Resample the floating image <fflo> into the geometry of <fref>,
using the Vinci transformation output <faff> (an *.xml file).
Output the NIfTI file path of the resampled/resliced image.
'''
#---------------------------------------------------------------------------
#> output path
if outpath=='' and fimout!='' and '/' in fimout:
opth = os.path.dirname(fimout)
if opth=='':
opth = os.path.dirname(fflo)
fimout = os.path.basename(fimout)
elif outpath=='':
opth = os.path.dirname(fflo)
else:
opth = outpath
imio.create_dir(opth)
#> output floating and affine file names
if fimout=='':
if pickname=='ref':
fout = os.path.join(
opth,
'affine-ref-' \
+os.path.basename(fref).split('.nii')[0]+fcomment+'.nii.gz')
else:
fout = os.path.join(
opth,
'affine-flo-' \
+os.path.basename(fflo).split('.nii')[0]+fcomment+'.nii.gz')
else:
fout = os.path.join(
opth,
fimout.split('.')[0]+'.nii.gz')
#---------------------------------------------------------------------------
if vincipy_path=='':
try:
import resources
vincipy_path = resources.VINCIPATH
except:
        raise NameError('e> could not import resources '
                        'or find variable VINCIPATH in resources.py')
sys.path.append(vincipy_path)
try:
from VinciPy import Vinci_Bin, Vinci_Connect, Vinci_Core, Vinci_XML, Vinci_ImageT
except ImportError:
        raise ImportError('e> could not import Vinci:\n'
                          '   check the variable VINCIPATH (path to Vinci) in resources.py')
#---------------------------------------------------------------------------
#> start Vinci core engine if it is not running/given
if vc=='' or con=='':
#> adopted from the Vinci's example RunMMMJob.py
bin = Vinci_Bin.Vinci_Bin()
con = Vinci_Connect.Vinci_Connect(bin)
vinci_binpath = con.StartMyVinci()
vc = Vinci_Core.Vinci_CoreCalc(con)
vc.StdProject()
#---------------------------------------------------------------------------
    #> use nearest-neighbour interpolation when intrp == 0
    interp_nn = int(intrp) == 0
#---------------------------------------------------------------------------
#> change the reference image to be loaded as atlas
if atlas_resample and atlas_ref_make:
#> output file name for the extra reference image
fextref = os.path.join(opth, 'reference-as-atlas.nii.gz')
prc.nii_modify(fref, fimout=fextref, voxel_range=[0., 255.])
ref = Vinci_ImageT.newTemporary(
vc,
szFileName = fextref,
bLoadAsAtlas = atlas_resample)
if atlas_ref_del:
os.remove(fextref)
else:
ref = Vinci_ImageT.newTemporary(
vc,
szFileName = fref,
bLoadAsAtlas = atlas_resample)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#> reapply rigid transformation to new image
flo = Vinci_ImageT.newTemporary(
vc,
szFileName = fflo,
bLoadAsAtlas = atlas_resample)
rsl = flo.reapplyMMMTransform(
faff,
refImage = ref,
IsComputed=True)
rsl.saveYourselfAs(
bUseOffsetRotation = True,
bUseNextNeighbourInterp = interp_nn,
szFullFileName = fout)
#---------------------------------------------------------------------------
#> close image buffers for reference and floating
if close_buff:
ref.killYourself()
flo.killYourself()
rsl.killYourself()
if close_vinci: con.CloseVinci(True)
return fout |
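A hedged usage sketch; all file names are hypothetical:

fout = resample_vinci(
    'subj/T1_ref.nii.gz',     # reference geometry
    'subj/PET_flo.nii.gz',    # floating image to reslice
    'subj/affine.xml',        # Vinci MMM transform output
    intrp=0,                  # nearest-neighbour reslicing
    outpath='subj/out',
    close_buff=True)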
python | def perlin(self, key, **kwargs):
"""Return perlin noise seede with the specified key.
For parameters, check the PerlinNoise class."""
if hasattr(key, "encode"):
key = key.encode('ascii')
value = zlib.adler32(key, self.seed)
return PerlinNoise(value, **kwargs) |
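A minimal sketch of the deterministic seeding step; the key and seed values are illustrative:

import zlib

seed = 12345                            # hypothetical instance seed
key = 'terrain-height'.encode('ascii')
value = zlib.adler32(key, seed)         # same key + seed -> same value
# PerlinNoise(value, ...) is therefore reproducible per key across runs.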
python | def pyprf_opt_brute(strCsvCnfg, objNspc, lgcTest=False, strPathHrf=None,
varRat=None):
"""
    Function for optimizing given pRF parameters using brute-force grid search.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
objNspc : object
Name space from command line arguments.
lgcTest : Boolean
        Whether this is a test (pytest). If yes, the absolute path of the pyprf
        library will be prepended to config file paths.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Conditional imports:
if cfg.strVersion == 'gpu':
from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu
if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')):
from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu
# Convert preprocessing parameters (for temporal smoothing)
# from SI units (i.e. [s]) into units of data array (volumes):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
# *************************************************************************
# *************************************************************************
# *** Preprocessing
# The functional data will be masked and demeaned:
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100.)
# set the precision of the header to np.float32 so that the prf results
# will be saved in this precision later
hdrMsk.set_data_dtype(np.float32)
print('---Number of voxels included in analysis: ' +
str(np.sum(aryLgcVar)))
# *************************************************************************
# *** Checks
# Make sure that if gpu fitting is used, the number of cross-validations is
# set to 1, not higher
if cfg.strVersion == 'gpu':
strErrMsg = 'Stopping program. ' + \
'Cross-validation on GPU is currently not supported. ' + \
'Set varNumXval equal to 1 in csv file in order to continue. '
assert cfg.varNumXval == 1, strErrMsg
# For the GPU version, we need to set down the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# check whether we need to crossvalidate
if np.greater(cfg.varNumXval, 1):
cfg.lgcXval = True
elif np.equal(cfg.varNumXval, 1):
cfg.lgcXval = False
strErrMsg = 'Stopping program. ' + \
'Set numXval (number of crossvalidation folds) to 1 or higher'
assert np.greater_equal(cfg.varNumXval, 1), strErrMsg
# derive number of feature for fitting
if varRat is not None:
# since there will be a beta parameter estimate both for the center and
# the surround, we multiply by 2
varNumFtr = int(2*cfg.switchHrfSet)
else:
varNumFtr = cfg.switchHrfSet
# *************************************************************************
# *************************************************************************
# Load previous pRF fitting results
    print('---Path to prior results provided by user:')
print(objNspc.strPthPrior)
# Load the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [objNspc.strPthPrior + '_x_pos.nii.gz',
objNspc.strPthPrior + '_y_pos.nii.gz',
objNspc.strPthPrior + '_SD.nii.gz',
objNspc.strPthPrior + '_eccentricity.nii.gz']
lstPrmInt, objHdr, aryAff = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])
# Convert list to array
assert len(lstPrmInt) == 1
aryIntGssPrm = lstPrmInt[0]
del(lstPrmInt)
    # Some voxels were excluded because they did not have sufficient mean
    # and/or variance - exclude their initial parameters, too
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
# *************************************************************************
# *************************************************************************
# *** Sort voxels by polar angle/previous parameters
# Calculate the polar angles that were found in independent localiser
aryPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0])
# Calculate the unique polar angles that are expected from grid search
aryUnqPlrAng = np.linspace(0.0, 2*np.pi, objNspc.varNumOpt2,
endpoint=False)
    # Expected polar angle values range from 0 to 2*pi, while the
    # calculated angle values range from -pi to pi.
    # Thus, bring the empirical values from the range [-pi, pi] to [0, 2*pi)
aryPlrAng = (aryPlrAng + 2 * np.pi) % (2 * np.pi)
# For every empirically found polar angle get the index of the nearest
# theoretically expected polar angle, this is to offset small imprecisions
aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng)
# Make sure that the maximum distance from a found polar angle to a grid
# point is smaller than the distance between two neighbor grid points
assert np.max(aryDstPlrAng) < np.divide(2*np.pi, objNspc.varNumOpt2)
# Update unique polar angles such that it contains only the ones which
# were found in data
aryUnqPlrAng = aryUnqPlrAng[np.unique(aryUnqPlrAngInd)]
# Update indices
aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng)
# Get logical arrays that index voxels with particular polar angle
lstLgcUnqPlrAng = []
for indPlrAng in range(len(aryUnqPlrAng)):
lstLgcUnqPlrAng.append([aryUnqPlrAngInd == indPlrAng][0])
print('---Number of radial position options provided by user: ' +
str(objNspc.varNumOpt1))
print('---Number of angular position options provided by user: ' +
str(objNspc.varNumOpt2))
print('---Number of unique polar angles found in prior estimates: ' +
str(len(aryUnqPlrAng)))
print('---Maximum displacement in radial direction that is allowed: ' +
str(objNspc.varNumOpt3))
    print('---Fitted models are restricted to the stimulated area: ' +
str(objNspc.lgcRstrCentre))
# *************************************************************************
# *** Perform prf fitting
# Create array for collecting winner parameters
aryBstXpos = np.zeros((aryPlrAng.shape[0]))
aryBstYpos = np.zeros((aryPlrAng.shape[0]))
aryBstSd = np.zeros((aryPlrAng.shape[0]))
aryBstR2 = np.zeros((aryPlrAng.shape[0]))
aryBstBts = np.zeros((aryPlrAng.shape[0], varNumFtr))
if np.greater(cfg.varNumXval, 1):
aryBstR2Single = np.zeros((aryPlrAng.shape[0],
len(cfg.lstPathNiiFunc)))
# loop over all found instances of polar angle/previous parameters
for indPlrAng in range(len(aryUnqPlrAng)):
print('------Polar angle number ' + str(indPlrAng+1) + ' out of ' +
str(len(aryUnqPlrAng)))
# get the polar angle for the current voxel batch
varPlrAng = np.array(aryUnqPlrAng[indPlrAng])
# get logical array to index voxels with this particular polar angle
lgcUnqPlrAng = lstLgcUnqPlrAng[indPlrAng]
# get prior eccentricities for current voxel batch
vecPrrEcc = aryIntGssPrm[lgcUnqPlrAng, 3]
print('---------Number of voxels of this polar angle: ' +
str(np.sum(lgcUnqPlrAng)))
# *********************************************************************
# *********************************************************************
# *** Create time course models for this particular polar angle
# Vector with the radial position:
vecRad = np.linspace(0.0, cfg.varExtXmax, objNspc.varNumOpt1,
endpoint=True)
# Get all possible combinations on the grid, using matrix indexing ij
# of output
aryRad, aryTht = np.meshgrid(vecRad, varPlrAng, indexing='ij')
# Flatten arrays to be able to combine them with meshgrid
vecRad = aryRad.flatten()
vecTht = aryTht.flatten()
# Convert from polar to cartesian
vecX, vecY = map_pol_to_crt(vecTht, vecRad)
# Vector with standard deviations pRF models (in degree of vis angle):
vecPrfSd = np.linspace(cfg.varPrfStdMin, cfg.varPrfStdMax,
cfg.varNumPrfSizes, endpoint=True)
# Create model parameters
varNumMdls = len(vecX) * len(vecPrfSd)
aryMdlParams = np.zeros((varNumMdls, 3), dtype=np.float32)
varCntMdlPrms = 0
# Loop through x-positions:
for idxXY in range(0, len(vecX)):
# Loop through standard deviations (of Gaussian pRF models):
for idxSd in range(0, len(vecPrfSd)):
# Place index and parameters in array:
aryMdlParams[varCntMdlPrms, 0] = vecX[idxXY]
aryMdlParams[varCntMdlPrms, 1] = vecY[idxXY]
aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd]
# Increment parameter index:
varCntMdlPrms += 1
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryMdlParams[:, 0],
aryMdlParams[:, 1],
aryMdlParams[:, 2],
cfg.tplVslSpcSze,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryMdlParamsPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
if objNspc.lgcRstrCentre:
# Calculate the areas that were stimulated during the experiment
arySptExpInf = np.load(cfg.strSptExpInf)
arySptExpInf = np.rot90(arySptExpInf, k=3)
aryStimArea = np.sum(arySptExpInf, axis=-1).astype(np.bool)
# Get logical to exclude models with pRF centre outside stim area
lgcMdlInc = aryStimArea[aryMdlParamsPxl[:, 0].astype(np.int32),
aryMdlParamsPxl[:, 1].astype(np.int32)]
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
aryMdlParamsPxl = aryMdlParamsPxl[lgcMdlInc, :]
# Create model time courses
aryPrfTc = model_creation_opt(dicCnfg, aryMdlParamsPxl,
strPathHrf=strPathHrf, varRat=varRat,
lgcPrint=False)
# The model time courses will be preprocessed such that they are
# smoothed (temporally) with same factor as the data and that they will
# be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp,
lgcPrint=False)
# *********************************************************************
# *** Create logical to restrict model fitting in radial direction
if objNspc.varNumOpt3 is not None:
# Calculate eccentricity of currently tested model parameters
vecMdlEcc = np.sqrt(np.add(np.square(aryMdlParams[:, 0]),
np.square(aryMdlParams[:, 1])))
# Compare model eccentricity against prior eccentricity
vecPrrEccGrd, vecMdlEccGrd = np.meshgrid(vecPrrEcc, vecMdlEcc,
indexing='ij')
# Consider allowed eccentricity shift as specified by user
lgcRstr = np.logical_and(np.less_equal(vecMdlEccGrd,
np.add(vecPrrEccGrd,
objNspc.varNumOpt3)),
np.greater(vecMdlEccGrd,
np.subtract(vecPrrEccGrd,
objNspc.varNumOpt3)
)
)
else:
lgcRstr = np.ones((np.sum(lgcUnqPlrAng),
aryMdlParams.shape[0]), dtype=np.bool)
# *********************************************************************
    # *** Check that every voxel has at least one model being tried
# Is there at least 1 model for each voxel?
lgcMdlPerVxl = np.greater(np.sum(lgcRstr, axis=1), 0)
print('---------Number of voxels fitted: ' + str(np.sum(lgcMdlPerVxl)))
        # Those voxels for which no model would be tried, for example because
        # the pRF parameters estimated in the prior were outside the stimulated
        # area, are excluded from model fitting by setting their logical to False
lgcUnqPlrAng[lgcUnqPlrAng] = lgcMdlPerVxl
# We need to update the index table for restricting model fitting
lgcRstr = lgcRstr[lgcMdlPerVxl, :]
# *********************************************************************
# *** Find best model for voxels with this particular polar angle
# Only perform the fitting if there are voxels with models to optimize
if np.any(lgcUnqPlrAng):
# Empty list for results (parameters of best fitting pRF model):
lstPrfRes = [None] * cfg.varPar
# Empty list for processes:
lstPrcs = [None] * cfg.varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Put logical for model restriction in list
lstRst = np.array_split(lgcRstr, cfg.varPar)
del(lgcRstr)
# Create list with chunks of func data for parallel processes:
lstFunc = np.array_split(aryFunc[lgcUnqPlrAng, :], cfg.varPar)
# CPU version (using numpy or cython for pRF finding):
if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')):
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu,
args=(idxPrc,
lstFunc[idxPrc],
aryPrfTc,
aryMdlParams,
cfg.strVersion,
cfg.lgcXval,
cfg.varNumXval,
queOut),
kwargs={'lgcRstr':
lstRst[idxPrc],
'lgcPrint': False},
)
                    # Daemon (kills processes when exiting); note that
                    # multiprocessing expects the lowercase attribute name:
                    lstPrcs[idxPrc].daemon = True
# GPU version (using tensorflow for pRF finding):
elif cfg.strVersion == 'gpu':
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu,
args=(idxPrc,
aryMdlParams,
lstFunc[idxPrc],
aryPrfTc,
queOut),
kwargs={'lgcRstr':
lstRst[idxPrc],
'lgcPrint': False},
)
                    # Daemon (kills processes when exiting); note that
                    # multiprocessing expects the lowercase attribute name:
                    lstPrcs[idxPrc].daemon = True
# Start processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].start()
# Delete reference to list with function data (the data continues
# to exists in child process):
del(lstFunc)
# Collect results from queue:
for idxPrc in range(0, cfg.varPar):
lstPrfRes[idxPrc] = queOut.get(True)
# Join processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].join()
# *****************************************************************
# *****************************************************************
# *** Prepare pRF finding results for export
# Put output into correct order:
lstPrfRes = sorted(lstPrfRes)
# collect results from parallelization
aryBstTmpXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
aryBstTmpYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
aryBstTmpSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
aryBstTmpR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
aryBstTmpBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
if np.greater(cfg.varNumXval, 1):
aryTmpBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6,
inFormat='2D')
# Delete unneeded large objects:
del(lstPrfRes)
# *****************************************************************
# *****************************************************************
# Put findings for voxels with specific polar angle into ary with
# result for all voxels
aryBstXpos[lgcUnqPlrAng] = aryBstTmpXpos
aryBstYpos[lgcUnqPlrAng] = aryBstTmpYpos
aryBstSd[lgcUnqPlrAng] = aryBstTmpSd
aryBstR2[lgcUnqPlrAng] = aryBstTmpR2
aryBstBts[lgcUnqPlrAng, :] = aryBstTmpBts
if np.greater(cfg.varNumXval, 1):
aryBstR2Single[lgcUnqPlrAng, :] = aryTmpBstR2Single
# *****************************************************************
# *************************************************************************
# Calculate polar angle map:
aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryEcc = np.sqrt(np.add(np.square(aryBstXpos),
np.square(aryBstYpos)))
    # It is possible that after optimization the pRF has moved to location 0, 0.
    # In these cases, the polar angle parameter is arbitrary and will be
    # assigned either 0 or pi. To preserve smoothness of the map, assign the
    # initial polar angle value from the independent localiser
lgcMvdOrgn = np.logical_and(aryBstXpos == 0.0, aryBstYpos == 0.0)
lgcMvdOrgn = np.logical_and(lgcMvdOrgn, aryBstSd > 0)
aryIntPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0])
aryPlrAng[lgcMvdOrgn] = np.copy(aryIntPlrAng[lgcMvdOrgn])
# *************************************************************************
# *************************************************************************
# Export each map of best parameters as a 3D nii file
print('---------Exporting results')
# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
    # Concatenate all the best voxel maps
aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2,
aryPlrAng, aryEcc], axis=1)
# List with name suffices of output images:
lstNiiNames = ['_x_pos_brute',
'_y_pos_brute',
'_SD_brute',
'_R2_brute',
'_polar_angle_brute',
'_eccentricity_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
    # export map results as separate 3D nii files
export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='3D')
# *************************************************************************
# *************************************************************************
# Save beta parameter estimates for every feature:
# List with name suffices of output images:
lstNiiNames = ['_Betas_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# Save R2 maps from crossvalidation (saved for every run) as nii:
if np.greater(cfg.varNumXval, 1):
# truncate extremely negative R2 values
aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
# List with name suffices of output images:
lstNiiNames = ['_R2_single_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in
lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export R2 maps as a single 4D nii file
export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar,
tplNiiShp, aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.') |
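A self-contained sketch of the polar-angle snapping step used above; the wrap-around nearest-neighbour search is an assumption about what find_near_pol_ang does:

import numpy as np

aryPlrAng = np.arctan2([1.0, -1.0], [1.0, 1.0])    # empirical angles in [-pi, pi]
aryPlrAng = (aryPlrAng + 2 * np.pi) % (2 * np.pi)  # map into [0, 2*pi)
aryUnqPlrAng = np.linspace(0.0, 2 * np.pi, 8, endpoint=False)
aryDst = np.abs(aryUnqPlrAng[None, :] - aryPlrAng[:, None])
aryDst = np.minimum(aryDst, 2 * np.pi - aryDst)    # wrap-around distance
aryUnqPlrAngInd = np.argmin(aryDst, axis=1)        # -> array([1, 7])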
java | private static void appendCondition(DbMapping dbMapping, StringBuilder sql, Map<String, Object> values,
ResultSet rs) throws SQLException {
        // Concatenate the primary-key conditions
for (Map.Entry<String, String> entry : dbMapping.getTargetPk().entrySet()) {
String targetColumnName = entry.getKey();
String srcColumnName = entry.getValue();
if (srcColumnName == null) {
srcColumnName = targetColumnName;
}
sql.append(targetColumnName).append("=? AND ");
values.put(targetColumnName, rs.getObject(srcColumnName));
}
int len = sql.length();
sql.delete(len - 4, len);
} |
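To see what the trailing delete accomplishes, a small Python sketch of the fragment built for a two-column primary key (column names hypothetical):

sql = ''
for col in ['id', 'tenant_id']:   # target primary-key columns
    sql += col + '=? AND '
sql = sql[:-4]                    # mirrors sql.delete(len - 4, len)
# -> 'id=? AND tenant_id=? '  (the dangling 'AND ' is trimmed)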
java | public synchronized Entry next()
{
if (tc.isEntryEnabled())
SibTr.entry(tc, "next");
// can only do anything if the cursor is still pointing in to a list
checkEntryParent();
Entry nextEntry = null;
synchronized(parentList)
{
//get the next entry in the list
nextEntry = getNextEntry();
//if the next entry is null
if(nextEntry == null)
{
//then hopefully we're at the end of the list
if(current == parentList.last)
{
//so move the cursor to the bottom of the list,
//not pointing to any actual entry
moveToBottom();
}
else if(!atBottom)
{
//it should not be possible for the next entry to be null but the current
//not be the last one in the list or already at the bottom
SIErrorException e = new SIErrorException(
nls.getFormattedMessage(
"INTERNAL_MESSAGING_ERROR_CWSIP0001",
new Object[] { "Cursor", "1:160:1.15" },
null));
FFDCFilter.processException(
e,
"com.ibm.ws.sib.processor.utils.linkedlist.Cursor.next",
"1:166:1.15",
this);
SibTr.exception(tc, e);
if (tc.isEntryEnabled())
SibTr.exit(tc, "next", e);
throw e;
}
}
else
{
//move the cursor to the next entry
moveCursor(nextEntry);
}
}
if (tc.isEntryEnabled())
SibTr.exit(tc, "next", nextEntry);
return nextEntry;
} |
python | def rmd_options_to_metadata(options):
"""
    Parse rmd options and return the chunk language plus a metadata dictionary
    :param options: the options string of an rmd code chunk
    :return: tuple of (language, metadata dictionary)
"""
options = re.split(r'\s|,', options, 1)
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(' ,')
others = others.lstrip(' ,')
chunk_options = parse_rmd_options(others)
language = 'R' if language == 'r' else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == '':
metadata['name'] = value
continue
else:
if update_metadata_from_rmd_options(name, value, metadata):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if ('active' in metadata or metadata.get('run_control', {}).get('frozen') is True) and 'eval' in metadata:
del metadata['eval']
return metadata.get('language') or language, metadata |
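A hedged input/output sketch; the exact metadata keys depend on update_metadata_from_rmd_options and try_eval_metadata, so the result below is only indicative:

# rmd_options_to_metadata('r chunk_1, include=FALSE, fig.width=7')
#   returns roughly: ('R', {'name': 'chunk_1', ..., 'fig.width': 7})
# R logicals such as FALSE are mapped to Python False via _py_logical_values.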
python | def get_absolute_path(cls, roots, path):
"""Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
"""
for root in roots:
abspath = os.path.abspath(os.path.join(root, path))
if abspath.startswith(root) and os.path.exists(abspath):
return abspath
    # XXX TODO: none of the roots contains the path; a sentinel string is
    # returned instead of raising an error
    return 'file-not-found' |
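The startswith guard is what blocks path traversal; a small sketch with a hypothetical root:

import os.path

root = '/srv/static'
os.path.abspath(os.path.join(root, 'css/app.css'))    # '/srv/static/css/app.css' -> served
os.path.abspath(os.path.join(root, '../etc/passwd'))  # '/srv/etc/passwd' -> fails the check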
java | public String print(
TemporalAmount threeten,
TextWidth width
) {
return this.print(Duration.from(threeten), width);
} |
java | public void importArtifacts(SessionProvider sp, File folder) throws RepositoryException, FileNotFoundException
{
if (!folder.exists())
throw new FileNotFoundException("Source folder expected");
try
{
this.listErrorPom.clear();
importFilesToJCR(sp, folder);
}
catch (Exception e)
{
LOG.error("Exception during uploading local folder to JCR", e);
}
} |
python | def compose(*fns):
"""Return the function composed with the given functions
:param fns: functions
:returns: a function
>>> add2 = lambda x: x+2
>>> mult3 = lambda x: x*3
>>> new_fn = compose(add2, mult3)
>>> new_fn(2)
8
.. note:: compose(fn1, fn2, fn3) is the same as fn1(fn2(fn3))
which means that the last function provided is the first to be applied.
"""
    from functools import reduce  # reduce moved to functools in Python 3
    def compose2(f, g):
        return lambda x: f(g(x))
    return reduce(compose2, fns) |
python | def add_to_loaded_modules(self, modules):
"""
Manually add in `modules` to be tracked by the module manager.
`modules` may be a single object or an iterable.
"""
modules = util.return_set(modules)
for module in modules:
if not isinstance(module, str):
module = module.__name__
self.loaded_modules.add(module) |
java | @Override
public MoreObjects.ToStringHelper toStringHelper() {
return toStringHelper(this).add("from", from).add("to", to).add("operation", operation);
} |
java | protected boolean isForbidden(String scheme, String host, int port, boolean openNonPrivPorts)
{
// Check port
Integer p = new Integer(port);
if (port > 0 && !_allowedConnectPorts.contains(p))
{
if (!openNonPrivPorts || port <= 1024)
return true;
}
// Must be a scheme that can be proxied.
if (scheme == null || !_ProxySchemes.containsKey(scheme))
return true;
// Must be in any defined white list
if (_proxyHostsWhiteList != null && !_proxyHostsWhiteList.contains(host))
return true;
// Must not be in any defined black list
if (_proxyHostsBlackList != null && _proxyHostsBlackList.contains(host))
return true;
return false;
} |
python | def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] |
java | public WebhookDefinition withFilters(WebhookFilterRule... filters) {
if (this.filters == null) {
setFilters(new java.util.ArrayList<WebhookFilterRule>(filters.length));
}
for (WebhookFilterRule ele : filters) {
this.filters.add(ele);
}
return this;
} |
java | private void checkHasDistinctElements(Collection<T> coll) {
// keep track of the already seen elements
NonIterableSet<T> seenElems = new NonIterableSet<T>(coll.size());
for(T elem : coll) {
if(!seenElems.add(elem)) {
throw new IllegalArgumentException("coll has duplicate element:" + elem);
}
}
} |
java | private static Object initializeEmbedded(EmbeddedMetadata embeddedMetadata, Object target) {
try {
ConstructorMetadata constructorMetadata = embeddedMetadata.getConstructorMetadata();
Object embeddedObject = null;
if (constructorMetadata.isClassicConstructionStrategy()) {
embeddedObject = embeddedMetadata.getReadMethod().invoke(target);
}
if (embeddedObject == null) {
embeddedObject = constructorMetadata.getConstructorMethodHandle().invoke();
}
return embeddedObject;
} catch (Throwable t) {
throw new EntityManagerException(t);
}
} |
java | @Override
public T borrowObject() throws Exception {
T redisClient = super.borrowObject();
if (redisClient != null) {
activeClients.add(redisClient);
}
return redisClient;
} |
java | public static String href(String param, String value) {
HttpServletRequest req = ContextUtils.getRequest();
String pagePath = (String) req.getAttribute(SiteController.PAGE_PATH);
if (pagePath == null)
return "javascript:void(0)";
StaticUrlProvider urlProvider = (StaticUrlProvider) req
.getAttribute(StaticUrlProvider.REQ_ATTR_KEY);
if (urlProvider == null)
return "javascript:void(0)";
String url = urlProvider.getUrl(param, value);
if (url == null)
return pagePath;
if (url.startsWith("/"))
return pagePath + url.substring(1);
return pagePath + url;
} |
python | def touch(path):
""" Creates a file located at the given path. """
    # open() in append mode creates the file when missing without truncating
    # it; utime(path, None) stamps atime/mtime with the current time.
    with open(path, 'a'):
        os.utime(path, None) |
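Usage sketch; the path is illustrative:

touch('/tmp/example.marker')   # creates the file if missing
touch('/tmp/example.marker')   # second call only re-stamps atime/mtime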
java | public WebApp getWebApp()
{
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) {
        logger.logp(Level.FINE, CLASS_NAME, "getWebApp", "webapp -> " + _webApp + ", this -> " + this);
}
return _webApp;
} |
java | public final EObject ruleXExpressionInClosure() throws RecognitionException {
EObject current = null;
Token otherlv_2=null;
EObject lv_expressions_1_0 = null;
enterRule();
try {
// InternalXbaseWithAnnotations.g:3141:2: ( ( () ( ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )? )* ) )
// InternalXbaseWithAnnotations.g:3142:2: ( () ( ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )? )* )
{
// InternalXbaseWithAnnotations.g:3142:2: ( () ( ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )? )* )
// InternalXbaseWithAnnotations.g:3143:3: () ( ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )? )*
{
// InternalXbaseWithAnnotations.g:3143:3: ()
// InternalXbaseWithAnnotations.g:3144:4:
{
if ( state.backtracking==0 ) {
current = forceCreateModelElement(
grammarAccess.getXExpressionInClosureAccess().getXBlockExpressionAction_0(),
current);
}
}
// InternalXbaseWithAnnotations.g:3150:3: ( ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )? )*
loop56:
do {
int alt56=2;
int LA56_0 = input.LA(1);
if ( ((LA56_0>=RULE_STRING && LA56_0<=RULE_ID)||LA56_0==14||(LA56_0>=18 && LA56_0<=19)||LA56_0==26||(LA56_0>=42 && LA56_0<=43)||LA56_0==48||LA56_0==55||LA56_0==59||LA56_0==61||(LA56_0>=65 && LA56_0<=82)||LA56_0==84) ) {
alt56=1;
}
switch (alt56) {
case 1 :
// InternalXbaseWithAnnotations.g:3151:4: ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) ) (otherlv_2= ';' )?
{
// InternalXbaseWithAnnotations.g:3151:4: ( (lv_expressions_1_0= ruleXExpressionOrVarDeclaration ) )
// InternalXbaseWithAnnotations.g:3152:5: (lv_expressions_1_0= ruleXExpressionOrVarDeclaration )
{
// InternalXbaseWithAnnotations.g:3152:5: (lv_expressions_1_0= ruleXExpressionOrVarDeclaration )
// InternalXbaseWithAnnotations.g:3153:6: lv_expressions_1_0= ruleXExpressionOrVarDeclaration
{
if ( state.backtracking==0 ) {
newCompositeNode(grammarAccess.getXExpressionInClosureAccess().getExpressionsXExpressionOrVarDeclarationParserRuleCall_1_0_0());
}
pushFollow(FOLLOW_46);
lv_expressions_1_0=ruleXExpressionOrVarDeclaration();
state._fsp--;
if (state.failed) return current;
if ( state.backtracking==0 ) {
if (current==null) {
current = createModelElementForParent(grammarAccess.getXExpressionInClosureRule());
}
add(
current,
"expressions",
lv_expressions_1_0,
"org.eclipse.xtext.xbase.Xbase.XExpressionOrVarDeclaration");
afterParserOrEnumRuleCall();
}
}
}
// InternalXbaseWithAnnotations.g:3170:4: (otherlv_2= ';' )?
int alt55=2;
int LA55_0 = input.LA(1);
if ( (LA55_0==58) ) {
alt55=1;
}
switch (alt55) {
case 1 :
// InternalXbaseWithAnnotations.g:3171:5: otherlv_2= ';'
{
otherlv_2=(Token)match(input,58,FOLLOW_47); if (state.failed) return current;
if ( state.backtracking==0 ) {
newLeafNode(otherlv_2, grammarAccess.getXExpressionInClosureAccess().getSemicolonKeyword_1_1());
}
}
break;
}
}
break;
default :
break loop56;
}
} while (true);
}
}
if ( state.backtracking==0 ) {
leaveRule();
}
}
catch (RecognitionException re) {
recover(input,re);
appendSkippedTokens();
}
finally {
}
return current;
} |
python | def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]] |
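A hedged usage sketch; coco stands for a hypothetical instance of this class with a populated self.imgs index:

imgs = coco.loadImgs(397133)            # int id -> single-element list
imgs = coco.loadImgs([397133, 37777])   # list of ids -> one object per id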