language (string, 2 classes) | func_code_string (string, 63 – 466k chars) |
---|---|
python | def splitIntoLines(self, maxWidth, maxHeight, splitted=False):
    """
    Split text into lines and calculate X positions. If we need more
    space in height than available we return the rest of the text
    """
    self.lines = []
    self.height = 0
    self.maxWidth = self.width = maxWidth
    self.maxHeight = maxHeight
    boxStack = []
    style = self.style
    x = 0
    # Start with indent in first line of text
    if not splitted:
        x = style["textIndent"]
    lenText = len(self)
    pos = 0
    while pos < lenText:
        # Reset values for new line
        posBegin = pos
        line = Line(style)
        # Update boxes for next line
        for box in copy.copy(boxStack):
            box["x"] = 0
            line.append(BoxBegin(box))
        while pos < lenText:
            # Get fragment, its width and set X
            frag = self[pos]
            fragWidth = frag["width"]
            frag["x"] = x
            pos += 1
            # Keep in mind boxes for next lines
            if isinstance(frag, BoxBegin):
                boxStack.append(frag)
            elif isinstance(frag, BoxEnd):
                boxStack.pop()
            # If space or linebreak handle special way
            if frag.isSoft:
                if frag.isLF:
                    line.append(frag)
                    break
                # First element of line should not be a space
                if x == 0:
                    continue
                # Keep in mind last possible line break
            # The elements exceed the current line
            elif fragWidth + x > maxWidth:
                break
            # Add fragment to line and update x
            x += fragWidth
            line.append(frag)
        # Remove trailing white spaces
        while line and line[-1].name in ("space", "br"):
            line.pop()
        # Add line to list
        line.dumpFragments()
        # if line:
        self.height += line.doLayout(self.width)
        self.lines.append(line)
        # If not enough space for current line force to split
        if self.height > maxHeight:
            return posBegin
        # Reset variables
        x = 0
    # Apply alignment
    self.lines[-1].isLast = True
    for line in self.lines:
        line.doAlignment(maxWidth, style["textAlign"])
    return None |
java | public boolean insideRange(Calendar startDate,
Calendar endDate) {
// make a copy of the start time so that it is safe to modify it without
// affecting the input parameter
Calendar mutableStartDate = (Calendar)(startDate.clone());
return isInRange(mutableStartDate, endDate);
} |
python | def reject(self):
    """User rejected the rectangle."""
    self.canvas.unsetMapTool(self.tool)
    if self.previous_map_tool != self.tool:
        self.canvas.setMapTool(self.previous_map_tool)
    self.tool.reset()
    self.extent_selector_closed.emit()
    super(ExtentSelectorDialog, self).reject() |
java | public void marshall(RemoveTagsRequest removeTagsRequest, ProtocolMarshaller protocolMarshaller) {
if (removeTagsRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(removeTagsRequest.getResourceId(), RESOURCEID_BINDING);
protocolMarshaller.marshall(removeTagsRequest.getTagsList(), TAGSLIST_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | @XmlElementDecl(namespace = "http://www.ibm.com/websphere/wim", name = "cn")
public JAXBElement<String> createCn(String value) {
return new JAXBElement<String>(_Cn_QNAME, String.class, null, value);
} |
python | def confirmMapIdentity(self, subject, vendorSpecific=None):
    """See Also: confirmMapIdentityResponse()
    Args:
        subject:
        vendorSpecific:
    Returns:
    """
    response = self.confirmMapIdentityResponse(subject, vendorSpecific)
    return self._read_boolean_response(response) |
java | public void insert_us(final DeviceData deviceData, final int[] argin) {
final short[] val = new short[argin.length];
for (int i = 0 ; i<argin.length ; i++) {
val[i] = (short) (argin[i] & 0xFFFF);
}
DevVarUShortArrayHelper.insert(deviceData.getAny(), val);
} |
python | def reset(self, align=8, clip=80, code=False, derive=False,
          detail=0, ignored=True, infer=False, limit=100, stats=0,
          stream=None):
    '''Reset options, state, etc.
    The available options and default values are:
        *align=8*       -- size alignment
        *clip=80*       -- clip repr() strings
        *code=False*    -- incl. (byte)code size
        *derive=False*  -- derive from super type
        *detail=0*      -- Asized refs level
        *ignored=True*  -- ignore certain types
        *infer=False*   -- try to infer types
        *limit=100*     -- recursion limit
        *stats=0.0*     -- print statistics, see function **asizeof**
        *stream=None*   -- output stream for printing
    See function **asizeof** for a description of the options.
    '''
    # options
    self._align_ = align
    self._clip_ = clip
    self._code_ = code
    self._derive_ = derive
    self._detail_ = detail  # for Asized only
    self._infer_ = infer
    self._limit_ = limit
    self._stats_ = stats
    self._stream = stream
    if ignored:
        self._ign_d = _kind_ignored
    else:
        self._ign_d = None
    # clear state
    self._clear()
    self.set(align=align, code=code, stats=stats) |
python | def init(self):
    """
    Performs a clone or a fetch, depending on whether the repository has
    been previously cloned or not.
    """
    if os.path.isdir(self.path):
        self.fetch()
    else:
        self.clone() |
java | public Set<Page> getInlinks()
{
Session session = wiki.__getHibernateSession();
session.beginTransaction();
session.buildLockRequest(LockOptions.NONE).lock(hibernatePage);
// Have to copy links here since getPage later will close the session.
Set<Integer> pageIDs = new UnmodifiableArraySet<Integer>(hibernatePage.getInLinks());
session.getTransaction().commit();
Set<Page> pages = new HashSet<Page>();
for (int pageID : pageIDs) {
try {
pages.add(wiki.getPage(pageID));
}
catch (WikiApiException e) {
// Silently ignore if a page could not be found
// There may be inlinks that do not come from an existing page.
continue;
}
}
return pages;
} |
java | private static String generateStorageObjectName(String topologyName, String filename) {
return String.format("%s/%s", topologyName, filename);
} |
java | protected int createNonPersistentAutomaticTimers(String appName, String moduleName, List<AutomaticTimerBean> timerBeans) {
final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
if (isTraceOn && tc.isEntryEnabled())
Tr.entry(tc, "createNonPersistentAutomaticTimers: " + moduleName);
int numCreated = 0;
for (AutomaticTimerBean timerBean : timerBeans) {
if (timerBean.getNumNonPersistentTimers() != 0) {
for (TimerMethodData timerMethod : timerBean.getMethods()) {
for (TimerMethodData.AutomaticTimer timer : timerMethod.getAutomaticTimers()) {
if (!timer.isPersistent()) {
if (isTraceOn && tc.isDebugEnabled())
Tr.debug(tc, "creating non-persistent automatic timer " + timer);
createNonPersistentAutomaticTimer(timerBean, timer, timerMethod);
numCreated++;
}
}
}
}
}
if (isTraceOn && tc.isEntryEnabled())
Tr.exit(tc, "createNonPersistentAutomaticTimers: " + numCreated);
return numCreated;
} |
java | public static void setDefaultValues(List<ProtocolConfiguration> protocol_configs, List<Protocol> protocols,
StackType ip_version) throws Exception {
InetAddress default_ip_address=Util.getNonLoopbackAddress();
if(default_ip_address == null) {
log.warn(Util.getMessage("OnlyLoopbackFound"), ip_version);
default_ip_address=Util.getLocalhost(ip_version);
}
for(int i=0; i < protocol_configs.size(); i++) {
ProtocolConfiguration protocol_config=protocol_configs.get(i);
Protocol protocol=protocols.get(i);
String protocolName=protocol.getName();
// regenerate the Properties which were destroyed during basic property processing
Map<String,String> properties=new HashMap<>(protocol_config.getProperties());
Method[] methods=Util.getAllDeclaredMethodsWithAnnotations(protocol.getClass(), Property.class);
for(int j=0; j < methods.length; j++) {
if(isSetPropertyMethod(methods[j], protocol.getClass())) {
String propertyName=PropertyHelper.getPropertyName(methods[j]);
Object propertyValue=getValueFromProtocol(protocol, propertyName);
if(propertyValue == null) { // if propertyValue is null, check if there is a default value we can use
Property annotation=methods[j].getAnnotation(Property.class);
// get the default value for the method- check for InetAddress types
String defaultValue=null;
if(InetAddressInfo.isInetAddressRelated(methods[j])) {
defaultValue=ip_version == StackType.IPv4? annotation.defaultValueIPv4() : annotation.defaultValueIPv6();
if(defaultValue != null && !defaultValue.isEmpty()) {
Object converted=null;
try {
if(defaultValue.equalsIgnoreCase(Global.NON_LOOPBACK_ADDRESS))
converted=default_ip_address;
else
converted=PropertyHelper.getConvertedValue(protocol, methods[j], properties, defaultValue, true);
methods[j].invoke(protocol, converted);
}
catch(Exception e) {
throw new Exception("default could not be assigned for method " + propertyName + " in "
+ protocolName + " with default " + defaultValue, e);
}
log.debug("set property " + protocolName + "." + propertyName + " to default value " + converted);
}
}
}
}
}
//traverse class hierarchy and find all annotated fields and add them to the list if annotated
Field[] fields=Util.getAllDeclaredFieldsWithAnnotations(protocol.getClass(), Property.class);
for(int j=0; j < fields.length; j++) {
String propertyName=PropertyHelper.getPropertyName(fields[j], properties);
Object propertyValue=getValueFromProtocol(protocol, fields[j]);
if(propertyValue == null) {
// add to collection of @Properties with no user specified value
Property annotation=fields[j].getAnnotation(Property.class);
// get the default value for the field - check for InetAddress types
String defaultValue=null;
if(InetAddressInfo.isInetAddressRelated(fields[j])) {
defaultValue=ip_version == StackType.IPv4? annotation.defaultValueIPv4() : annotation.defaultValueIPv6();
if(defaultValue != null && !defaultValue.isEmpty()) {
// condition for invoking converter
if(defaultValue != null || !PropertyHelper.usesDefaultConverter(fields[j])) {
Object converted=null;
try {
if(defaultValue.equalsIgnoreCase(Global.NON_LOOPBACK_ADDRESS))
converted=default_ip_address;
else
converted=PropertyHelper.getConvertedValue(protocol, fields[j], properties, defaultValue, true);
if(converted != null)
Util.setField(fields[j], protocol, converted);
}
catch(Exception e) {
throw new Exception("default could not be assigned for field " + propertyName + " in "
+ protocolName + " with default value " + defaultValue, e);
}
log.debug("set property " + protocolName + "." + propertyName + " to default value " + converted);
}
}
}
}
}
}
} |
java | @Override
public void validate() throws OpsGenieClientValidationException {
super.validate();
if (username == null && id == null)
throw OpsGenieClientValidationException.missingMultipleMandatoryProperty(OpsGenieClientConstants.API.USERNAME, OpsGenieClientConstants.API.ID);
} |
java | public static String formatLongitude(final Longitude longitude,
final PointLocationFormatType formatType)
throws FormatterException
{
if (longitude == null)
{
throw new FormatterException("No point location provided");
}
if (formatType == null)
{
throw new FormatterException("No format type provided");
}
final String formatted;
switch (formatType)
{
case HUMAN_LONG:
formatted = longitude.toString();
break;
case HUMAN_MEDIUM:
formatted = formatLongitudeHumanMedium(longitude);
break;
case LONG:
formatted = formatLongitudeLong(longitude);
break;
case MEDIUM:
formatted = formatLongitudeMedium(longitude);
break;
case SHORT:
formatted = formatLongitudeShort(longitude);
break;
case DECIMAL:
formatted = formatLongitudeWithDecimals(longitude);
break;
default:
throw new FormatterException("Unsupported format type");
}
return formatted;
} |
java | public void fill(List<CmsSitemapEntryBean> entries) {
clear();
for (CmsSitemapEntryBean entry : entries) {
CmsLazyTreeItem item = createItem(entry);
addWidgetToList(item);
}
m_initialized = true;
onContentChange();
} |
java | private boolean isEditorCompatible(A_CmsXmlContentValue schemaType) throws CmsXmlException {
boolean result = false;
I_CmsXmlContentHandler contentHandler = schemaType.getContentDefinition().getContentHandler();
// We don't care about the old editor for the 'inheritable' widget configuration,
// so we're using the old getWidget method here
I_CmsWidget widget = contentHandler.getWidget(schemaType);
result = (widget == null) || (widget instanceof I_CmsADEWidget);
return result;
} |
java | @SuppressWarnings("nls")
private void load() {
URL url = Version.class.getResource("version.properties");
if (url == null) {
this.versionString = "Unknown";
this.versionDate = new Date().toString();
} else {
allProperties = new Properties();
try(InputStream is = url.openStream()){
allProperties.load(is);
this.versionString = allProperties.getProperty("version", "Unknown");
this.versionDate = allProperties.getProperty("date", new Date().toString());
this.vcsDescribe = allProperties.getProperty("git.commit.id.describe", "Non-Git Build");
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} |
python | def dumps(self):
    r"""Convert the container to a string in latex syntax."""
    content = self.dumps_content()
    if not content.strip() and self.omit_if_empty:
        return ''
    string = ''
    start = Command(self.latex_name, arguments=self.arguments,
                    options=self.options)
    string += start.dumps() + '{ \n'
    if content != '':
        string += content + '\n}'
    else:
        string += '}'
    return string |
java | static PrefsTransform getLanguageTransform(TypeName type) {
String typeName = type.toString();
if (Integer.class.getCanonicalName().equals(typeName)) {
return new IntegerPrefsTransform(true);
}
if (Boolean.class.getCanonicalName().equals(typeName)) {
return new BooleanPrefsTransform(true);
}
if (Long.class.getCanonicalName().equals(typeName)) {
return new LongPrefsTransform(true);
}
if (Double.class.getCanonicalName().equals(typeName)) {
return new DoublePrefsTransform(true);
}
if (Float.class.getCanonicalName().equals(typeName)) {
return new FloatPrefsTransform(true);
}
if (Short.class.getCanonicalName().equals(typeName)) {
return new ShortPrefsTransform(true);
}
if (Byte.class.getCanonicalName().equals(typeName)) {
return new BytePrefsTransform(true);
}
if (Character.class.getCanonicalName().equals(typeName)) {
return new CharacterPrefsTransform(true);
}
if (String.class.getCanonicalName().equals(typeName)) {
return new StringPrefsTransform();
}
return null;
} |
python | def load_state_machine_from_path(base_path, state_machine_id=None):
    """Loads a state machine from the given path
    :param base_path: An optional base path for the state machine.
    :return: a tuple of the loaded container state, the version of the state and the creation time
    :raises ValueError: if the provided path does not contain a valid state machine
    """
    logger.debug("Loading state machine from path {0}...".format(base_path))
    state_machine_file_path = os.path.join(base_path, STATEMACHINE_FILE)
    state_machine_file_path_old = os.path.join(base_path, STATEMACHINE_FILE_OLD)
    # was the root state specified as state machine base_path to load from?
    if not os.path.exists(state_machine_file_path) and not os.path.exists(state_machine_file_path_old):
        # catch the case that a state machine root file is handed
        if os.path.exists(base_path) and os.path.isfile(base_path):
            base_path = os.path.dirname(base_path)
            state_machine_file_path = os.path.join(base_path, STATEMACHINE_FILE)
            state_machine_file_path_old = os.path.join(base_path, STATEMACHINE_FILE_OLD)
        if not os.path.exists(state_machine_file_path) and not os.path.exists(state_machine_file_path_old):
            raise ValueError("Provided path doesn't contain a valid state machine: {0}".format(base_path))
    state_machine_dict = storage_utils.load_objects_from_json(state_machine_file_path)
    if 'used_rafcon_version' in state_machine_dict:
        previously_used_rafcon_version = StrictVersion(state_machine_dict['used_rafcon_version']).version
        active_rafcon_version = StrictVersion(rafcon.__version__).version
        rafcon_newer_than_sm_version = "You are trying to load a state machine that was stored with an older " \
                                       "version of RAFCON ({0}) than the one you are using ({1}).".format(
                                           state_machine_dict['used_rafcon_version'], rafcon.__version__)
        rafcon_older_than_sm_version = "You are trying to load a state machine that was stored with a newer " \
                                       "version of RAFCON ({0}) than the one you are using ({1}).".format(
                                           state_machine_dict['used_rafcon_version'], rafcon.__version__)
        note_about_possible_incompatibility = "The state machine will be loaded with no guarantee of success."
        if active_rafcon_version[0] > previously_used_rafcon_version[0]:
            # this is the default case
            # for a list of breaking changes please see: doc/breaking_changes.rst
            # logger.warning(rafcon_newer_than_sm_version)
            # logger.warning(note_about_possible_incompatibility)
            pass
        if active_rafcon_version[0] == previously_used_rafcon_version[0]:
            if active_rafcon_version[1] > previously_used_rafcon_version[1]:
                # this is the default case
                # for a list of breaking changes please see: doc/breaking_changes.rst
                # logger.info(rafcon_newer_than_sm_version)
                # logger.info(note_about_possible_incompatibility)
                pass
            elif active_rafcon_version[1] == previously_used_rafcon_version[1]:
                # Major and minor version of RAFCON and the state machine match
                # It should be safe to load the state machine, as the patch level does not change the format
                pass
            else:
                logger.warning(rafcon_older_than_sm_version)
                logger.warning(note_about_possible_incompatibility)
        else:
            logger.warning(rafcon_older_than_sm_version)
            logger.warning(note_about_possible_incompatibility)
    state_machine = StateMachine.from_dict(state_machine_dict, state_machine_id)
    if "root_state_storage_id" not in state_machine_dict:
        root_state_storage_id = state_machine_dict['root_state_id']
        state_machine.supports_saving_state_names = False
    else:
        root_state_storage_id = state_machine_dict['root_state_storage_id']
    root_state_path = os.path.join(base_path, root_state_storage_id)
    state_machine.file_system_path = base_path
    dirty_states = []
    state_machine.root_state = load_state_recursively(parent=state_machine, state_path=root_state_path,
                                                      dirty_states=dirty_states)
    if len(dirty_states) > 0:
        state_machine.marked_dirty = True
    else:
        state_machine.marked_dirty = False
    hierarchy_level = 0
    number_of_states, hierarchy_level = state_machine.root_state.get_states_statistics(hierarchy_level)
    logger.debug("Loaded state machine ({1}) has {0} states. (Max hierarchy level {2})".format(
        number_of_states, base_path, hierarchy_level))
    logger.debug("Loaded state machine ({1}) has {0} transitions.".format(
        state_machine.root_state.get_number_of_transitions(), base_path))
    return state_machine |
python | def contains_group(store, path=None):
    """Return True if the store contains a group at the given logical path."""
    path = normalize_storage_path(path)
    prefix = _path_to_prefix(path)
    key = prefix + group_meta_key
    return key in store |
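A minimal usage sketch for the function above, assuming the zarr-v2 convention where `group_meta_key` is `'.zgroup'` and `_path_to_prefix('foo/bar')` yields `'foo/bar/'` (store contents are illustrative):

    store = {'foo/bar/.zgroup': b'{"zarr_format": 2}'}  # a plain dict acting as the store
    contains_group(store, 'foo/bar')   # True: group metadata exists under that prefix
    contains_group(store, 'foo/baz')   # False: no group metadata at that path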
java | public <T extends IGeneratorParameter<S>, S> S get(Class<T> paramType) {
return getParameter(paramType).getValue();
} |
python | def findFirst(self, tableClass, comparison=None,
              offset=None, sort=None, default=None):
    """
    Usage::
        s.findFirst(tableClass [, query arguments except 'limit'])
    Example::
        class YourItemType(Item):
            a = integer()
            b = text()
            c = integer()
        ...
        it = s.findFirst(YourItemType,
                         AND(YourItemType.a == 1,
                             YourItemType.b == u'2'),
                         sort=YourItemType.c.descending)
    Search for an item with columns in the database that match the passed
    comparison, offset and sort, returning the first match if one is found,
    or the passed default (None if none is passed) if one is not found.
    """
    limit = 1
    for item in self.query(tableClass, comparison, limit, offset, sort):
        return item
    return default |
python | def ascii2h5(dat_fname, h5_fname):
    """
    Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
    the HDF5 format.
    Args:
        dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
        h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.
    """
    table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')
    filter_kwargs = dict(
        chunks=True,
        compression='gzip',
        compression_opts=3)
    # Filter out pixels with all zeros
    idx = ~np.all(table[:,2:32] < 1.e-5, axis=1)
    with h5py.File(h5_fname, 'w') as f:
        d = np.arange(0., 4.351, 0.15).astype('f4')
        dset = f.create_dataset('dists', data=d, **filter_kwargs)
        dset.attrs['description'] = 'Distances at which extinction is measured'
        dset.attrs['units'] = 'kpc'
        dset = f.create_dataset('pix_lb', data=table[idx,0:2], **filter_kwargs)
        dset.attrs['description'] = 'Galactic (l, b) of each pixel'
        dset.attrs['units'] = 'deg'
        dset = f.create_dataset('A_r', data=table[idx,2:32], **filter_kwargs)
        dset.attrs['description'] = 'Extinction'
        dset.attrs['shape'] = '(pixel, distance)'
        dset.attrs['band'] = 'r'
        dset.attrs['units'] = 'mag'
        dset = f.create_dataset('A_r_err', data=table[idx,32:], **filter_kwargs)
        dset.attrs['description'] = 'Gaussian uncertainty in extinction'
        dset.attrs['shape'] = '(pixel, distance)'
        dset.attrs['band'] = 'r'
        dset.attrs['units'] = 'mag' |
java | public void setFailed(java.util.Collection<BatchResultErrorEntry> failed) {
if (failed == null) {
this.failed = null;
return;
}
this.failed = new com.amazonaws.internal.SdkInternalList<BatchResultErrorEntry>(failed);
} |
java | public JcsegServer registerHandler()
{
String basePath = this.getClass().getPackage().getName()+".controller";
AbstractRouter router = new DynamicRestRouter(basePath, MainController.class);
router.addMapping("/extractor/keywords", KeywordsController.class);
router.addMapping("/extractor/keyphrase", KeyphraseController.class);
router.addMapping("/extractor/sentence", SentenceController.class);
router.addMapping("/extractor/summary", SummaryController.class);
router.addMapping("/tokenizer/default", TokenizerController.class);
/*
* the rest of path and dynamic rest checking will handler it
*/
//router.addMapping("/tokenizer/default", TokenizerController.class);
/*
* prepare standard handler
*/
StandardHandler stdHandler = new StandardHandler(config, resourcePool, router);
/*
* prepare the resource handler
*/
JcsegResourceHandler resourceHandler = new JcsegResourceHandler();
/*
* i am going to rewrite the path to handler mapping mechanism
* check the Router handler for more info
*/
GzipHandler gzipHandler = new GzipHandler();
HandlerList handlers = new HandlerList();
handlers.setHandlers(new Handler[]{stdHandler, resourceHandler});
gzipHandler.setHandler(handlers);
server.setHandler(gzipHandler);
return this;
} |
java | public int appendCount(TimeUnit unit, boolean omitCount,
boolean useDigitPrefix,
int count, int cv, boolean useSep,
String name, boolean last, StringBuffer sb) {
if (cv == ECountVariant.HALF_FRACTION && dr.halves == null) {
cv = ECountVariant.INTEGER;
}
if (!omitCount && useDigitPrefix && dr.digitPrefix != null) {
sb.append(dr.digitPrefix);
}
int index = unit.ordinal();
switch (cv) {
case ECountVariant.INTEGER: {
if (!omitCount) {
appendInteger(count/1000, 1, 10, sb);
}
} break;
case ECountVariant.INTEGER_CUSTOM: {
int val = count / 1000;
// only custom names we have for now
if (unit == TimeUnit.MINUTE &&
(dr.fiveMinutes != null || dr.fifteenMinutes != null)) {
if (val != 0 && val % 5 == 0) {
if (dr.fifteenMinutes != null && (val == 15 || val == 45)) {
val = val == 15 ? 1 : 3;
if (!omitCount) appendInteger(val, 1, 10, sb);
name = dr.fifteenMinutes;
index = 8; // hack
break;
}
if (dr.fiveMinutes != null) {
val = val / 5;
if (!omitCount) appendInteger(val, 1, 10, sb);
name = dr.fiveMinutes;
index = 9; // hack
break;
}
}
}
if (!omitCount) appendInteger(val, 1, 10, sb);
} break;
case ECountVariant.HALF_FRACTION: {
// 0, 1/2, 1, 1-1/2...
int v = count / 500;
if (v != 1) {
if (!omitCount) appendCountValue(count, 1, 0, sb);
}
if ((v & 0x1) == 1) {
// hack, using half name
if (v == 1 && dr.halfNames != null && dr.halfNames[index] != null) {
sb.append(name);
return last ? index : -1;
}
int solox = v == 1 ? 0 : 1;
if (dr.genders != null && dr.halves.length > 2) {
if (dr.genders[index] == EGender.F) {
solox += 2;
}
}
int hp = dr.halfPlacements == null
? EHalfPlacement.PREFIX
: dr.halfPlacements[solox & 0x1];
String half = dr.halves[solox];
String measure = dr.measures == null ? null : dr.measures[index];
switch (hp) {
case EHalfPlacement.PREFIX:
sb.append(half);
break;
case EHalfPlacement.AFTER_FIRST: {
if (measure != null) {
sb.append(measure);
sb.append(half);
if (useSep && !omitCount) {
sb.append(dr.countSep);
}
sb.append(name);
} else { // ignore sep completely
sb.append(name);
sb.append(half);
return last ? index : -1; // might use suffix
}
} return -1; // exit early
case EHalfPlacement.LAST: {
if (measure != null) {
sb.append(measure);
}
if (useSep && !omitCount) {
sb.append(dr.countSep);
}
sb.append(name);
sb.append(half);
} return last ? index : -1; // might use suffix
}
}
} break;
default: {
int decimals = 1;
switch (cv) {
case ECountVariant.DECIMAL2: decimals = 2; break;
case ECountVariant.DECIMAL3: decimals = 3; break;
default: break;
}
if (!omitCount) appendCountValue(count, 1, decimals, sb);
} break;
}
if (!omitCount && useSep) {
sb.append(dr.countSep);
}
if (!omitCount && dr.measures != null && index < dr.measures.length) {
String measure = dr.measures[index];
if (measure != null) {
sb.append(measure);
}
}
sb.append(name);
return last ? index : -1;
} |
python | def GetScriptHashesForVerifying(self):
    """
    Get the script hash used for verification.
    Raises:
        Exception: if the verification script is invalid, or no header could be retrieved from the Blockchain.
    Returns:
        list: with a single UInt160 representing the next consensus node.
    """
    # if this is the genesis block, we don't have a prev hash!
    if self.PrevHash.Data == bytearray(32):
        # logger.info("verification script %s" % (self.Script.ToJson()))
        if type(self.Script.VerificationScript) is bytes:
            return [bytearray(self.Script.VerificationScript)]
        elif type(self.Script.VerificationScript) is bytearray:
            return [self.Script.VerificationScript]
        else:
            raise Exception('Invalid Verification script')
    prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())
    if prev_header is None:
        raise Exception('Invalid operation')
    return [prev_header.NextConsensus] |
java | public void setIndexInfo(String info) {
//ns=phobos_sindex:set=longevity:indexname=str_100_idx:num_bins=1:bins=str_100_bin:type=TEXT:sync_state=synced:state=RW;
//ns=test:set=Customers:indexname=mail_index_userss:bin=email:type=STRING:indextype=LIST:path=email:sync_state=synced:state=RW
if (!info.isEmpty()) {
String[] parts = info.split(":");
for (String part : parts) {
kvPut(part);
}
}
} |
python | def t_defexpr_CONTINUE(self, t):
    r'[\\_]\r?\n'
    t.lexer.lineno += 1
    t.value = t.value[1:]
    return t |
python | def _AnalyzeFileObject(self, mediator, file_object):
    """Processes a file-like object with analyzers.
    Args:
        mediator (ParserMediator): mediates the interactions between
            parsers and other components, such as storage and abort signals.
        file_object (dfvfs.FileIO): file-like object to process.
    """
    maximum_read_size = max([
        analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])
    hashers_only = True
    for analyzer_object in self._analyzers:
        if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):
            hashers_only = False
            break
    file_size = file_object.get_size()
    if (hashers_only and self._hasher_file_size_limit and
            file_size > self._hasher_file_size_limit):
        return
    file_object.seek(0, os.SEEK_SET)
    data = file_object.read(maximum_read_size)
    while data:
        if self._abort:
            break
        for analyzer_object in self._analyzers:
            if self._abort:
                break
            if (not analyzer_object.INCREMENTAL_ANALYZER and
                    file_size > analyzer_object.SIZE_LIMIT):
                continue
            if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and
                    self._hasher_file_size_limit and
                    file_size > self._hasher_file_size_limit):
                continue
            self.processing_status = analyzer_object.PROCESSING_STATUS_HINT
            analyzer_object.Analyze(data)
            self.last_activity_timestamp = time.time()
        data = file_object.read(maximum_read_size)
    display_name = mediator.GetDisplayName()
    for analyzer_object in self._analyzers:
        if self._abort:
            break
        for result in analyzer_object.GetResults():
            logger.debug((
                '[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '
                'file: {2:s}.').format(
                    result.attribute_name, result.attribute_value, display_name))
            mediator.AddEventAttribute(
                result.attribute_name, result.attribute_value)
        analyzer_object.Reset()
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING |
python | def command_health(self):
    """Check package health
    """
    if len(self.args) == 1 and self.args[0] == "health":
        PackageHealth(mode="").test()
    elif (len(self.args) == 2 and self.args[0] == "health" and
            self.args[1] == "--silent"):
        PackageHealth(mode=self.args[1]).test()
    else:
        usage("") |
java | Entry<String, Point2d[]> createEntry(final IAtomContainer container) {
try {
final int n = container.getAtomCount();
final int[] ordering = new int[n];
final String smiles = cansmi(container, ordering);
// build point array that is in the canonical output order
final Point2d[] points = new Point2d[n];
for (int i = 0; i < n; i++) {
Point2d point = container.getAtom(i).getPoint2d();
if (point == null) {
logger.warn("Atom at index ", i, " did not have coordinates.");
return null;
}
points[ordering[i]] = point;
}
return new SimpleEntry<String, Point2d[]>(smiles, points);
} catch (CDKException e) {
logger.warn("Could not encode container as SMILES: ", e);
}
return null;
} |
java | private void flush(final Collection<Writer> writers) {
for (Writer writer : writers) {
try {
writer.flush();
} catch (Exception ex) {
InternalLogger.log(Level.ERROR, ex, "Failed to flush writer");
}
}
} |
python | def clean_up_datetime(obj_map):
    """convert datetime objects to dictionaries for storage"""
    clean_map = {}
    for key, value in obj_map.items():
        if isinstance(value, datetime.datetime):
            clean_map[key] = {
                'year': value.year,
                'month': value.month,
                'day': value.day,
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                'microsecond': value.microsecond,
                'tzinfo': value.tzinfo
            }
        elif isinstance(value, dict):
            clean_map[key] = clean_up_datetime(value)
        elif isinstance(value, list):
            if key not in clean_map:
                clean_map[key] = []
            if len(value) > 0:
                for index, list_value in enumerate(value):
                    if isinstance(list_value, dict):
                        clean_map[key].append(clean_up_datetime(list_value))
                    else:
                        clean_map[key].append(list_value)
            else:
                clean_map[key] = value
        else:
            clean_map[key] = value
    return clean_map |
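A brief usage sketch for clean_up_datetime (input values are illustrative):

    import datetime
    record = {'name': 'example', 'created': datetime.datetime(2020, 1, 2, 3, 4, 5)}
    clean_up_datetime(record)
    # {'name': 'example',
    #  'created': {'year': 2020, 'month': 1, 'day': 2, 'hour': 3, 'minute': 4,
    #              'second': 5, 'microsecond': 0, 'tzinfo': None}}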
java | public void setResourceComplianceSummaryItems(java.util.Collection<ResourceComplianceSummaryItem> resourceComplianceSummaryItems) {
if (resourceComplianceSummaryItems == null) {
this.resourceComplianceSummaryItems = null;
return;
}
this.resourceComplianceSummaryItems = new com.amazonaws.internal.SdkInternalList<ResourceComplianceSummaryItem>(resourceComplianceSummaryItems);
} |
java | protected ISFSArray parseObjectArray(GetterMethodCover method,
Object[] array) {
return parseObjectArray(method.getReturnClass(),
array);
} |
java | public Register getRegister(int index) {
if (registers == null) {
throw new IndexOutOfBoundsException("No registers defined!");
}
if (index < 0) {
throw new IndexOutOfBoundsException("Negative index: " + index);
}
if (index >= getWordCount()) {
throw new IndexOutOfBoundsException(index + " > " + getWordCount());
}
return registers[index];
} |
python | def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.
    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    >>> sc.parallelize([]).reduce(add)
    Traceback (most recent call last):
        ...
    ValueError: Can not reduce() empty RDD
    """
    f = fail_on_stopiteration(f)

    def func(iterator):
        iterator = iter(iterator)
        try:
            initial = next(iterator)
        except StopIteration:
            return
        yield reduce(f, iterator, initial)

    vals = self.mapPartitions(func).collect()
    if vals:
        return reduce(f, vals)
    raise ValueError("Can not reduce() empty RDD") |
python | def adapt(obj, to_cls):
    """
    Will adapt `obj` to an instance of `to_cls`.
    First sees if `obj` has an `__adapt__` method and uses it to adapt. If that fails
    it checks if `to_cls` has an `__adapt__` classmethod and uses it to adapt. If that
    fails, MRO is used. If that fails, a `TypeError` is raised.
    """
    if obj is None:
        return obj
    elif isinstance(obj, to_cls):
        return obj
    errors = []
    if hasattr(obj, '__adapt__') and obj.__adapt__:
        try:
            return obj.__adapt__(to_cls)
        except (AdaptError, TypeError) as e:
            ex_type, ex, tb = sys.exc_info()
            errors.append((obj.__adapt__, ex_type, ex, tb))
    if hasattr(to_cls, '__adapt__') and to_cls.__adapt__:
        try:
            return to_cls.__adapt__(obj)
        except (AdaptError, TypeError) as e:
            ex_type, ex, tb = sys.exc_info()
            errors.append((to_cls.__adapt__, ex_type, ex, tb))
    for k in get_adapter_path(obj, to_cls):
        if k in __adapters__:
            try:
                return __adapters__[k](obj, to_cls)
            except (AdaptError, TypeError) as e:
                ex_type, ex, tb = sys.exc_info()
                errors.append((__adapters__[k], ex_type, ex, tb))
            break
    raise AdaptErrors('Could not adapt %r to %r' % (obj, to_cls), errors=errors) |
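A hedged sketch of the `__adapt__` hook that the function checks first (class and method bodies here are illustrative, not from the source library):

    class Celsius(object):
        def __init__(self, degrees):
            self.degrees = degrees
        def __adapt__(self, to_cls):
            # adapt this object to a plain float when asked
            if to_cls is float:
                return float(self.degrees)
            raise TypeError('cannot adapt to %r' % to_cls)

    adapt(Celsius(21), float)   # -> 21.0, via the instance's __adapt__ hook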
python | def _parse_disambiguate(disambiguatestatsfilename):
    """Parse disambiguation stats from given file.
    """
    disambig_stats = [0, 0, 0]
    with open(disambiguatestatsfilename, "r") as in_handle:
        for i, line in enumerate(in_handle):
            fields = line.strip().split("\t")
            if i == 0:
                assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
            else:
                disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
    return disambig_stats |
python | def _sample_variant_file_in_population(x):
    """Check if a sample file is the same as the population file.
    This is true for batches where we don't extract into samples and do not
    run decomposition for gemini.
    """
    if "population" in x:
        a = _get_project_vcf(x)
        b = _get_variant_file(x, ("vrn_file",))
        decomposed = tz.get_in(("population", "decomposed"), x)
        if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and
                vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])):
            return True
    return False |
python | def _guessunit(self):
    """Guess the unit of the period as the largest one, which results in
    an integer duration.
    """
    if not self.days % 1:
        return 'd'
    elif not self.hours % 1:
        return 'h'
    elif not self.minutes % 1:
        return 'm'
    elif not self.seconds % 1:
        return 's'
    else:
        raise ValueError(
            'The stepsize is not a multiple of one '
            'second, which is not allowed.') |
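A worked illustration of the branch order, assuming the `days`/`hours`/`minutes`/`seconds` attributes express the same period in different units:

    # For a 90-minute period:
    #   days    = 0.0625 -> 0.0625 % 1 != 0, so not 'd'
    #   hours   = 1.5    -> 1.5 % 1 != 0,    so not 'h'
    #   minutes = 90.0   -> 90.0 % 1 == 0,   so 'm' is returned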
python | def user_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/users#delete-user"
    api_path = "/api/v2/users/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, method="DELETE", **kwargs) |
java | @TargetApi(Build.VERSION_CODES.LOLLIPOP)
private static Drawable getDrawable(Context context, @DrawableRes int drawableResId) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
return context.getDrawable(drawableResId);
} else {
return context.getResources().getDrawable(drawableResId);
}
} |
java | public static TimeZone toTimeZone(final DateTimeZone dtz) {
TimeZone tz = new TimeZone() {
@Override
public void setRawOffset(int n) {
throw new UnsupportedOperationException();
}
@Override
public boolean useDaylightTime() {
long firstTransition = MILLIS_SINCE_1_JAN_2000_UTC;
return firstTransition != dtz.nextTransition(firstTransition);
}
@Override
public boolean inDaylightTime(Date d) {
long t = d.getTime();
return dtz.getStandardOffset(t) != dtz.getOffset(t);
}
@Override
public int getRawOffset() {
return dtz.getStandardOffset(0);
}
@Override
public int getOffset(long instant) {
// This method is not abstract, but it normally calls through to the
// method below.
// It's optimized here since there's a direct equivalent in
// DateTimeZone.
// DateTimeZone and java.util.TimeZone use the same
// epoch so there's no translation of instant required.
return dtz.getOffset(instant);
}
@Override
public int getOffset(
int era, int year, int month, int day, int dayOfWeek,
int milliseconds) {
int millis = milliseconds; // milliseconds is day in standard time
int hour = millis / MILLISECONDS_PER_HOUR;
millis %= MILLISECONDS_PER_HOUR;
int minute = millis / MILLISECONDS_PER_MINUTE;
millis %= MILLISECONDS_PER_MINUTE;
int second = millis / MILLISECONDS_PER_SECOND;
millis %= MILLISECONDS_PER_SECOND;
if (era == GregorianCalendar.BC) {
year = -(year - 1);
}
// get the time in UTC in case a timezone has changed it's standard
// offset, e.g. rid of a half hour from UTC.
DateTime dt = null;
try {
dt = new DateTime(year, month + 1, day, hour, minute,
second, millis, dtz);
} catch (IllegalArgumentException ex) {
// Java does not complain if you try to convert a Date that does not
// exist due to the offset shifting forward, but Joda time does.
// Since we're trying to preserve the semantics of TimeZone, shift
// forward over the gap so that we're on a time that exists.
// This assumes that the DST correction is one hour long or less.
if (hour < 23) {
dt = new DateTime(year, month + 1, day, hour + 1, minute,
second, millis, dtz);
} else { // Some timezones shift at midnight.
Calendar c = new GregorianCalendar();
c.clear();
c.setTimeZone(TimeZone.getTimeZone("UTC"));
c.set(year, month, day, hour, minute, second);
c.add(Calendar.HOUR_OF_DAY, 1);
int year2 = c.get(Calendar.YEAR),
month2 = c.get(Calendar.MONTH),
day2 = c.get(Calendar.DAY_OF_MONTH),
hour2 = c.get(Calendar.HOUR_OF_DAY);
dt = new DateTime(year2, month2 + 1, day2, hour2, minute,
second, millis, dtz);
}
}
// since millis is in standard time, we construct the equivalent
// GMT+xyz timezone and use that to convert.
int offset = dtz.getStandardOffset(dt.getMillis());
DateTime stdDt = new DateTime(
year, month + 1, day, hour, minute,
second, millis, DateTimeZone.forOffsetMillis(offset));
return getOffset(stdDt.getMillis());
}
@Override
public String toString() {
return dtz.toString();
}
@Override
public boolean equals(Object that) {
if (!(that instanceof TimeZone)) {
return false;
}
TimeZone thatTz = (TimeZone) that;
return getID().equals(thatTz.getID()) && hasSameRules(thatTz);
}
@Override
public int hashCode() {
return getID().hashCode();
}
private static final long serialVersionUID = 58752546800455L;
};
// Now fix the tzids. DateTimeZone has a bad habit of returning
// "+06:00" when it should be "GMT+06:00"
String newTzid = cleanUpTzid(dtz.getID());
tz.setID(newTzid);
return tz;
} |
java | public Thread newThread(String name, Runnable task) {
Assert.hasText(name, "Thread name must be specified");
Assert.notNull(task, "Thread task must not be null");
Thread thread = new Thread(getThreadGroup(), task, name);
thread.setContextClassLoader(getContextClassLoader());
thread.setDaemon(isDaemon());
thread.setPriority(getPriority());
thread.setUncaughtExceptionHandler(getUncaughtExceptionHandler());
return thread;
} |
java | private void addContainerproperties() {
/* Create HierarchicalContainer container */
container.addContainerProperty(SPUILabelDefinitions.NAME, String.class, null);
container.addContainerProperty(SPUILabelDefinitions.VAR_CREATED_BY, String.class, null);
container.addContainerProperty(SPUILabelDefinitions.VAR_CREATED_DATE, Date.class, null);
container.addContainerProperty(SPUILabelDefinitions.VAR_LAST_MODIFIED_BY, String.class, null, false, true);
container.addContainerProperty(SPUILabelDefinitions.VAR_LAST_MODIFIED_DATE, String.class, null, false, true);
container.addContainerProperty(SPUILabelDefinitions.VAR_TARGET_STATUS, TargetUpdateStatus.class, null);
container.addContainerProperty(SPUILabelDefinitions.VAR_DESC, String.class, "", false, true);
container.addContainerProperty(ASSIGN_DIST_SET, DistributionSet.class, null, false, true);
container.addContainerProperty(INSTALL_DIST_SET, DistributionSet.class, null, false, true);
container.addContainerProperty(SPUILabelDefinitions.ASSIGNED_DISTRIBUTION_NAME_VER, String.class, "");
container.addContainerProperty(SPUILabelDefinitions.INSTALLED_DISTRIBUTION_NAME_VER, String.class, null);
} |
java | private SegmentIdWithShardSpec getSegment(
final InputRow row,
final String sequenceName,
final boolean skipSegmentLineageCheck
) throws IOException
{
synchronized (segments) {
final DateTime timestamp = row.getTimestamp();
final SegmentIdWithShardSpec existing = getAppendableSegment(timestamp, sequenceName);
if (existing != null) {
return existing;
} else {
// Allocate new segment.
final SegmentsForSequence segmentsForSequence = segments.get(sequenceName);
final SegmentIdWithShardSpec newSegment = segmentAllocator.allocate(
row,
sequenceName,
segmentsForSequence == null ? null : segmentsForSequence.lastSegmentId,
// send lastSegmentId irrespective of skipSegmentLineageCheck so that
// unique constraint for sequence_name_prev_id_sha1 does not fail for
// allocatePendingSegment in IndexerSQLMetadataStorageCoordinator
skipSegmentLineageCheck
);
if (newSegment != null) {
for (SegmentIdWithShardSpec identifier : appenderator.getSegments()) {
if (identifier.equals(newSegment)) {
throw new ISE(
"WTF?! Allocated segment[%s] which conflicts with existing segment[%s].",
newSegment,
identifier
);
}
}
log.info("New segment[%s] for row[%s] sequenceName[%s].", newSegment, row, sequenceName);
addSegment(sequenceName, newSegment);
} else {
// Well, we tried.
log.warn("Cannot allocate segment for timestamp[%s], sequenceName[%s]. ", timestamp, sequenceName);
}
return newSegment;
}
}
} |
python | async def download_file_by_id(self, file_id: base.String, destination=None,
                              timeout: base.Integer = 30, chunk_size: base.Integer = 65536,
                              seek: base.Boolean = True):
    """
    Download file by file_id to destination.
    If you want to automatically create destination (:class:`io.BytesIO`) use the default
    value of destination and handle the result of this method.
    :param file_id: str
    :param destination: filename or instance of :class:`io.IOBase`, e.g. :class:`io.BytesIO`
    :param timeout: int
    :param chunk_size: int
    :param seek: bool - go to start of file when downloading is finished
    :return: destination
    """
    file = await self.get_file(file_id)
    return await self.download_file(file_path=file.file_path, destination=destination,
                                    timeout=timeout, chunk_size=chunk_size, seek=seek) |
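A hedged usage sketch, assuming an initialized aiogram `Bot` instance named `bot` inside an async context (the file id is hypothetical); with the default `destination`, the result is an `io.BytesIO` already seeked back to the start:

    buffer = await bot.download_file_by_id('AgACAgIAAxk...')
    with open('downloaded.jpg', 'wb') as fh:
        fh.write(buffer.read())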
python | def materialize_entity(ctx, etype, unique=None):
    '''
    Low-level routine for creating a BIBFRAME resource. Takes the entity (resource) type and a data mapping
    according to the resource type. Implements the Libhub Resource Hash Convention.
    As a convenience, if a vocabulary base is provided in the context, concatenate it to etype and the data keys.
    ctx - context information governing creation of the new entity
    etype - type IRI for the new entity
    unique - list of key/value tuples of data to use in generating its unique ID, or None in which case one is just randomly generated
    '''
    params = {}
    if ctx.base:
        etype = ctx.base + etype
    unique_computed = []
    for k, v in unique:
        k = k if iri.is_absolute(k) else iri.absolutize(k, ctx.base)
        v = v(ctx) if callable(v) else v
        unique_computed.append((k, v))
    if unique_computed:
        plaintext = json.dumps([etype, unique_computed], cls=OrderedJsonEncoder)
        eid = ctx.idgen.send(plaintext)
    else:
        # We only have a type; no other distinguishing data. Generate a random hash
        eid = next(ctx.idgen)
    return eid |
java | public QuestionStructure getMainPart(String question, List<edu.stanford.nlp.ling.Word> words) {
QuestionStructure questionStructure = new QuestionStructure();
questionStructure.setQuestion(question);
Tree tree = LP.apply(words);
LOG.info("句法树: ");
tree.pennPrint();
questionStructure.setTree(tree);
GrammaticalStructure gs = GSF.newGrammaticalStructure(tree);
if(gs == null){
return null;
}
// Get the dependency relations
Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
questionStructure.setTdls(tdls);
Map<String, String> map = new HashMap<>();
String top = null;
String root = null;
LOG.info("句子依存关系:");
// dependency relations
List<String> dependencies = new ArrayList<>();
for (TypedDependency tdl : tdls) {
String item = tdl.toString();
dependencies.add(item);
LOG.info("\t" + item);
if (item.startsWith("top")) {
top = item;
}
if (item.startsWith("root")) {
root = item;
}
int start = item.indexOf("(");
int end = item.lastIndexOf(")");
item = item.substring(start + 1, end);
String[] attr = item.split(",");
String k = attr[0].trim();
String v = attr[1].trim();
String value = map.get(k);
if (value == null) {
map.put(k, v);
} else {
// the key already has a value
value += ":";
value += v;
map.put(k, value);
}
}
questionStructure.setDependencies(dependencies);
String mainPartForTop = null;
String mainPartForRoot = null;
if (top != null) {
mainPartForTop = topPattern(top, map);
}
if (root != null) {
mainPartForRoot = rootPattern(root, map);
}
questionStructure.setMainPartForTop(mainPartForTop);
questionStructure.setMainPartForRoot(mainPartForRoot);
if (questionStructure.getMainPart() == null) {
LOG.error("未能识别主谓宾:" + question);
} else {
LOG.info("主谓宾:" + questionStructure.getMainPart());
}
return questionStructure;
} |
java | public void setTables(final List<Table> tables) {
this.tablesMap.clear();
for (final Table table : tables)
this.addTableConfig(table.getConfigEntry());
} |
java | public SFBaseResultSet execute(String sql,
Map<String, ParameterBindingDTO>
parametersBinding,
CallingMethod caller)
throws SQLException, SFException
{
sanityCheckQuery(sql);
session.injectedDelay();
if (logger.isDebugEnabled())
{
logger.debug("execute: {}", SecretDetector.maskAWSSecret(sql));
}
String trimmedSql = sql.trim();
if (trimmedSql.length() >= 20
&& trimmedSql.toLowerCase().startsWith(
"set-sf-property"))
{
executeSetProperty(sql);
return null;
}
return executeQuery(sql, parametersBinding, false, caller);
} |
java | public static Type getAndCheckType(EntityDataModel entityDataModel, Class<?> javaType) {
Type type = entityDataModel.getType(javaType);
if (type == null) {
throw new ODataSystemException("No type found in the entity data model for Java type: "
+ javaType.getName());
}
return type;
} |
java | public void packSymbols(int first, int end) {
int src;
int dest;
boolean touched;
dest = first;
for (src = first; src < end; src++) {
touched = renameSymbol(src, dest);
if (touched) {
dest++;
} else {
// src not found; dest stays unused
}
}
} |
python | def aes_encrypt(self, plain, sec_key, enable_b64=True):
    """
    Encrypt data with ``aes``, then optionally ``base64``-encode the encrypted data.
    - ``sec_key`` encrypts ``plain``; ``enable_b64`` selects whether the result is base64-encoded.
    - The plaintext length must be a multiple of 16; otherwise it is padded with ASCII '\\0'.
    .. warning::
        The plaintext length must be a multiple of 16; otherwise it is padded with ASCII '\\0'.
    :param plain:
    :type plain: str
    :param sec_key:
    :type sec_key: str
    :param enable_b64:
    :type enable_b64: bool
    :return:
    :rtype:
    """
    plain = helper.to_str(plain)
    sec_key = helper.to_str(sec_key)
    # If the plaintext length is not a multiple of 16, pad it with '\0'
    plain += '\0' * (self.bs - len(plain) % self.bs)
    # Encrypt using the generated key and IV
    plain = helper.to_bytes(plain)
    cipher = self.aes_obj(sec_key).encrypt(plain)
    # Return base64-encoded data if requested
    cip = base64.b64encode(cipher) if enable_b64 else cipher
    return helper.to_str(cip) |
java | public static ProjectWriter getProjectWriter(String name) throws InstantiationException, IllegalAccessException
{
int index = name.lastIndexOf('.');
if (index == -1)
{
throw new IllegalArgumentException("Filename has no extension: " + name);
}
String extension = name.substring(index + 1).toUpperCase();
Class<? extends ProjectWriter> fileClass = WRITER_MAP.get(extension);
if (fileClass == null)
{
throw new IllegalArgumentException("Cannot write files of type: " + name);
}
ProjectWriter file = fileClass.newInstance();
return (file);
} |
python | def prompt_file(prompt, default=None, must_exist=True, is_dir=False,
                show_default=True, prompt_suffix=': ', color=None):
    """
    Prompt a filename using glob for autocompletion.
    If must_exist is True (default) then you can be sure that the value returned
    is an existing filename or directory name.
    If is_dir is True, this will show only the directories for the completion.
    """
    if must_exist:
        while True:
            r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default,
                                    prompt_suffix=prompt_suffix, color=color)
            if os.path.exists(r):
                break
            print('This path does not exist.')
    else:
        r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default,
                                prompt_suffix=prompt_suffix, color=color)
    return r |
python | def analyze(
        self, trees: List[ET.ElementTree], comments: OrderedDict
) -> Dict:
    outputDict = {}
    ast = []
    # Parse through the ast once to identify and grab all the functions
    # present in the Fortran file.
    for tree in trees:
        self.loadFunction(tree)
    # Parse through the ast tree a second time to convert the XML ast
    # format to a format that can be used to generate Python statements.
    for tree in trees:
        ast += self.parseTree(tree, ParseState())
    """
    Find the entry point for the Fortran file.
    The entry point for a conventional Fortran file is always the PROGRAM
    section. This 'if' statement checks for the presence of a PROGRAM
    segment.
    If not found, the entry point can be any of the functions or
    subroutines in the file. So, all the functions and subroutines of the
    program are listed and included as the possible entry point.
    """
    if self.entryPoint:
        entry = {"program": self.entryPoint[0]}
    else:
        entry = {}
        if self.functionList:
            entry["function"] = self.functionList
        if self.subroutineList:
            entry["subroutine"] = self.subroutineList
    # Load the functions list and Fortran ast to a single data structure
    # which can be pickled and hence is portable across various scripts and
    # usages.
    outputDict["ast"] = ast
    outputDict["functionList"] = self.functionList
    outputDict["comments"] = comments
    return outputDict |
python | def fac2real(pp_file=None, factors_file="factors.dat", out_file="test.ref",
             upper_lim=1.0e+30, lower_lim=-1.0e+30, fill_value=1.0e+30):
    """A python replication of the PEST fac2real utility for creating a
    structured grid array from previously calculated kriging factors (weights)
    Parameters
    ----------
    pp_file : (str)
        PEST-type pilot points file
    factors_file : (str)
        PEST-style factors file
    out_file : (str)
        filename of array to write. If None, array is returned, else
        value of out_file is returned. Default is "test.ref".
    upper_lim : (float)
        maximum interpolated value in the array. Values greater than
        upper_lim are set to fill_value
    lower_lim : (float)
        minimum interpolated value in the array. Values less than lower_lim
        are set to fill_value
    fill_value : (float)
        the value to assign array nodes that are not interpolated
    Returns
    -------
    arr : numpy.ndarray
        if out_file is None
    out_file : str
        if out_file is not None
    Example
    -------
    ``>>>import pyemu``
    ``>>>pyemu.utils.geostats.fac2real("hkpp.dat",out_file="hk_layer_1.ref")``
    """
    if pp_file is not None and isinstance(pp_file, str):
        assert os.path.exists(pp_file)
        # pp_data = pd.read_csv(pp_file,delim_whitespace=True,header=None,
        #                       names=["name","parval1"],usecols=[0,4])
        pp_data = pp_file_to_dataframe(pp_file)
        pp_data.loc[:, "name"] = pp_data.name.apply(lambda x: x.lower())
    elif pp_file is not None and isinstance(pp_file, pd.DataFrame):
        assert "name" in pp_file.columns
        assert "parval1" in pp_file.columns
        pp_data = pp_file
    else:
        raise Exception("unrecognized pp_file arg: must be str or pandas.DataFrame, not {0}"
                        .format(type(pp_file)))
    assert os.path.exists(factors_file)
    f_fac = open(factors_file, 'r')
    fpp_file = f_fac.readline()
    if pp_file is None and pp_data is None:
        pp_data = pp_file_to_dataframe(fpp_file)
        pp_data.loc[:, "name"] = pp_data.name.apply(lambda x: x.lower())
    fzone_file = f_fac.readline()
    ncol, nrow = [int(i) for i in f_fac.readline().strip().split()]
    npp = int(f_fac.readline().strip())
    pp_names = [f_fac.readline().strip().lower() for _ in range(npp)]
    # check that pp_names is sync'd with pp_data
    diff = set(list(pp_data.name)).symmetric_difference(set(pp_names))
    if len(diff) > 0:
        raise Exception("the following pilot point names are not common " +
                        "between the factors file and the pilot points file " +
                        ','.join(list(diff)))
    arr = np.zeros((nrow, ncol), dtype=np.float) + fill_value
    pp_dict = {int(name): val for name, val in zip(pp_data.index, pp_data.parval1)}
    try:
        pp_dict_log = {name: np.log10(val) for name, val in zip(pp_data.index, pp_data.parval1)}
    except:
        pp_dict_log = {}
    # for i in range(nrow):
    #     for j in range(ncol):
    while True:
        line = f_fac.readline()
        if len(line) == 0:
            # raise Exception("unexpected EOF in factors file")
            break
        try:
            inode, itrans, fac_data = parse_factor_line(line)
        except Exception as e:
            raise Exception("error parsing factor line {0}:{1}".format(line, str(e)))
        # fac_prods = [pp_data.loc[pp,"value"]*fac_data[pp] for pp in fac_data]
        if itrans == 0:
            fac_sum = sum([pp_dict[pp] * fac_data[pp] for pp in fac_data])
        else:
            fac_sum = sum([pp_dict_log[pp] * fac_data[pp] for pp in fac_data])
        if itrans != 0:
            fac_sum = 10**fac_sum
        # col = ((inode - 1) // nrow) + 1
        # row = inode - ((col - 1) * nrow)
        row = ((inode - 1) // ncol) + 1
        col = inode - ((row - 1) * ncol)
        # arr[row-1,col-1] = np.sum(np.array(fac_prods))
        arr[row - 1, col - 1] = fac_sum
    arr[arr < lower_lim] = lower_lim
    arr[arr > upper_lim] = upper_lim
    # print(out_file,arr.min(),pp_data.parval1.min(),lower_lim)
    if out_file is not None:
        np.savetxt(out_file, arr, fmt="%15.6E", delimiter='')
        return out_file
    return arr |
java | private String doKerberosAuth(HttpServletRequest request)
throws HttpAuthenticationException {
// Try authenticating with the http/_HOST principal
if (httpUGI != null) {
try {
return httpUGI.doAs(new HttpKerberosServerAction(request, httpUGI));
} catch (Exception e) {
LOG.info("Failed to authenticate with http/_HOST kerberos principal, " +
"trying with hive/_HOST kerberos principal");
}
}
// Now try with hive/_HOST principal
try {
return serviceUGI.doAs(new HttpKerberosServerAction(request, serviceUGI));
} catch (Exception e) {
LOG.error("Failed to authenticate with hive/_HOST kerberos principal");
throw new HttpAuthenticationException(e);
}
} |
java | private static OptionalEntity<BadWord> getEntity(final CreateForm form, final String username, final long currentTime) {
switch (form.crudMode) {
case CrudMode.CREATE:
return OptionalEntity.of(new BadWord()).map(entity -> {
entity.setCreatedBy(username);
entity.setCreatedTime(currentTime);
return entity;
});
case CrudMode.EDIT:
if (form instanceof EditForm) {
return ComponentUtil.getComponent(BadWordService.class).getBadWord(((EditForm) form).id);
}
break;
default:
break;
}
return OptionalEntity.empty();
} |
java | private boolean rule2(double sample) {
if (!hasMean()) {
return false;
}
if (sample > mean.getResult()) {
if (rule2Count > 0) {
++rule2Count;
} else {
rule2Count = 1;
}
} else {
if (rule2Count < 0) {
--rule2Count;
} else {
rule2Count = -1;
}
}
return Math.abs(rule2Count) >= 9;
} |
python | def create_html_select(
        options,
        name=None,
        selected=None,
        disabled=None,
        multiple=False,
        attrs=None,
        **other_attrs):
    """
    Create an HTML select box.
    >>> print create_html_select(["foo", "bar"], selected="bar", name="baz")
    <select name="baz">
    <option selected="selected" value="bar">
    bar
    </option>
    <option value="foo">
    foo
    </option>
    </select>
    >>> print create_html_select([("foo", "oof"), ("bar", "rab")], selected="bar", name="baz")
    <select name="baz">
    <option value="foo">
    oof
    </option>
    <option selected="selected" value="bar">
    rab
    </option>
    </select>
    @param options: this can either be a sequence of strings, or a sequence
        of couples or a map of C{key->value}. In the former case, the C{select}
        tag will contain a list of C{option} tags (in alphabetical order),
        where the C{value} attribute is not specified. In the latter case,
        the C{value} attribute will be set to the C{key}, while the body
        of the C{option} will be set to C{value}.
    @type options: sequence or map
    @param name: the name of the form element.
    @type name: string
    @param selected: optional key(s)/value(s) to select by default. In case
        a map has been used for options.
    @type selected: string (or list of string)
    @param disabled: optional key(s)/value(s) to disable.
    @type disabled: string (or list of string)
    @param multiple: whether a multiple select box must be created.
    @type multiple: bool
    @param attrs: optional attributes to create the select tag.
    @type attrs: dict
    @param other_attrs: other optional attributes.
    @return: the HTML output.
    @rtype: string
    @note: the values and keys will be escaped for HTML.
    @note: it is important that parameter C{value} is always
        specified, in case some browser plugins play with the
        markup, for eg. when translating the page.
    """
    body = []
    if selected is None:
        selected = []
    elif isinstance(selected, (str, unicode)):
        selected = [selected]
    if disabled is None:
        disabled = []
    elif isinstance(disabled, (str, unicode)):
        disabled = [disabled]
    if name is not None and multiple and not name.endswith('[]'):
        name += "[]"
    if isinstance(options, dict):
        items = options.items()
        items.sort(lambda item1, item2: cmp(item1[1], item2[1]))
    elif isinstance(options, (list, tuple)):
        options = list(options)
        items = []
        for item in options:
            if isinstance(item, (str, unicode)):
                items.append((item, item))
            elif isinstance(item, (tuple, list)) and len(item) == 2:
                items.append(tuple(item))
            else:
                raise ValueError(
                    'Item "%s" of incompatible type: %s' % (item, type(item)))
    else:
        raise ValueError('Options of incompatible type: %s' % type(options))
    for key, value in items:
        option_attrs = {}
        if key in selected:
            option_attrs['selected'] = 'selected'
        if key in disabled:
            option_attrs['disabled'] = 'disabled'
        body.append(
            create_tag(
                "option",
                body=value,
                escape_body=True,
                value=key,
                attrs=option_attrs))
    if attrs is None:
        attrs = {}
    if name is not None:
        attrs['name'] = name
    if multiple:
        attrs['multiple'] = 'multiple'
    return create_tag(
        "select",
        body='\n'.join(body),
        attrs=attrs,
        **other_attrs) |
java | public static boolean toBoolean(final String str, final String trueString, final String falseString) {
if (str == trueString) {
return true;
} else if (str == falseString) {
return false;
} else if (str != null) {
if (str.equals(trueString)) {
return true;
} else if (str.equals(falseString)) {
return false;
}
}
// no match
throw new IllegalArgumentException("The String did not match either specified value");
} |
java | public void setGlobalScopes(Iterable<GlobalScope> globalScopes) {
this.globalScopes = Lists.newArrayList(Iterables.concat(globalScopes, serviceGlobalScopes));
} |
python | def import_rsakey_from_private_pem(pem, scheme='rsassa-pss-sha256', password=None):
"""
<Purpose>
Import the private RSA key stored in 'pem', and generate its public key
(which will also be included in the returned rsakey object). In addition,
a keyid identifier for the RSA key is generated. The object returned
conforms to 'securesystemslib.formats.RSAKEY_SCHEMA' and has the form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': keyid,
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The private key is a string in PEM format.
>>> rsa_key = generate_rsa_key()
>>> scheme = rsa_key['scheme']
>>> private = rsa_key['keyval']['private']
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> rsa_key2 = import_rsakey_from_private_pem(encrypted_pem, scheme, passphrase)
>>> securesystemslib.formats.RSAKEY_SCHEMA.matches(rsa_key)
True
>>> securesystemslib.formats.RSAKEY_SCHEMA.matches(rsa_key2)
True
<Arguments>
pem:
A string in PEM format. The private key is extracted and returned in
an rsakey object.
scheme:
The signature scheme used by the imported key.
password: (optional)
The password, or passphrase, to decrypt the private part of the RSA key
if it is encrypted. 'password' is not used directly as the encryption
key, a stronger encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'pem' specifies
an unsupported key type.
<Side Effects>
None.
<Returns>
A dictionary containing the RSA keys and other identifying information.
Conforms to 'securesystemslib.formats.RSAKEY_SCHEMA'.
"""
# Does 'pem' have the correct format?
# This check will ensure 'pem' conforms to
# 'securesystemslib.formats.PEMRSA_SCHEMA'.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(pem)
# Is 'scheme' properly formatted?
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
if password is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
else:
logger.debug('The password/passphrase is unset. The PEM is expected'
' to be unencrypted.')
# Begin building the RSA key dictionary.
rsakey_dict = {}
keytype = 'rsa'
public = None
private = None
# Generate the public and private RSA keys. The pyca/cryptography library
# performs the actual crypto operations.
public, private = \
securesystemslib.pyca_crypto_keys.create_rsa_public_and_private_from_pem(
pem, password)
public = extract_pem(public, private_pem=False)
private = extract_pem(private, private_pem=True)
# Generate the keyid of the RSA key. 'key_value' corresponds to the
# 'keyval' entry of the 'RSAKEY_SCHEMA' dictionary. The private key
# information is not included in the generation of the 'keyid' identifier.
# Convert any '\r\n' (e.g., Windows) newline characters to '\n' so that a
# consistent keyid is generated.
key_value = {'public': public.replace('\r\n', '\n'),
'private': ''}
keyid = _get_keyid(keytype, scheme, key_value)
# Build the 'rsakey_dict' dictionary. Update 'key_value' with the RSA
# private key prior to adding 'key_value' to 'rsakey_dict'.
key_value['private'] = private
rsakey_dict['keytype'] = keytype
rsakey_dict['scheme'] = scheme
rsakey_dict['keyid'] = keyid
rsakey_dict['keyval'] = key_value
return rsakey_dict |
java | void writeObjectStart() throws IOException {
output.append('{');
currentIndent = currentIndent + indent;
commaDepth++;
commaState.set(commaDepth, false);
} |
java | public ServiceCall<Environment> createEnvironment(CreateEnvironmentOptions createEnvironmentOptions) {
Validator.notNull(createEnvironmentOptions, "createEnvironmentOptions cannot be null");
String[] pathSegments = { "v1/environments" };
RequestBuilder builder = RequestBuilder.post(RequestBuilder.constructHttpUrl(getEndPoint(), pathSegments));
builder.query("version", versionDate);
Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v1", "createEnvironment");
for (Entry<String, String> header : sdkHeaders.entrySet()) {
builder.header(header.getKey(), header.getValue());
}
builder.header("Accept", "application/json");
final JsonObject contentJson = new JsonObject();
contentJson.addProperty("name", createEnvironmentOptions.name());
if (createEnvironmentOptions.description() != null) {
contentJson.addProperty("description", createEnvironmentOptions.description());
}
if (createEnvironmentOptions.size() != null) {
contentJson.addProperty("size", createEnvironmentOptions.size());
}
builder.bodyJson(contentJson);
return createServiceCall(builder.build(), ResponseConverterUtils.getObject(Environment.class));
} |
python | def api(server, command, *args, **kwargs):
'''
Call the Spacewalk xmlrpc api.
CLI Example:
.. code-block:: bash
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create MyGroup Description
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create arguments='["MyGroup", "Description"]'
State Example:
.. code-block:: yaml
create_group:
salt.runner:
- name: spacewalk.api
- server: spacewalk01.domain.com
- command: systemgroup.create
- arguments:
- MyGroup
- Description
'''
if 'arguments' in kwargs:
arguments = kwargs['arguments']
else:
arguments = args
call = '{0} {1}'.format(command, arguments)
try:
client, key = _get_session(server)
except Exception as exc:
err_msg = 'Exception raised when connecting to spacewalk server ({0}): {1}'.format(server, exc)
log.error(err_msg)
return {call: err_msg}
namespace, method = command.split('.')
endpoint = getattr(getattr(client, namespace), method)
try:
output = endpoint(key, *arguments)
except Exception as e:
output = 'API call failed: {0}'.format(e)
return {call: output} |
java | public final void synpred38_DRL5Expressions_fragment() throws RecognitionException {
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:654:9: ( DOT ID )
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:654:10: DOT ID
{
match(input,DOT,FOLLOW_DOT_in_synpred38_DRL5Expressions3848); if (state.failed) return;
match(input,ID,FOLLOW_ID_in_synpred38_DRL5Expressions3850); if (state.failed) return;
}
} |
python | def dumps(value, encoding=None):
"""dumps(object, encoding=None) -> string
This function dumps a python object as a tnetstring.
"""
# This uses a deque to collect output fragments in reverse order,
# then joins them together at the end. It's measurably faster
# than creating all the intermediate strings.
# If you're reading this to get a handle on the tnetstring format,
# consider the _gdumps() function instead; it's a standard top-down
# generator that's simpler to understand but much less efficient.
q = deque()
_rdumpq(q,0,value,encoding)
return "".join(q) |
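The comment above points to a simpler top-down formulation (`_gdumps`). Here is a hedged sketch of that idea for a few core types — the `length:payload` framing followed by a type character is the standard tnetstring layout, but the exact type coverage of the library's own generator is an assumption:

# Minimal top-down tnetstring encoder sketch (bools, ints, strings and lists only).
def simple_dumps(value):
    if isinstance(value, bool):
        payload, tag = (b"true" if value else b"false"), b"!"
    elif isinstance(value, int):
        payload, tag = str(value).encode("ascii"), b"#"
    elif isinstance(value, str):
        payload, tag = value.encode("utf-8"), b","
    elif isinstance(value, list):
        payload, tag = b"".join(simple_dumps(v) for v in value), b"]"
    else:
        raise TypeError("unsupported type: %r" % type(value))
    return str(len(payload)).encode("ascii") + b":" + payload + tag

# The integer 42 becomes b"2:42#"; a list wraps its already-encoded items.
assert simple_dumps(42) == b"2:42#"
assert simple_dumps(["hi", 1]) == b"9:2:hi,1:1#]"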
java | private void waitForCallback(final String uuid, final long timeout) throws TimeoutException {
synchronized (lock) {
this.message = null;
this.hasTimeout = true;
try {
lock.wait(timeout);
} catch (InterruptedException iex) {
logger.warn("", iex);
}
// remove timed out callback
if (this.hasTimeout) {
peer.unregisterCallback(uuid);
throw new TimeoutException();
}
}
} |
java | public <R> Plan0<R> then(Func7<T1, T2, T3, T4, T5, T6, T7, R> selector) {
if (selector == null) {
throw new NullPointerException();
}
return new Plan7<T1, T2, T3, T4, T5, T6, T7, R>(this, selector);
} |
java | private boolean hasOutputChanged(ChannelHandlerContext ctx, boolean first) {
if (observeOutput) {
// We can take this shortcut if the ChannelPromises that got passed into write()
// appear to complete. It indicates "change" on message level and we simply assume
// that there's change happening on byte level. If the user doesn't observe channel
// writability events then they'll eventually OOME and there's clearly a different
// problem and idleness is least of their concerns.
if (lastChangeCheckTimeStamp != lastWriteTime) {
lastChangeCheckTimeStamp = lastWriteTime;
// But this applies only if it's the non-first call.
if (!first) {
return true;
}
}
Channel channel = ctx.channel();
Unsafe unsafe = channel.unsafe();
ChannelOutboundBuffer buf = unsafe.outboundBuffer();
if (buf != null) {
int messageHashCode = System.identityHashCode(buf.current());
long pendingWriteBytes = buf.totalPendingWriteBytes();
if (messageHashCode != lastMessageHashCode || pendingWriteBytes != lastPendingWriteBytes) {
lastMessageHashCode = messageHashCode;
lastPendingWriteBytes = pendingWriteBytes;
if (!first) {
return true;
}
}
long flushProgress = buf.currentProgress();
if (flushProgress != lastFlushProgress) {
lastFlushProgress = flushProgress;
if (!first) {
return true;
}
}
}
}
return false;
} |
python | def interpolate(self, factor, minFont, maxFont,
round=True, suppressError=True):
"""
Interpolate all possible data in the font.
>>> font.interpolate(0.5, otherFont1, otherFont2)
>>> font.interpolate((0.5, 2.0), otherFont1, otherFont2, round=False)
The interpolation occurs on a 0 to 1.0 range where **minFont**
is located at 0 and **maxFont** is located at 1.0. **factor**
is the interpolation value. It may be less than 0 and greater
than 1.0. It may be a :ref:`type-int-float` or a tuple of
two :ref:`type-int-float`. If it is a tuple, the first
number indicates the x factor and the second number indicates
the y factor. **round** indicates if the result should be
rounded to integers. **suppressError** indicates if incompatible
data should be ignored or if an error should be raised when
such incompatibilities are found.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minFont, BaseFont):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__, minFont.__class__.__name__))
if not isinstance(maxFont, BaseFont):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__, maxFont.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minFont, maxFont,
round=round, suppressError=suppressError) |
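For reference, the 0-to-1 factor described in the docstring is plain linear interpolation, applied per axis when a tuple factor is given. A stand-alone sketch of just that arithmetic (the fontParts normalizers and per-glyph machinery are deliberately left out and not reproduced here):

# Linear interpolation of a 2D point between a "min" and a "max" design; per-axis factors allowed.
def interpolate_point(min_pt, max_pt, factor):
    fx, fy = factor if isinstance(factor, tuple) else (factor, factor)
    x = min_pt[0] + (max_pt[0] - min_pt[0]) * fx
    y = min_pt[1] + (max_pt[1] - min_pt[1]) * fy
    return (x, y)

# factor=0.5 lands halfway; a (0.5, 2.0) tuple scales x and y independently.
assert interpolate_point((0, 0), (100, 10), 0.5) == (50.0, 5.0)
assert interpolate_point((0, 0), (100, 10), (0.5, 2.0)) == (50.0, 20.0)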
java | public static <T extends Number & Comparable<?>> NumberOperation<T> numberOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new NumberOperation<T>(type, operator, args);
} |
python | def export_polydata_str(obj, **kwargs):
""" Saves control points or evaluated points in VTK Polydata format (string).
Please see the following document for details: http://www.vtk.org/VTK/img/file-formats.pdf
Keyword Arguments:
* ``point_type``: **ctrlpts** for control points or **evalpts** for evaluated points
* ``tessellate``: tessellates the points (works only for surfaces)
:param obj: geometry object
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:return: contents of the VTK file
:rtype: str
:raises GeomdlException: point type is not supported
:raises UserWarning: file title is bigger than 256 characters
"""
# Get keyword arguments
point_type = kwargs.get('point_type', "evalpts")
file_title = kwargs.get('title', "geomdl " + repr(obj)) # file title
tessellate = kwargs.get('tessellate', False)
# Input validation
possible_types = ['ctrlpts', 'evalpts']
if point_type not in possible_types:
raise exch.GeomdlException("Please choose a valid point type option. " +
"Possible types: " + ", ".join([str(t) for t in possible_types]))
# Check for VTK standards for the file title
if len(file_title) >= 256:
file_title = file_title[0:255]  # truncate the title to 255 characters; the newline character is added later
warnings.warn("VTK standard restricts the file title to 256 characters. New file title is: " + file_title)
# Find number of edges in a single tessellated structure
tsl_dim = 4 if point_type == "ctrlpts" else 3
# Initialize lists
str_p = ""
str_v = ""
str_f = ""
# Count number of vertices and faces
v_offset = 0
f_offset = 0
# Loop through all geometry objects
for o in obj:
# Prepare data array
if point_type == "ctrlpts":
if tessellate and o.pdimension == 2:
tsl = abstract.tessellate.QuadTessellate()
tsl.tessellate(o.ctrlpts, size_u=o.ctrlpts_size_u, size_v=o.ctrlpts_size_v)
data_array = ([v.data for v in tsl.vertices], [q.data for q in tsl.faces])
else:
data_array = (o.ctrlpts, [])
elif point_type == "evalpts":
if tessellate and o.pdimension == 2:
o.tessellate()
data_array = ([v.data for v in o.vertices], [t.data for t in o.faces])
else:
data_array = (o.evalpts, [])
else:
data_array = ([], [])
# Prepare point and vertex data
for ipt, pt in enumerate(data_array[0]):
str_p += " ".join(str(c) for c in pt) + "\n"
str_v += "1 " + str(ipt + v_offset) + "\n"
# Prepare polygon data
if data_array[1]:
for pt in data_array[1]:
str_f += str(tsl_dim) + " " + " ".join(str(c + v_offset) for c in pt) + "\n"
# Update face offset
f_offset += len(data_array[1])
# Update vertex offset
v_offset += len(data_array[0])
# Start generating the file content
line = "# vtk DataFile Version 3.0\n"
line += file_title + "\n"
line += "ASCII\n"
# Define geometry/topology
line += "DATASET POLYDATA\n"
# Add point data to the file
line += "POINTS " + str(v_offset) + " FLOAT\n"
line += str_p
# Add vertex data to the file
line += "VERTICES " + str(v_offset) + " " + str(2 * v_offset) + "\n"
line += str_v
# Add polygon data to the file
if tessellate:
line += "POLYGONS " + str(f_offset) + " " + str((tsl_dim + 1) * f_offset) + "\n"
line += str_f
# Add dataset attributes to the file
line += "POINT_DATA " + str(v_offset) + "\n"
if f_offset > 0:
line += "CELL_DATA " + str(f_offset) + "\n"
# Return generated file content
return line |
python | def create_storage_policy(policy_name, policy_dict, service_instance=None):
'''
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
'''
log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
# Hardcode the storage profile resource type
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
# Set name argument
policy_dict['name'] = policy_name
log.trace('Setting policy values in policy_update_spec')
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {'create_storage_policy': True} |
java | @Override
public ChronoZonedDateTime<D> plus(long amountToAdd, TemporalUnit unit) {
if (unit instanceof ChronoUnit) {
return with(dateTime.plus(amountToAdd, unit));
}
return ChronoZonedDateTimeImpl.ensureValid(getChronology(), unit.addTo(this, amountToAdd)); /// TODO: Generics replacement Risk!
} |
java | protected long getDelta (long timeStamp, long maxValue)
{
boolean even = (evenBase > oddBase);
long base = even ? evenBase : oddBase;
long delta = timeStamp - base;
// make sure this timestamp is not sufficiently old that we can't
// generate a delta time with it
if (delta < 0) {
String errmsg = "Time stamp too old for conversion to delta time";
throw new IllegalArgumentException(errmsg);
}
// see if it's time to swap
if (delta > maxValue) {
if (even) {
setOddBase(timeStamp);
} else {
setEvenBase(timeStamp);
}
delta = 0;
}
// if we're odd, we need to mark the value as such
if (!even) {
delta = (-1 - delta);
}
return delta;
} |
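The even/odd base swap above is easier to follow in isolation. Below is a hedged Python sketch of the same delta-time scheme: deltas are measured against the newer of two bases, an overflowing delta rotates the stale base forward and restarts at zero, and deltas measured against the odd base are returned as negative values so the receiver knows which base to add back. The field names mirror the snippet; everything else (construction, wire format) is an assumption.

# Sketch of a two-base delta-time encoder that swaps the older base on overflow.
class DeltaClock:
    def __init__(self, start):
        self.even_base = start
        self.odd_base = start - 1   # the even base starts as the newer one

    def get_delta(self, timestamp, max_value):
        even = self.even_base > self.odd_base
        base = self.even_base if even else self.odd_base
        delta = timestamp - base
        if delta < 0:
            raise ValueError("time stamp too old for conversion to delta time")
        if delta > max_value:        # rotate the stale base forward, restart at zero
            if even:
                self.odd_base = timestamp
            else:
                self.even_base = timestamp
            delta = 0
        return delta if even else (-1 - delta)

clock = DeltaClock(1000)
assert clock.get_delta(1100, 500) == 100    # within range of the even base
assert clock.get_delta(1600, 500) == 0      # overflow: odd base becomes 1600
assert clock.get_delta(1700, 500) == -101   # now against the odd base, sign-flipped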
java | public static java.util.List<com.liferay.commerce.product.model.CPOption> getCPOptions(
int start, int end) {
return getService().getCPOptions(start, end);
} |
java | public UnicodeSet retainAll(UnicodeSet c) {
checkFrozen();
retain(c.list, c.len, 0);
strings.retainAll(c.strings);
return this;
} |
java | private LinkedHashMap<String, String> getSortList(boolean includeType) {
LinkedHashMap<String, String> list = new LinkedHashMap<String, String>();
list.put(SortParams.title_asc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_TITLE_ASC_0));
list.put(SortParams.title_desc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_TITLE_DECS_0));
list.put(
SortParams.dateLastModified_asc.name(),
Messages.get().key(Messages.GUI_SORT_LABEL_DATELASTMODIFIED_ASC_0));
list.put(
SortParams.dateLastModified_desc.name(),
Messages.get().key(Messages.GUI_SORT_LABEL_DATELASTMODIFIED_DESC_0));
list.put(SortParams.path_asc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_PATH_ASC_0));
list.put(SortParams.path_desc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_PATH_DESC_0));
if (includeType) {
list.put(SortParams.type_asc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_TYPE_ASC_0));
list.put(SortParams.type_desc.name(), Messages.get().key(Messages.GUI_SORT_LABEL_TYPE_DESC_0));
}
return list;
} |
python | def is_sock_ok(self, timeout_select):
"""check if socket is OK"""
self._socket_lock.acquire()
try:
ret = self._is_socket_ok(timeout_select)
finally:
self._socket_lock.release()
return ret |
python | def _list_audio_files(self, root, skip_rows=0):
"""Populates synsets - a map of index to label for the data items.
Populates the data in the dataset, making tuples of (data, label)
"""
self.synsets = []
self.items = []
if not self._train_csv:
# The audio files are organized in folder structure with
# directory name as label and audios in them
self._folder_structure(root)
else:
# train_csv contains mapping between filename and label
self._csv_labelled_dataset(root, skip_rows=skip_rows)
# Generating the synset.txt file now
if not os.path.exists("./synset.txt"):
with open("./synset.txt", "w") as synsets_file:
for item in self.synsets:
synsets_file.write(item+os.linesep)
print("Synsets is generated as synset.txt")
else:
warnings.warn("Synset file already exists in the current directory! Not generating synset.txt.") |
java | public PreparedStatement prepareStatement(final String sql) throws SQLException {
return internalPrepareStatement(sql,
ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY,
Statement.NO_GENERATED_KEYS);
} |
java | public static void unescapeJava(final String text, final Writer writer)
throws IOException {
if (writer == null) {
throw new IllegalArgumentException("Argument 'writer' cannot be null");
}
if (text == null) {
return;
}
if (text.indexOf('\\') < 0) {
// Fail fast, avoid the more complex (and less JIT-friendly) method executing if not needed
writer.write(text);
return;
}
JavaEscapeUtil.unescape(new InternalStringReader(text), writer);
} |
python | def _request(self, url, method='GET', params=None, api_call=None, json_encoded=False):
"""Internal request method"""
method = method.lower()
params = params or {}
func = getattr(self.client, method)
if isinstance(params, dict) and json_encoded is False:
params, files = _transparent_params(params)
else:
params = params
files = list()
requests_args = {}
for k, v in self.client_args.items():
# Maybe this should be set as a class variable and only done once?
if k in ('timeout', 'allow_redirects', 'stream', 'verify'):
requests_args[k] = v
if method == 'get' or method == 'delete':
requests_args['params'] = params
else:
# Check json_encoded so we send params as "data" or "json"
if json_encoded:
data_key = 'json'
else:
data_key = 'data'
requests_args.update({
data_key: params,
'files': files,
})
try:
response = func(url, **requests_args)
except requests.RequestException as e:
raise TwythonError(str(e))
# create stash for last function intel
self._last_call = {
'api_call': api_call,
'api_error': None,
'cookies': response.cookies,
'headers': response.headers,
'status_code': response.status_code,
'url': response.url,
'content': response.text,
}
# greater than 304 (not modified) is an error
if response.status_code > 304:
error_message = self._get_error_message(response)
self._last_call['api_error'] = error_message
ExceptionType = TwythonError
if response.status_code == 429:
# Twitter API 1.1, always return 429 when
# rate limit is exceeded
ExceptionType = TwythonRateLimitError
elif response.status_code == 401 or 'Bad Authentication data' \
in error_message:
# Twitter API 1.1, returns a 401 Unauthorized or
# a 400 "Bad Authentication data" for invalid/expired
# app keys/user tokens
ExceptionType = TwythonAuthError
raise ExceptionType(
error_message,
error_code=response.status_code,
retry_after=response.headers.get('X-Rate-Limit-Reset'))
content = ''
try:
if response.status_code == 204:
content = response.content
else:
content = response.json()
except ValueError:
if response.content != '':
raise TwythonError('Response was not valid JSON. \
Unable to decode.')
return content |
python | def _get_module_path(name, fail=False, install_path=None):
""" Find the path to the jpy jni modules. """
import imp
module = imp.find_module(name)
if not module and fail:
raise RuntimeError("can't find module '" + name + "'")
path = module[1]
if not path and fail:
raise RuntimeError("module '" + name + "' is missing a file path")
if install_path:
return os.path.join(install_path, os.path.split(path)[1])
return path |
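`imp.find_module` is deprecated and removed in current CPython releases. Below is a hedged `importlib`-based equivalent of the same lookup — the `install_path` re-rooting mirrors the snippet, the rest is an assumption, not jpy's actual code:

# Hypothetical importlib-based version of the module path lookup.
import importlib.util
import os

def get_module_path(name, fail=False, install_path=None):
    spec = importlib.util.find_spec(name)
    path = spec.origin if spec else None
    if not path and fail:
        raise RuntimeError("can't find module '" + name + "'")
    if path and install_path:
        return os.path.join(install_path, os.path.basename(path))
    return path

# Example: locate the standard-library json package on disk.
print(get_module_path("json"))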
python | def next(self):
"""
Handles the next debug event.
@see: L{cont}, L{dispatch}, L{wait}, L{stop}
@raise WindowsError: Raises an exception on error.
If the wait operation causes an error, debugging is stopped
(meaning all debugees are either killed or detached from).
If the event dispatching causes an error, the event is still
continued before returning. This may happen, for example, if the
event handler raises an exception nobody catches.
"""
try:
event = self.wait()
except Exception:
self.stop()
raise
try:
self.dispatch()
finally:
self.cont() |
java | public static boolean cholL( DMatrix5x5 A ) {
A.a11 = Math.sqrt(A.a11);
A.a12 = 0;
A.a13 = 0;
A.a14 = 0;
A.a15 = 0;
A.a21 = (A.a21)/A.a11;
A.a22 = Math.sqrt(A.a22-A.a21*A.a21);
A.a23 = 0;
A.a24 = 0;
A.a25 = 0;
A.a31 = (A.a31)/A.a11;
A.a32 = (A.a32-A.a31*A.a21)/A.a22;
A.a33 = Math.sqrt(A.a33-A.a31*A.a31-A.a32*A.a32);
A.a34 = 0;
A.a35 = 0;
A.a41 = (A.a41)/A.a11;
A.a42 = (A.a42-A.a41*A.a21)/A.a22;
A.a43 = (A.a43-A.a41*A.a31-A.a42*A.a32)/A.a33;
A.a44 = Math.sqrt(A.a44-A.a41*A.a41-A.a42*A.a42-A.a43*A.a43);
A.a45 = 0;
A.a51 = (A.a51)/A.a11;
A.a52 = (A.a52-A.a51*A.a21)/A.a22;
A.a53 = (A.a53-A.a51*A.a31-A.a52*A.a32)/A.a33;
A.a54 = (A.a54-A.a51*A.a41-A.a52*A.a42-A.a53*A.a43)/A.a44;
A.a55 = Math.sqrt(A.a55-A.a51*A.a51-A.a52*A.a52-A.a53*A.a53-A.a54*A.a54);
return !UtilEjml.isUncountable(A.a55);
} |
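The unrolled arithmetic above is the standard lower-triangular Cholesky factorisation A = L·Lᵀ, expanded for a fixed 5×5 matrix. A quick way to sanity-check it is against a general-purpose routine; the NumPy snippet below builds a symmetric positive-definite matrix on purpose and is only a cross-check, not part of the library:

# Cross-check the unrolled 5x5 Cholesky against numpy.linalg.cholesky.
import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((5, 5))
A = M @ M.T + 5 * np.eye(5)            # symmetric positive-definite by construction

L = np.linalg.cholesky(A)              # lower-triangular factor with A = L @ L.T
assert np.allclose(L @ L.T, A)
assert np.allclose(np.triu(L, 1), 0)   # strictly-upper entries are zero, as in the unrolled code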
java | public List<DbHistory> getPipePropertyHistory(String pipeName,
String propertyName) throws DevFailed {
return database.getClassPipePropertyHistory(className, pipeName, propertyName);
} |
java | public static int decodeInteger(ByteBuffer buf) {
DerId id = DerId.decode(buf);
if (!id.matches(DerId.TagClass.UNIVERSAL, DerId.EncodingType.PRIMITIVE, ASN1_INTEGER_TAG_NUM)) {
throw new IllegalArgumentException("Expected INTEGER identifier, received " + id);
}
int len = DerUtils.decodeLength(buf);
if (buf.remaining() < len) {
throw new IllegalArgumentException("Insufficient content for INTEGER");
}
int value = 0;
for (int i = 0; i < len; i++) {
value = (value << 8) + (0xff & buf.get());
}
return value;
} |
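The accumulation loop at the end is the core of DER INTEGER decoding: the content octets are big-endian. A hedged Python sketch of that step alone, skipping the identifier/length framing handled by the surrounding helpers and, like the Java code, ignoring sign extension for negative values:

# Decode the content octets of a DER INTEGER as an unsigned big-endian value.
def decode_integer_content(content: bytes) -> int:
    value = 0
    for b in content:
        value = (value << 8) + (b & 0xFF)
    return value

# 0x01 0x00 is 256; int.from_bytes agrees.
assert decode_integer_content(b"\x01\x00") == 256
assert decode_integer_content(b"\x01\x00") == int.from_bytes(b"\x01\x00", "big")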
python | def import_lookup_class(lookup_class):
"""
Import lookup_class as a dotted base and ensure it extends LookupBase
"""
from selectable.base import LookupBase
if isinstance(lookup_class, string_types):
mod_str, cls_str = lookup_class.rsplit('.', 1)
mod = import_module(mod_str)
lookup_class = getattr(mod, cls_str)
if not issubclass(lookup_class, LookupBase):
raise TypeError('lookup_class must extend from selectable.base.LookupBase')
return lookup_class |
python | def red(numbers):
"""Encode the deltas to reduce entropy."""
line = 0
deltas = []
for value in numbers:
deltas.append(value - line)
line = value
return b64encode(compress(b''.join(chr(i).encode('latin1') for i in deltas))).decode('latin1') |
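A matching decoder makes the round trip clearer: undo the base64 and zlib layers, then cumulatively sum the per-byte deltas. The `unred` helper below is hypothetical (it is not part of the snippet's module) and, like the encoder, only works while every delta fits in a single latin-1 byte:

# Hypothetical inverse of red(): base64 -> decompress -> cumulative sum of byte deltas.
from base64 import b64decode, b64encode
from zlib import compress, decompress

def red(numbers):  # same logic as the snippet above, repeated so the example runs on its own
    line, deltas = 0, []
    for value in numbers:
        deltas.append(value - line)
        line = value
    return b64encode(compress(b''.join(chr(i).encode('latin1') for i in deltas))).decode('latin1')

def unred(encoded):
    numbers, line = [], 0
    for delta in decompress(b64decode(encoded.encode('latin1'))):
        line += delta
        numbers.append(line)
    return numbers

assert unred(red([1, 2, 3, 10])) == [1, 2, 3, 10]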
python | def install(cls, uninstallable, prefix, path_items, root=None, warning=None):
"""Install an importer for modules found under ``path_items`` at the given import ``prefix``.
:param bool uninstallable: ``True`` if the installed importer should be uninstalled and any
imports it performed be un-imported when ``uninstall`` is called.
:param str prefix: The import prefix the installed importer will be responsible for.
:param path_items: The paths relative to ``root`` containing modules to expose for import under
``prefix``.
:param str root: The root path of the distribution containing the vendored code. NB: This is the
the path to the pex code, which serves as the root under which code is vendored
at ``pex/vendor/_vendored``.
:param str warning: An optional warning to emit if any imports are made through the installed
importer.
:return:
"""
root = cls._abs_root(root)
importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix))
vendor_importer = cls(root=root,
importables=importables,
uninstallable=uninstallable,
warning=warning)
sys.meta_path.insert(0, vendor_importer)
_tracer().log('Installed {}'.format(vendor_importer), V=3)
return vendor_importer |
python | def add_checkpoint(html_note, counter):
"""Recursively adds checkpoints to html tree.
"""
if html_note.text:
html_note.text = (html_note.text + CHECKPOINT_PREFIX +
str(counter) + CHECKPOINT_SUFFIX)
else:
html_note.text = (CHECKPOINT_PREFIX + str(counter) +
CHECKPOINT_SUFFIX)
counter += 1
for child in html_note.iterchildren():
counter = add_checkpoint(child, counter)
if html_note.tail:
html_note.tail = (html_note.tail + CHECKPOINT_PREFIX +
str(counter) + CHECKPOINT_SUFFIX)
else:
html_note.tail = (CHECKPOINT_PREFIX + str(counter) +
CHECKPOINT_SUFFIX)
counter += 1
return counter |
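A hedged usage sketch of the checkpoint walk: the marker constants and the lxml parsing below are assumptions (the snippet's CHECKPOINT_PREFIX/CHECKPOINT_SUFFIX live elsewhere in its module), but the traversal order is the same — an element's text first, then its children, then its tail:

# Hypothetical stand-alone version of the checkpoint walk over an lxml tree.
from lxml import html

CHECKPOINT_PREFIX = '[[#'   # assumed placeholder markers
CHECKPOINT_SUFFIX = ']]'

def add_checkpoint(node, counter):
    node.text = (node.text or '') + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX
    counter += 1
    for child in node.iterchildren():
        counter = add_checkpoint(child, counter)
    node.tail = (node.tail or '') + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX
    counter += 1
    return counter

root = html.fragment_fromstring('<p>hello <b>bold</b> tail</p>')
add_checkpoint(root, 0)
print(html.tostring(root))   # markers follow the text and tail of every element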