language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
---|---|
java | private static Type[] getImplicitBounds(final TypeVariable<?> typeVariable) {
Assert.requireNonNull(typeVariable, "typeVariable");
final Type[] bounds = typeVariable.getBounds();
return bounds.length == 0 ? new Type[]{Object.class} : normalizeUpperBounds(bounds);
} |
java | public static JsonValue parse(Reader reader) throws IOException {
if (reader == null) {
throw new NullPointerException(READER_IS_NULL);
}
DefaultHandler handler = new DefaultHandler();
new JsonParser(handler).parse(reader);
return handler.getValue();
} |
def get_kinto_records(kinto_client, bucket, collection, permissions,
                      config=None):
    """Return all the kinto records for this bucket/collection.

    The bucket and collection are created first if they do not already
    exist.  A 403 response on creation is tolerated (the user may simply
    lack creation rights on a pre-existing bucket/collection); any other
    ``KintoException`` is re-raised instead of being silently swallowed,
    which the previous version did.

    :param kinto_client: client used to talk to the Kinto server
    :param bucket: bucket id
    :param collection: collection id
    :param permissions: permissions applied when creating the collection
    :param config: unused, kept for backward compatibility
    :return: the list of records in the bucket/collection
    """
    # Create bucket if needed
    try:
        kinto_client.create_bucket(id=bucket, if_not_exists=True)
    except KintoException as e:
        # 403: the user cannot create buckets on this server, ignore the
        # creation.  Anything else is a real failure and must propagate.
        if not (hasattr(e, 'response') and e.response.status_code == 403):
            raise
    try:
        kinto_client.create_collection(id=collection, bucket=bucket,
                                       permissions=permissions,
                                       if_not_exists=True)
    except KintoException as e:
        # 403: the user cannot create collections on this bucket, ignore
        # the creation.  Anything else is a real failure and must propagate.
        if not (hasattr(e, 'response') and e.response.status_code == 403):
            raise
    return kinto_client.get_records(bucket=bucket, collection=collection)
java | @Override
public void registerStatsStorageListener(StatsStorageListener listener) {
if (!this.listeners.contains(listener)) {
this.listeners.add(listener);
}
} |
/**
 * Converts an {@link ApolloCall} into an RxJava 2 {@code Observable} that emits
 * each GraphQL {@link Response} produced by the call.
 *
 * <p>The call is enqueued when the observable is subscribed to, and cancelled
 * when the subscription is disposed. Emissions are suppressed after disposal.
 *
 * @param call the Apollo call to adapt, must not be null
 * @param <T> the response payload type
 * @return an observable over the call's responses
 */
@NotNull public static <T> Observable<Response<T>> from(@NotNull final ApolloCall<T> call) {
    checkNotNull(call, "call == null");
    return Observable.create(new ObservableOnSubscribe<Response<T>>() {
        @Override public void subscribe(final ObservableEmitter<Response<T>> emitter) throws Exception {
            // Tie the call's lifetime to the subscription: disposing cancels it.
            cancelOnObservableDisposed(emitter, call);
            call.enqueue(new ApolloCall.Callback<T>() {
                @Override public void onResponse(@NotNull Response<T> response) {
                    if (!emitter.isDisposed()) {
                        emitter.onNext(response);
                    }
                }
                @Override public void onFailure(@NotNull ApolloException e) {
                    // Fatal errors (OOM etc.) must not be routed into onError.
                    Exceptions.throwIfFatal(e);
                    if (!emitter.isDisposed()) {
                        emitter.onError(e);
                    }
                }
                @Override public void onStatusEvent(@NotNull ApolloCall.StatusEvent event) {
                    // Only terminate the stream once the call reports completion.
                    if (event == ApolloCall.StatusEvent.COMPLETED && !emitter.isDisposed()) {
                        emitter.onComplete();
                    }
                }
            });
        }
    });
}
def get(self):
    """Return the configured sort options and media types as a JSON response."""
    configured = current_app.config['RECORDS_REST_SORT_OPTIONS'].get(
        self.search_index)
    sort_fields = []
    if configured:
        # Present the options ordered by their configured 'order' value.
        by_order = sorted(configured.items(), key=lambda entry: entry[1]['order'])
        for name, option in by_order:
            field = {
                name: dict(
                    title=option['title'],
                    default_order=option.get('default_order', 'asc'),
                )
            }
            sort_fields.append(field)
    return jsonify(dict(
        sort_fields=sort_fields,
        max_result_window=self.max_result_window,
        default_media_type=self.default_media_type,
        search_media_types=sorted(self.search_media_types),
        item_media_types=sorted(self.item_media_types),
    ))
java | public boolean exitsMongoDbDataBase(String dataBaseName) {
List<String> dataBaseList = mongoClient.getDatabaseNames();
return dataBaseList.contains(dataBaseName);
} |
def decrement(self, key, value=1):
    """
    Decrement the value of an item in the cache.

    The configured key prefix is prepended before the key is sent to
    memcached.

    :param key: The cache key
    :type key: str
    :param value: The decrement value
    :type value: int
    :rtype: int or bool
    """
    # Delegates directly to the underlying memcache client; its return value
    # is passed through unchanged.
    return self._memcache.decr(self._prefix + key, value)
/**
 * Provides a default return type for a function that declared none.
 *
 * <p>Async generator, generator and async functions get their structural
 * wrapper type ({@code AsyncGenerator<?>}, {@code Generator<?>} or
 * {@code Promise<...>}); for other functions a return type is only
 * inferred (as void) in the conservative case where the body provably
 * returns nothing.
 */
private void provideDefaultReturnType() {
    if (contents.getSourceNode() != null && contents.getSourceNode().isAsyncGeneratorFunction()) {
        // Set the return type of a generator function to:
        //   @return {!AsyncGenerator<?>}
        ObjectType generatorType = typeRegistry.getNativeObjectType(ASYNC_GENERATOR_TYPE);
        returnType =
            typeRegistry.createTemplatizedType(
                generatorType, typeRegistry.getNativeType(UNKNOWN_TYPE));
        return;
    } else if (contents.getSourceNode() != null && contents.getSourceNode().isGeneratorFunction()) {
        // Set the return type of a generator function to:
        //   @return {!Generator<?>}
        ObjectType generatorType = typeRegistry.getNativeObjectType(GENERATOR_TYPE);
        returnType =
            typeRegistry.createTemplatizedType(
                generatorType, typeRegistry.getNativeType(UNKNOWN_TYPE));
        return;
    }
    JSType inferredReturnType = typeRegistry.getNativeType(UNKNOWN_TYPE);
    if (!contents.mayHaveNonEmptyReturns()
        && !contents.mayHaveSingleThrow()
        && !contents.mayBeFromExterns()) {
        // Infer return types for non-generator functions.
        // We need to be extremely conservative about this, because of two
        // competing needs.
        // 1) If we infer the return type of f too widely, then we won't be able
        //    to assign f to other functions.
        // 2) If we infer the return type of f too narrowly, then we won't be
        //    able to override f in subclasses.
        // So we only infer in cases where the user doesn't expect to write
        // @return annotations--when it's very obvious that the function returns
        // nothing.
        inferredReturnType = typeRegistry.getNativeType(VOID_TYPE);
        returnTypeInferred = true;
    }
    if (contents.getSourceNode() != null && contents.getSourceNode().isAsyncFunction()) {
        // Set the return type of an async function:
        //   @return {!Promise<?>} or @return {!Promise<undefined>}
        // The promise is parameterized with whatever was inferred above.
        ObjectType promiseType = typeRegistry.getNativeObjectType(PROMISE_TYPE);
        returnType = typeRegistry.createTemplatizedType(promiseType, inferredReturnType);
    } else {
        returnType = inferredReturnType;
    }
}
java | public IndexBrowser browse(Query params) throws AlgoliaException {
return new IndexBrowser(client, encodedIndexName, params, null, RequestOptions.empty);
} |
java | @Override
public Request<CreateTransitGatewayVpcAttachmentRequest> getDryRunRequest() {
Request<CreateTransitGatewayVpcAttachmentRequest> request = new CreateTransitGatewayVpcAttachmentRequestMarshaller().marshall(this);
request.addParameter("DryRun", Boolean.toString(true));
return request;
} |
java | public static systemcore[] get(nitro_service service, systemcore_args args) throws Exception{
systemcore obj = new systemcore();
options option = new options();
option.set_args(nitro_util.object_to_string_withoutquotes(args));
systemcore[] response = (systemcore[])obj.get_resources(service, option);
return response;
} |
def serialize_smarttag(ctx, document, el, root):
    """Serializes smarttag.

    Depending on the ``smarttag_span`` option the smarttag's children are
    rendered either into a dedicated ``<span class="smarttag">`` element or
    directly into ``root``.  After serialization the ``smarttag`` hooks are
    fired.

    :param ctx: serialization context providing serializers, options and hooks
    :param document: the document being serialized
    :param el: the smarttag element to serialize
    :param root: the parent element to append to
    :return: ``root``
    """
    if ctx.options['smarttag_span']:
        _span = etree.SubElement(root, 'span', {'class': 'smarttag', 'data-smarttag-element': el.element})
    else:
        _span = root
    for elem in el.elements:
        _ser = ctx.get_serializer(elem)
        if _ser:
            _td = _ser(ctx, document, elem, _span)
        else:
            if isinstance(elem, doc.Text):
                # ElementTree-style APIs keep text either in .text (before the
                # first child) or in the last child's .tail, so append the text
                # to whichever slot currently ends the span's content.
                children = list(_span)
                if len(children) == 0:
                    _text = _span.text or u''
                    _span.text = u'{}{}'.format(_text, elem.text)
                else:
                    _text = children[-1].tail or u''
                    children[-1].tail = u'{}{}'.format(_text, elem.text)
    fire_hooks(ctx, document, el, _span, ctx.get_hook('smarttag'))
    return root
def uuid(self):
    '''Universally unique identifier for an instance of a :class:`Model`.

    :raises DoesNotExist: if the instance has not been saved yet, i.e. it
        has no primary-key value to derive the uuid from.
    '''
    pk = self.pkvalue()
    # NOTE(review): a falsy primary key (e.g. 0 or '') is treated the same
    # as a missing one -- confirm no model uses falsy pk values.
    if not pk:
        raise self.DoesNotExist(
            'Object not saved. Cannot obtain universally unique id')
    return self.get_uuid(pk)
/**
 * Handles an update payload for a server text channel, dispatching change
 * events for the topic, nsfw flag and slowmode delay whenever the new
 * values differ from the cached ones.
 *
 * @param jsonChannel the channel update payload
 */
private void handleServerTextChannel(JsonNode jsonChannel) {
    long channelId = jsonChannel.get("id").asLong();
    api.getTextChannelById(channelId).map(c -> ((ServerTextChannelImpl) c)).ifPresent(channel -> {
        String oldTopic = channel.getTopic();
        // A missing or null topic is normalized to the empty string.
        String newTopic = jsonChannel.has("topic") && !jsonChannel.get("topic").isNull()
                ? jsonChannel.get("topic").asText() : "";
        if (!oldTopic.equals(newTopic)) {
            channel.setTopic(newTopic);
            ServerTextChannelChangeTopicEvent event =
                    new ServerTextChannelChangeTopicEventImpl(channel, newTopic, oldTopic);
            api.getEventDispatcher().dispatchServerTextChannelChangeTopicEvent(
                    (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event);
        }
        boolean oldNsfwFlag = channel.isNsfw();
        // NOTE(review): unlike "topic", the "nsfw" field is read without a
        // has()/isNull() guard -- confirm the payload always includes it.
        boolean newNsfwFlag = jsonChannel.get("nsfw").asBoolean();
        if (oldNsfwFlag != newNsfwFlag) {
            channel.setNsfwFlag(newNsfwFlag);
            ServerChannelChangeNsfwFlagEvent event =
                    new ServerChannelChangeNsfwFlagEventImpl(channel, newNsfwFlag, oldNsfwFlag);
            api.getEventDispatcher().dispatchServerChannelChangeNsfwFlagEvent(
                    (DispatchQueueSelector) channel.getServer(), null, channel.getServer(), channel, event);
        }
        int oldSlowmodeDelay = channel.getSlowmodeDelayInSeconds();
        // Missing "rate_limit_per_user" defaults to 0 (no slowmode).
        int newSlowmodeDelay = jsonChannel.get("rate_limit_per_user").asInt(0);
        if (oldSlowmodeDelay != newSlowmodeDelay) {
            channel.setSlowmodeDelayInSeconds(newSlowmodeDelay);
            ServerTextChannelChangeSlowmodeEvent event =
                    new ServerTextChannelChangeSlowmodeEventImpl(channel, oldSlowmodeDelay, newSlowmodeDelay);
            api.getEventDispatcher().dispatchServerTextChannelChangeSlowmodeEvent(
                    (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event
            );
        }
    });
}
java | private void cacheFormatCodes()
{
CTNumFmts numFmts = stylesheet.getNumFmts();
if(numFmts != null)
{
List list = numFmts.getNumFmt();
for(int i = 0; i < list.size(); i++)
addFormatCode((CTNumFmt)list.get(i));
}
}
/**
 * Creates a new workbook object, either wrapping the package of an
 * existing workbook or creating a fresh package.
 * @param os The output stream for the workbook
 * @param existing An existing workbook to add to, or null for a new one
 * @return The new workbook object
 * @throws IOException if the workbook cannot be created
 */
public static XlsxWorkbook createWorkbook(OutputStream os, Workbook existing)
    throws IOException
{
    try
    {
        SpreadsheetMLPackage sourcePkg = (existing != null)
            ? (SpreadsheetMLPackage)((XlsxWorkbook)existing).pkg
            : SpreadsheetMLPackage.createPackage();
        return new XlsxWorkbook(sourcePkg, os);
    }
    catch(Docx4JException e)
    {
        throw new IOException(e);
    }
}
/**
 * Creates a sheet in the workbook with the given name and lines of data.
 * <P>
 * On first use this also lazily initialises the workbook-level style
 * objects (fonts, default cell styles, border and fill) that the rows
 * reference.
 * @param columns The column definitions for the worksheet
 * @param lines The list of lines to be added to the worksheet
 * @param sheetName The name of the worksheet to be added
 * @return The worksheet created, or null if the workbook has been closed
 * @throws IOException if the sheet cannot be created
 */
@Override
public XlsxWorksheet createSheet(FileColumn[] columns, List<String[]> lines, String sheetName)
    throws IOException
{
    if(pkg == null)
        return null;
    WorksheetPart wsp = null;
    try
    {
        // Sheet ids are 1-based and allocated in creation order
        int sheetId = worksheets.size()+1;
        PartName sheetPart = new PartName("/xl/worksheets/sheet"+sheetId+".xml");
        wsp = pkg.createWorksheetPart(sheetPart, sheetName, (long)sheetId);
    }
    catch(InvalidFormatException e)
    {
        throw new IOException(e);
    }
    catch(javax.xml.bind.JAXBException e)
    {
        throw new IOException(e);
    }
    org.xlsx4j.sml.Worksheet sheet = (org.xlsx4j.sml.Worksheet)wsp.getJaxbElement();
    SheetData sheetData = sheet.getSheetData();
    // Create a default font for body and a bold font for header
    if(fonts.getFont().size() == 0)
    {
        fonts.getFont().add(getFont("Arial", 10, 2, false)); // fontId = 0
        fonts.getFont().add(getFont("Arial", 10, 2, true)); // fontId = 1
    }
    // Create default styles
    getXfPos(getXf(0L, 0L, 0, false));
    if(cellStyles.getCellStyle().size() == 0)
        cellStyles.getCellStyle().add(new CTCellStyle());
    if(cellStyleXfs.getXf().size() == 0)
        cellStyleXfs.getXf().add(new CTXf());
    cacheFormatCodes();
    // Default border
    if(borders.getBorder().size() == 0)
    {
        CTBorder border = new CTBorder();
        border.setLeft(new CTBorderPr());
        border.setRight(new CTBorderPr());
        border.setTop(new CTBorderPr());
        border.setBottom(new CTBorderPr());
        border.setDiagonal(new CTBorderPr());
        borders.getBorder().add(border);
    }
    // Default fill
    if(fills.getFill().size() == 0)
    {
        CTFill fill = new CTFill();
        CTPatternFill patternFill = new CTPatternFill();
        patternFill.setPatternType(STPatternType.NONE);
        fill.setPatternFill(patternFill);
        fills.getFill().add(fill);
    }
    int[] colWidths = appendRows(sheetData, columns, lines, sheetName);
    // Get the column count from the 1st row
    int numColumns = 0;
    List rows = sheetData.getRow();
    if(rows != null && rows.size() > 0)
    {
        Row r = (Row)rows.get(0);
        numColumns = r.getC().size();
    }
    // Set the column to autosize
    Cols cols = new Cols();
    sheet.getCols().add(cols);
    for(int i = 0; i < numColumns; i++)
    {
        Col col = new Col();
        col.setMin(i+1);
        col.setMax(i+1);
        // Widths computed by appendRows, plus a little padding
        if(colWidths != null && i < colWidths.length)
            col.setWidth(colWidths[i]+1.0d);
        cols.getCol().add(col);
    }
    XlsxWorksheet ret = new XlsxWorksheet(this, wsp);
    worksheets.put(sheetName, ret);
    return ret;
}
/**
 * Adds the given lines of data to an existing sheet in the workbook.
 * If no sheet with the given name exists, the call is a no-op.
 * @param columns The column definitions for the worksheet
 * @param lines The list of lines to be added to the worksheet
 * @param sheetName The name of the worksheet to append to
 */
@Override
public void appendToSheet(FileColumn[] columns, List<String[]> lines, String sheetName)
{
    XlsxWorksheet target = getSheet(sheetName);
    if(target == null)
        return;
    appendRows(target.getSheetData(), columns, lines, sheetName);
}
/**
 * Appends the given lines to the bottom of the given sheet.
 * <P>
 * Rows beyond MAX_ROWS are dropped with a logged warning, and cell text
 * length is capped at MAX_WIDTH for the column-width calculation.
 * @param sheetData The sheet to add the lines to
 * @param columns The column definitions for the worksheet
 * @param lines The list of lines to be added to the worksheet
 * @param sheetName The name of the worksheet to be added
 * @return The array of column widths (in characters, indexed by column)
 *         following the addition, or null if no lines were given
 */
private int[] appendRows(SheetData sheetData, FileColumn[] columns,
    List<String[]> lines, String sheetName)
{
    int[] colWidths = null;
    // Number of rows already present; new rows are appended after these
    int size = sheetData.getRow().size();
    for(int i = 0; i < lines.size(); i++)
    {
        String[] line = lines.get(i);
        if((i+size) > MAX_ROWS)
        {
            logger.severe("the worksheet '"+sheetName
                +"' has exceeded the maximum rows and will be truncated");
            break;
        }
        Row row = Context.getsmlObjectFactory().createRow();
        // Width tracking is sized from the first line's column count
        if(i == 0)
            colWidths = new int[line.length];
        row.setR(i+size+1L); // row references are 1-based
        for(int j = 0; j < line.length; j++)
        {
            Cell cell = Context.getsmlObjectFactory().createCell();
            String data = line[j];
            if(data == null)
                data = "";
            // Calculate the max column width for auto-fit
            if(colWidths != null && j < colWidths.length)
            {
                int len = data.length();
                // Only measure up to the first newline
                int pos = data.indexOf("\n");
                if(pos != -1)
                    len = pos;
                if(len > MAX_WIDTH)
                    len = MAX_WIDTH;
                if(len > colWidths[j])
                    colWidths[j] = len;
            }
            // Get a cell with the correct formatting
            short type = FileColumn.STRING_TYPE;
            short align = FileColumn.ALIGN_GENERAL;
            boolean wrap = false;
            long numFmtId = 0L;
            String format = null;
            if(columns != null && j < columns.length)
            {
                FileColumn column = columns[j];
                setCellAttributes(cell, column, j, i+size, data, sheetName);
                type = column.getType();
                align = column.getAlign();
                wrap = column.getWrap();
                format = column.getFormat();
            }
            else // Try to figure out the type of the column
            {
                try
                {
                    setNumberCell(cell, data);
                }
                catch(NumberFormatException e)
                {
                    setSharedStringCell(cell, data, sheetName);
                }
            }
            // NOTE(review): cell is assigned from createCell() above and is
            // never null here; the guard appears redundant but is kept as-is.
            if(cell != null)
            {
                STCellType t = cell.getT();
                String v = cell.getV();
                if(t == STCellType.S)
                {
                    numFmtId = 49L; // built-in "@" (text) format
                    // Date columns rendered as strings get a date format
                    if(i > 0 && type == FileColumn.DATETIME_TYPE)
                    {
                        if(format != null && format.length() > 0)
                        {
                            numFmtId = getFormatId(format);
                        }
                        else
                        {
                            Double d = 0.0d;
                            if(v != null && v.length() > 0)
                                d = Double.parseDouble(v);
                            // More than one day means date+time, else time only
                            if(d > 1.0d)
                                numFmtId = 22L; // built-in "m/d/yy h:mm"
                            else
                                numFmtId = 20L; // built-in "h:mm"
                        }
                    }
                }
                else if(t == STCellType.N)
                {
                    if(format != null && format.length() > 0)
                        numFmtId = getFormatId(format);
                    else
                        numFmtId = 1L; // built-in "0" format
                }
                // Bold style (fontId 1) for the header row, plain otherwise
                CTXf xf = getXf((i+size) == 0 && hasHeaders() ? 1L : 0L,
                    numFmtId, align, wrap);
                int pos = getXfPos(xf);
                if(pos > 0)
                    cell.setS((long)pos);
                // Cell reference, e.g. "B3"
                cell.setR(getColumn(j)+Integer.toString(i+1+size));
                row.getC().add(cell);
            }
        }
        sheetData.getRow().add(row);
    }
    return colWidths;
}
/**
 * Returns the spreadsheet column reference from the zero-based column number.
 * <P>
 * eg. 0=A, 1=B, 25=Z, 26=AA, 701=ZZ, 702=AAA
 * <P>
 * Uses bijective base-26 conversion, so any column number is supported.
 * (The previous two-character implementation produced invalid references
 * such as '[AA' for columns beyond 701.)
 * @param i The zero-based column number
 * @return The column reference for the column number
 */
private String getColumn(int i)
{
    StringBuilder buff = new StringBuilder();
    // Each step emits the least significant letter and shifts right;
    // the "- 1" makes the numbering bijective (there is no zero digit).
    for(int n = i; n >= 0; n = n/26 - 1)
        buff.insert(0, (char)('A'+(n%26)));
    return buff.toString();
}
/**
 * Returns the position of the given Xf in the list.
 * <P>
 * If the item is not present in the list,
 * the new item is added and its position returned.
 * <P>
 * Two Xfs are considered equal when their font id, apply-font flag,
 * number format id, apply-number-format flag, wrap text, horizontal
 * alignment and apply-alignment flag all match.
 * @param xf The Xf to be checked
 * @return The position of the given Xf in the list
 */
private int getXfPos(CTXf xf)
{
    int ret = -1;
    List list = cellXfs.getXf();
    // Normalise the candidate's nullable attributes to their defaults
    long xfFontId = 0L;
    if(xf.getFontId() != null)
        xfFontId = xf.getFontId().longValue();
    boolean xfApplyFont = false;
    if(xf.isApplyFont() != null)
        xfApplyFont = xf.isApplyFont().booleanValue();
    long xfNumFmtId = 0L;
    if(xf.getNumFmtId() != null)
        xfNumFmtId = xf.getNumFmtId().longValue();
    boolean xfApplyNumberFormat = false;
    if(xf.isApplyNumberFormat() != null)
        xfApplyNumberFormat = xf.isApplyNumberFormat().booleanValue();
    boolean xfApplyAlignment = false;
    if(xf.isApplyAlignment() != null)
        xfApplyAlignment = xf.isApplyAlignment().booleanValue();
    boolean xfWrap = false;
    STHorizontalAlignment xfHoriz = STHorizontalAlignment.GENERAL;
    CTCellAlignment xfAlign = xf.getAlignment();
    if(xfAlign != null)
    {
        Boolean b = xfAlign.isWrapText();
        if(b != null)
            xfWrap = b.booleanValue();
        if(xfAlign.getHorizontal() != null)
            xfHoriz = xfAlign.getHorizontal();
    }
    // Linear scan for an existing Xf with the same normalised attributes
    for(int i = 0; i < list.size() && ret == -1; i++)
    {
        CTXf item = (CTXf)list.get(i);
        long itemFontId = 0L;
        if(item.getFontId() != null)
            itemFontId = item.getFontId().longValue();
        boolean itemApplyFont = false;
        if(item.isApplyFont() != null)
            itemApplyFont = item.isApplyFont().booleanValue();
        long itemNumFmtId = 0L;
        if(item.getNumFmtId() != null)
            itemNumFmtId = item.getNumFmtId().longValue();
        boolean itemApplyNumberFormat = false;
        if(item.isApplyNumberFormat() != null)
            itemApplyNumberFormat = item.isApplyNumberFormat().booleanValue();
        boolean itemApplyAlignment = false;
        if(item.isApplyAlignment() != null)
            itemApplyAlignment = item.isApplyAlignment().booleanValue();
        boolean itemWrap = false;
        STHorizontalAlignment itemHoriz = STHorizontalAlignment.GENERAL;
        CTCellAlignment itemAlign = item.getAlignment();
        if(itemAlign != null)
        {
            Boolean b = itemAlign.isWrapText();
            if(b != null)
                itemWrap = b.booleanValue();
            if(itemAlign.getHorizontal() != null)
                itemHoriz = itemAlign.getHorizontal();
        }
        if(xfFontId == itemFontId
            && xfApplyFont == itemApplyFont
            && xfNumFmtId == itemNumFmtId
            && xfApplyNumberFormat == itemApplyNumberFormat
            && xfWrap == itemWrap
            && xfHoriz == itemHoriz
            && xfApplyAlignment == itemApplyAlignment)
        {
            ret = i;
        }
    }
    // Not found: append the candidate and return its new position
    if(ret == -1)
    {
        ret = list.size();
        list.add(xf);
    }
    return ret;
}
/**
 * Returns a new CTXf object.
 * <P>
 * The xf, border and fill ids always reference the defaults; the
 * optional attributes (number format, font, alignment) are only set
 * when they differ from the defaults.
 * @param fontId The id of the font
 * @param numFmtId The number format of the cell
 * @param align The alignment of the cell
 * @param wrap <CODE>true</CODE> if line wrapping should be enabled for the cell
 * @return The new CTXf object
 */
private CTXf getXf(long fontId, long numFmtId, int align, boolean wrap)
{
    CTXf ret = new CTXf();
    ret.setXfId(0L);
    ret.setBorderId(0L);
    ret.setFillId(0L);
    if(numFmtId > 0L)
    {
        ret.setApplyNumberFormat(true);
        ret.setNumFmtId(numFmtId);
    }
    if(fontId > 0L)
    {
        ret.setApplyFont(true);
        ret.setFontId(fontId);
    }
    if(align != FileColumn.ALIGN_GENERAL || wrap)
    {
        ret.setApplyAlignment(true);
        CTCellAlignment ca = new CTCellAlignment();
        if(align != FileColumn.ALIGN_GENERAL)
        {
            // Map the FileColumn alignment constant to the sml enum
            STHorizontalAlignment a = STHorizontalAlignment.GENERAL;
            if(align == FileColumn.ALIGN_CENTRE)
                a = STHorizontalAlignment.CENTER;
            else if(align == FileColumn.ALIGN_LEFT)
                a = STHorizontalAlignment.LEFT;
            else if(align == FileColumn.ALIGN_RIGHT)
                a = STHorizontalAlignment.RIGHT;
            else if(align == FileColumn.ALIGN_JUSTIFY)
                a = STHorizontalAlignment.JUSTIFY;
            else if(align == FileColumn.ALIGN_FILL)
                a = STHorizontalAlignment.FILL;
            ca.setHorizontal(a);
        }
        if(wrap)
            ca.setWrapText(true);
        ret.setAlignment(ca);
    }
    return ret;
}
/**
 * Sets the type and value of the given cell from its column definition.
 * <P>
 * Header rows, empty values and the literal string "null" are always
 * written as shared strings; otherwise the column type selects the cell
 * representation.  If the data cannot be parsed for the declared type,
 * the cell falls back to a shared string.
 * @param cell The cell to set attributes for
 * @param column The column definition to take the attributes from
 * @param col The column number of the cell
 * @param row The row number of the cell
 * @param data The data in the cell
 * @param sheetName The name of the worksheet containing the cell
 */
private void setCellAttributes(Cell cell, FileColumn column, int col, int row,
    String data, String sheetName)
{
    short type = column.getType();
    try
    {
        if((row == 0 && hasHeaders()) || data.length() == 0 || data.equals("null"))
        {
            setSharedStringCell(cell, data, sheetName);
        }
        else if(type == FileColumn.STRING_TYPE || type == FileColumn.NO_TYPE)
        {
            setSharedStringCell(cell, data, sheetName);
        }
        else if(type == FileColumn.NUMBER_TYPE)
        {
            setNumberCell(cell, data);
        }
        else if(type == FileColumn.BOOLEAN_TYPE)
        {
            setBooleanCell(cell, data);
        }
        else if(type == FileColumn.DATETIME_TYPE)
        {
            setDateCell(cell, data);
        }
    }
    catch(NumberFormatException e)
    {
        // Unparseable numeric/date data: log it and store as text instead
        logger.severe("column has illegal type or format (data="+data
            +", column=["+column.toString()+"]): "+e.getMessage());
        setSharedStringCell(cell, data, sheetName);
    }
}
/**
 * Sets the given cell to the "S" (shared string) type.
 * <P>
 * The string is added to the workbook's shared string table (reusing an
 * existing entry when the same text has been seen before) and the cell
 * value is set to the table index.
 * @param cell The cell to set
 * @param data The data in the cell
 * @param sheetName The name of the worksheet containing the cell
 */
private void setSharedStringCell(Cell cell, String data, String sheetName)
{
    // Lazily build the lookup map from the existing shared string table.
    // NOTE(review): the preloaded keys here are CTXstringWhitespace objects
    // while later lookups use String values -- preloaded entries may never
    // match; verify against how a pre-populated table is expected to behave.
    if(stringMap == null)
    {
        stringMap = new HashMap();
        List list = strings.getSi();
        for(int i = 0; i < list.size(); i++)
        {
            CTRst ctrst = (CTRst)list.get(i);
            stringMap.put(ctrst.getT(), new Integer(i));
        }
    }
    // Truncate the cell text if > 32767
    if(data.length() > MAX_LENGTH)
    {
        logger.severe("Truncating cell in sheet '"+sheetName
            +"' as maximum length exceeded: "+data.length());
        // NOTE(review): keeps MAX_LENGTH-1 characters, one fewer than the
        // stated maximum -- confirm the off-by-one is intended
        data = data.substring(0, MAX_LENGTH-1);
    }
    // Remove any illegal characters from the value
    String converted = StringUtilities.stripNonValidXMLCharacters(data);
    if(!data.equals(converted))
    {
        logger.severe("Removed illegal characters from cell in sheet '"
            +sheetName+"': "+data);
        data = converted;
    }
    cell.setT(STCellType.S);
    int pos = -1;
    Integer str = (Integer)stringMap.get(data);
    if(str == null)
    {
        // New string: append to the table and remember its index
        CTRst crt = new CTRst();
        CTXstringWhitespace csw = new CTXstringWhitespace();
        csw.setValue(data);
        crt.setT(csw);
        strings.getSi().add(crt);
        pos = stringMap.size();
        stringMap.put(data, new Integer(pos));
    }
    else
    {
        pos = str.intValue();
    }
    cell.setV(Integer.toString(pos));
}
/**
 * Sets the given cell to the "N" (number) type.
 * @param cell The cell to set
 * @param data The data in the cell
 * @throws NumberFormatException if the data is not a valid number
 */
private void setNumberCell(Cell cell, String data)
{
    // The parse result is deliberately unused: the call validates that the
    // data is numeric, throwing NumberFormatException so callers can fall
    // back to a shared-string cell.
    double num = Double.parseDouble(data);
    cell.setT(STCellType.N);
    cell.setV(data);
}
/**
 * Sets the given cell's value to a spreadsheet serial date.
 * <P>
 * The data is expected to be epoch milliseconds; it is converted to
 * days (the spreadsheet serial-date representation).
 * @param cell The cell to set
 * @param data The data in the cell, epoch milliseconds as a string
 * @throws NumberFormatException if the data is not a valid long
 */
private void setDateCell(Cell cell, String data)
{
    long dt = Long.parseLong(data);
    dt += getOffset(dt); // adjust by the configured offset
    double d = dt/86400000d; // Convert millis to days
    // NOTE(review): values <= 1 day skip the 1900-epoch offset (treated as
    // time-of-day fractions) -- confirm this is the intended behaviour.
    if(d > 1.0d) // Times stored as fraction of a day
        d += 25569d; // Add offset because excel dates are from 1900
    // NOTE(review): unlike the other setters, no cell type is set here --
    // verify the default type produces the expected date rendering.
    cell.setV(Double.toString(d));
}
/**
 * Sets the given cell to the "B" (boolean) type.
 * @param cell The cell to set
 * @param data The data in the cell; note the comparison is
 *             case-sensitive, so only the exact string "TRUE" maps to 1
 */
private void setBooleanCell(Cell cell, String data)
{
    cell.setT(STCellType.B);
    cell.setV(data.equals("TRUE") ? "1" : "0");
}
/**
 * Returns a new CTFont object with the given attributes.
 * @param name The name of the font
 * @param sz The size of the font
 * @param family The family of the font
 * @param bold <CODE>true</CODE> if the font should be bold
 * @return The new CTFont object with the given attributes
 */
private CTFont getFont(String name, int sz, int family, boolean bold)
{
    CTFont ret = new CTFont();
    // Font attributes are stored as a heterogeneous list of JAXB elements
    List list = ret.getNameOrCharsetOrFamily();
    list.add(new JAXBElement(new QName(URI, "name"),
        CTFontName.class, getFontName(name)));
    list.add(new JAXBElement(new QName(URI, "sz"),
        CTFontSize.class, getFontSize(sz)));
    list.add(new JAXBElement(new QName(URI, "family"),
        CTIntProperty.class, getIntProperty(family)));
    // The bold property is only emitted when set
    if(bold)
        list.add(new JAXBElement(new QName(URI, "b"),
            CTBooleanProperty.class, getBooleanProperty(bold)));
    return ret;
}
/**
 * Returns a new CTFontName object.
 * @param name The name of the font
 * @return The new CTFontName object
 */
private CTFontName getFontName(String name)
{
    CTFontName fontName = new CTFontName();
    fontName.setVal(name);
    return fontName;
}
/**
 * Returns a new CTFontSize object.
 * @param sz The size of the font
 * @return The new CTFontSize object
 */
private CTFontSize getFontSize(int sz)
{
    CTFontSize fontSize = new CTFontSize();
    fontSize.setVal(sz);
    return fontSize;
}
/**
 * Returns a new CTIntProperty object.
 * @param i The value of the property
 * @return The new CTIntProperty object
 */
private CTIntProperty getIntProperty(int i)
{
    CTIntProperty property = new CTIntProperty();
    property.setVal(i);
    return property;
}
/**
 * Returns a new CTBooleanProperty object.
 * @param b The value of the property
 * @return The new CTBooleanProperty object
 */
private CTBooleanProperty getBooleanProperty(boolean b)
{
    CTBooleanProperty property = new CTBooleanProperty();
    property.setVal(b);
    return property;
}
/**
 * Write the workbook.
 * <P>
 * Saves to the output stream if one was given, otherwise to the file.
 * Does nothing if the workbook has already been closed, or if neither
 * destination is set.
 * @throws IOException if the workbook cannot be written
 */
@Override
public void write() throws IOException
{
    if(pkg != null)
    {
        try
        {
            if(outputStream != null)
            {
                Save saver = new Save(pkg);
                saver.save(outputStream);
            }
            else if(file != null)
            {
                pkg.save(file);
            }
        }
        catch(Docx4JException e)
        {
            throw new IOException(e);
        }
    }
}
/**
 * Close the workbook.
 * <P>
 * Releases all references to the underlying docx4j/xlsx4j objects and
 * clears the cached worksheets and shared strings.  Note this does not
 * close the output stream; that remains the caller's responsibility.
 */
@Override
public void close()
{
    pkg = null;
    file = null;
    outputStream = null;
    wbp = null;
    if(sheets != null)
        sheets.clear();
    prp = null;
    wbrp = null;
    sharedStrings = null;
    strings = null;
    styles = null;
    numFmts = null;
    if(stringMap != null)
        stringMap.clear();
    worksheets.clear();
}
/**
 * Initialises the JAXB contexts
 * <P>
 * This is an expensive one-off operation, so the result is cached in a
 * static flag.
 * NOTE(review): the check-then-set on jaxbInitialised is not synchronized,
 * so concurrent first calls could both run the initialisation -- confirm
 * single-threaded startup is guaranteed.
 */
public static void initJaxbContexts()
{
    if(!jaxbInitialised)
    {
        logger.info("Initialising xlsx subsystem...");
        // Constructing the contexts triggers their static initialisation
        new org.docx4j.jaxb.Context();
        new org.xlsx4j.jaxb.Context();
        logger.info("Initialisation of xlsx subsystem complete");
        jaxbInitialised = true;
    }
}
/**
 * Prints information on the fonts for this workbook.
 */
public void printFontInfo()
{
    if(fonts == null)
        return;
    List fontList = fonts.getFont();
    for(int i = 0; i < fontList.size(); i++)
    {
        CTFont ctFont = (CTFont)fontList.get(i);
        List properties = ctFont.getNameOrCharsetOrFamily();
        for(int j = 0; j < properties.size(); j++)
        {
            JAXBElement property = (JAXBElement)properties.get(j);
            logger.info("fonts: i="+i+" name="+property.getName()
                +" value="+property.getValue()
                +" obj="+property.getValue().getClass().getName());
        }
    }
}
// Cache of the built-in Excel number formats, keyed by format string.
private static Map builtinNumFmts = new HashMap();
static
{
    // Add the built in formats to the cache
    builtinNumFmts.put("0", new Long(1L));
    builtinNumFmts.put("0.00", new Long(2L));
    builtinNumFmts.put("#,##0", new Long(3L));
    builtinNumFmts.put("#,##0.00", new Long(4L));
    builtinNumFmts.put("0%", new Long(9L));
    builtinNumFmts.put("0.00%", new Long(10L));
    builtinNumFmts.put("0.00E+00", new Long(11L));
    builtinNumFmts.put("# ?/?", new Long(12L));
    builtinNumFmts.put("# ??/??", new Long(13L));
    builtinNumFmts.put("mm-dd-yy", new Long(14L));
    builtinNumFmts.put("d-mmm-yy", new Long(15L));
    builtinNumFmts.put("d-mmm", new Long(16L));
    builtinNumFmts.put("mmm-yy", new Long(17L));
    builtinNumFmts.put("h:mm AM/PM", new Long(18L));
    builtinNumFmts.put("h:mm:ss AM/PM", new Long(19L));
    builtinNumFmts.put("h:mm", new Long(20L));
    builtinNumFmts.put("h:mm:ss", new Long(21L));
    builtinNumFmts.put("m/d/yy h:mm", new Long(22L));
    // NOTE(review): the key "[$-404]e/m/d" is inserted four times below
    // (ids 27, 36, 50, 57); each put overwrites the previous one, so only
    // id 57 survives -- confirm which id is actually wanted.
    builtinNumFmts.put("[$-404]e/m/d", new Long(27L));
    builtinNumFmts.put("m/d/yy", new Long(30L));
    builtinNumFmts.put("[$-404]e/m/d", new Long(36L));
    builtinNumFmts.put("#,##0 ;(#,##0)", new Long(37L));
    builtinNumFmts.put("#,##0 ;[Red](#,##0)", new Long(38L));
    builtinNumFmts.put("#,##0.00;(#,##0.00)", new Long(39L));
    builtinNumFmts.put("#,##0.00;[Red](#,##0.00)", new Long(40L));
    builtinNumFmts.put("mm:ss", new Long(45L));
    builtinNumFmts.put("[h]:mm:ss", new Long(46L));
    builtinNumFmts.put("mmss.0", new Long(47L));
    builtinNumFmts.put("##0.0E+0", new Long(48L));
    builtinNumFmts.put("@", new Long(49L));
    builtinNumFmts.put("[$-404]e/m/d", new Long(50L));
    builtinNumFmts.put("[$-404]e/m/d", new Long(57L));
}
// The underlying docx4j spreadsheet package; null after close()
protected SpreadsheetMLPackage pkg;
// Output destination: exactly one of file/outputStream is used on write()
private File file;
private OutputStream outputStream;
private WorkbookPart wbp;
private List sheets;
private RelationshipsPart prp, wbrp;
private SharedStrings sharedStrings;
private CTSst strings;
// Created worksheets keyed by sheet name
private Map<String,XlsxWorksheet> worksheets = new HashMap<String,XlsxWorksheet>();
private Styles styles;
private Map numFmts = null;
private CTStylesheet stylesheet;
// Lazy lookup from shared string text to its table index
private Map stringMap = null;
private CTFonts fonts;
private CTCellXfs cellXfs;
private CTCellStyles cellStyles;
private CTCellStyleXfs cellStyleXfs;
private CTBorders borders;
private CTFills fills;
// Guards the one-off JAXB context initialisation
private static boolean jaxbInitialised = false;
}
java | public Future<ChangeMessageVisibilityResult> changeMessageVisibility(ChangeMessageVisibilityRequest request,
AsyncHandler<ChangeMessageVisibilityRequest, ChangeMessageVisibilityResult> handler) {
QueueBufferCallback<ChangeMessageVisibilityRequest, ChangeMessageVisibilityResult> callback = null;
if (handler != null) {
callback = new QueueBufferCallback<ChangeMessageVisibilityRequest, ChangeMessageVisibilityResult>(handler, request);
}
QueueBufferFuture<ChangeMessageVisibilityRequest, ChangeMessageVisibilityResult> future =
sendBuffer.changeMessageVisibility(request, callback);
future.setBuffer(this);
return future;
} |
def metadata(self, delete=False):
    """
    Gets the metadata.

    :param delete: when True, issue a DELETE instead of a GET
    :return: the decoded JSON body of the response
    """
    endpoint = self.__v1() + "/metadata"
    request = self._session.delete if delete else self._session.get
    return request(endpoint).json()
def parse_verbosity(self, args):
    '''parse_verbosity will take an argument object, and return the args
    passed (from a dictionary) to a list

    The previous implementation compared ``args.verbose`` with ``is True``,
    so an integer verbosity count (e.g. from argparse's ``action='count'``)
    was silently ignored and a boolean True always produced a single 'v'.
    This version keeps the boolean behaviour (True -> '-v') and also
    supports integer counts (3 -> '-vvv').

    Parameters
    ==========
    args: the argparse argument objects
    '''
    flags = []
    if args.silent is True:
        flags.append('--silent')
    elif args.quiet is True:
        flags.append('--quiet')
    elif args.debug is True:
        flags.append('--debug')
    elif args.verbose:
        # True counts as one level; integers give one 'v' per level.
        level = 1 if args.verbose is True else int(args.verbose)
        flags.append('-' + 'v' * level)
    return flags
java | @SuppressWarnings("unchecked")
D minusYears(long yearsToSubtract) {
return (yearsToSubtract == Long.MIN_VALUE ? ((ChronoLocalDateImpl<D>)plusYears(Long.MAX_VALUE)).plusYears(1) : plusYears(-yearsToSubtract));
} |
java | private FileStatus createFileStatus(StoredObject tmp, Container cObj,
String hostName, Path path) throws IllegalArgumentException, IOException {
String newMergedPath = getMergedPath(hostName, path, tmp.getName());
return new FileStatus(tmp.getContentLength(), false, 1, blockSize,
Utils.lastModifiedAsLong(tmp.getLastModified()), 0, null,
null, null, new Path(newMergedPath));
} |
/**
 * Marshals the given Diagnostics instance into the protocol marshaller.
 *
 * @param diagnostics the object to marshall, must not be null
 * @param protocolMarshaller the target marshaller
 * @throws SdkClientException if the argument is null or marshalling fails
 */
public void marshall(Diagnostics diagnostics, ProtocolMarshaller protocolMarshaller) {
    if (diagnostics == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // Each field is written against its pre-built marshalling binding
        protocolMarshaller.marshall(diagnostics.getErrorCode(), ERRORCODE_BINDING);
        protocolMarshaller.marshall(diagnostics.getScriptName(), SCRIPTNAME_BINDING);
        protocolMarshaller.marshall(diagnostics.getMessage(), MESSAGE_BINDING);
        protocolMarshaller.marshall(diagnostics.getLogTail(), LOGTAIL_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
def move_to(self, position):
    """Set the Coordinator to a specific endpoint or time, or load state from a token.

    :param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a
        :attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>`
    :raises InvalidPosition: if the position is none of the supported forms
    """
    # Dispatch on the shape of the argument:
    # a mapping is treated as a saved token,
    if isinstance(position, collections.abc.Mapping):
        move = _move_stream_token
    # anything datetime-like (has a callable .timestamp) is a point in time,
    elif hasattr(position, "timestamp") and callable(position.timestamp):
        move = _move_stream_time
    # and the two well-known endpoint names select a stream endpoint.
    elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]:
        move = _move_stream_endpoint
    else:
        raise InvalidPosition("Don't know how to move to position {!r}".format(position))
    move(self, position)
java | public Iterator<Runnable> buildIterator() {
return new Iterator<Runnable>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public Runnable next() {
return null;
}
@Override
public void remove() {
}
};
} |
def is_running(self):
    """Check if ZAP is running.

    Returns False when nothing answers on the proxy URL, True when the
    responder looks like ZAP, and raises ZAPError when some other process
    is occupying the port.
    """
    try:
        response = requests.get(self.proxy_url)
    except RequestException:
        return False
    allow_headers = response.headers.get('Access-Control-Allow-Headers', [])
    if 'ZAP-Header' in allow_headers:
        return True
    raise ZAPError('Another process is listening on {0}'.format(self.proxy_url))
java | public static Document parseEscapedXmlString(String input) throws UnmarshalException {
String deEscapedXml = deEscapeXml(input);
return parseXmlString(deEscapedXml);
} |
def payload(self):
    """
    Extract the payload part of the signed/encrypted JSON Web Token.

    Unless the 'cty' header explicitly declares a non-JWT content type, the
    payload is treated as JSON and deserialized into a Python object; if
    deserialization fails (or was skipped) the raw text is returned as-is.

    :return: The payload
    """
    payload_text = as_unicode(self.part[1])
    declared_non_jwt = (
        "cty" in self.headers and self.headers["cty"].lower() != "jwt"
    )
    if not declared_non_jwt:
        try:
            payload_text = json.loads(payload_text)
        except ValueError:
            # Not valid JSON: hand back the raw string unchanged.
            pass
    return payload_text
def get_result(self):
    """
    Execute the configured HTTP request and return the response object.

    GET requests get a longer timeout (360s) than other verbs (60s).
    On any connection failure an error dict is returned instead of raising.
    """
    timeout = 360 if self.method == "GET" else 60
    headers = {
        "Authorization": "Bearer " + self.key,
        "content-type": "application/json",
    }
    try:
        # Resolve the verb ("get", "post", ...) to the matching requests call.
        http_call = getattr(requests, self.method.lower())
        return http_call(
            self.url,
            headers=headers,
            params=self.data,
            data=self.data,
            timeout=timeout,
        )
    except requests.exceptions.RequestException:
        return {
            "objeto": "error",
            "tipo": "error_api",
            "codigo_error": "404",
            "mensaje": "conexion...",
            "mensaje_usuario": "¡Error de conexion!",
        }
def make_template_paths(template_file, paths=None):
    """
    Build the list of template search paths for the given template file.

    User-supplied 'paths' take precedence over the directory that contains
    'template_file', which is always appended last.

    :param template_file: Absolute or relative path to the template file
    :param paths: Optional list of template search paths given by the user
    :return: List of template search paths ([str])

    >>> make_template_paths("/path/to/a/template")
    ['/path/to/a']
    >>> make_template_paths("/path/to/a/template", ["/tmp"])
    ['/tmp', '/path/to/a']
    """
    template_dir = os.path.abspath(os.path.dirname(template_file))
    if paths is None:
        return [template_dir]
    return list(paths) + [template_dir]
java | @TargetApi(Build.VERSION_CODES.JELLY_BEAN_MR1)
public void setCompoundDrawablesRelative (Drawable start, Drawable top, Drawable end, Drawable bottom){
if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1)
mInputView.setCompoundDrawablesRelative(start, top, end, bottom);
else
mInputView.setCompoundDrawables(start, top, end, bottom);
} |
java | public static Hours from(TemporalAmount amount) {
if (amount instanceof Hours) {
return (Hours) amount;
}
Objects.requireNonNull(amount, "amount");
int hours = 0;
for (TemporalUnit unit : amount.getUnits()) {
long value = amount.get(unit);
if (value != 0) {
long[] converted = Temporals.convertAmount(value, unit, HOURS);
if (converted[1] != 0) {
throw new DateTimeException(
"Amount could not be converted to a whole number of hours: " + value + " " + unit);
}
hours = Math.addExact(hours, Math.toIntExact(converted[0]));
}
}
return of(hours);
} |
def combinatorics(self):
    """
    Returns mutually exclusive/inclusive mappings
    Returns
    -------
    (dict,dict)
        A tuple of 2 dictionaries.
        For each mapping key, the first dict has as value the set of mutually
        exclusive mappings while the second dict has as value the set of
        mutually inclusive mappings.
    """
    # Only columns that are neither always-on nor always-off can be
    # exclusive/inclusive with another column.
    column_means = self.__matrix.mean(axis=0)
    candidate_cols = np.where((column_means > 0) & (column_means < 1))[0]
    exclusive = defaultdict(set)
    inclusive = defaultdict(set)
    for col_a, col_b in it.combinations(candidate_cols, 2):
        differs = np.logical_xor(self.__matrix[:, col_a], self.__matrix[:, col_b])
        mapping_a = self.hg.mappings[col_a]
        mapping_b = self.hg.mappings[col_b]
        if differs.all():
            # Never active together -> mutually exclusive.
            exclusive[mapping_a].add(mapping_b)
            exclusive[mapping_b].add(mapping_a)
        if (~differs).all():
            # Always active together -> mutually inclusive.
            inclusive[mapping_a].add(mapping_b)
            inclusive[mapping_b].add(mapping_a)
    return exclusive, inclusive
def _close_writable(self):
    """
    Close the object in write mode.

    Waits for all pending part uploads to finish, then completes the S3
    multipart upload; if completion fails, the upload is aborted so no
    orphaned parts are left behind, and the error is re-raised.
    """
    # Wait parts upload completion
    for part in self._write_futures:
        # Each future resolves to the upload-part response; keep only its ETag,
        # which is what complete_multipart_upload needs.
        part['ETag'] = part.pop('response').result()['ETag']
    # Complete multipart upload
    with _handle_client_error():
        try:
            self._client.complete_multipart_upload(
                MultipartUpload={'Parts': self._write_futures},
                UploadId=self._upload_args['UploadId'],
                **self._client_kwargs)
        except _ClientError:
            # Clean up if failure
            self._client.abort_multipart_upload(
                UploadId=self._upload_args['UploadId'],
                **self._client_kwargs)
            raise
java | public java.util.List<InstanceCapacity> getAvailableInstanceCapacity() {
if (availableInstanceCapacity == null) {
availableInstanceCapacity = new com.amazonaws.internal.SdkInternalList<InstanceCapacity>();
}
return availableInstanceCapacity;
} |
java | public static Interval[][] calculateTemporalDistance( Interval[][] constraintMatrix ) {
Interval[][] result = new Interval[constraintMatrix.length][];
for( int i = 0; i < result.length; i++ ) {
result[i] = new Interval[constraintMatrix[i].length];
for( int j = 0; j < result[i].length; j++ ) {
result[i][j] = constraintMatrix[i][j].clone();
}
}
for( int k = 0; k < result.length; k++ ) {
for( int i = 0; i < result.length; i++ ) {
for( int j = 0; j < result.length; j++ ) {
Interval interval = result[i][k].clone();
interval.add( result[k][j] );
result[i][j].intersect( interval);
}
}
}
return result;
} |
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extract events from every datetime value found in a plist.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      top_level (dict[str, object]): plist top-level key.
    """
    for root, key, value in interface.RecurseKey(top_level):
        # Only datetime leaves produce events; skip everything else.
        if not isinstance(value, datetime.datetime):
            continue
        event_data = plist_event.PlistTimeEventData()
        event_data.root = root
        event_data.key = key
        event = time_events.PythonDatetimeEvent(
            value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def prune_empty_node(node, seen):
    """
    Recursively remove empty branches and return whether this makes the node
    itself empty.

    A node is non-empty as soon as it has methods, or as soon as one of its
    branches turns out to be non-empty (in which case pruning stops early).
    The ``seen`` parameter guards against infinite recursion on cycles.
    """
    if node.methods:
        return False
    if id(node) in seen:
        # Already on the current path: treat as empty to break the cycle.
        return True
    visited = seen | {id(node)}
    for child in list(node.branches):
        if not prune_empty_node(child, visited):
            # First non-empty branch makes this node non-empty; stop here.
            return False
        node.branches.remove(child)
    return True
java | protected void addItemView(View itemView, int childIndex) {
final ViewGroup currentParent = (ViewGroup) itemView.getParent();
if (currentParent != null) {
currentParent.removeView(itemView);
}
((ViewGroup) mMenuView).addView(itemView, childIndex);
} |
def freq_filt(bma):
    """
    This is a framework for 2D FFT filtering. It has not be tested or finished - might be a dead end
    See separate utility freq_analysis.py
    """
    """
    Want to fit linear function to artifact line in freq space,
    Then mask everything near that line at distances of ~5-200 pixels,
    Or whatever the maximum CCD artifact dimension happens to be,
    This will depend on scaling - consult CCD map for interval
    """
    # NOTE(review): experimental and unfinished — the filtered array bf_filt is
    # computed at the end but never returned, so callers currently get None.
    #Fill ndv with random data
    bf = malib.randomfill(bma)
    import scipy.fftpack
    f = scipy.fftpack.fft2(bf)
    ff = scipy.fftpack.fftshift(f)
    #Ben suggested a Hahn filter here, remove the low frequency, high amplitude information
    #Then do a second fft?
    #np.log(np.abs(ff))
    #perc = malib.calcperc(np.real(ff), perc=(80, 95))
    #malib.iv(numpy.real(ff), clim=perc)
    #See http://scipy-lectures.github.io/advanced/image_processing/
    #Starting at a,b, compute argmax along vertical axis for restricted range
    #Fit line to the x and y argmax values
    #Mask [argmax[y]-1:argmax[y]+1]
    #Create radial mask
    ff_dim = np.array(ff.shape)
    # NOTE(review): under Python 3, ff_dim/2 and the slice arithmetic below
    # produce floats — indices may need // before this can run. TODO confirm.
    a,b = ff_dim/2
    n = ff_dim.max()
    y,x = np.ogrid[-a:n-a, -b:n-b]
    r1 = 40
    r2 = 60
    ff_mask = np.ma.make_mask(ff)
    radial_mask = (r1**2 <= x**2 + y**2) & (x**2 + y**2 < r2**2)
    #Note issues with rounding indices here
    #Hacked in +1 for testing
    ff_mask[:] = radial_mask[a-ff_dim[0]/2:a+ff_dim[0], b-ff_dim[1]/2:b+1+ff_dim[1]/2]
    #Combine radial and line mask
    #Convert mask to 0-1, then feather
    fm = ff * ff_mask
    #Inverse fft
    bf_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(fm))
    #Apply original mask
    bf_filt = np.ma.masked_array(bf_filt, bma.mask)
python | def _pad_block(self, handle):
'''Pad the file with 0s to the end of the next block boundary.'''
extra = handle.tell() % 512
if extra:
handle.write(b'\x00' * (512 - extra)) |
java | public static long parseJsonValueToLong(JsonValue value) {
try {
return value.asLong();
} catch (UnsupportedOperationException e) {
String s = value.asString().replace("\"", "");
return Long.parseLong(s);
}
} |
def normalize_fieldsets(fieldsets):
    """
    Make sure the keys in fieldset dictionaries are strings. Returns the
    normalized data as a list of ``(name, options)`` pairs.
    """
    return [
        (name, normalize_dictionary(options))
        for name, options in fieldsets
    ]
def update_service_definitions(self, service_definitions):
    """UpdateServiceDefinitions.
    [Preview API] Sends a PATCH to the location service to update the given
    collection of service definitions.
    :param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.location.models.VssJsonCollectionWrapper>` service_definitions:
    """
    content = self._serialize.body(service_definitions, 'VssJsonCollectionWrapper')
    # location_id identifies the ServiceDefinitions REST resource.
    self._send(http_method='PATCH',
               location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
               version='5.0-preview.1',
               content=content)
def rr_absent(name, HostedZoneId=None, DomainName=None, PrivateZone=False,
              Name=None, Type=None, SetIdentifier=None,
              region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the Route53 record is deleted.
    name
        The name of the state definition. This will be used for Name if the latter is
        not provided.
    HostedZoneId
        The ID of the zone to delete the record from. Exclusive with DomainName.
    DomainName
        The domain name of the zone to delete the record from. Exclusive with HostedZoneId.
    PrivateZone
        Set to True if the RR to be removed is in a private zone, False if public.
    Name
        Name of the resource record.
    Type
        The record type (A, NS, MX, TXT, etc.)
    SetIdentifier
        Valid for Weighted, Latency, Geolocation, and Failover resource record sets only.
        An identifier that differentiates among multiple resource record sets that have the same
        combination of DNS name and type. The value of SetIdentifier must be unique for each
        resource record set that has the same combination of DNS name and type. Omit SetIdentifier
        for any other types of record sets.
    region
        The region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
    '''
    Name = Name if Name else name
    if Type is None:
        raise SaltInvocationError("'Type' is a required parameter when deleting resource records.")
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    args = {'Id': HostedZoneId, 'Name': DomainName, 'PrivateZone': PrivateZone,
            'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    # Resolve the hosted zone; a missing zone means the record is trivially absent.
    zone = __salt__['boto3_route53.find_hosted_zone'](**args)
    if not zone:
        ret['comment'] = 'Route 53 {} hosted zone {} not found'.format('private' if PrivateZone
                else 'public', DomainName)
        log.info(ret['comment'])
        return ret
    zone = zone[0]
    HostedZoneId = zone['HostedZone']['Id']
    recordsets = __salt__['boto3_route53.get_resource_records'](HostedZoneId=HostedZoneId,
            StartRecordName=Name, StartRecordType=Type, region=region, key=key, keyid=keyid,
            profile=profile)
    if SetIdentifier and recordsets:
        # Narrow multi-value record sets (Weighted/Latency/etc.) down to the one requested.
        log.debug('Filter recordsets %s by SetIdentifier %s.', recordsets, SetIdentifier)
        recordsets = [r for r in recordsets if r.get('SetIdentifier') == SetIdentifier]
        log.debug('Resulted in recordsets %s.', recordsets)
    if not recordsets:
        ret['comment'] = 'Route 53 resource record {} with type {} already absent.'.format(
                Name, Type)
        return ret
    elif len(recordsets) > 1:
        # Refuse to guess which record to delete when the filter is ambiguous.
        ret['comment'] = 'Given criteria matched more than one ResourceRecordSet.'
        log.error(ret['comment'])
        ret['result'] = False
        return ret
    ResourceRecordSet = recordsets[0]
    if __opts__['test']:
        # Dry-run mode: report what would happen without changing anything.
        ret['comment'] = 'Route 53 resource record {} with type {} would be deleted.'.format(
                Name, Type)
        ret['result'] = None
        return ret
    ChangeBatch = {
        'Changes': [
            {
                'Action': 'DELETE',
                'ResourceRecordSet': ResourceRecordSet,
            }
        ]
    }
    if __salt__['boto3_route53.change_resource_record_sets'](HostedZoneId=HostedZoneId,
            ChangeBatch=ChangeBatch, region=region, key=key, keyid=keyid, profile=profile):
        ret['comment'] = 'Route 53 resource record {} with type {} deleted.'.format(Name, Type)
        log.info(ret['comment'])
        ret['changes']['old'] = ResourceRecordSet
        ret['changes']['new'] = None
    else:
        ret['comment'] = 'Failed to delete Route 53 resource record {} with type {}.'.format(Name,
                Type)
        log.error(ret['comment'])
        ret['result'] = False
    return ret
/**
 * Persists the given entity on behalf of the currently authenticated S.E. seller.
 * New entities are inserted with the seller set; existing entities are updated
 * only if they already belong to the same seller, otherwise the request is
 * rejected as forbidden.
 *
 * @param pReqVars request scoped variables
 * @param pEntity entity to insert or update
 * @param pRequestData request data (used for the current user name)
 * @return the processed entity
 * @throws Exception on persistence failure or ownership violation
 */
@Override
public final T process(final Map<String, Object> pReqVars,
    final T pEntity, final IRequestData pRequestData) throws Exception {
    SeSeller ses = findSeSeller.find(pReqVars, pRequestData.getUserName());
    pEntity.setSeller(ses);
    if (pEntity.getIsNew()) {
        this.srvOrm.insertEntity(pReqVars, pEntity);
        pEntity.setIsNew(false);
    } else {
        // Re-read the stored entity and verify it belongs to the same seller
        // before accepting the update.
        T entOld = this.srvOrm.retrieveEntity(pReqVars, pEntity);
        if (!entOld.getSeller().getItsId().getItsId()
            .equals(pEntity.getSeller().getItsId().getItsId())) {
            throw new ExceptionWithCode(ExceptionWithCode.FORBIDDEN,
                "Attempt to update smb. else's entity: user/entity/EID/SEOLDID/SEID - "
                    + pRequestData.getUserName() + "/" + pEntity.getClass()
                        .getSimpleName() + "/" + pEntity.getItsId() + "/" + entOld
                            .getSeller().getItsId().getItsId() + "/" + pEntity.getSeller()
                                .getItsId().getItsId());
        }
        this.srvOrm.updateEntity(pReqVars, pEntity);
    }
    return pEntity;
}
def _generate_struct_class_properties(self, ns, data_type):
    """
    Each field of the struct has a corresponding setter and getter.
    The setter validates the value being set.

    :param ns: the namespace the struct belongs to (used for type names in docs)
    :param data_type: the struct data type whose fields to emit properties for
    """
    for field in data_type.fields:
        field_name = fmt_func(field.name)
        field_name_reserved_check = fmt_func(field.name, check_reserved=True)
        # Unwrap nullable fields so docs/validators refer to the inner type.
        if is_nullable_type(field.data_type):
            field_dt = field.data_type.data_type
            dt_nullable = True
        else:
            field_dt = field.data_type
            dt_nullable = False
        # generate getter for field
        self.emit('@property')
        self.emit('def {}(self):'.format(field_name_reserved_check))
        with self.indent():
            self.emit('"""')
            if field.doc:
                self.emit_wrapped_text(
                    self.process_doc(field.doc, self._docf))
                # Sphinx wants an extra line between the text and the
                # rtype declaration.
                self.emit()
            self.emit(':rtype: {}'.format(
                self._python_type_mapping(ns, field_dt)))
            self.emit('"""')
            self.emit('if self._{}_present:'.format(field_name))
            with self.indent():
                self.emit('return self._{}_value'.format(field_name))
            self.emit('else:')
            with self.indent():
                # Absent value: nullable fields read as None, defaulted fields
                # read their default, required fields raise.
                if dt_nullable:
                    self.emit('return None')
                elif field.has_default:
                    self.emit('return {}'.format(
                        self._generate_python_value(ns, field.default)))
                else:
                    self.emit(
                        "raise AttributeError(\"missing required field '%s'\")"
                        % field_name
                    )
        self.emit()
        # generate setter for field
        self.emit('@{}.setter'.format(field_name_reserved_check))
        self.emit('def {}(self, val):'.format(field_name_reserved_check))
        with self.indent():
            # Assigning None to a nullable field is equivalent to deleting it.
            if dt_nullable:
                self.emit('if val is None:')
                with self.indent():
                    self.emit('del self.{}'.format(field_name_reserved_check))
                    self.emit('return')
            if is_user_defined_type(field_dt):
                self.emit('self._%s_validator.validate_type_only(val)' %
                          field_name)
            else:
                self.emit('val = self._{}_validator.validate(val)'.format(field_name))
            self.emit('self._{}_value = val'.format(field_name))
            self.emit('self._{}_present = True'.format(field_name))
        self.emit()
        # generate deleter for field
        self.emit('@{}.deleter'.format(field_name_reserved_check))
        self.emit('def {}(self):'.format(field_name_reserved_check))
        with self.indent():
            self.emit('self._{}_value = None'.format(field_name))
            self.emit('self._{}_present = False'.format(field_name))
        self.emit()
def potcar_spec( filename ):
    """
    Returns a dictionary specifying the pseudopotentials contained in a POTCAR file.

    Each pseudopotential block is identified by the md5 hash of its text
    (including the "End of Dataset" terminator) against known reference data.

    Args:
        filename (Str): The name of the POTCAR file to process.
    Returns:
        (Dict): A dictionary of pseudopotential filename: dataset pairs, e.g.
                { 'Fe_pv': 'PBE_54', 'O', 'PBE_54' }
    """
    with open( filename, 'r' ) as f:
        chunks = re.split('(End of Dataset\n)', f.read() )
    # Re-join each dataset body with its terminator before hashing.
    block_hashes = [ md5sum( body + terminator )
                     for body, terminator in zip( chunks[::2], chunks[1:-1:2] ) ]
    p_spec = {}
    for block_hash in block_hashes:
        for dataset in potcar_sets:
            for name, known_hash in potcar_md5sum_data[ dataset ].items():
                if block_hash == known_hash:
                    p_spec[ name ] = dataset
    if len( p_spec ) != len( block_hashes ):
        raise ValueError( 'One or more POTCARs did not have matching md5 hashes' )
    return p_spec
def get_registered_layer(name):
    """
    Look up a layer previously registered with ``@layer_register``.

    Args:
        name (str): the name of the layer, e.g. 'Conv2D'
    Returns:
        the wrapped layer function, or None if not registered.
    Raises:
        KeyError: if the name was registered more than once.
    """
    layer = _LAYER_REGISTRY.get(name, None)
    if layer == _NameConflict:
        raise KeyError("Layer named '{}' is registered with `@layer_register` more than once!".format(name))
    return layer
/**
 * Registers a JNDI environment property to be used when creating the initial
 * context (e.g. {@code Context.PROVIDER_URL}).
 *
 * @param name the property name
 * @param value the property value
 * @return this builder, for chaining
 */
public JNDIContentRepositoryBuilder withContextProperty(final String name, final Object value) {
    contextProperties.put(name, value);
    return this;
}
java | public static List<RoboconfCompletionProposal> buildProposalsFromMap( Map<String,String> candidates, String prefix ) {
List<RoboconfCompletionProposal> result = new ArrayList<> ();
for( Map.Entry<String,String> entry : candidates.entrySet()) {
// Matching candidate?
String candidate = entry.getKey();
if( ! startsWith( candidate, prefix ))
continue;
// No description => basic proposal
if( Utils.isEmptyOrWhitespaces( entry.getValue())) {
result.add( basicProposal( candidate, prefix ));
}
// Otherwise, show the description
else {
result.add( new RoboconfCompletionProposal(
candidate,
candidate,
entry.getValue(),
prefix.length()));
}
}
return result;
} |
def findHomography(self, img, drawMatches=False):
    '''
    Find the homography mapping the stored base image onto ``img`` via
    feature detection and Hamming-distance brute-force matching.

    :param img: image to register against the base image
    :param drawMatches: if True, also render the top 20 matches into the
        returned image
    :return: tuple (H, inliers, inlierRatio, averagePointDistance, img,
        features, descs, nMatches)
    :raises Exception: if no matches are found, no homography can be fitted,
        or the fit falls below ``minInliers`` / ``minInlierRatio``
    '''
    print("\t Finding points...")
    # Find points in the next frame
    img = self._prepareImage(img)
    features, descs = self.detector.detectAndCompute(img, None)
    ######################
    # TODO: CURRENTLY BROKEN IN OPENCV3.1 - WAITNG FOR NEW RELEASE 3.2
    # matches = self.matcher.knnMatch(descs,#.astype(np.float32),
    #                                 self.base_descs,
    #                                 k=3)
    # print("\t Match Count: ", len(matches))
    # matches_subset = self._filterMatches(matches)
    # its working alternative (for now):
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches_subset = bf.match(descs, self.base_descs)
    ######################
    # matches = bf.knnMatch(descs,self.base_descs, k=2)
    # # Apply ratio test
    # matches_subset = []
    # medDist = np.median([m.distance for m in matches])
    # matches_subset = [m for m in matches if m.distance < medDist]
    # for m in matches:
    #     print(m.distance)
    # for m,n in matches:
    #     if m.distance < 0.75*n.distance:
    #         matches_subset.append([m])
    if not len(matches_subset):
        raise Exception('no matches found')
    print("\t Filtered Match Count: ", len(matches_subset))
    distance = sum([m.distance for m in matches_subset])
    print("\t Distance from Key Image: ", distance)
    averagePointDistance = distance / (len(matches_subset))
    print("\t Average Distance: ", averagePointDistance)
    # Collect matched keypoint coordinates from base (train) and new (query) image.
    kp1 = []
    kp2 = []
    for match in matches_subset:
        kp1.append(self.base_features[match.trainIdx])
        kp2.append(features[match.queryIdx])
    # /self._fH #scale with _fH, if image was resized
    p1 = np.array([k.pt for k in kp1])
    p2 = np.array([k.pt for k in kp2])  # /self._fH
    # RANSAC-fit the homography; status flags mark inlier correspondences.
    H, status = cv2.findHomography(p1, p2,
                                   cv2.RANSAC,  # method
                                   5.0  # max reprojection error (1...10)
                                   )
    if status is None:
        raise Exception('no homography found')
    else:
        inliers = np.sum(status)
        print('%d / %d inliers/matched' % (inliers, len(status)))
        inlierRatio = inliers / len(status)
    if self.minInlierRatio > inlierRatio or inliers < self.minInliers:
        raise Exception('bad fit!')
    # scale with _fH, if image was resized
    # see
    # http://answers.opencv.org/question/26173/the-relationship-between-homography-matrix-and-scaling-images/
    s = np.eye(3, 3)
    s[0, 0] = 1 / self._fH
    s[1, 1] = 1 / self._fH
    H = s.dot(H).dot(np.linalg.inv(s))
    if drawMatches:
        # s0,s1 = img.shape
        # out = np.empty(shape=(s0,s1,3), dtype=np.uint8)
        img = draw_matches(self.base8bit, self.base_features, img, features,
                           matches_subset[:20],  # None,#out,
                           # flags=2
                           thickness=5
                           )
    return (H, inliers, inlierRatio, averagePointDistance,
            img, features,
            descs, len(matches_subset))
java | void processAckExpected(
long ackExpStamp,
int priority,
Reliability reliability,
SIBUuid12 stream)
throws SIResourceException
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "processAckExpected", new Long(ackExpStamp));
_internalOutputStreamManager.processAckExpected(ackExpStamp, priority, reliability, stream);
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "processAckExpected");
} |
def load_result_json(result_path, json_file_name):
    """Load a JSON file from ``result_path`` and return its content.

    Returns an empty list when the file does not exist or does not contain
    valid JSON (the parse error is logged).
    """
    json_path = os.path.join(result_path, json_file_name)
    result = []
    if os.path.isfile(json_path):
        with open(json_path) as fp:
            try:
                result = json.load(fp)
            except ValueError as err:
                logger.error(
                    'Failed to load json: {}, {}'.format(json_path, err))
    return result
def transformer_ada_lmpackedbase_dialog():
    """Set of hyperparameters.

    Starts from the VQ/ADA 32-expert packed base config and adjusts it for
    dialog-style data: longer max length, plain dense-relu-dense FFN, and a
    larger batch size.
    """
    hparams = transformer_base_vq_ada_32ex_packed()
    hparams.max_length = 1024
    hparams.ffn_layer = "dense_relu_dense"
    hparams.batch_size = 4096
    return hparams
def selectis(table, field, value, complement=False):
    """Select rows where the given field `is` the given value.

    Thin wrapper around :func:`selectop` using :func:`operator.is_`
    (identity comparison, e.g. ``is None``). Pass ``complement=True``
    to invert the selection.
    """
    return selectop(table, field, value, operator.is_, complement=complement)
def initialize_CART_rot(self, s):
    """
    Sets current specimen to s and calculates the data necessary to plot
    the specimen plots (zijderveld, specimen eqarea, M/M0)

    Selects the zijderveld data block for the active coordinate system
    (specimen / geographic / tilt-corrected), rotates it according to the
    orthogonal-plot convention chosen in the GUI, and splits the rotated
    points into good/bad measurement subsets.

    Parameters
    ----------
    s: specimen to set as the GUI's current specimen
    """
    self.s = s  # only place in code where self.s is to be set directly
    # Map the GUI dropdown selection onto the internal plot-type code.
    if self.orthogonal_box.GetValue() == "X=East":
        self.ORTHO_PLOT_TYPE = 'E-W'
    elif self.orthogonal_box.GetValue() == "X=North":
        self.ORTHO_PLOT_TYPE = 'N-S'
    elif self.orthogonal_box.GetValue() == "X=best fit line dec":
        self.ORTHO_PLOT_TYPE = 'PCA_dec'
    else:
        self.ORTHO_PLOT_TYPE = 'ZIJ'
    # Pick the z-data block matching the active coordinate system.
    if self.COORDINATE_SYSTEM == 'geographic':
        # self.CART_rot=self.Data[self.s]['zij_rotated_geo']
        self.zij = array(self.Data[self.s]['zdata_geo'])
        self.zijblock = self.Data[self.s]['zijdblock_geo']
    elif self.COORDINATE_SYSTEM == 'tilt-corrected':
        # self.CART_rot=self.Data[self.s]['zij_rotated_tilt']
        self.zij = array(self.Data[self.s]['zdata_tilt'])
        self.zijblock = self.Data[self.s]['zijdblock_tilt']
    else:
        # self.CART_rot=self.Data[self.s]['zij_rotated']
        self.zij = array(self.Data[self.s]['zdata'])
        self.zijblock = self.Data[self.s]['zijdblock']
    # Rotate by 0 (N-S), 90 (E-W), the PCA declination if a fit exists, or
    # the declination of the first measurement (default Zijderveld).
    if self.COORDINATE_SYSTEM == 'geographic':
        if self.ORTHO_PLOT_TYPE == 'N-S':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata_geo'], 0.)
        elif self.ORTHO_PLOT_TYPE == 'E-W':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata_geo'], 90.)
        elif self.ORTHO_PLOT_TYPE == 'PCA_dec':
            if 'specimen_dec' in list(self.current_fit.pars.keys()) and type(self.current_fit.pars['specimen_dec']) != str:
                self.CART_rot = Rotate_zijderveld(
                    self.Data[self.s]['zdata_geo'], self.current_fit.pars['specimen_dec'])
            else:
                self.CART_rot = Rotate_zijderveld(self.Data[self.s]['zdata_geo'], pmag.cart2dir(
                    self.Data[self.s]['zdata_geo'][0])[0])
        else:
            self.CART_rot = Rotate_zijderveld(self.Data[self.s]['zdata_geo'], pmag.cart2dir(
                self.Data[self.s]['zdata_geo'][0])[0])
    elif self.COORDINATE_SYSTEM == 'tilt-corrected':
        if self.ORTHO_PLOT_TYPE == 'N-S':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata_tilt'], 0.)
        elif self.ORTHO_PLOT_TYPE == 'E-W':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata_tilt'], 90)
        elif self.ORTHO_PLOT_TYPE == 'PCA_dec':
            if 'specimen_dec' in list(self.current_fit.pars.keys()) and type(self.current_fit.pars['specimen_dec']) != str:
                self.CART_rot = Rotate_zijderveld(
                    self.Data[self.s]['zdata_tilt'], self.current_fit.pars['specimen_dec'])
            else:
                self.CART_rot = Rotate_zijderveld(self.Data[self.s]['zdata_tilt'], pmag.cart2dir(
                    self.Data[self.s]['zdata_tilt'][0])[0])
        else:
            self.CART_rot = Rotate_zijderveld(self.Data[self.s]['zdata_tilt'], pmag.cart2dir(
                self.Data[self.s]['zdata_tilt'][0])[0])
    else:
        if self.ORTHO_PLOT_TYPE == 'N-S':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata'], 0.)
        elif self.ORTHO_PLOT_TYPE == 'E-W':
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata'], 90)
        elif self.ORTHO_PLOT_TYPE == 'PCA_dec':
            if 'specimen_dec' in list(self.current_fit.pars.keys()) and type(self.current_fit.pars['specimen_dec']) != str:
                self.CART_rot = Rotate_zijderveld(
                    self.Data[self.s]['zdata'], self.current_fit.pars['specimen_dec'])
            else:  # Zijderveld
                self.CART_rot = Rotate_zijderveld(
                    self.Data[self.s]['zdata'], pmag.cart2dir(self.Data[self.s]['zdata'][0])[0])
        else:  # Zijderveld
            self.CART_rot = Rotate_zijderveld(
                self.Data[self.s]['zdata'], pmag.cart2dir(self.Data[self.s]['zdata'][0])[0])
    # Unit vectors for the equal-area plot.
    self.zij_norm = array([row/sqrt(sum(row**2)) for row in self.zij])
    # remove bad data from plotting:
    self.CART_rot_good = []
    self.CART_rot_bad = []
    for i in range(len(self.CART_rot)):
        if self.Data[self.s]['measurement_flag'][i] == 'g':
            self.CART_rot_good.append(list(self.CART_rot[i]))
        else:
            self.CART_rot_bad.append(list(self.CART_rot[i]))
    self.CART_rot = array(self.CART_rot)
    self.CART_rot_good = array(self.CART_rot_good)
    self.CART_rot_bad = array(self.CART_rot_bad)
def guess_formatter(values, precision=1, commas=True, parens=True, nan='nan', prefix=None, pcts=0,
                    trunc_dot_zeros=0):
    """Based on the values, return the most suitable formatter

    Datetime-like inputs with no time-of-day component get a date formatter;
    numeric inputs are classified by the magnitude of their smallest absolute
    value (trillions/billions/millions/thousands/percent/int/float).

    Parameters
    ----------
    values : Series, DataFrame, scalar, list, tuple, or ndarray
        Values used to determine which formatter is the best fit
    """
    formatter_args = dict(precision=precision, commas=commas, parens=parens, nan=nan, prefix=prefix,
                          trunc_dot_zeros=trunc_dot_zeros)
    try:
        # NOTE(review): pd.datetime is deprecated in modern pandas — TODO
        # migrate to datetime.datetime.
        if isinstance(values, pd.datetime) and values.hour == 0 and values.minute == 0:
            return new_datetime_formatter()
        elif is_datetime_arraylike(values):
            # basic date formatter if no hours or minutes
            if hasattr(values, 'dt'):
                if (values.dt.hour == 0).all() and (values.dt.minute == 0).all():
                    return new_datetime_formatter()
            elif isinstance(values, pd.Series):
                if values.dropna().apply(lambda d: d.hour == 0).all() and values.apply(lambda d: d.minute == 0).all():
                    return new_datetime_formatter()
            elif isinstance(values, pd.DataFrame):
                if values.dropna().applymap(lambda d: d != d or (d.hour == 0 and d.minute == 0)).all().all():
                    return new_datetime_formatter()
        elif isinstance(values, pd.Series):
            aval = values.abs()
            vmax, vmin = aval.max(), aval.min()
        elif isinstance(values, np.ndarray):
            if values.ndim == 2:
                avalues = pd.DataFrame(values).abs()
                vmax = avalues.max().max()
                vmin = avalues.min().min()
            elif values.ndim == 1:
                aval = pd.Series(values).abs()
                vmax, vmin = aval.max(), aval.min()
            else:
                raise ValueError('cannot accept frame with more than 2-dimensions')
        elif isinstance(values, (list, tuple)):
            vmax = max(values)
            vmin = min(values)
        else:
            vmax = vmin = abs(values)
        if np.isnan(vmin):
            return new_float_formatter(**formatter_args)
        else:
            # Classify by order of magnitude of the smallest absolute value.
            min_digits = 0 if vmin == 0 else math.floor(math.log10(vmin))
            # max_digits = math.floor(math.log10(vmax))
            if min_digits >= 12:
                return new_trillions_formatter(**formatter_args)
            elif min_digits >= 9:
                return new_billions_formatter(**formatter_args)
            elif min_digits >= 6:
                return new_millions_formatter(**formatter_args)
            elif min_digits >= 3:
                return new_thousands_formatter(**formatter_args)
            elif pcts and min_digits < 0 and vmax < 1:
                return new_percent_formatter(**formatter_args)
            else:
                if isinstance(vmax, int):
                    formatter_args.pop('precision')
                    return new_int_formatter(**formatter_args)
                else:
                    return new_float_formatter(**formatter_args)
    except:
        # NOTE(review): deliberate best-effort fallback — any classification
        # failure degrades to the identity formatter instead of raising.
        # import sys
        # e = sys.exc_info()[0]
        return lambda x: x
java | public static Collection<Class<?>> getInterfaces(Class<?> object, Class<?> base)
{
Check.notNull(object);
Check.notNull(base);
final Collection<Class<?>> interfaces = new ArrayList<>();
Class<?> current = object;
while (current != null)
{
final Deque<Class<?>> currents = new ArrayDeque<>(filterInterfaces(current, base));
final Deque<Class<?>> nexts = new ArrayDeque<>();
while (!currents.isEmpty())
{
nexts.clear();
interfaces.addAll(currents);
checkInterfaces(base, currents, nexts);
currents.clear();
currents.addAll(nexts);
nexts.clear();
}
current = current.getSuperclass();
}
return interfaces;
} |
def center(self) -> Location:
    """
    :return: a Point corresponding to the absolute position of the center
        of the well relative to the deck (with the front-left corner of
        slot 1 as (0,0,0)) — i.e. the top point lowered by half the depth.
    """
    top_location = self.top()
    center_z = top_location.point.z - self._depth / 2.0
    center_point = Point(
        x=top_location.point.x,
        y=top_location.point.y,
        z=center_z)
    return Location(center_point, self)
java | protected void fireSourceUpdated(S e) {
for (int i = 0, n = this.listenerList.size(); i < n; i++) {
this.listenerList.get(i).sourceUpdated(e);
}
} |
java | public DescribeConfigurationRecordersResult withConfigurationRecorders(ConfigurationRecorder... configurationRecorders) {
if (this.configurationRecorders == null) {
setConfigurationRecorders(new com.amazonaws.internal.SdkInternalList<ConfigurationRecorder>(configurationRecorders.length));
}
for (ConfigurationRecorder ele : configurationRecorders) {
this.configurationRecorders.add(ele);
}
return this;
} |
/**
 * Issues an HTTP PUT with the given map serialized to a JSON request body,
 * delegating to {@code put(String, String)}.
 *
 * @param url target URL
 * @param body key/value pairs to serialize as the JSON body
 * @return the response body as a string
 */
public static String put(String url, Map<String, String> body) {
    return put(url, JSONObject.toJSONString(body));
}
def bus_line_names(self):
    """Append bus injection and line flow names to `varname`"""
    # Only populated when the time-domain simulation is configured to
    # compute power flows.
    if self.system.tds.config.compute_flows:
        self.system.Bus._varname_inj()
        self.system.Line._varname_flow()
        self.system.Area._varname_inter()
def swipe_bottom(self, steps=10, *args, **selectors):
    """
    Swipe the UI object matched by *selectors* from its center toward the bottom.

    See `Swipe Left` for more details.
    """
    target = self.device(**selectors)
    target.swipe.down(steps=steps)
# NOTE(review): auto-generated by pyangbind from the YANG model; keep the
# YANGDynClass arguments in sync with the model rather than editing by hand.
def _set_support(self, v, load=False):
    """
    Setter method for support, mapped from YANG variable /support (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_support is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_support() directly.
    """
    # Coerce unified-type values into their declared type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=support.support, is_container='container', presence=False, yang_name="support", rest_name="support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Support configuration', u'callpoint': u'RASGlobalConfigCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """support must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=support.support, is_container='container', presence=False, yang_name="support", rest_name="support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Support configuration', u'callpoint': u'RASGlobalConfigCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)""",
        })
    self.__support = t
    if hasattr(self, '_set'):
        self._set()
java | public boolean anyMatch(@NotNull IntPredicate predicate) {
while(iterator.hasNext()) {
if(predicate.test(iterator.nextInt()))
return true;
}
return false;
} |
java | @Override
public String getSignature(String baseString, String apiSecret, String tokenSecret) {
try {
Preconditions.checkEmptyString(apiSecret, "Api secret cant be null or empty string");
return OAuthEncoder.encode(apiSecret) + '&' + OAuthEncoder.encode(tokenSecret);
} catch (Exception e) {
throw new OAuthSignatureException(baseString, e);
}
} |
/**
 * Writes zone properties for a rule of the form "last {@code dayOfWeek} on
 * or before {@code dayOfMonth}" (DOW_LEQ_DOM) by rewriting it as an
 * equivalent rule type that can be emitted directly.
 *
 * @param writer     destination writer
 * @param isDst      whether this rule describes daylight saving time
 * @param tzname     time zone name for the rule
 * @param fromOffset UTC offset before the transition
 * @param toOffset   UTC offset after the transition
 * @param month      rule month ({@link Calendar} month constant)
 * @param dayOfMonth latest day of month on which the rule can fire
 * @param dayOfWeek  day of week of the transition
 * @param startTime  start time of the rule
 * @param untilTime  time after which the rule no longer applies
 * @throws IOException if writing fails
 */
private static void writeZonePropsByDOW_LEQ_DOM(Writer writer, boolean isDst, String tzname, int fromOffset, int toOffset,
        int month, int dayOfMonth, int dayOfWeek, long startTime, long untilTime) throws IOException {
    // Check if this rule can be converted to DOW rule
    if (dayOfMonth%7 == 0) {
        // Can be represented by DOW rule: dayOfMonth 7/14/21/28 maps to the
        // nth occurrence of dayOfWeek in the month.
        writeZonePropsByDOW(writer, isDst, tzname, fromOffset, toOffset,
                month, dayOfMonth/7, dayOfWeek, startTime, untilTime);
    } else if (month != Calendar.FEBRUARY && (MONTHLENGTH[month] - dayOfMonth)%7 == 0){
        // Can be represented by DOW rule with negative week number
        // (counted back from the end of the month).
        writeZonePropsByDOW(writer, isDst, tzname, fromOffset, toOffset,
                month, -1*((MONTHLENGTH[month] - dayOfMonth)/7 + 1), dayOfWeek, startTime, untilTime);
    } else if (month == Calendar.FEBRUARY && dayOfMonth == 29) {
        // Special case for February: on-or-before day 29 is simply the last
        // dayOfWeek of the month (week -1), leap year or not.
        writeZonePropsByDOW(writer, isDst, tzname, fromOffset, toOffset,
                Calendar.FEBRUARY, -1, dayOfWeek, startTime, untilTime);
    } else {
        // Otherwise, convert this to DOW_GEQ_DOM rule: on-or-before day N
        // equals on-or-after day N-6 for the same day of week.
        writeZonePropsByDOW_GEQ_DOM(writer, isDst, tzname, fromOffset, toOffset,
                month, dayOfMonth - 6, dayOfWeek, startTime, untilTime);
    }
}
java | public void setAnimationType(Type type){
if(type == null)
options.clearProperty(ANIMATION_EASING);
else
options.setProperty(ANIMATION_EASING, type.getValue());
} |
def load(self, image=None):
    '''load an image, either an actual path on the filesystem or a uri.

    Parameters
    ==========
    image: the image path or uri to load (e.g., docker://ubuntu)
    '''
    from spython.image import Image
    from spython.instance import Instance

    # Wrap the value as an Image first; an instance:// uri replaces it below.
    self.simage = Image(image)

    if image is not None:
        # instance:// uris refer to running instances rather than images.
        if image.startswith('instance://'):
            self.simage = Instance(image)
        bot.info(self.simage)
/**
 * Returns the genome sequence for the region {@code contig:start-end}.
 *
 * @param contig sequence/chromosome name
 * @param start  region start coordinate
 * @param end    region end coordinate
 * @return the sequence string for the region (possibly truncated at the
 *         right bound, see below)
 * @throws RuntimeException if no sequence entry can be found for the region
 */
@Override
public String query(String contig, int start, int end) throws Exception {
    Region region = new Region(contig, start, end);
    QueryResult<GenomeSequenceFeature> queryResult
            = genomeDBAdaptor.getSequence(region, QueryOptions.empty());
    // This behaviour mimics the behaviour of the org.opencb.biodata.tools.sequence.SamtoolsFastaIndex with one
    // difference. If contig does not exist, start is under the left bound, start AND end are out of the right
    // bound, then a RunTime exception will be thrown. HOWEVER: if start is within the bounds BUT end is out of the
    // right bound, then THIS implementation will return available nucleotides while SamtoolsFastaIndex will keep
    // returning the exception.
    if (queryResult.getResult().size() > 0 && StringUtils.isNotBlank(queryResult.getResult().get(0).getSequence())) {
        // Shorter-than-requested result means the region ran past the contig end.
        if (queryResult.getResult().get(0).getSequence().length() < (end - start + 1)) {
            logger.warn("End coordinate out of the right bound. Returning available nucleotides.");
        }
        return queryResult.getResult().get(0).getSequence();
    } else {
        throw new RuntimeException("Unable to find entry for " + region.toString());
    }
}
java | private String readLine() throws IOException {
StringBuffer sbuf = new StringBuffer();
int result;
String line;
do {
result = in.readLine(buf, 0, buf.length); // does +=
if (result != -1) {
sbuf.append(new String(buf, 0, result, encoding));
}
} while (result == buf.length); // loop only if the buffer was filled
if (sbuf.length() == 0) {
return null; // nothing read, must be at the end of stream
}
// Cut off the trailing \n or \r\n
// It should always be \r\n but IE5 sometimes does just \n
// Thanks to Luke Blaikie for helping make this work with \n
int len = sbuf.length();
if (len >= 2 && sbuf.charAt(len - 2) == '\r') {
sbuf.setLength(len - 2); // cut \r\n
}
else if (len >= 1 && sbuf.charAt(len - 1) == '\n') {
sbuf.setLength(len - 1); // cut \n
}
return sbuf.toString();
} |
/**
 * Requests the color of all the LED channels from the Bean; the result is
 * delivered asynchronously to the given callback.
 *
 * @param callback invoked with the {@link LedColor} when the reply arrives
 */
public void readLed(Callback<LedColor> callback) {
    // Register the callback first so it is in place before the read-all
    // request's reply can arrive.
    addCallback(BeanMessageID.CC_LED_READ_ALL, callback);
    sendMessageWithoutPayload(BeanMessageID.CC_LED_READ_ALL);
}
def execd_submodule_paths(command, execd_dir=None):
    """Yield the full path of *command* inside each execd module directory.

    Only paths that exist as regular, executable files are yielded.

    :param command: file name to look for in each module directory
    :param execd_dir: base directory to scan, or None for the default
    """
    for module_dir in execd_module_paths(execd_dir):
        candidate = os.path.join(module_dir, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
def get_local_current_sample(ip):
    """Gets current sample from *local* Neurio device IP address.

    This is a static method. It doesn't require a token to authenticate.

    Note, call get_user_information to determine local Neurio IP addresses.

    Args:
        ip (string): address of local Neurio device

    Returns:
        dictionary object containing current sample information

    Raises:
        ValueError: if *ip* is not a well-formed dotted-quad IPv4 address
    """
    # Raw string: the previous plain string relied on "\." being passed
    # through unchanged, which raises an invalid-escape-sequence warning
    # (and eventually an error) on modern Python.
    valid_ip_pat = re.compile(
        r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    )
    if not valid_ip_pat.match(ip):
        raise ValueError("ip address invalid")

    url = "http://%s/current-sample" % (ip)
    headers = { "Content-Type": "application/json" }

    r = requests.get(url, headers=headers)
    return r.json()
/**
 * Returns whether this slice's path within its parent passes the given
 * whitelist/blacklist filter, and — recursively — whether every enclosing
 * parent slice does too. A {@code null} parent means this is the outermost
 * slice, so only this slice's own path is checked.
 *
 * @param jarWhiteBlackList the leafname whitelist/blacklist to test against
 * @return {@code true} if this slice and all parents are whitelisted and
 *         not blacklisted
 */
public boolean isWhitelistedAndNotBlacklisted(final WhiteBlackListLeafname jarWhiteBlackList) {
    return jarWhiteBlackList.isWhitelistedAndNotBlacklisted(pathWithinParentZipFileSlice) //
            && (parentZipFileSlice == null
                    || parentZipFileSlice.isWhitelistedAndNotBlacklisted(jarWhiteBlackList));
}
def connection(self):
    """ Provide the connection parameters for kombu's ConsumerMixin.

    The `Connection` object is a declaration of connection parameters
    that is lazily evaluated. It doesn't represent an established
    connection to the broker at this point.
    """
    # Heartbeat and transport options fall back to module-level defaults
    # when absent from the container config.
    heartbeat = self.container.config.get(
        HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT
    )
    transport_options = self.container.config.get(
        TRANSPORT_OPTIONS_CONFIG_KEY, DEFAULT_TRANSPORT_OPTIONS
    )
    # No default here: ssl stays None (disabled) unless configured.
    ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
    conn = Connection(self.amqp_uri,
                      transport_options=transport_options,
                      heartbeat=heartbeat,
                      ssl=ssl
                      )

    return conn
def createKMZ(self, kmz_as_json):
    """
    Creates a KMZ file from json.

    See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Create_Kmz/02r3000001tm000000/
    for more information.

    :param kmz_as_json: the KML document, as JSON, to convert
    :return: response of the POST to the service's /createKmz endpoint
    """
    kmlURL = self._url + "/createKmz"
    params = {
        "f" : "json",
        "kml" : kmz_as_json
    }
    # Delegate to the shared POST helper so the security handler and proxy
    # settings configured on this service instance are honoured.
    return self._post(url=kmlURL, param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
/**
 * Returns the security metadata for the web module associated with the
 * current component, or {@code null} if none can be determined.
 * <p>
 * When the current module metadata is a {@link WebModuleMetaData} (web
 * container), it is used directly; otherwise (e.g. an EJB environment) a
 * thread-local web module metadata is consulted instead.
 */
public static SecurityMetadata getSecurityMetadata() {
    SecurityMetadata secMetadata = null;
    ModuleMetaData mmd = ComponentMetaDataAccessorImpl.getComponentMetaDataAccessor().getComponentMetaData().getModuleMetaData();
    if (mmd instanceof WebModuleMetaData) {
        secMetadata = (SecurityMetadata) ((WebModuleMetaData) mmd).getSecurityMetaData();
    } else {
        // ejb environment, check threadlocal.
        WebModuleMetaData wmmd = getWebModuleMetaData();
        if (wmmd != null) {
            secMetadata = (SecurityMetadata) wmmd.getSecurityMetaData();
        }
    }
    return secMetadata;
}
def length(self):
    """Return the Euclidean (L2) norm of this 4-component vector."""
    squared_sum = self.x ** 2 + self.y ** 2 + self.z ** 2 + self.w ** 2
    return math.sqrt(squared_sum)
/**
 * Replaces the main content area of the root layout with the given
 * component and refreshes the app attributes derived from it.
 *
 * @param comp the component to display; it is stretched to full size
 */
public void updateMainComponent(Component comp) {
    comp.setSizeFull();
    m_rootLayout.setMainContent(comp);
    // Recompute and apply the attributes associated with the new component.
    Map<String, Object> attributes = getAttributesForComponent(comp);
    updateAppAttributes(attributes);
}
/**
 * Attempts to open the underlying proxy session, completing {@code future}
 * once connected. On failure the attempt is rescheduled after one second,
 * retrying until a session can be established.
 *
 * @param future completed with this client when the session is connected
 */
private void openProxy(CompletableFuture<SessionClient> future) {
    log.debug("Opening proxy session");
    proxyFactory.get().thenCompose(proxy -> proxy.connect()).whenComplete((proxy, error) -> {
        if (error == null) {
            synchronized (this) {
                // Rebuild the logger so it carries the new session's identity.
                this.log = ContextualLoggerFactory.getLogger(getClass(), LoggerContext.builder(SessionClient.class)
                        .addValue(proxy.sessionId())
                        .add("type", proxy.type())
                        .add("name", proxy.name())
                        .build());
                this.session = proxy;
                proxy.addStateChangeListener(this::onStateChange);
                // Re-register every event listener accumulated so far on the
                // freshly opened session.
                eventListeners.entries().forEach(entry -> proxy.addEventListener(entry.getKey(), entry.getValue()));
                onStateChange(PrimitiveState.CONNECTED);
            }
            future.complete(this);
        } else {
            // Connect failed: retry the whole open sequence after a delay.
            recoverTask = context.schedule(Duration.ofSeconds(1), () -> openProxy(future));
        }
    });
}
def set_xticks_for_all(self, row_column_list=None, ticks=None):
    """Manually specify the x-axis tick values.

    :param row_column_list: a list containing (row, column) tuples to
        specify the subplots, or None to indicate *all* subplots.
    :type row_column_list: list or None
    :param ticks: list of tick values.
    """
    if row_column_list is None:
        # No subplots given: store the ticks for every subplot at once.
        self.ticks['x'] = ticks
        return
    for row, column in row_column_list:
        self.set_xticks(row, column, ticks)
def set_sampling_strategies(self, filter, strategy_and_parms):
    """Set a strategy for all sensors matching the filter, including unseen sensors

    The strategy should persist across sensor disconnect/reconnect.

    Parameters
    ----------
    filter : str
        Filter for sensor names
    strategy_and_params : seq of str or str
        As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
        names and parameters are as defined by the KATCP spec. As str contains the
        same elements in space-separated form.

    Returns
    -------
    done : tornado Future
        Resolves when done; its result maps each sensor's normalised name
        to the strategy applied, or None when setting it failed.
    """
    sensor_list = yield self.list_sensors(filter=filter)
    sensor_dict = {}
    for sens in sensor_list:
        # Set the strategy on each sensor
        try:
            sensor_name = sens.object.normalised_name
            yield self.set_sampling_strategy(sensor_name, strategy_and_parms)
            sensor_dict[sensor_name] = strategy_and_parms
        except Exception as exc:
            self._logger.exception(
                'Unhandled exception trying to set sensor strategies {!r} for {} ({})'
                .format(strategy_and_parms, sens, exc))
            # NOTE(review): if normalised_name itself raised, sensor_name here
            # is unbound (first iteration) or stale from a previous one —
            # verify this failure path upstream.
            sensor_dict[sensor_name] = None
    # Otherwise, depend on self._add_sensors() to handle it from the cache
    # when the sensor appears.
    raise tornado.gen.Return(sensor_dict)
/**
 * Registers the given type with a Kryo serializer class, delegating to the
 * underlying config.
 *
 * @param type            the class to register
 * @param serializerClass the Kryo serializer class to use for instances of
 *                        {@code type}
 */
public void registerTypeWithKryoSerializer(Class<?> type, Class<? extends Serializer<?>> serializerClass) {
    config.registerTypeWithKryoSerializer(type, serializerClass);
}
/**
 * Attempts to bind the server socket to the given local address.
 *
 * @param address   local address (and port) to bind to
 * @param reuseflag value for the SO_REUSEADDR socket option, applied
 *                  before binding
 * @throws IOException if the bind fails
 */
private void attemptSocketBind(InetSocketAddress address, boolean reuseflag) throws IOException {
    this.serverSocket.setReuseAddress(reuseflag);
    // Listen backlog comes from the owning TCP channel's configuration.
    this.serverSocket.bind(address, this.tcpChannel.getConfig().getListenBacklog());
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "ServerSocket bind worked, reuse=" + this.serverSocket.getReuseAddress());
    }
}
java | @edu.umd.cs.findbugs.annotations.SuppressWarnings({"DM_DEFAULT_ENCODING", "OS_OPEN_STREAM"})
private static void getUlimit(PrintWriter writer) throws IOException {
// TODO should first check whether /bin/bash even exists
InputStream is = new ProcessBuilder("bash", "-c", "ulimit -a").start().getInputStream();
try {
// this is reading from the process so platform encoding is correct
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(is));
String line;
while ((line = bufferedReader.readLine()) != null) {
writer.println(line);
}
} finally {
is.close();
}
} |
def inverse(self, encoded, duration=None):
    '''Inverse transformation: decode a framewise encoding back into a
    JAMS annotation.

    Parameters
    ----------
    encoded : np.ndarray
        Framewise encoded observations, indexed as [frame, class].
    duration : number or None
        Duration for the resulting annotation; also passed through to
        interval decoding.

    Returns
    -------
    jams.Annotation
        Annotation in this transformer's namespace, with one observation
        per decoded value per interval.
    '''
    ann = jams.Annotation(namespace=self.namespace, duration=duration)
    for start, end, value in self.decode_intervals(encoded,
                                                   duration=duration,
                                                   transition=self.transition,
                                                   p_init=self.p_init,
                                                   p_state=self.p_state):
        # Map start:end to frames
        f_start, f_end = time_to_frames([start, end],
                                        sr=self.sr,
                                        hop_length=self.hop_length)

        # Confidence = mean activation for this class over the interval.
        confidence = np.mean(encoded[f_start:f_end+1, value])

        # Map the class index back to its label(s) via the fitted encoder.
        value_dec = self.encoder.inverse_transform(np.atleast_2d(value))[0]

        for vd in value_dec:
            ann.append(time=start,
                       duration=end-start,
                       value=vd,
                       confidence=confidence)

    return ann
/**
 * Registers a single asynchronous row listener to be applied to fetched
 * rows, replacing any listeners previously set on these options.
 *
 * @param rowAsyncListener function applied to each {@link Row}
 * @return this query, for fluent chaining
 */
public TypedQuery<ENTITY> withRowAsyncListener(Function<Row, Row> rowAsyncListener) {
    this.options.setRowAsyncListeners(Optional.of(asList(rowAsyncListener)));
    return this;
}
java | public static String cleanParam(String[] strParams, boolean bSetDefault, String strDefault)
{
if ((strParams == null) || (strParams.length == 0))
strParams = new String[1];
if (strParams[0] == null) if (bSetDefault)
strParams[0] = strDefault;
if (strParams[0] == null)
return null;
try {
return URLDecoder.decode(strParams[0], DBConstants.URL_ENCODING);
} catch (java.io.UnsupportedEncodingException ex) {
return strParams[0];
}
} |
def download_permanent_media(self, media_id):
    """
    Fetch a permanent media asset from the WeChat material API.

    :param media_id: ID of the media file to fetch
    :return: the ``requests`` Response instance
    """
    # ensure_ascii=False keeps non-ASCII media ids intact in the payload,
    # which is then explicitly UTF-8 encoded before posting.
    return requests.post(
        url="https://api.weixin.qq.com/cgi-bin/material/get_material",
        params={"access_token": self.token},
        data=_json.dumps({
            "media_id": media_id
        }, ensure_ascii=False).encode("utf-8")
    )
def search(self, jobs: List[Dict[str, str]]) -> None:
    """Perform searches based on job orders.

    Jobs are queued and consumed by a pool of worker processes. For each
    completed job marked ``greedy``, new domains discovered in its email
    results are turned into follow-up (non-greedy) jobs and searched
    recursively. Results accumulate on ``self.results``.
    """
    if not isinstance(jobs, list):
        raise Exception("Jobs must be of type list.")
    self._log.info("Project: %s" % self.project)
    self._log.info("Processing jobs: %d", len(jobs))
    for _, job in enumerate(jobs):
        self._unfullfilled.put(job)
    # Spin up the worker pool and wait for all workers to drain the queue.
    for _ in range(self.PROCESSES):
        proc: Process = Process(target=self._job_handler)
        self._processes.append(proc)
        proc.start()
    for proc in self._processes:
        proc.join()
    while not self._fulfilled.empty():
        output: Dict = self._fulfilled.get()
        output.update({'project': self.project})
        self._processed.append(output['domain'])
        self.results.append(output)
        if output['greedy']:
            # Derive follow-up jobs from domains seen in email results,
            # skipping domains already processed or already queued here.
            bonus_jobs: List = list()
            observed: List = list()
            for item in output['results']['emails']:
                found: str = item.split('@')[1]
                if found in self._processed or found in observed:
                    continue
                observed.append(found)
                # Follow-up jobs inherit the original job's settings but
                # are never greedy, which bounds the recursion.
                base: Dict = dict()
                base['limit'] = output['limit']
                base['modifier'] = output['modifier']
                base['engine'] = output['engine']
                base['greedy'] = False
                base['domain'] = found
                bonus_jobs.append(base)
            if len(bonus_jobs) > 0:
                self.search(bonus_jobs)
    self._log.info("All jobs processed")
    if self.output:
        self._save()
/**
 * Copies the content at the given URL into the destination file.
 *
 * @param source      URL to read from
 * @param destination file the stream's content is written to
 * @throws UncheckedIOException if opening or copying the stream fails
 */
public static void copyURLToFile(final URL source, final File destination) throws UncheckedIOException {
    InputStream is = null;
    try {
        is = source.openStream();
        write(destination, is);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    } finally {
        // Quiet close of the source stream on every path.
        close(is);
    }
}
def load_weapons(self):
    """|coro|

    Load the players weapon stats

    Returns
    -------
    list[:class:`Weapon`]
        list of all the weapon objects found"""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
    if not "results" in data or not self.id in data["results"]:
        raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))

    data = data["results"][self.id]
    # Pre-create one Weapon per weapon-type slot (indices 0-6).
    self.weapons = [Weapon(i) for i in range(7)]

    for x in data:
        # Keys appear to be "weapontypepvp_<category>:<weapontype>:..." —
        # split out the category and the weapon-type id.
        spl = x.split(":")
        category = spl[0].split("_")[1]
        try:
            # Weapon-type ids in the payload are 1-based; the list is 0-based.
            weapontype = int(spl[1]) - 1
            weapon = self.weapons[weapontype]
            if category == "kills": weapon.kills = data[x]
            elif category == "headshot": weapon.headshots = data[x]
            elif category == "bulletfired": weapon.shots = data[x]
            elif category == "bullethit": weapon.hits = data[x]
        except (ValueError, TypeError, IndexError):
            # Malformed key or out-of-range weapon type: skip this stat.
            pass

    return self.weapons
java | public static void writeObject(CodedOutputStream out, int order, FieldType type, Object o, boolean list,
boolean withTag) throws IOException {
if (o == null) {
return;
}
if (type == FieldType.OBJECT) {
Class cls = o.getClass();
Codec target = ProtobufProxy.create(cls);
if (withTag) {
out.writeRawVarint32(makeTag(order, WireFormat.WIRETYPE_LENGTH_DELIMITED));
}
out.writeRawVarint32(target.size(o));
target.writeTo(o, out);
return;
}
if (type == FieldType.BOOL) {
if (withTag) {
out.writeBool(order, (Boolean) o);
} else {
out.writeBoolNoTag((Boolean) o);
}
} else if (type == FieldType.BYTES) {
byte[] bb = (byte[]) o;
if (withTag) {
out.writeBytes(order, ByteString.copyFrom(bb));
} else {
out.writeBytesNoTag(ByteString.copyFrom(bb));
}
} else if (type == FieldType.DOUBLE) {
if (withTag) {
out.writeDouble(order, (Double) o);
} else {
out.writeDoubleNoTag((Double) o);
}
} else if (type == FieldType.FIXED32) {
if (withTag) {
out.writeFixed32(order, (Integer) o);
} else {
out.writeFixed32NoTag((Integer) o);
}
} else if (type == FieldType.FIXED64) {
if (withTag) {
out.writeFixed64(order, (Long) o);
} else {
out.writeFixed64NoTag((Long) o);
}
} else if (type == FieldType.FLOAT) {
if (withTag) {
out.writeFloat(order, (Float) o);
} else {
out.writeFloatNoTag((Float) o);
}
} else if (type == FieldType.INT32) {
if (withTag) {
out.writeInt32(order, (Integer) o);
} else {
out.writeInt32NoTag((Integer) o);
}
} else if (type == FieldType.INT64) {
if (withTag) {
out.writeInt64(order, (Long) o);
} else {
out.writeInt64NoTag((Long) o);
}
} else if (type == FieldType.SFIXED32) {
if (withTag) {
out.writeSFixed32(order, (Integer) o);
} else {
out.writeSFixed32NoTag((Integer) o);
}
} else if (type == FieldType.SFIXED64) {
if (withTag) {
out.writeSFixed64(order, (Long) o);
} else {
out.writeSFixed64NoTag((Long) o);
}
} else if (type == FieldType.SINT32) {
if (withTag) {
out.writeSInt32(order, (Integer) o);
} else {
out.writeSInt32NoTag((Integer) o);
}
} else if (type == FieldType.SINT64) {
if (withTag) {
out.writeSInt64(order, (Long) o);
} else {
out.writeSInt64NoTag((Long) o);
}
} else if (type == FieldType.STRING) {
if (withTag) {
out.writeBytes(order, ByteString.copyFromUtf8(String.valueOf(o)));
} else {
out.writeBytesNoTag(ByteString.copyFromUtf8(String.valueOf(o)));
}
} else if (type == FieldType.UINT32) {
if (withTag) {
out.writeUInt32(order, (Integer) o);
} else {
out.writeUInt32NoTag((Integer) o);
}
} else if (type == FieldType.UINT64) {
if (withTag) {
out.writeUInt64(order, (Long) o);
} else {
out.writeUInt64NoTag((Long) o);
}
} else if (type == FieldType.ENUM) {
int value = 0;
if (o instanceof EnumReadable) {
value = ((EnumReadable) o).value();
} else if (o instanceof Enum) {
value = ((Enum) o).ordinal();
}
if (withTag) {
out.writeEnum(order, value);
} else {
out.writeEnumNoTag(value);
}
}
} |
java | static <K, V> Map<V, K> invert(Map<K, V> map)
{
Map<V, K> result = new LinkedHashMap<V, K>();
for (Entry<K, V> entry : map.entrySet())
{
result.put(entry.getValue(), entry.getKey());
}
return result;
} |
def _find_already_built_wheel(metadata_directory):
    """Check for a wheel already built during the get_wheel_metadata hook.

    Returns the path of the single usable ``.whl`` file next to the wheel
    marker, or None when no metadata directory was given, the marker is
    absent, or the number of wheels is not exactly one.
    """
    if not metadata_directory:
        return None
    parent_dir = os.path.dirname(metadata_directory)
    if not os.path.isfile(pjoin(parent_dir, WHEEL_BUILT_MARKER)):
        return None
    wheels = glob(os.path.join(parent_dir, '*.whl'))
    if len(wheels) == 1:
        # Exactly one .whl file
        return wheels[0]
    if not wheels:
        print('Found wheel built marker, but no .whl files')
    else:
        print('Found multiple .whl files; unspecified behaviour. '
              'Will call build_wheel.')
    return None
def execute_notebook(nb_path, pkg_dir, dataframes, write_notebook=False, env=None):
    """
    Execute a notebook after adding the prolog and epilog. Can also add %mt_materialize magics to
    write dataframes to files

    :param nb_path: path to a notebook.
    :param pkg_dir: Directory to which dataframes are materialized
    :param dataframes: List of names of dataframes to materialize
    :param write_notebook: False to skip writing; True to write next to the
        input as ``<name>-executed<ext>``; any other value is used as the
        output path directly
    :param env: optional mapping passed to the prolog preprocessor
    :return: a Notebook object
    """
    import nbformat
    from metapack.jupyter.preprocessors import AddEpilog, AddProlog
    from metapack.jupyter.exporters import ExecutePreprocessor, Config
    from os.path import dirname, join, splitext, basename
    from nbconvert.preprocessors.execute import CellExecutionError

    with open(nb_path, encoding='utf8') as f:
        nb = nbformat.read(f, as_version=4)

    root, ext = splitext(basename(nb_path))

    c = Config()

    # Inject the prolog/epilog cells before execution; the epilog adds the
    # materialization for the requested dataframes.
    nb, resources = AddProlog(config=c, env=env or {}).preprocess(nb, {})

    nb, resources = AddEpilog(config=c, pkg_dir=pkg_dir,
                              dataframes=dataframes,
                              ).preprocess(nb, {})

    def _write_notebook(nb_path, root, ext, write_notebook):
        # Optionally persist the (current state of the) notebook to disk.
        if write_notebook:
            if write_notebook is True:
                exec_nb_path = join(dirname(nb_path), root + '-executed' + ext)
            else:
                exec_nb_path = write_notebook

            with open(exec_nb_path, 'w', encoding='utf8') as f:
                nbformat.write(nb, f)

    _write_notebook(nb_path, root, ext, write_notebook)

    try:
        ep = ExecutePreprocessor(config=c)
        ep.timeout = 5*60

        nb, _ = ep.preprocess(nb, {'metadata': {'path': dirname(nb_path)}})
    except (CellExecutionError, TimeoutError) as e:
        # On failure, dump the partially-executed notebook for debugging.
        err_nb_path = join(dirname(nb_path), root + '-errors' + ext)
        with open(err_nb_path, 'w', encoding='utf8') as f:
            nbformat.write(nb, f)

        raise CellExecutionError("Errors executing noteboook. See notebook at {} for details.\n{}"
                                 .format(err_nb_path, ''))
    except ImportError as e:
        raise NotebookError("Failed to import a library required for notebook execution: {}".format(str(e)))

    # Write again so the executed state is captured.
    _write_notebook(nb_path, root, ext, write_notebook)

    return nb
def _taskdict(task):
    '''
    Return the per-task variable dict for *task* (defaulting to the current
    task), initializing it via varinit() on first access.

    Note: No locking is provided. Under normal circumstances, like the other
    task is not running (e.g. this is running from the same event loop as the
    task) or task is the current task, this is fine.
    '''
    if task is None:
        task = asyncio.current_task()
    assert task

    existing = getattr(task, '_syn_taskvars', None)
    return existing if existing is not None else varinit(task)
/**
 * Executes the BatchStopJobRun operation: runs the standard pre-execution
 * hook chain on the request, then delegates to the generated execution
 * method.
 *
 * @param request the batch-stop request
 * @return the result of the BatchStopJobRun operation
 */
@Override
public BatchStopJobRunResult batchStopJobRun(BatchStopJobRunRequest request) {
    request = beforeClientExecution(request);
    return executeBatchStopJobRun(request);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.