max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
732 | <reponame>liuxilu/afdko
// Generated from FeatParser.g4 by ANTLR 4.9.2
#pragma once
#include "antlr4-runtime.h"
#include "FeatParserVisitor.h"
/**
* This class provides an empty implementation of FeatParserVisitor, which can be
* extended to create a visitor which only needs to handle a subset of the available methods.
*/
class FeatParserBaseVisitor : public FeatParserVisitor {
public:
// NOTE: ANTLR-generated base visitor (see header: FeatParser.g4, ANTLR 4.9.2).
// Every visit* method below simply delegates to visitChildren(ctx), so a
// subclass only needs to override the parser rules it actually handles.
// Do not hand-edit these methods; regenerate from the grammar instead.
virtual antlrcpp::Any visitFile(FeatParser::FileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTopLevelStatement(FeatParser::TopLevelStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitInclude(FeatParser::IncludeContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGlyphClassAssign(FeatParser::GlyphClassAssignContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLangsysAssign(FeatParser::LangsysAssignContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitMark_statement(FeatParser::Mark_statementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAnchorDef(FeatParser::AnchorDefContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitValueRecordDef(FeatParser::ValueRecordDefContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFeatureBlock(FeatParser::FeatureBlockContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTableBlock(FeatParser::TableBlockContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAnonBlock(FeatParser::AnonBlockContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupBlockTopLevel(FeatParser::LookupBlockTopLevelContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFeatureStatement(FeatParser::FeatureStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupBlockOrUse(FeatParser::LookupBlockOrUseContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitCvParameterBlock(FeatParser::CvParameterBlockContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitCvParameterStatement(FeatParser::CvParameterStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitCvParameter(FeatParser::CvParameterContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitStatement(FeatParser::StatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFeatureUse(FeatParser::FeatureUseContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitScriptAssign(FeatParser::ScriptAssignContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLangAssign(FeatParser::LangAssignContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupflagAssign(FeatParser::LookupflagAssignContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupflagElement(FeatParser::LookupflagElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitIgnoreSubOrPos(FeatParser::IgnoreSubOrPosContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitSubstitute(FeatParser::SubstituteContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitPosition(FeatParser::PositionContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitValuePattern(FeatParser::ValuePatternContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitValueRecord(FeatParser::ValueRecordContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitValueLiteral(FeatParser::ValueLiteralContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitCursiveElement(FeatParser::CursiveElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitBaseToMarkElement(FeatParser::BaseToMarkElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLigatureMarkElement(FeatParser::LigatureMarkElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitParameters(FeatParser::ParametersContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitSizemenuname(FeatParser::SizemenunameContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFeatureNames(FeatParser::FeatureNamesContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitSubtable(FeatParser::SubtableContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_BASE(FeatParser::Table_BASEContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitBaseStatement(FeatParser::BaseStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisTags(FeatParser::AxisTagsContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisScripts(FeatParser::AxisScriptsContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitBaseScript(FeatParser::BaseScriptContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_GDEF(FeatParser::Table_GDEFContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefStatement(FeatParser::GdefStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefGlyphClass(FeatParser::GdefGlyphClassContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefAttach(FeatParser::GdefAttachContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefLigCaretPos(FeatParser::GdefLigCaretPosContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefLigCaretIndex(FeatParser::GdefLigCaretIndexContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_head(FeatParser::Table_headContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHeadStatement(FeatParser::HeadStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHead(FeatParser::HeadContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_hhea(FeatParser::Table_hheaContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHheaStatement(FeatParser::HheaStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHhea(FeatParser::HheaContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_vhea(FeatParser::Table_vheaContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVheaStatement(FeatParser::VheaStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVhea(FeatParser::VheaContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_name(FeatParser::Table_nameContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameStatement(FeatParser::NameStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameID(FeatParser::NameIDContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_OS_2(FeatParser::Table_OS_2Context *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitOs_2Statement(FeatParser::Os_2StatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitOs_2(FeatParser::Os_2Context *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_STAT(FeatParser::Table_STATContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitStatStatement(FeatParser::StatStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitDesignAxis(FeatParser::DesignAxisContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisValue(FeatParser::AxisValueContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisValueStatement(FeatParser::AxisValueStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisValueLocation(FeatParser::AxisValueLocationContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisValueFlags(FeatParser::AxisValueFlagsContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitElidedFallbackName(FeatParser::ElidedFallbackNameContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameEntryStatement(FeatParser::NameEntryStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitElidedFallbackNameID(FeatParser::ElidedFallbackNameIDContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameEntry(FeatParser::NameEntryContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTable_vmtx(FeatParser::Table_vmtxContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVmtxStatement(FeatParser::VmtxStatementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVmtx(FeatParser::VmtxContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAnchor(FeatParser::AnchorContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupPattern(FeatParser::LookupPatternContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLookupPatternElement(FeatParser::LookupPatternElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitPattern(FeatParser::PatternContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitPatternElement(FeatParser::PatternElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGlyphClassOptional(FeatParser::GlyphClassOptionalContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGlyphClass(FeatParser::GlyphClassContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGcLiteral(FeatParser::GcLiteralContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGcLiteralElement(FeatParser::GcLiteralElementContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGlyph(FeatParser::GlyphContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGlyphName(FeatParser::GlyphNameContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitLabel(FeatParser::LabelContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitTag(FeatParser::TagContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFixedNum(FeatParser::FixedNumContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGenNum(FeatParser::GenNumContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitFeatureFile(FeatParser::FeatureFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitStatementFile(FeatParser::StatementFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitCvStatementFile(FeatParser::CvStatementFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitBaseFile(FeatParser::BaseFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHeadFile(FeatParser::HeadFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitHheaFile(FeatParser::HheaFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVheaFile(FeatParser::VheaFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitGdefFile(FeatParser::GdefFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameFile(FeatParser::NameFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitVmtxFile(FeatParser::VmtxFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitOs_2File(FeatParser::Os_2FileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitStatFile(FeatParser::StatFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAxisValueFile(FeatParser::AxisValueFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitNameEntryFile(FeatParser::NameEntryFileContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitSubtok(FeatParser::SubtokContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitRevtok(FeatParser::RevtokContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitAnontok(FeatParser::AnontokContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitEnumtok(FeatParser::EnumtokContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitPostok(FeatParser::PostokContext *ctx) override {
return visitChildren(ctx);
}
virtual antlrcpp::Any visitMarkligtok(FeatParser::MarkligtokContext *ctx) override {
return visitChildren(ctx);
}
};
| 4,624 |
538 | package com.datatransfer;
import android.content.Intent;
import android.widget.Toast;
import com.facebook.react.bridge.Arguments;
import com.facebook.react.bridge.Callback;
import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.facebook.react.bridge.ReadableArray;
import com.facebook.react.bridge.ReadableMap;
import com.facebook.react.bridge.WritableArray;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.modules.core.DeviceEventManagerModule;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
public class DataTransferModule extends ReactContextBaseJavaModule {
    /** React context used for Toasts, starting activities, and emitting events to JS. */
    private final ReactContext mContext;

    public DataTransferModule(ReactApplicationContext reactContext) {
        super(reactContext);
        mContext = reactContext;
    }

    /** Name this module is exposed under on the JS side (NativeModules.DataTransferModule). */
    @Override
    public String getName() {
        return "DataTransferModule";
    }

    /**
     * Constants exported from the native side to React Native.
     *
     * @return map of constant name to value, available on the JS module object
     */
    @Nullable
    @Override
    public Map<String, Object> getConstants() {
        Map<String, Object> params = new HashMap<>();
        params.put("CustomConstant", "我是Android端定义的常量");
        params.put("ToastLONG", Toast.LENGTH_LONG);
        params.put("ToastSHORT", Toast.LENGTH_SHORT);
        return params;
    }

    /**
     * Receives a string from React Native and shows it in a Toast.
     *
     * @param s string passed in from JS
     */
    @ReactMethod
    public void getStringFromReactNative(String s) {
        Toast.makeText(mContext, s, Toast.LENGTH_SHORT).show();
    }

    /**
     * Receives an integer from React Native and shows it in a Toast.
     *
     * @param i integer passed in from JS
     */
    @ReactMethod
    public void getIntFromReactNative(Integer i) {
        Toast.makeText(mContext, "" + i, Toast.LENGTH_SHORT).show();
    }

    /**
     * Receives a dictionary (JS object) from React Native. On the native side a
     * JS object arrives as a ReadableMap.
     *
     * @param map dictionary passed in from JS
     */
    @ReactMethod
    public void getDictionaryFromRN(ReadableMap map) {
        System.out.print(map);
        Toast.makeText(mContext, "已收到字典数据", Toast.LENGTH_SHORT).show();
    }

    /**
     * Receives an array from React Native as a ReadableArray.
     *
     * @param array array passed in from JS
     */
    @ReactMethod
    public void getArrayFromRN(ReadableArray array) {
        System.out.print(array);
        Toast.makeText(mContext, "已收到数组数据", Toast.LENGTH_SHORT).show();
    }

    /**
     * Passes a string back to React Native via a callback.
     *
     * @param callback JS callback invoked with the string
     */
    @ReactMethod
    public void passStringBackToRN(Callback callback) {
        callback.invoke("This is a string from Native");
    }

    /**
     * Passes a dictionary back to React Native via a callback. The value sent
     * across the bridge must be a WritableMap; plain java.util.Map/HashMap
     * cannot cross into RN.
     *
     * @param callback JS callback invoked with the map
     */
    @ReactMethod
    public void passDictionaryBackToRN(Callback callback) {
        WritableMap map = Arguments.createMap();
        map.putString("name", "小明");
        map.putInt("age", 20);
        map.putString("gender", "male");
        map.putBoolean("isGraduated", true);
        callback.invoke(map);
    }

    /** Passes an array back to React Native via a callback (must be a WritableArray). */
    @ReactMethod
    public void passArrayBackToRN(Callback callback) {
        WritableArray array = Arguments.createArray();
        array.pushString("React Native");
        array.pushString("Android");
        array.pushString("iOS");
        callback.invoke(array);
    }

    /**
     * Resolves the promise with {@code true} when {@code msg} is non-empty,
     * otherwise rejects it.
     *
     * @param msg     string from JS; may be null when JS passes undefined
     * @param promise promise to settle
     */
    @ReactMethod
    public void passPromiseBackToRN(String msg, Promise promise) {
        // Guard against null as well as "" — the previous msg.equals("") check
        // would throw a NullPointerException if JS passed null/undefined.
        if (msg != null && !msg.isEmpty()) {
            promise.resolve(true);
        } else {
            promise.reject("warning", "msg cannot be empty!");
        }
    }

    /** Navigates from React Native to a purely native Activity. */
    @ReactMethod
    public void jumpToNativeView() {
        Intent intent = new Intent();
        intent.setClass(mContext, TestActivity.class);
        // Starting an Activity from a non-Activity context requires NEW_TASK.
        intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
        mContext.startActivity(intent);
    }

    /**
     * Emits an event (with a fixed demo payload) to JS listeners registered via
     * DeviceEventEmitter.
     *
     * @param eventName name the JS side listens for
     */
    public void sendEvent(String eventName) {
        String dataToRN = "这是发给RN的字符串";
        mContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class).emit(eventName, dataToRN);
    }
}
| 1,805 |
360 | <filename>examples/plot_sine_wave.py
"""
=============================
Plotting simple sine function
=============================
A simple example plotting a fit of the sine function.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y = 100 * \
(numpy.sin((X[:, 6])) - 4.0) + \
10 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth(max_degree=3, minspan_alpha=.5, verbose=True)
model.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
# Plot the model
y_hat = model.predict(X)
plt.plot(X[:, 6], y, 'r.')
plt.plot(X[:, 6], y_hat, 'b.')
plt.show()
| 283 |
563 | #include "stdafx.h"
#include "util_dll.h"
#include "util_static_lib2.h"
#include "sub folder/useless_static_lib2.h"
#include <external.h>
int main(int, char**)
{
std::cout << "Hello XCode World, from " CREATION_DATE "!" << std::endl;
PrintBuildString("Exe");
std::vector<int> someArray(5, 6);
// from dll1
UtilDll1 utilityDll;
utilityDll.ComputeSum(someArray);
// from static_lib2
Util2 utilityStatic;
utilityStatic.DoSomethingUseful();
StaticLib2::UselessMethod();
return 0;
}
| 219 |
340 | package com.zblog.core.feed;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import com.zblog.core.feed.Channel.Article;
import com.zblog.core.util.IndentXMLStreamWriter;
import com.zblog.core.util.StringUtils;
public class RssFeedWriter{
  /** Static utility class; not instantiable. */
  private RssFeedWriter(){
  }

  /**
   * Serializes the given channel as an RSS 2.0 document to the output stream.
   *
   * @param channel feed metadata and articles to write
   * @param out destination stream (not closed by this method)
   * @throws XMLStreamException on any StAX write failure
   */
  public static void write(Channel channel, OutputStream out) throws XMLStreamException{
    XMLOutputFactory factory = XMLOutputFactory.newFactory();
    XMLStreamWriter writer = null;
    try{
      writer = new IndentXMLStreamWriter(factory.createXMLStreamWriter(out, "UTF-8"));
      // RFC 822-style date format required by RSS 2.0 (US locale for day/month names).
      SimpleDateFormat format = new SimpleDateFormat("EEE', 'dd' 'MMM' 'yyyy' 'HH:mm:ss' 'Z", Locale.US);
      writer.writeStartDocument("UTF-8", "1.0");
      writer.writeStartElement("rss");
      writer.writeAttribute("version", "2.0");
      writer.writeNamespace("content", "http://purl.org/rss/1.0/modules/content/");
      writer.writeStartElement("channel");
      createNode(writer, "title", channel.getTitle());
      createNode(writer, "link", channel.getDomain());
      createNode(writer, "description", channel.getDescription());
      createNode(writer, "language", "zh-CN");
      // RSS element names are case sensitive: the spec (and the per-item code
      // below) uses "pubDate", so the channel-level element must match.
      createNode(writer, "pubDate", format.format(new Date()));
      if(!StringUtils.isBlank(channel.getLogoUrl())){
        writer.writeStartElement("image");
        createNode(writer, "link", channel.getDomain());
        createNode(writer, "url", channel.getLogoUrl());
        createNode(writer, "title", channel.getTitle());
        writer.writeEndElement();
      }
      for(Article article : channel.getItems()){
        writer.writeStartElement("item");
        createNode(writer, "title", article.getTitle());
        createNode(writer, "link", channel.getDomain() + article.getLink());
        createNode(writer, "category", article.getCategory());
        createNode(writer, "author", article.getAuthor());
        createNode(writer, "description", article.getDescription());
        createNode(writer, "content", "http://purl.org/rss/1.0/modules/content/", "encoded", article.getContent());
        createNode(writer, "pubDate", format.format(article.getPubDate()));
        createNode(writer, "guid", channel.getDomain() + article.getGuid());
        createNode(writer, "comments", channel.getDomain() + article.getGuid() + "#comments");
        writer.writeEndElement();
      }
      writer.writeEndElement();
      writer.writeEndElement();
      // End the document only after every element has been written. The old
      // code called writeEndDocument() immediately after writeStartDocument(),
      // which terminates the document before any content and makes subsequent
      // writeStartElement() calls write outside the document.
      writer.writeEndDocument();
      writer.flush();
    }finally{
      if(writer != null)
        writer.close();
    }
  }

  /** Writes {@code <name>value</name>}; a null value is written as an empty element. */
  private static void createNode(XMLStreamWriter writer, String name, String value) throws XMLStreamException{
    writer.writeStartElement(name);
    // writeCharacters(null) would throw NPE; tolerate absent optional fields.
    writer.writeCharacters(value == null ? "" : value);
    writer.writeEndElement();
  }

  /** Writes a namespaced element whose value is wrapped in a CDATA section. */
  private static void createNode(XMLStreamWriter writer, String prefix, String namespace, String name, String value)
      throws XMLStreamException{
    writer.writeStartElement(prefix, name, namespace);
    writer.writeCData(value == null ? "" : value);
    writer.writeEndElement();
  }
}
| 1,145 |
1,073 | <gh_stars>1000+
//===-- Redeclarable.h - Base for Decls that can be redeclared -*- C++ -*-====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Redeclarable interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_REDECLARABLE_H
#define LLVM_CLANG_AST_REDECLARABLE_H
#include "clang/AST/ExternalASTSource.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Casting.h"
#include <iterator>
namespace clang {
class ASTContext;
/// \brief Provides common interface for the Decls that can be redeclared.
template<typename decl_type>
class Redeclarable {
protected:
class DeclLink {
/// A pointer to a known latest declaration, either statically known or
/// generationally updated as decls are added by an external source.
typedef LazyGenerationalUpdatePtr<const Decl*, Decl*,
&ExternalASTSource::CompleteRedeclChain>
KnownLatest;
/// We store a pointer to the ASTContext in the UninitializedLatest
/// pointer, but to avoid circular type dependencies when we steal the low
/// bits of this pointer, we use a raw void* here.
typedef const void *UninitializedLatest;
typedef Decl *Previous;
/// A pointer to either an uninitialized latest declaration (where either
/// we've not yet set the previous decl or there isn't one), or to a known
/// previous declaration.
typedef llvm::PointerUnion<Previous, UninitializedLatest> NotKnownLatest;
// Discriminated link state: NotKnownLatest until the chain is set up,
// KnownLatest once the generational 'most recent' cache is allocated.
mutable llvm::PointerUnion<NotKnownLatest, KnownLatest> Next;
public:
enum PreviousTag { PreviousLink };
enum LatestTag { LatestLink };
DeclLink(LatestTag, const ASTContext &Ctx)
: Next(NotKnownLatest(reinterpret_cast<UninitializedLatest>(&Ctx))) {}
DeclLink(PreviousTag, decl_type *D)
: Next(NotKnownLatest(Previous(D))) {}
bool NextIsPrevious() const {
return Next.is<NotKnownLatest>() &&
// FIXME: 'template' is required on the next line due to an
// apparent clang bug.
Next.get<NotKnownLatest>().template is<Previous>();
}
bool NextIsLatest() const { return !NextIsPrevious(); }
decl_type *getNext(const decl_type *D) const {
if (Next.is<NotKnownLatest>()) {
NotKnownLatest NKL = Next.get<NotKnownLatest>();
if (NKL.is<Previous>())
return static_cast<decl_type*>(NKL.get<Previous>());
// Allocate the generational 'most recent' cache now, if needed.
Next = KnownLatest(*reinterpret_cast<const ASTContext *>(
NKL.get<UninitializedLatest>()),
const_cast<decl_type *>(D));
}
return static_cast<decl_type*>(Next.get<KnownLatest>().get(D));
}
void setPrevious(decl_type *D) {
assert(NextIsPrevious() && "decl became non-canonical unexpectedly");
Next = Previous(D);
}
void setLatest(decl_type *D) {
assert(NextIsLatest() && "decl became canonical unexpectedly");
if (Next.is<NotKnownLatest>()) {
NotKnownLatest NKL = Next.get<NotKnownLatest>();
Next = KnownLatest(*reinterpret_cast<const ASTContext *>(
NKL.get<UninitializedLatest>()),
D);
} else {
auto Latest = Next.get<KnownLatest>();
Latest.set(D);
Next = Latest;
}
}
// Forwards to the generational pointer; only valid once Next holds a
// KnownLatest (i.e. after getNext/setLatest have initialized the cache).
void markIncomplete() { Next.get<KnownLatest>().markIncomplete(); }
Decl *getLatestNotUpdated() const {
assert(NextIsLatest() && "expected a canonical decl");
if (Next.is<NotKnownLatest>())
return nullptr;
return Next.get<KnownLatest>().getNotUpdated();
}
};
static DeclLink PreviousDeclLink(decl_type *D) {
return DeclLink(DeclLink::PreviousLink, D);
}
static DeclLink LatestDeclLink(const ASTContext &Ctx) {
return DeclLink(DeclLink::LatestLink, Ctx);
}
/// \brief Points to the next redeclaration in the chain.
///
/// If NextIsPrevious() is true, this is a link to the previous declaration
/// of this same Decl. If NextIsLatest() is true, this is the first
/// declaration and Link points to the latest declaration. For example:
///
/// #1 int f(int x, int y = 1); // <pointer to #3, true>
/// #2 int f(int x = 0, int y); // <pointer to #1, false>
/// #3 int f(int x, int y) { return x + y; } // <pointer to #2, false>
///
/// If there is only one declaration, it is <pointer to self, true>
DeclLink RedeclLink;
// Cached first (canonical) declaration of the chain; see getFirstDecl().
decl_type *First;
decl_type *getNextRedeclaration() const {
return RedeclLink.getNext(static_cast<const decl_type *>(this));
}
public:
Redeclarable(const ASTContext &Ctx)
: RedeclLink(LatestDeclLink(Ctx)), First(static_cast<decl_type *>(this)) {}
/// \brief Return the previous declaration of this declaration or NULL if this
/// is the first declaration.
decl_type *getPreviousDecl() {
if (RedeclLink.NextIsPrevious())
return getNextRedeclaration();
return nullptr;
}
const decl_type *getPreviousDecl() const {
return const_cast<decl_type *>(
static_cast<const decl_type*>(this))->getPreviousDecl();
}
/// \brief Return the first declaration of this declaration or itself if this
/// is the only declaration.
decl_type *getFirstDecl() { return First; }
/// \brief Return the first declaration of this declaration or itself if this
/// is the only declaration.
const decl_type *getFirstDecl() const { return First; }
/// \brief True if this is the first declaration in its redeclaration chain.
bool isFirstDecl() const { return RedeclLink.NextIsLatest(); }
/// \brief Returns the most recent (re)declaration of this declaration.
decl_type *getMostRecentDecl() {
return getFirstDecl()->getNextRedeclaration();
}
/// \brief Returns the most recent (re)declaration of this declaration.
const decl_type *getMostRecentDecl() const {
return getFirstDecl()->getNextRedeclaration();
}
/// \brief Set the previous declaration. If PrevDecl is NULL, set this as the
/// first and only declaration.
void setPreviousDecl(decl_type *PrevDecl);
/// \brief Iterates through all the redeclarations of the same decl.
class redecl_iterator {
/// Current - The current declaration.
decl_type *Current;
// Declaration the iteration started from; used to detect chain wrap-around.
decl_type *Starter;
// Set once the first decl has been visited; guards against cyclic chains.
bool PassedFirst;
public:
typedef decl_type* value_type;
typedef decl_type* reference;
typedef decl_type* pointer;
typedef std::forward_iterator_tag iterator_category;
typedef std::ptrdiff_t difference_type;
redecl_iterator() : Current(nullptr) { }
explicit redecl_iterator(decl_type *C)
: Current(C), Starter(C), PassedFirst(false) { }
reference operator*() const { return Current; }
pointer operator->() const { return Current; }
redecl_iterator& operator++() {
assert(Current && "Advancing while iterator has reached end");
// Sanity check to avoid infinite loop on invalid redecl chain.
if (Current->isFirstDecl()) {
if (PassedFirst) {
assert(0 && "Passed first decl twice, invalid redecl chain!");
Current = nullptr;
return *this;
}
PassedFirst = true;
}
// Get either previous decl or latest decl.
decl_type *Next = Current->getNextRedeclaration();
Current = (Next != Starter) ? Next : nullptr;
return *this;
}
redecl_iterator operator++(int) {
redecl_iterator tmp(*this);
++(*this);
return tmp;
}
friend bool operator==(redecl_iterator x, redecl_iterator y) {
return x.Current == y.Current;
}
friend bool operator!=(redecl_iterator x, redecl_iterator y) {
return x.Current != y.Current;
}
};
typedef llvm::iterator_range<redecl_iterator> redecl_range;
/// \brief Returns an iterator range for all the redeclarations of the same
/// decl. It will iterate at least once (when this decl is the only one).
redecl_range redecls() const {
return redecl_range(redecl_iterator(const_cast<decl_type *>(
static_cast<const decl_type *>(this))),
redecl_iterator());
}
redecl_iterator redecls_begin() const { return redecls().begin(); }
redecl_iterator redecls_end() const { return redecls().end(); }
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
/// \brief Get the primary declaration for a declaration from an AST file. That
/// will be the first-loaded declaration.
Decl *getPrimaryMergedDecl(Decl *D);
/// \brief Provides common interface for the Decls that cannot be redeclared,
/// but can be merged if the same declaration is brought in from multiple
/// modules.
template<typename decl_type>
class Mergeable {
public:
Mergeable() {}
/// \brief Return the first declaration of this declaration or itself if this
/// is the only declaration.
// Declarations not loaded from an AST file are their own first declaration;
// otherwise defer to the AST reader's notion of the primary merged decl.
decl_type *getFirstDecl() {
decl_type *D = static_cast<decl_type*>(this);
if (!D->isFromASTFile())
return D;
return cast<decl_type>(getPrimaryMergedDecl(const_cast<decl_type*>(D)));
}
/// \brief Return the first declaration of this declaration or itself if this
/// is the only declaration.
const decl_type *getFirstDecl() const {
const decl_type *D = static_cast<const decl_type*>(this);
if (!D->isFromASTFile())
return D;
return cast<decl_type>(getPrimaryMergedDecl(const_cast<decl_type*>(D)));
}
/// \brief Returns true if this is the first declaration.
bool isFirstDecl() const { return getFirstDecl() == this; }
};
}
#endif
| 3,551 |
18,012 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.Configuration;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.extension.Activate;
import org.apache.dubbo.common.extension.ExtensionLoader;
import org.apache.dubbo.common.threadpool.manager.ExecutorRepository;
import org.apache.dubbo.remoting.api.Http2WireProtocol;
import org.apache.dubbo.rpc.HeaderFilter;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.ScopeModelAware;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleClientHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleCommandOutBoundHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleHttp2FrameServerHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleServerConnectionHandler;
import org.apache.dubbo.rpc.protocol.tri.transport.TripleTailHandler;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http2.Http2FrameCodec;
import io.netty.handler.codec.http2.Http2FrameCodecBuilder;
import io.netty.handler.codec.http2.Http2MultiplexHandler;
import io.netty.handler.codec.http2.Http2Settings;
import io.netty.handler.ssl.SslContext;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Executor;
import static org.apache.dubbo.common.constants.CommonConstants.HEADER_FILTER_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_ENABLE_PUSH_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_HEADER_TABLE_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_CONCURRENT_STREAMS_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_FRAME_SIZE_KEY;
import static org.apache.dubbo.rpc.Constants.H2_SETTINGS_MAX_HEADER_LIST_SIZE_KEY;
/**
 * Wire protocol for Triple (Dubbo's gRPC-compatible protocol) over HTTP/2.
 * Builds the Netty server and client channel pipelines: an {@link Http2FrameCodec}
 * whose initial HTTP/2 SETTINGS are read from the global configuration, followed by
 * an {@link Http2MultiplexHandler} that spawns one child channel per HTTP/2 stream.
 */
@Activate
public class TripleHttp2Protocol extends Http2WireProtocol implements ScopeModelAware {
    // 1 MiB
    private static final int MIB_1 = 1 << 20;
    // 8 MiB
    private static final int MIB_8 = 1 << 23;
    // 32 KiB
    private static final int KIB_32 = 1 << 15;
    // Fallback HTTP/2 SETTINGS values used when the configuration has no override.
    private static final int DEFAULT_MAX_HEADER_LIST_SIZE = KIB_32;
    private static final int DEFAULT_SETTING_HEADER_LIST_SIZE = 4096;
    private static final int DEFAULT_MAX_FRAME_SIZE = MIB_8;
    private static final int DEFAULT_WINDOW_INIT_SIZE = MIB_8;
    // Loader for activated header filters; null until setApplicationModel is called.
    private ExtensionLoader<HeaderFilter> filtersLoader;
    private FrameworkModel frameworkModel;
    // Global configuration; re-resolved against the real application model in
    // setApplicationModel (the default model is only a bootstrap fallback).
    private Configuration config = ConfigurationUtils.getGlobalConfiguration(
        ApplicationModel.defaultModel());
    @Override
    public void setFrameworkModel(FrameworkModel frameworkModel) {
        this.frameworkModel = frameworkModel;
    }
    @Override
    public void setApplicationModel(ApplicationModel applicationModel) {
        this.config = ConfigurationUtils.getGlobalConfiguration(applicationModel);
        this.filtersLoader = applicationModel.getExtensionLoader(HeaderFilter.class);
    }
    @Override
    public void close() {
        super.close();
    }
    @Override
    public void configServerPipeline(URL url, ChannelPipeline pipeline, SslContext sslContext) {
        // Resolve the header filters activated for this URL; empty if the
        // application model has not been injected yet.
        final List<HeaderFilter> headFilters;
        if (filtersLoader != null) {
            headFilters = filtersLoader.getActivateExtension(url,
                HEADER_FILTER_KEY);
        } else {
            headFilters = Collections.emptyList();
        }
        // HTTP/2 frame codec with SETTINGS values taken from configuration,
        // falling back to the defaults declared above.
        final Http2FrameCodec codec = Http2FrameCodecBuilder.forServer()
            .gracefulShutdownTimeoutMillis(10000)
            .initialSettings(new Http2Settings().headerTableSize(
                config.getInt(H2_SETTINGS_HEADER_TABLE_SIZE_KEY, DEFAULT_SETTING_HEADER_LIST_SIZE))
                .maxConcurrentStreams(
                    config.getInt(H2_SETTINGS_MAX_CONCURRENT_STREAMS_KEY, Integer.MAX_VALUE))
                .initialWindowSize(
                    config.getInt(H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY, DEFAULT_WINDOW_INIT_SIZE))
                .maxFrameSize(config.getInt(H2_SETTINGS_MAX_FRAME_SIZE_KEY, DEFAULT_MAX_FRAME_SIZE))
                .maxHeaderListSize(config.getInt(H2_SETTINGS_MAX_HEADER_LIST_SIZE_KEY,
                    DEFAULT_MAX_HEADER_LIST_SIZE)))
            .frameLogger(SERVER_LOGGER)
            .build();
        // One child channel per stream; each child gets the Triple server handlers.
        final Http2MultiplexHandler handler = new Http2MultiplexHandler(
            new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) {
                    final ChannelPipeline p = ch.pipeline();
                    p.addLast(new TripleCommandOutBoundHandler());
                    p.addLast(new TripleHttp2FrameServerHandler(frameworkModel, lookupExecutor(url),
                        headFilters));
                }
            });
        pipeline.addLast(codec, new TripleServerConnectionHandler(), handler,
            new TripleTailHandler());
    }
    // Returns the executor configured for this URL via the ExecutorRepository SPI.
    private Executor lookupExecutor(URL url) {
        return url.getOrDefaultApplicationModel()
            .getExtensionLoader(ExecutorRepository.class)
            .getDefaultExtension().getExecutor(url);
    }
    @Override
    public void configClientPipeline(URL url, ChannelPipeline pipeline, SslContext sslContext) {
        // Same SETTINGS handling as the server side, plus an explicit push-enabled
        // flag (disabled by default).
        final Http2FrameCodec codec = Http2FrameCodecBuilder.forClient()
            .gracefulShutdownTimeoutMillis(10000)
            .initialSettings(new Http2Settings().headerTableSize(
                config.getInt(H2_SETTINGS_HEADER_TABLE_SIZE_KEY, DEFAULT_SETTING_HEADER_LIST_SIZE))
                .pushEnabled(config.getBoolean(H2_SETTINGS_ENABLE_PUSH_KEY, false))
                .maxConcurrentStreams(
                    config.getInt(H2_SETTINGS_MAX_CONCURRENT_STREAMS_KEY, Integer.MAX_VALUE))
                .initialWindowSize(
                    config.getInt(H2_SETTINGS_INITIAL_WINDOW_SIZE_KEY, DEFAULT_WINDOW_INIT_SIZE))
                .maxFrameSize(config.getInt(H2_SETTINGS_MAX_FRAME_SIZE_KEY, DEFAULT_MAX_FRAME_SIZE))
                .maxHeaderListSize(config.getInt(H2_SETTINGS_MAX_HEADER_LIST_SIZE_KEY,
                    DEFAULT_MAX_HEADER_LIST_SIZE)))
            .frameLogger(CLIENT_LOGGER)
            .build();
        final Http2MultiplexHandler handler = new Http2MultiplexHandler(
            new TripleClientHandler(frameworkModel));
        pipeline.addLast(codec, handler, new TripleTailHandler());
    }
}
| 2,936 |
483 | <reponame>cameronbronstein/notebooks<filename>dev/utils.py
from _ctypes import PyObj_FromPtr
from copy import deepcopy
import json
import re
import numbers
import geojson
from geojson import Polygon, Feature, FeatureCollection
# https://stackoverflow.com/questions/13249415/how-to-implement-custom-indentation-when-pretty-printing-with-the-json-module
class NoIndentCoordinate(object):
    """Marker wrapper for a coordinate pair that must be emitted on one line.

    ``CoordinateEncoder`` recognizes instances of this class during JSON
    encoding and serializes the wrapped value compactly instead of applying
    the normal pretty-printing indentation.
    """
    def __init__(self, value):
        # Hold the raw coordinate payload (e.g. a [lon, lat] list) unchanged;
        # the encoder reads it back via the ``value`` attribute.
        self.value = value
class CoordinateEncoder(geojson.GeoJSONEncoder):
    """JSON encoder that renders NoIndentCoordinate values on a single line.

    Plain pretty-printing would split every coordinate pair across several
    lines.  During encoding, ``default`` replaces each NoIndentCoordinate with
    a unique placeholder string ('@@<id>@@'); ``encode`` then substitutes each
    placeholder with the compact ``json.dumps`` of the wrapped value.
    """
    FORMAT_SPEC = '@@{}@@'  # placeholder template, keyed by the wrapper's id()
    regex = re.compile(FORMAT_SPEC.format(r'(\d+)'))
    def __init__(self, **kwargs):
        # Save copy of any keyword argument values needed for use here.
        self.__sort_keys = kwargs.get('sort_keys', None)
        super(CoordinateEncoder, self).__init__(**kwargs)
    def default(self, obj):
        # Tag NoIndentCoordinate wrappers with a placeholder string; defer
        # everything else to the GeoJSON encoder.
        val = (self.FORMAT_SPEC.format(id(obj))
               if isinstance(obj, NoIndentCoordinate)
               else super(CoordinateEncoder, self).default(obj))
        return val
    def encode(self, obj):
        format_spec = self.FORMAT_SPEC # Local var to expedite access.
        json_repr = super(CoordinateEncoder, self).encode(obj) # Default JSON.
        # Replace any marked-up object ids in the JSON repr with the
        # value returned from the json.dumps() of the corresponding
        # wrapped Python object.
        for match in self.regex.finditer(json_repr):
            # see https://stackoverflow.com/a/15012814/355230
            # NOTE(review): PyObj_FromPtr recovers the wrapper from its id();
            # this relies on the wrapper staying alive until encode() finishes.
            id = int(match.group(1))
            no_indent = PyObj_FromPtr(id)
            json_obj_repr = json.dumps(no_indent.value, sort_keys=self.__sort_keys)
            # Replace the matched id string with json formatted representation
            # of the corresponding Python object.
            json_repr = json_repr.replace(
                '"{}"'.format(format_spec.format(id)), json_obj_repr)
        return json_repr
class CompactFeature(Feature):
    """GeoJSON Feature whose repr pretty-prints with compact coordinate pairs."""
    def __repr__(self):
        return geojson.dumps(self.specify_coords(), cls=CoordinateEncoder, indent=4)
    __str__ = __repr__
    def specify_coords(self):
        """Return a deep copy with every 2D coordinate pair wrapped in
        NoIndentCoordinate so CoordinateEncoder prints it on one line."""
        def is_coords(c):
            '''Answering: is this object a set of 2D coordinates?'''
            if len(c) != 2:
                return False
            for v in c:
                if not isinstance(v, numbers.Number):
                    return False
            return True
        def noindent_coords(coords):
            '''Recursively wrap every 2D coordinate pair in NoIndentCoordinate.'''
            if is_coords(coords):
                return NoIndentCoordinate(coords)
            else:
                for i, c in enumerate(coords):
                    coords[i] = noindent_coords(c)
                return coords
        try:
            p = deepcopy(self)
            p['geometry']['coordinates'] = noindent_coords(p['geometry']['coordinates'])
        except (KeyError, TypeError):
            # Feature without a geometry/coordinates entry: return the plain copy.
            pass
        return p
class CompactFeatureCollection(FeatureCollection):
    """FeatureCollection whose repr pretty-prints with compact coordinate pairs."""
    def __init__(self, **extras):
        # NOTE(review): super(FeatureCollection, self) skips
        # FeatureCollection.__init__, so the usual `features` positional
        # argument is not accepted and `type` must be set by hand below.
        # Presumably features arrive via **extras -- confirm intended.
        super(FeatureCollection, self).__init__(**extras)
        self.type = 'FeatureCollection'
    def __repr__(self):
        return geojson.dumps(self.specify_coords(), cls=CoordinateEncoder, indent=4)
    __str__ = __repr__
    def specify_coords(self):
        """Return a deep copy whose features carry compact coordinates.

        Assumes every feature is a CompactFeature (has specify_coords) --
        TODO confirm against callers.
        """
        try:
            p = deepcopy(self)
            p.features = [f.specify_coords() for f in p.features]
        except (KeyError, TypeError):
            pass
        return p
| 1,604 |
852 | <reponame>ckamtsikis/cmssw
from PhysicsTools.HeppyCore.framework.analyzer import Analyzer
from PhysicsTools.HeppyCore.statistics.tree import Tree
from ROOT import TFile
class SimpleTreeProducer(Analyzer):
    """Heppy analyzer that writes one flat ROOT tree with a single branch.

    Configuration (via cfg_ana): `tree_name` and `tree_title` for the output
    tree.  The tree is stored in `<dirName>/simple_tree.root`.
    """
    def beginLoop(self, setup):
        # Create the output ROOT file and book the tree and its only branch.
        super(SimpleTreeProducer, self).beginLoop(setup)
        self.rootfile = TFile('/'.join([self.dirName,
                                        'simple_tree.root']),
                              'recreate')
        self.tree = Tree( self.cfg_ana.tree_name,
                          self.cfg_ana.tree_title )
        self.tree.var('test_variable')
    def process(self, event):
        # Fill the branch from the event's input and commit the row to the tree.
        self.tree.fill('test_variable', event.input.var1)
        self.tree.tree.Fill()
    def write(self, setup):
        # Flush the tree to disk and close the file.
        self.rootfile.Write()
        self.rootfile.Close()
| 385 |
1,538 | /*
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: <NAME>
*/
#include "runtime/flet.h"
#include "kernel/instantiate.h"
#include "kernel/abstract.h"
#include "kernel/for_each_fn.h"
#include "kernel/type_checker.h"
#include "kernel/inductive.h"
#include "library/trace.h"
#include "library/class.h"
#include "library/compiler/util.h"
#include "library/compiler/csimp.h"
#include "library/compiler/closed_term_cache.h"
namespace lean {
extern "C" object* lean_mk_eager_lambda_lifting_name(object* n, object* idx);
extern "C" uint8 lean_is_eager_lambda_lifting_name(object* n);
/* Create the auxiliary name used for the `idx`-th eagerly lifted lambda of `fn`
   (delegates to the Lean runtime helper). */
name mk_elambda_lifting_name(name const & fn, unsigned idx) {
    return name(lean_mk_eager_lambda_lifting_name(fn.to_obj_arg(), mk_nat_obj(idx)));
}
/* Return true iff `fn` is a name produced by `mk_elambda_lifting_name`. */
bool is_elambda_lifting_name(name fn) {
    return lean_is_eager_lambda_lifting_name(fn.to_obj_arg());
}
/* Return true iff `e` contains a free variable that is not in `exception_set`. */
static bool has_fvar_except(expr const & e, name_set const & exception_set) {
if (!has_fvar(e)) return false;
bool found = false;
for_each(e, [&](expr const & e, unsigned) {
if (!has_fvar(e)) return false;
if (found) return false; // done
if (is_fvar(e) && !exception_set.contains(fvar_name(e))) {
found = true;
return false; // done
}
return true;
});
return found;
}
/* Return true if the type of a parameter in `params` depends on `fvar`. */
static bool depends_on_fvar(local_ctx const & lctx, buffer<expr> const & params, expr const & fvar) {
for (expr const & param : params) {
local_decl const & decl = lctx.get_local_decl(param);
lean_assert(!decl.get_value());
if (has_fvar(decl.get_type(), fvar))
return true;
}
return false;
}
/*
We eagerly lift lambda expressions that are stored in terminal constructors.
We say a constructor application is terminal if it is the result/returned.
We use this transformation to generate good code for the following scenario:
Suppose we have a definition
```
def f (x : nat) : Pro (Nat -> Nat) (Nat -> Bool) :=
((fun y, <code1 using x y>), (fun z, <code2 using x z>))
```
That is, `f` is "packing" functions in a structure and returning it.
Now, consider the following application:
```
(f a).1 b
```
Without eager lambda lifting, `f a` will create two closures and one pair.
Then, we project the first closure in the pair and apply it to `b`.
This is inefficient. If `f` is small, we can workaround this problem by inlining
`f`. However, if inlining is not feasible, we would have to perform all memory allocations.
This is particularly bad, if `f` is a structure with many fields.
With eager lambda lifting, we transform `f` into
```
def f._elambda_1 (x y) : Nat :=
<code1 using x y>
def f._elambda_2 (x z) : Bool :=
<code2 using x z>
def f (x : nat) : Pro (Nat -> Nat) (Nat -> Bool) :=
(f._elambda_1 x, f._elambda_2 x)
```
Then, when the simplifier sees `(f a).1 b`, it can reduce it to `f._elambda_1 a b`,
and closure and pair allocations are avoided.
Note that we do not lift all nested lambdas here, only the ones in terminal constructors.
Premature lambda lifting may hurt performance in the non-terminal case. Example:
```
def f (xs : List Nat) :=
let g := fun x, x + x in
List.map g xs
```
We want to keep `fun x, x+x` until we specialize `f`.
Remark: we also skip this transformation for definitions marked as `[inline]` or `[instance]`.
*/
/* Implements eager lambda lifting (see file comment above): lambdas that end up
   stored in terminal constructor applications are lifted into auxiliary
   top-level definitions named `<base>._elambda_<idx>`. */
class eager_lambda_lifting_fn {
    type_checker::state m_st;
    csimp_cfg m_cfg;
    local_ctx m_lctx;
    buffer<comp_decl> m_new_decls; /* auxiliary definitions produced by lifting */
    name m_base_name; /* name of the declaration being processed */
    name_set m_closed_fvars; /* let-declarations that only depend on global constants and other closed_fvars */
    name_set m_terminal_lambdas; /* fvars bound to lambdas stored in terminal constructors */
    name_set m_nonterminal_lambdas; /* fvars bound to lambdas used in other positions */
    unsigned m_next_idx{1};
    environment const & env() const { return m_st.env(); }
    name_generator & ngen() { return m_st.ngen(); }
    expr eta_expand(expr const & e) {
        return lcnf_eta_expand(m_st, m_lctx, e);
    }
    /* Next fresh auxiliary name `<m_base_name>._elambda_<idx>`. */
    name next_name() {
        name r = mk_elambda_lifting_name(m_base_name, m_next_idx);
        m_next_idx++;
        return r;
    }
    /* Collect into `fvars` the free variables of `e` (transitively, through the
       values of closed let-declarations).  Returns false when lifting must be
       aborted because a join point would have to be copied. */
    bool collect_fvars_core(expr const & e, name_set & collected, buffer<expr> & fvars) {
        if (!has_fvar(e)) return true;
        bool ok = true;
        for_each(e, [&](expr const & x, unsigned) {
            if (!has_fvar(x)) return false;
            if (!ok) return false;
            if (is_fvar(x)) {
                if (!collected.contains(fvar_name(x))) {
                    collected.insert(fvar_name(x));
                    local_decl d = m_lctx.get_local_decl(x);
                    /* We do not eagerly lift a lambda if we need to copy a join-point.
                       Remark: we may revise this decision in the future, and use the same
                       approach we use at `lambda_lifting.cpp`.
                    */
                    if (is_join_point_name(d.get_user_name())) {
                        ok = false;
                        return false;
                    } else {
                        if (!collect_fvars_core(d.get_type(), collected, fvars)) {
                            ok = false;
                            return false;
                        }
                        if (m_closed_fvars.contains(fvar_name(x))) {
                            /* If x only depends on global constants and other variables in m_closed_fvars.
                               Then, we also collect the other variables at m_closed_fvars. */
                            if (!collect_fvars_core(*d.get_value(), collected, fvars)) {
                                ok = false;
                                return false;
                            }
                        }
                        fvars.push_back(x);
                    }
                }
            }
            return true;
        });
        return ok;
    }
    /* Wrapper over `collect_fvars_core` that also sorts the collected fvars
       into declaration order. */
    bool collect_fvars(expr const & e, buffer<expr> & fvars) {
        if (!has_fvar(e)) return true;
        name_set collected;
        if (collect_fvars_core(e, collected, fvars)) {
            sort_fvars(m_lctx, fvars);
            return true;
        } else {
            return false;
        }
    }
    /* Split fvars in two groups: `new_params` and `to_copy`.
       We put a fvar `x` in `new_params` if it is not a let declaration,
       or a variable in `params` depend on `x`, or it is not in `m_closed_fvars`.
       The variables in `to_copy` are variables that depend only on
       global constants or other variables in `to_copy`, and `params` do not depend on them. */
    void split_fvars(buffer<expr> const & fvars, buffer<expr> const & params, buffer<expr> & new_params, buffer<expr> & to_copy) {
        for (expr const & fvar : fvars) {
            local_decl const & decl = m_lctx.get_local_decl(fvar);
            if (!decl.get_value()) {
                new_params.push_back(fvar);
            } else {
                if (!m_closed_fvars.contains(fvar_name(fvar)) || depends_on_fvar(m_lctx, params, fvar)) {
                    new_params.push_back(fvar);
                } else {
                    to_copy.push_back(fvar);
                }
            }
        }
    }
    /* Lift lambda `e` into a new top-level definition and return the
       application of the new constant to the captured free variables.
       On failure the original expression is returned unchanged. */
    expr lift_lambda(expr e, bool apply_simp) {
        /* Hack: We use `try` here because previous compilation steps may have
           produced type incorrect terms. */
        try {
            lean_assert(is_lambda(e));
            buffer<expr> fvars;
            if (!collect_fvars(e, fvars)) {
                return e;
            }
            buffer<expr> params;
            while (is_lambda(e)) {
                expr param_type = instantiate_rev(binding_domain(e), params.size(), params.data());
                expr param = m_lctx.mk_local_decl(ngen(), binding_name(e), param_type, binding_info(e));
                params.push_back(param);
                e = binding_body(e);
            }
            e = instantiate_rev(e, params.size(), params.data());
            buffer<expr> new_params, to_copy;
            split_fvars(fvars, params, new_params, to_copy);
            /*
              Variables in `to_copy` only depend on global constants
              and other variables in `to_copy`. Moreover, `params` do not depend on them.
              It is wasteful to pass them as new parameters to the new lifted declaration.
              We can just copy them. The code duplication is not problematic because later at `extract_closed`
              we will create global names for closed terms, and eliminate the redundancy.
            */
            e = m_lctx.mk_lambda(to_copy, e);
            e = m_lctx.mk_lambda(params, e);
            expr code = abstract(e, new_params.size(), new_params.data());
            unsigned i = new_params.size();
            while (i > 0) {
                --i;
                local_decl const & decl = m_lctx.get_local_decl(new_params[i]);
                expr type = abstract(decl.get_type(), i, new_params.data());
                code = ::lean::mk_lambda(decl.get_user_name(), type, code);
            }
            if (apply_simp) {
                code = csimp(env(), code, m_cfg);
            }
            expr type = cheap_beta_reduce(type_checker(m_st).infer(code));
            name n = next_name();
            /* We add the auxiliary declaration `n` as a "meta" axiom to the environment.
               This is a hack to make sure we can use `csimp` to simplify `code` and
               other definitions that use `n`.
               We used a similar hack at `specialize.cpp`. */
            declaration aux_ax = mk_axiom(n, names(), type, true /* meta */);
            m_st.env() = env().add(aux_ax, false);
            m_new_decls.push_back(comp_decl(n, code));
            return mk_app(mk_constant(n), new_params);
        } catch (exception &) {
            return e;
        }
    }
    /* Given a free variable `x`, follow let-decls and return a pair `(x, v)`.
       Examples for `find(x)`
       - `x := 1` ==> `(x, 1)`
       - `z := (fun w, w+1); y := z; x := y` ==> `(z, (fun w, w+1))`
       - `z := f a; y := mdata kv z; x := y` ==> `(z, f a)`
    */
    pair<name, expr> find(expr const & x) const {
        lean_assert(is_fvar(x));
        expr e = x;
        name r = fvar_name(x);
        while (true) {
            if (is_mdata(e)) {
                e = mdata_expr(e);
            } else if (is_fvar(e)) {
                r = fvar_name(e);
                optional<local_decl> decl = m_lctx.find_local_decl(e);
                lean_assert(decl);
                if (optional<expr> v = decl->get_value()) {
                    /* Stop at join points: we never chase through them. */
                    if (is_join_point_name(decl->get_user_name())) {
                        return mk_pair(r, e);
                    } else {
                        e = *v;
                    }
                } else {
                    return mk_pair(r, e);
                }
            } else {
                return mk_pair(r, e);
            }
        }
    }
    /* Enter a lambda, visit its body as a terminal position, and rebuild. */
    expr visit_lambda_core(expr e) {
        flet<local_ctx> save_lctx(m_lctx, m_lctx);
        buffer<expr> fvars;
        while (is_lambda(e)) {
            expr new_type = instantiate_rev(binding_domain(e), fvars.size(), fvars.data());
            expr new_fvar = m_lctx.mk_local_decl(ngen(), binding_name(e), new_type, binding_info(e));
            fvars.push_back(new_fvar);
            e = binding_body(e);
        }
        expr r = visit_terminal(instantiate_rev(e, fvars.size(), fvars.data()));
        return m_lctx.mk_lambda(fvars, r);
    }
    /* Visit a let-block: record which let-bound lambdas occur only in terminal
       constructor positions, then lift those while rebuilding the lets. */
    expr visit_let(expr e) {
        flet<local_ctx> save_lctx(m_lctx, m_lctx);
        buffer<expr> fvars;
        while (is_let(e)) {
            bool not_root = false;
            bool jp = is_join_point_name(let_name(e));
            expr new_type = instantiate_rev(let_type(e), fvars.size(), fvars.data());
            expr new_val = visit(instantiate_rev(let_value(e), fvars.size(), fvars.data()), not_root, jp);
            expr new_fvar = m_lctx.mk_local_decl(ngen(), let_name(e), new_type, new_val);
            /* Track declarations that are "closed" modulo other closed fvars. */
            if (!has_fvar_except(new_type, m_closed_fvars) && !has_fvar_except(new_val, m_closed_fvars)) {
                m_closed_fvars.insert(fvar_name(new_fvar));
            }
            fvars.push_back(new_fvar);
            e = let_body(e);
        }
        expr r = visit_terminal(instantiate_rev(e, fvars.size(), fvars.data()));
        r = abstract(r, fvars.size(), fvars.data());
        unsigned i = fvars.size();
        while (i > 0) {
            --i;
            name const & n = fvar_name(fvars[i]);
            local_decl const & decl = m_lctx.get_local_decl(n);
            expr type = abstract(decl.get_type(), i, fvars.data());
            expr val = *decl.get_value();
            /* Lift only lambdas that occur exclusively in terminal positions. */
            if (m_terminal_lambdas.contains(n) && !m_nonterminal_lambdas.contains(n)) {
                expr new_val = eta_expand(val);
                lean_assert(is_lambda(new_val));
                bool apply_simp = new_val != val;
                val = lift_lambda(new_val, apply_simp);
            }
            r = ::lean::mk_let(decl.get_user_name(), type, abstract(val, i, fvars.data()), r);
        }
        return r;
    }
    /* Recurse into the minor premises (branches) of a cases_on application. */
    expr visit_cases_on(expr const & e) {
        lean_assert(is_cases_on_app(env(), e));
        buffer<expr> args;
        expr const & c = get_app_args(e, args);
        /* Remark: eager lambda lifting is applied before we have erased most type information. */
        unsigned minor_idx; unsigned minors_end;
        bool before_erasure = true;
        std::tie(minor_idx, minors_end) = get_cases_on_minors_range(env(), const_name(c), before_erasure);
        for (; minor_idx < minors_end; minor_idx++) {
            args[minor_idx] = visit_lambda_core(args[minor_idx]);
        }
        return mk_app(c, args);
    }
    /* Non-terminal application: mark any lambda-valued fvar arguments as
       non-terminal so they are not lifted. */
    expr visit_app(expr const & e) {
        if (is_cases_on_app(env(), e)) {
            return visit_cases_on(e);
        } else {
            buffer<expr> args;
            get_app_args(e, args);
            for (expr const & arg : args) {
                if (is_fvar(arg)) {
                    name x; expr v;
                    std::tie(x, v) = find(arg);
                    if (is_lambda(v)) {
                        m_nonterminal_lambdas.insert(x);
                    }
                }
            }
            return e;
        }
    }
    /* Only descend into lambdas at the root or inside join points; other
       lambdas are left for `visit_let`/`visit_terminal` to classify. */
    expr visit_lambda(expr const & e, bool root, bool join_point) {
        if (root || join_point)
            return visit_lambda_core(e);
        else
            return e;
    }
    expr visit(expr const & e, bool root = false, bool join_point = false) {
        switch (e.kind()) {
        case expr_kind::App: return visit_app(e);
        case expr_kind::Lambda: return visit_lambda(e, root, join_point);
        case expr_kind::Let: return visit_let(e);
        default: return e;
        }
    }
    /* Visit an expression in terminal (result) position: lambda-valued fvars
       stored in a returned constructor application become lifting candidates. */
    expr visit_terminal(expr const & e) {
        expr t = is_fvar(e) ? find(e).second : e;
        if (is_constructor_app(env(), t)) {
            buffer<expr> args;
            get_app_args(e, args);
            for (expr const & arg : args) {
                if (is_fvar(arg)) {
                    name x; expr v;
                    std::tie(x, v) = find(arg);
                    v = eta_expand(v);
                    if (is_lambda(v)) {
                        m_terminal_lambdas.insert(x);
                    }
                }
            }
            return e;
        } else {
            return visit(e);
        }
    }
public:
    eager_lambda_lifting_fn(environment const & env, csimp_cfg const & cfg):
        m_st(env), m_cfg(cfg) {}
    /* Process one declaration; returns the (possibly extended) environment and
       the new declarations: lifted lambdas followed by the rewritten original. */
    pair<environment, comp_decls> operator()(comp_decl const & cdecl) {
        m_base_name = cdecl.fst();
        expr r = visit(cdecl.snd(), true);
        comp_decl new_cdecl(cdecl.fst(), r);
        m_new_decls.push_back(new_cdecl);
        return mk_pair(env(), comp_decls(m_new_decls));
    }
};
/* Apply eager lambda lifting to every declaration in `ds`.
   Declarations marked `[inline]` or registered as instances are passed through
   unchanged (lifting them prematurely could hurt later specialization). */
pair<environment, comp_decls> eager_lambda_lifting(environment env, comp_decls const & ds, csimp_cfg const & cfg) {
    comp_decls result;
    for (comp_decl const & decl : ds) {
        bool skip = has_inline_attribute(env, decl.fst()) || is_instance(env, decl.fst());
        if (skip) {
            result = append(result, comp_decls(decl));
            continue;
        }
        comp_decls lifted;
        std::tie(env, lifted) = eager_lambda_lifting_fn(env, cfg)(decl);
        result = append(result, lifted);
    }
    return mk_pair(env, result);
}
}
| 8,576 |
704 | <gh_stars>100-1000
package com.netflix.governator.commons_cli.modules;
import java.lang.annotation.Annotation;
import java.util.List;
import javax.inject.Inject;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.Parser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.inject.AbstractModule;
import com.google.inject.Provider;
import com.google.inject.ProvisionException;
import com.google.inject.Singleton;
import com.google.inject.binder.AnnotatedBindingBuilder;
import com.google.inject.name.Names;
import com.netflix.governator.annotations.binding.Main;
import com.netflix.governator.commons_cli.providers.StringOptionProvider;
/**
* Guicify Apache Commons CLI.
*
* Usages
*
* <pre>
* {code
*
* // When creating Guice
*
* install(new OptionsModule() {
* protected void configure() {
* option("f")
* .hasArg()
* .withLongOpt("filename")
* .annotatedWith(Filename.class); // no need to call create()
*
* }
* })
*
* // Inject into any class
*
* @Singleton
* public class MyService {
* @Inject
* public MyService(@Filename String filename) {
* }
* }
*
* // You can also inject CommandLine directly
*
* @Singleton
* public class MyService {
* @Inject
* public MyService(CommandLine commandLine) {
* }
* }
*
* }
* </pre>
*
* @author elandau
*
*/
public abstract class OptionsModule extends AbstractModule {
    private static final Logger LOG = LoggerFactory.getLogger(OptionsModule.class);
    // Builders accumulated by option(); converted into real Options in configure().
    private List<OptionBuilder> builders = Lists.newArrayList();
    // True once a subclass bound a Parser via bindParser(); otherwise configure()
    // falls back to BasicParser.
    private boolean parserIsBound = false;
    /**
     * Non-static version of commons CLI OptionBuilder
     *
     * @author elandau
     */
    protected class OptionBuilder {
        private String longopt;
        private String description;
        private String argName;
        private boolean required;
        private int numberOfArgs = Option.UNINITIALIZED;
        private Object type;
        private boolean optionalArg;
        private char valuesep;
        private String shortopt;
        // Consumed by configure()/StringOptionProvider, not by Option itself.
        private String defaultValue;
        // Binding annotation for the option value; Names.named(shortopt) if absent.
        private Class<? extends Annotation> annot;
        public OptionBuilder annotatedWith(Class<? extends Annotation> annot) {
            this.annot = annot;
            return this;
        }
        public OptionBuilder withLongOpt(String longopt) {
            this.longopt = longopt;
            return this;
        }
        public OptionBuilder withShortOpt(char shortopt) {
            this.shortopt = Character.toString(shortopt);
            return this;
        }
        public OptionBuilder hasArg() {
            this.numberOfArgs = 1;
            return this;
        }
        public OptionBuilder hasArg(boolean hasArg) {
            this.numberOfArgs = hasArg ? 1 : Option.UNINITIALIZED;
            return this;
        }
        public OptionBuilder withArgName(String name) {
            this.argName = name;
            return this;
        }
        public OptionBuilder isRequired() {
            this.required = true;
            return this;
        }
        public OptionBuilder withValueSeparator(char sep) {
            this.valuesep = sep;
            return this;
        }
        public OptionBuilder withValueSeparator() {
            // Default separator for key=value style option arguments.
            this.valuesep = '=';
            return this;
        }
        public OptionBuilder isRequired(boolean newRequired) {
            this.required = newRequired;
            return this;
        }
        public OptionBuilder hasArgs() {
            this.numberOfArgs = Option.UNLIMITED_VALUES;
            return this;
        }
        public OptionBuilder hasArgs(int num) {
            this.numberOfArgs = num;
            return this;
        }
        public OptionBuilder hasOptionalArg() {
            this.numberOfArgs = 1;
            this.optionalArg = true;
            return this;
        }
        public OptionBuilder hasOptionalArgs() {
            this.numberOfArgs = Option.UNLIMITED_VALUES;
            this.optionalArg = true;
            return this;
        }
        public OptionBuilder hasOptionalArgs(int numArgs) {
            this.numberOfArgs = numArgs;
            this.optionalArg = true;
            return this;
        }
        public OptionBuilder withType(Object newType) {
            this.type = newType;
            return this;
        }
        public OptionBuilder withDescription(String newDescription) {
            this.description = newDescription;
            return this;
        }
        public OptionBuilder withDefaultValue(String defaultValue) {
            this.defaultValue = defaultValue;
            return this;
        }
        // Materialize the commons-cli Option. Called by configure(); the short
        // option is mandatory.
        Option create() throws IllegalArgumentException
        {
            Preconditions.checkNotNull(shortopt);
            Option option = null;
            // create the option
            option = new Option(shortopt, description);
            // set the option properties
            option.setLongOpt(longopt);
            option.setRequired(required);
            option.setOptionalArg(optionalArg);
            option.setArgs(numberOfArgs);
            option.setType(type);
            option.setValueSeparator(valuesep);
            option.setArgName(argName);
            // return the Option instance
            return option;
        }
    }
    /**
     * On injection of CommandLine execute the BasicParser
     * @author elandau
     */
    @Singleton
    public static class CommandLineProvider implements Provider<CommandLine> {
        private final Options options;
        private final String[] arguments;
        private final Parser parser;
        @Inject
        public CommandLineProvider(Options options, @Main String[] arguments, Parser parser) {
            this.options = options;
            this.arguments = arguments;
            this.parser = parser;
        }
        @Override
        public CommandLine get() {
            try {
                return parser.parse(options, arguments);
            } catch (ParseException e) {
                throw new ProvisionException("Error parsing command line arguments", e);
            }
        }
    }
    @Override
    protected final void configure() {
        // Let the subclass register its options (and optionally a parser) first.
        configureOptions();
        Options options = new Options();
        for (OptionBuilder builder : builders) {
            Option option = builder.create();
            // Bind each option's String value either to the supplied annotation
            // or, by default, to @Named(shortopt).
            if (builder.annot != null) {
                bind(String.class)
                    .annotatedWith(builder.annot)
                    .toProvider(new StringOptionProvider(option, builder.defaultValue))
                    .asEagerSingleton();
                LOG.info("Binding option to annotation : " + builder.annot.getName());
            }
            else {
                bind(String.class)
                    .annotatedWith(Names.named(option.getOpt()))
                    .toProvider(new StringOptionProvider(option, builder.defaultValue))
                    .asEagerSingleton();
                LOG.info("Binding option to String : " + option.getOpt());
            }
            options.addOption(option);
        }
        bind(Options.class).toInstance(options);
        bind(CommandLine.class).toProvider(CommandLineProvider.class);
        if (!parserIsBound) {
            bindParser().to(BasicParser.class);
        }
    }
    protected abstract void configureOptions();
    /**
     * @param shortopt
     * @return Return a builder through which a single option may be configured
     */
    protected OptionBuilder option(char shortopt) {
        OptionBuilder builder = new OptionBuilder().withShortOpt(shortopt);
        builders.add(builder);
        return builder;
    }
    /**
     * Bind any parser. BasicParser is used by default if no other parser is provided.
     */
    protected AnnotatedBindingBuilder<Parser> bindParser() {
        parserIsBound = true;
        return bind(Parser.class);
    }
}
| 3,641 |
1,447 | <reponame>Kadantte/VideoSuperResolution
# Copyright (c): <NAME> 2017-2019.
# Author: <NAME>
# Email: <EMAIL>
# Update Date: 2019/5/21 下午4:56
import torch
import torch.nn as nn
import torch.nn.functional as F
from VSR.Backend.Torch.Models.Ops.Loss import total_variance
from .Model import SuperResolution
from .Ops.Blocks import EasyConv2d
from .Ops.Scale import Upsample
from ..Framework.Summary import get_writer
from ..Util import Metrics
class NoiseExtractor(nn.Module):
    """Plain CNN that estimates an additive noise map from its input image.

    Args:
        channel: number of image channels (input == output).
        layers: total number of conv layers.
        bn: whether to use batch-norm inside each conv block.
        kwargs: `filters` (hidden width, default 32), `kernel_size` (default 3).
    """
    def __init__(self, channel=3, layers=8, bn=False, **kwargs):
        super(NoiseExtractor, self).__init__()
        f = kwargs.get('filters', 32)
        ks = kwargs.get('kernel_size', 3)
        convs = [EasyConv2d(channel, f, ks, use_bn=bn, activation='lrelu')]
        for i in range(1, layers - 1):
            convs += [EasyConv2d(f, f, ks, use_bn=bn, activation='lrelu')]
        # Last layer has no activation: the noise values are signed.
        convs += [EasyConv2d(f, channel, ks)]
        self.body = nn.Sequential(*convs)
    def forward(self, x):
        return self.body(x)
class NoiseShifter(nn.Module):
    """CNN predicting a per-pixel noise stddev map.

    Same structure as NoiseExtractor except the final activation is a sigmoid,
    so the output lies in (0, 1); callers use it as a standard deviation map.
    """
    def __init__(self, channel=3, layers=8, bn=False, **kwargs):
        super(NoiseShifter, self).__init__()
        f = kwargs.get('filters', 32)
        ks = kwargs.get('kernel_size', 3)
        convs = [EasyConv2d(channel, f, ks, use_bn=bn, activation='lrelu')]
        for i in range(1, layers - 1):
            convs += [EasyConv2d(f, f, ks, use_bn=bn, activation='lrelu')]
        convs += [EasyConv2d(f, channel, ks, activation='sigmoid')]
        self.body = nn.Sequential(*convs)
    def forward(self, x):
        return self.body(x)
class NCL(nn.Module):
    """Noise-Conditional Layer: feature-wise affine modulation from a noise map.

    Computes ``x * gamma(noise) + beta(noise)`` where gamma/beta are small CNNs;
    acts as the identity when no noise map is given.
    """
    def __init__(self, channels, filters=32, layers=3, **kwargs):
        super(NCL, self).__init__()
        ks = kwargs.get('kernel_size', 3)
        c = channels
        f = filters
        conv = []
        for i in range(1, layers):
            if i == 1:
                # NOTE(review): input channel count is hard-coded to 3, so the
                # noise map is assumed to be 3-channel even when `channels` != 3
                # -- confirm against callers.
                conv.append(EasyConv2d(3, f, ks, activation='lrelu'))
            else:
                conv.append(EasyConv2d(f, f, ks, activation='lrelu'))
        # NOTE(review): `conv.copy()` is a shallow copy, so gamma and beta share
        # the *same* trunk module instances (tied weights); presumably
        # intentional weight sharing -- confirm.
        self.gamma = nn.Sequential(
            *conv, EasyConv2d(f, c, ks, activation='sigmoid'))
        self.beta = nn.Sequential(
            *conv.copy(), EasyConv2d(f, c, ks))
    def forward(self, x, noise=None):
        # Identity when no noise conditioning is provided.
        if noise is None:
            return x
        return x * self.gamma(noise) + self.beta(noise)
class CRDB(nn.Module):
    """Conditional residual dense block: densely connected convs plus an NCL.

    Each conv receives the concatenation of the block input and all previous
    conv outputs; the final output is residual-scaled and noise-conditioned.
    """
    def __init__(self, channels, depth=3, scaling=1.0, name='Rdb', **kwargs):
        super(CRDB, self).__init__()
        self.name = name
        self.depth = depth
        self.scaling = scaling
        ks = kwargs.get('kernel_size', 3)
        stride = kwargs.get('stride', 1)
        padding = kwargs.get('padding', ks // 2)
        dilation = kwargs.get('dilation', 1)
        group = kwargs.get('group', 1)
        bias = kwargs.get('bias', True)
        c = channels
        for i in range(depth):
            # Input grows by c channels per level due to dense concatenation.
            conv = nn.Conv2d(
                c + c * i, c, ks, stride, padding, dilation, group, bias)
            if i < depth - 1:  # no activation after last layer
                conv = nn.Sequential(conv, nn.ReLU(True))
            setattr(self, f'conv_{i}', conv)
        self.ncl = NCL(c)
    def forward(self, inputs, noise):
        # Dense connectivity: each conv sees all previous feature maps.
        fl = [inputs]
        for i in range(self.depth):
            conv = getattr(self, f'conv_{i}')
            fl.append(conv(torch.cat(fl, dim=1)))
        # Scaled residual connection, then noise conditioning.
        y = fl[-1] * self.scaling + inputs
        return self.ncl(y, noise)
class CascadeRdn(nn.Module):
    """Cascade of CRDB blocks with 1x1 convs fusing all previous outputs.

    `depth` is a pair: (number of CRDB blocks, conv depth inside each CRDB).
    """
    def __init__(self, channels, depth=(3, 3), name='CascadeRdn', **kwargs):
        super(CascadeRdn, self).__init__()
        self.name = name
        self.depth = depth
        c = channels
        for i in range(self.depth[0]):
            # conv11_i fuses the concatenation of the input plus i+1 block outputs.
            setattr(self, f'conv11_{i}', nn.Conv2d(c + c * (i + 1), c, 1))
            setattr(self, f'rdn_{i}', CRDB(c, self.depth[1], **kwargs))
    def forward(self, inputs, noise):
        fl = [inputs]
        x = inputs
        for i in range(self.depth[0]):
            rdn = getattr(self, f'rdn_{i}')
            x = rdn(x, noise)
            fl.append(x)
            c11 = getattr(self, f'conv11_{i}')
            # 1x1 conv fuses everything produced so far back to c channels.
            x = c11(torch.cat(fl, dim=1))
        return x
class Drn(nn.Module):
    """Backbone network: entry convs, `n_cb` CascadeRdn blocks, a global
    residual connection, and an upsampling tail."""
    def __init__(self, channel, scale, n_cb, **kwargs):
        super(Drn, self).__init__()
        f = kwargs.get('filters', 64)
        self.entry = nn.Sequential(
            nn.Conv2d(channel, f, 3, 1, 1), nn.Conv2d(f, f, 3, 1, 1))
        for i in range(n_cb):
            setattr(self, f'cb{i}', CascadeRdn(f))
        self.n_cb = n_cb
        self.tail = nn.Sequential(
            Upsample(f, scale), nn.Conv2d(f, channel, 3, 1, 1))
    def forward(self, x, noise=None):
        x0 = self.entry(x)
        x = x0
        for i in range(self.n_cb):
            cb = getattr(self, f'cb{i}')
            x = cb(x, noise)
        # Global residual connection around all cascade blocks.
        x += x0
        return self.tail(x)
class DRN(SuperResolution):
    """Noise-conditioned super-resolution model wrapping `Drn` plus the noise
    networks.

    The `noise` argument selects the training regime:
      * noise > 0: train drn + NoiseExtractor on inputs corrupted with Gaussian
        noise of random stddev in [0, noise/255].
      * noise < 0: train only NoiseShifter (optimizer is built over `ns` only).
      * noise == 0: plain super-resolution, no noise conditioning.
    `offset` injects a fixed extra noise level at evaluation time only.
    """
    def __init__(self, channel, scale, n_cb, noise, offset=0):
        super(DRN, self).__init__(channel=channel, scale=scale)
        self.noise = noise
        self.drn = Drn(channel, scale, n_cb)
        self.ne = NoiseExtractor(channel, bn=False)
        self.ns = NoiseShifter(channel, bn=False)
        # p1: SR network + noise extractor; p2: noise shifter only.
        p1 = self.trainable_variables('drn') + self.trainable_variables('ne')
        p2 = self.trainable_variables('ns')
        self.offset = offset
        if self.noise < 0:
            self.opt = torch.optim.Adam(p2, 1e-4)
        else:
            self.opt = torch.optim.Adam(p1, 1e-4)
    def train(self, inputs, labels, learning_rate=None):
        x0 = inputs[0]
        metrics = {}
        if self.noise > 0:
            # Corrupt the input with Gaussian noise of random stddev and
            # supervise the extractor against the true noise map.
            stddev = torch.rand(1) * self.noise / 255
            stddev = stddev.reshape([1, 1, 1, 1])
            noise_map = torch.randn(*x0.shape) * stddev
            noise_map = noise_map.to(x0.device)
            x0 = (x0 + noise_map).clamp(0, 1)
            noise = self.ne(x0)
            l2_noise = F.mse_loss(noise, noise_map)
            metrics['noise'] = l2_noise.detach().cpu().numpy()
        elif self.noise < 0:
            # Learned noise level: shifter predicts a stddev map which scales
            # fresh Gaussian noise added to the extractor's estimate.
            stddev = self.ns(x0)
            noise_map = torch.randn(*x0.shape, device=x0.device) * stddev
            noise = self.ne(x0) + noise_map
            l2_noise = 0
        else:
            noise = None
            l2_noise = 0
        y = self.drn(x0, noise)
        l1_image = F.l1_loss(y, labels[0])
        loss = l1_image + 10 * l2_noise
        if self.noise != 0:
            # Total-variation regularization keeps the noise map smooth.
            tv = total_variance(noise)
            loss += tv * 1.0e-3
            metrics['tv'] = tv.detach().cpu().numpy()
        if learning_rate:
            for param_group in self.opt.param_groups:
                param_group["lr"] = learning_rate
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        metrics['loss'] = loss.detach().cpu().numpy()
        metrics['image'] = l1_image.detach().cpu().numpy()
        return metrics
    def eval(self, inputs, labels=None, **kwargs):
        metrics = {}
        x0 = inputs[0]
        if self.noise > 0:
            # Mirror the training-time corruption so PSNR reflects denoising.
            stddev = torch.rand(1) * self.noise / 255
            stddev = stddev.reshape([1, 1, 1, 1])
            noise_map = torch.randn(*x0.shape) * stddev
            noise_map = noise_map.to(x0.device)
            x0 = (x0 + noise_map).clamp(0, 1)
            noise = self.ne(x0)
        elif self.offset > 0:
            # Shift the estimated noise map by a fixed offset (eval only).
            noise = self.ne(x0)
            stddev = torch.ones(3, dtype=torch.float32) * self.offset / 255
            stddev = stddev.reshape([1, 3, 1, 1])
            noise_map = torch.randn(*x0.shape) * stddev
            noise_map = noise_map.to(x0.device)
            noise += noise_map
        elif self.noise < 0:
            stddev = self.ns(x0)
            noise_map = torch.randn(*x0.shape, device=x0.device) * stddev
            noise = self.ne(x0) + noise_map
        else:
            noise = None
        y = self.drn(x0, noise)
        if labels is not None:
            metrics['psnr'] = Metrics.psnr(y, labels[0])
            writer = get_writer(self.name)
            step = kwargs['epoch']
            if writer is not None:
                writer.image('sr', y.clamp(0, 1), step=step)
                writer.image('hr', labels[0], step=step)
                writer.image('lr', x0, step=step)
        return [y.detach().cpu().numpy()], metrics
| 3,460 |
3,755 | {
"action": "Watch Video",
"action_label": "Learn more about Report Builders",
"creation": "2021-11-24 17:04:18.762838",
"description": "In each module, you will find a host of single-click reports, ranging from financial statements to sales and purchase analytics and stock tracking reports. If a required new report is not available out-of-the-box, you can create custom reports in ERPNext by pulling values from the same multiple ERPNext tables.\n",
"docstatus": 0,
"doctype": "Onboarding Step",
"idx": 0,
"is_complete": 0,
"is_single": 0,
"is_skipped": 0,
"modified": "2021-11-24 17:04:18.762838",
"modified_by": "Administrator",
"name": "<NAME>",
"owner": "Administrator",
"reference_document": "Report",
"show_form_tour": 0,
"show_full_form": 0,
"title": "Generate Custom Reports",
"validate_action": 1,
"video_url": "https://youtu.be/TxJGUNarcQs"
} | 287 |
2,690 | #include <aslam/splines/EuclideanBSplineDesignVariable.hpp>
namespace aslam {
namespace splines {

// Design-variable wrapper around a 3-D (Euclidean) B-spline: the spline's
// control vertices become the optimizer's design variables (see base class).
EuclideanBSplineDesignVariable::EuclideanBSplineDesignVariable(const bsplines::BSpline & bspline) :
BSplineDesignVariable<3>(bspline)
{
}

EuclideanBSplineDesignVariable::~EuclideanBSplineDesignVariable()
{
}

// Builds a backend expression for the spline's `order`-th time derivative at
// `time`, wired to the design variables of the locally-active control vertices
// so that Jacobians flow back into the optimization problem.
aslam::backend::EuclideanExpression EuclideanBSplineDesignVariable::toEuclideanExpression(double time, int order)
{
// Indices of the control vertices whose basis functions are non-zero at `time`.
Eigen::VectorXi dvidxs = _bspline.localVvCoefficientVectorIndices(time);
std::vector<aslam::backend::DesignVariable *> dvs;
for(int i = 0; i < dvidxs.size(); ++i)
{
dvs.push_back(&_designVariables[dvidxs[i]]);
}
boost::shared_ptr<aslam::splines::BSplineEuclideanExpressionNode > root( new aslam::splines::BSplineEuclideanExpressionNode(&_bspline, dvs, time, order) );
return aslam::backend::EuclideanExpression(root);
}

// Plain (non-expression) evaluation of the spline's `order`-th derivative at
// `time`; no design-variable bookkeeping is involved.
Eigen::Vector3d EuclideanBSplineDesignVariable::toEuclidean(double time, int order)
{
return _bspline.evalD(time,order);
}

} // namespace splines
} // namespace aslam
| 625 |
839 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.tools.java2wsdl.processor.internal.jaxws;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.jws.WebService;
import org.apache.cxf.common.i18n.Message;
import org.apache.cxf.common.logging.LogUtils;
import org.apache.cxf.common.util.PackageUtils;
import org.apache.cxf.service.model.EndpointInfo;
import org.apache.cxf.service.model.InterfaceInfo;
import org.apache.cxf.service.model.OperationInfo;
import org.apache.cxf.service.model.ServiceInfo;
import org.apache.cxf.tools.common.Processor;
import org.apache.cxf.tools.common.ToolConstants;
import org.apache.cxf.tools.common.ToolContext;
import org.apache.cxf.tools.common.ToolException;
import org.apache.cxf.tools.common.model.JavaException;
import org.apache.cxf.tools.common.model.JavaInterface;
import org.apache.cxf.tools.common.model.JavaMethod;
import org.apache.cxf.tools.common.model.JavaModel;
import org.apache.cxf.tools.common.model.JavaParameter;
import org.apache.cxf.tools.common.model.JavaReturn;
import org.apache.cxf.tools.common.model.JavaType.Style;
import org.apache.cxf.tools.java2ws.util.JavaFirstUtil;
import org.apache.cxf.tools.java2wsdl.processor.internal.AntGenerator;
import org.apache.cxf.tools.java2wsdl.processor.internal.jaxws.generator.JaxwsClientGenerator;
import org.apache.cxf.tools.java2wsdl.processor.internal.jaxws.generator.JaxwsImplGenerator;
import org.apache.cxf.tools.java2wsdl.processor.internal.jaxws.generator.JaxwsSEIGenerator;
import org.apache.cxf.tools.java2wsdl.processor.internal.jaxws.generator.JaxwsServerGenerator;
import org.apache.cxf.tools.wsdlto.core.AbstractGenerator;
import org.apache.cxf.wsdl.service.factory.ReflectionServiceFactoryBean;
public class JAXWSFrontEndProcessor implements Processor {
    private static final String SEI_SUFFIX = "_PortType";
    private static final Logger LOG = LogUtils.getL7dLogger(JAXWSFrontEndProcessor.class);
    private ToolContext context;
    private List<AbstractGenerator> generators = new ArrayList<>();
    // Accumulates every interface name discovered by getInfClass(); queried by
    // isImplRmiRemote(). Never cleared, so it spans all checks on this instance.
    private List<String> infList = new ArrayList<>();

    /**
     * Builds the Java model for the first service in the tool context and runs
     * the JAX-WS code generators (SEI, impl, server, client, ant build file).
     * <p>
     * NOTE(review): assumes SERVICE_LIST is present and non-empty — verify the
     * caller guarantees this before process() is invoked.
     *
     * @throws ToolException if the source class is not a valid JAX-WS service
     */
    @SuppressWarnings("unchecked")
    public void process() throws ToolException {
        checkJaxwsClass();
        List<ServiceInfo> services = (List<ServiceInfo>)context.get(ToolConstants.SERVICE_LIST);
        ServiceInfo serviceInfo = services.get(0);
        JavaInterface jinf = JavaFirstUtil.serviceInfo2JavaInf(serviceInfo);
        String className = (String)context.get(ToolConstants.IMPL_CLASS);
        // Avoid a name clash between the generated SEI and the impl class by
        // suffixing the SEI name with "_PortType".
        if (className != null && className.equals(jinf.getFullClassName())) {
            jinf.setName(jinf.getName() + SEI_SUFFIX);
        }
        JavaModel jm = new JavaModel();
        jm.addInterface("inf", jinf);
        jinf.setJavaModel(jm);
        // Publish model/service/port info for the generators below.
        context.put(JavaModel.class, jm);
        context.put(ToolConstants.SERVICE_NAME, serviceInfo.getName());
        EndpointInfo endpointInfo = serviceInfo.getEndpoints().iterator().next();
        context.put(ToolConstants.PORT_NAME, endpointInfo.getName());
        generators.add(new JaxwsSEIGenerator());
        generators.add(new JaxwsImplGenerator());
        generators.add(new JaxwsServerGenerator());
        generators.add(new JaxwsClientGenerator());
        generators.add(new AntGenerator());
        for (AbstractGenerator generator : generators) {
            generator.generate(context);
        }
    }

    /** Injects the tool context this processor operates on. */
    public void setEnvironment(ToolContext env) {
        this.context = env;
    }

    /**
     * Maps a {@link ServiceInfo} to a {@link JavaInterface} via reflection on
     * the operations' backing {@link Method}s: one JavaMethod per operation,
     * with positional parameter names ("arg0", "arg1", ...), declared
     * exceptions and the (generic) return type.
     * <p>
     * NOTE(review): the interface name and package are (re)assigned inside the
     * per-operation loop; a service with no operations yields an unnamed
     * interface.
     *
     * @param service the service model to convert
     * @return the populated Java interface model
     */
    public JavaInterface serviceInfo2JavaInf(ServiceInfo service) {
        JavaInterface javaInf = new JavaInterface();
        InterfaceInfo inf = service.getInterface();
        for (OperationInfo op : inf.getOperations()) {
            JavaMethod jm = new JavaMethod();
            Method m = (Method)op.getProperty(ReflectionServiceFactoryBean.METHOD);
            jm.setName(m.getName());
            int i = 0;
            for (Type type : m.getGenericParameterTypes()) {
                JavaParameter jp = new JavaParameter();
                jp.setClassName(getClassName(type));
                jp.setStyle(Style.IN);
                jp.setName("arg" + i++);
                jm.addParameter(jp);
            }
            for (Type type : m.getGenericExceptionTypes()) {
                JavaException jex = new JavaException();
                String className = getClassName(type);
                jex.setClassName(className);
                jex.setName(className);
                jm.addException(jex);
            }
            JavaReturn jreturn = new JavaReturn();
            jreturn.setClassName(getClassName(m.getGenericReturnType()));
            jreturn.setStyle(Style.OUT);
            jm.setReturn(jreturn);
            // Fall back to the default package for classes in the root package.
            String pkg = PackageUtils.getPackageName(m.getDeclaringClass());
            javaInf.setPackageName(pkg.length() > 0 ? pkg : ToolConstants.DEFAULT_PACKAGE_NAME);
            javaInf.addMethod(jm);
            javaInf.setName(inf.getName().getLocalPart());
            jm.getParameterList();
        }
        return javaInf;
    }

    /**
     * Renders a reflective {@link Type} as a Java source type name; arrays get
     * a "[]" suffix, parameterized/generic-array types use their toString()
     * form, anything else maps to the empty string.
     *
     * @param type the reflective type
     * @return the source-level type name, or "" when unrecognized
     */
    public String getClassName(Type type) {
        if (type instanceof Class) {
            Class<?> clz = (Class<?>)type;
            if (clz.isArray()) {
                return clz.getComponentType().getName() + "[]";
            }
            return clz.getName();
        } else if (type instanceof ParameterizedType) {
            return type.toString();
        } else if (type instanceof GenericArrayType) {
            return type.toString();
        }
        return "";
    }

    /**
     * Validates that the class in the tool context is usable for JAX-WS
     * front-end generation: it must carry {@code @WebService} and must not
     * expose {@code java.rmi.Remote} anywhere in its method signatures.
     *
     * @throws ToolException if either check fails
     */
    public void checkJaxwsClass() {
        Class<?> clz = context.get(Class.class);
        WebService webServiceAnno = clz.getAnnotation(WebService.class);
        if (webServiceAnno == null) {
            Message msg = new Message("CLASS_DOESNOT_CARRY_WEBSERVICE_ANNO", LOG, clz.getName());
            LOG.log(Level.WARNING, msg.toString());
            throw new ToolException(msg);
        }
        if (isImplRmiRemote(clz)) {
            Message msg = new Message("PARA_OR_RETURN_IMPL_REMOTE", LOG, clz.getName());
            LOG.log(Level.WARNING, msg.toString());
            throw new ToolException(msg);
        }
    }

    /**
     * Collects (into {@link #infList}) every interface implemented by the
     * parameter and return types of the class's public instance methods
     * (Object's own methods excluded), then reports whether
     * {@code java.rmi.Remote} was among them.
     */
    private boolean isImplRmiRemote(Class<?> claz) {
        for (Method method : claz.getMethods()) {
            if (Modifier.isPublic(method.getModifiers()) && !Modifier.isStatic(method.getModifiers())
                && !"java.lang.Object".equals(method.getDeclaringClass().getName())) {
                Class<?>[] paraClasses = method.getParameterTypes();
                for (Class<?> clz : paraClasses) {
                    getInfClass(clz);
                }
                Class<?> returnClass = method.getReturnType();
                getInfClass(returnClass);
            }
        }
        return infList.contains("java.rmi.Remote");
    }

    /**
     * Recursively walks the type's interfaces and superclasses, recording the
     * name of every interface encountered in {@link #infList} (duplicates are
     * not filtered).
     */
    private void getInfClass(Class<?> claz) {
        for (Class<?> inf : claz.getInterfaces()) {
            getInfClass(inf);
        }
        if (claz.getSuperclass() != null) {
            getInfClass(claz.getSuperclass());
        }
        if (claz.isInterface()) {
            infList.add(claz.getName());
        }
    }
}
| 3,324 |
1,455 | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.redis.core;
import static org.springframework.data.redis.connection.ReactiveListCommands.*;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.time.Duration;
import java.util.Collection;
import org.springframework.data.redis.core.ListOperations.MoveFrom;
import org.springframework.data.redis.core.ListOperations.MoveTo;
import org.springframework.util.Assert;
/**
 * Redis list specific operations.
 *
 * @author <NAME>
 * @author <NAME>
 * @see <a href="https://redis.io/commands#list">Redis Documentation: List Commands</a>
 * @since 2.0
 */
public interface ReactiveListOperations<K, V> {
	/**
	 * Get elements between {@code start} and {@code end} from list at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param start
	 * @param end
	 * @return the elements in the requested range.
	 * @see <a href="https://redis.io/commands/lrange">Redis Documentation: LRANGE</a>
	 */
	Flux<V> range(K key, long start, long end);
	/**
	 * Trim list at {@code key} to elements between {@code start} and {@code end}.
	 *
	 * @param key must not be {@literal null}.
	 * @param start
	 * @param end
	 * @return {@literal true} once the list was trimmed.
	 * @see <a href="https://redis.io/commands/ltrim">Redis Documentation: LTRIM</a>
	 */
	Mono<Boolean> trim(K key, long start, long end);
	/**
	 * Get the size of list stored at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @return the length of the list.
	 * @see <a href="https://redis.io/commands/llen">Redis Documentation: LLEN</a>
	 */
	Mono<Long> size(K key);
	/**
	 * Prepend {@code value} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param value
	 * @return the new length of the list.
	 * @see <a href="https://redis.io/commands/lpush">Redis Documentation: LPUSH</a>
	 */
	Mono<Long> leftPush(K key, V value);
	/**
	 * Prepend {@code values} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param values
	 * @return the new length of the list.
	 * @see <a href="https://redis.io/commands/lpush">Redis Documentation: LPUSH</a>
	 */
	Mono<Long> leftPushAll(K key, V... values);
	/**
	 * Prepend {@code values} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param values must not be {@literal null}.
	 * @return the new length of the list.
	 * @since 1.5
	 * @see <a href="https://redis.io/commands/lpush">Redis Documentation: LPUSH</a>
	 */
	Mono<Long> leftPushAll(K key, Collection<V> values);
	/**
	 * Prepend {@code values} to {@code key} only if the list exists.
	 *
	 * @param key must not be {@literal null}.
	 * @param value
	 * @return the new length of the list, or {@code 0} when the list does not exist.
	 * @see <a href="https://redis.io/commands/lpushx">Redis Documentation: LPUSHX</a>
	 */
	Mono<Long> leftPushIfPresent(K key, V value);
	/**
	 * Insert {@code value} to {@code key} before {@code pivot}.
	 *
	 * @param key must not be {@literal null}.
	 * @param pivot must not be {@literal null}.
	 * @param value
	 * @return the new length of the list, or {@code -1} when the pivot was not found.
	 * @see <a href="https://redis.io/commands/linsert">Redis Documentation: LINSERT</a>
	 */
	Mono<Long> leftPush(K key, V pivot, V value);
	/**
	 * Append {@code value} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param value
	 * @return the new length of the list.
	 * @see <a href="https://redis.io/commands/rpush">Redis Documentation: RPUSH</a>
	 */
	Mono<Long> rightPush(K key, V value);
	/**
	 * Append {@code values} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param values
	 * @return the new length of the list.
	 * @see <a href="https://redis.io/commands/rpush">Redis Documentation: RPUSH</a>
	 */
	Mono<Long> rightPushAll(K key, V... values);
	/**
	 * Append {@code values} to {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param values
	 * @return the new length of the list.
	 * @since 1.5
	 * @see <a href="https://redis.io/commands/rpush">Redis Documentation: RPUSH</a>
	 */
	Mono<Long> rightPushAll(K key, Collection<V> values);
	/**
	 * Append {@code values} to {@code key} only if the list exists.
	 *
	 * @param key must not be {@literal null}.
	 * @param value
	 * @return the new length of the list, or {@code 0} when the list does not exist.
	 * @see <a href="https://redis.io/commands/rpushx">Redis Documentation: RPUSHX</a>
	 */
	Mono<Long> rightPushIfPresent(K key, V value);
	/**
	 * Insert {@code value} to {@code key} after {@code pivot}.
	 *
	 * @param key must not be {@literal null}.
	 * @param pivot must not be {@literal null}.
	 * @param value
	 * @return the new length of the list, or {@code -1} when the pivot was not found.
	 * @see <a href="https://redis.io/commands/linsert">Redis Documentation: LINSERT</a>
	 */
	Mono<Long> rightPush(K key, V pivot, V value);
	/**
	 * Atomically returns and removes the first/last element (head/tail depending on the {@code from} argument) of the
	 * list stored at {@code sourceKey}, and pushes the element at the first/last element (head/tail depending on the
	 * {@code to} argument) of the list stored at {@code destinationKey}.
	 *
	 * @param from must not be {@literal null}.
	 * @param to must not be {@literal null}.
	 * @return the moved element.
	 * @since 2.6
	 * @see <a href="https://redis.io/commands/lmove">Redis Documentation: LMOVE</a>
	 */
	default Mono<V> move(MoveFrom<K> from, MoveTo<K> to) {
		Assert.notNull(from, "Move from must not be null");
		Assert.notNull(to, "Move to must not be null");
		return move(from.key, Direction.valueOf(from.direction.name()), to.key, Direction.valueOf(to.direction.name()));
	}
	/**
	 * Atomically returns and removes the first/last element (head/tail depending on the {@code from} argument) of the
	 * list stored at {@code sourceKey}, and pushes the element at the first/last element (head/tail depending on the
	 * {@code to} argument) of the list stored at {@code destinationKey}.
	 *
	 * @param sourceKey must not be {@literal null}.
	 * @param from must not be {@literal null}.
	 * @param destinationKey must not be {@literal null}.
	 * @param to must not be {@literal null}.
	 * @return the moved element.
	 * @since 2.6
	 * @see <a href="https://redis.io/commands/lmove">Redis Documentation: LMOVE</a>
	 */
	Mono<V> move(K sourceKey, Direction from, K destinationKey, Direction to);
	/**
	 * Atomically returns and removes the first/last element (head/tail depending on the {@code from} argument) of the
	 * list stored at {@code sourceKey}, and pushes the element at the first/last element (head/tail depending on the
	 * {@code to} argument) of the list stored at {@code destinationKey}.
	 * <p>
	 * <b>Blocks connection</b> until element available or {@code timeout} reached.
	 *
	 * @param from must not be {@literal null}.
	 * @param to must not be {@literal null}.
	 * @param timeout
	 * @return the moved element.
	 * @since 2.6
	 * @see <a href="https://redis.io/commands/blmove">Redis Documentation: BLMOVE</a>
	 */
	default Mono<V> move(MoveFrom<K> from, MoveTo<K> to, Duration timeout) {
		Assert.notNull(from, "Move from must not be null");
		Assert.notNull(to, "Move to must not be null");
		Assert.notNull(timeout, "Timeout must not be null");
		Assert.isTrue(!timeout.isNegative(), "Timeout must not be negative");
		return move(from.key, Direction.valueOf(from.direction.name()), to.key, Direction.valueOf(to.direction.name()),
				timeout);
	}
	/**
	 * Atomically returns and removes the first/last element (head/tail depending on the {@code from} argument) of the
	 * list stored at {@code sourceKey}, and pushes the element at the first/last element (head/tail depending on the
	 * {@code to} argument) of the list stored at {@code destinationKey}.
	 * <p>
	 * <b>Blocks connection</b> until element available or {@code timeout} reached.
	 *
	 * @param sourceKey must not be {@literal null}.
	 * @param from must not be {@literal null}.
	 * @param destinationKey must not be {@literal null}.
	 * @param to must not be {@literal null}.
	 * @param timeout
	 * @return the moved element.
	 * @since 2.6
	 * @see <a href="https://redis.io/commands/blmove">Redis Documentation: BLMOVE</a>
	 */
	Mono<V> move(K sourceKey, Direction from, K destinationKey, Direction to, Duration timeout);
	/**
	 * Set the {@code value} list element at {@code index}.
	 *
	 * @param key must not be {@literal null}.
	 * @param index
	 * @param value
	 * @return {@literal true} once the element was set.
	 * @see <a href="https://redis.io/commands/lset">Redis Documentation: LSET</a>
	 */
	Mono<Boolean> set(K key, long index, V value);
	/**
	 * Removes the first {@code count} occurrences of {@code value} from the list stored at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param count
	 * @param value
	 * @return the number of removed elements.
	 * @see <a href="https://redis.io/commands/lrem">Redis Documentation: LREM</a>
	 */
	Mono<Long> remove(K key, long count, Object value);
	/**
	 * Get element at {@code index} form list at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @param index
	 * @return the element at {@code index}.
	 * @see <a href="https://redis.io/commands/lindex">Redis Documentation: LINDEX</a>
	 */
	Mono<V> index(K key, long index);
	/**
	 * Returns the index of the first occurrence of the specified value in the list at {@code key}. <br />
	 * Requires Redis 6.0.6 or newer.
	 *
	 * @param key must not be {@literal null}.
	 * @param value must not be {@literal null}.
	 * @return the zero-based index of the first occurrence.
	 * @since 2.4
	 * @see <a href="https://redis.io/commands/lpos">Redis Documentation: LPOS</a>
	 */
	Mono<Long> indexOf(K key, V value);
	/**
	 * Returns the index of the last occurrence of the specified value in the list at {@code key}. <br />
	 * Requires Redis 6.0.6 or newer.
	 *
	 * @param key must not be {@literal null}.
	 * @param value must not be {@literal null}.
	 * @return the zero-based index of the last occurrence.
	 * @since 2.4
	 * @see <a href="https://redis.io/commands/lpos">Redis Documentation: LPOS</a>
	 */
	Mono<Long> lastIndexOf(K key, V value);
	/**
	 * Removes and returns first element in list stored at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @return the removed element.
	 * @see <a href="https://redis.io/commands/lpop">Redis Documentation: LPOP</a>
	 */
	Mono<V> leftPop(K key);
	/**
	 * Removes and returns first element from lists stored at {@code key}. <br>
	 * <b>Results return once an element available or {@code timeout} reached.</b>
	 *
	 * @param key must not be {@literal null}.
	 * @param timeout maximal duration to wait until an entry in the list at {@code key} is available. Must be either
	 *          {@link Duration#ZERO} or greater than 1 second, must not be {@literal null}. A timeout of zero can be
	 *          used to wait indefinitely. Durations between zero and one second are not supported.
	 * @return the removed element.
	 * @see <a href="https://redis.io/commands/blpop">Redis Documentation: BLPOP</a>
	 */
	Mono<V> leftPop(K key, Duration timeout);
	/**
	 * Removes and returns last element in list stored at {@code key}.
	 *
	 * @param key must not be {@literal null}.
	 * @return the removed element.
	 * @see <a href="https://redis.io/commands/rpop">Redis Documentation: RPOP</a>
	 */
	Mono<V> rightPop(K key);
	/**
	 * Removes and returns last element from lists stored at {@code key}. <br>
	 * <b>Results return once an element available or {@code timeout} reached.</b>
	 *
	 * @param key must not be {@literal null}.
	 * @param timeout maximal duration to wait until an entry in the list at {@code key} is available. Must be either
	 *          {@link Duration#ZERO} or greater than 1 second, must not be {@literal null}. A timeout of zero can be
	 *          used to wait indefinitely. Durations between zero and one second are not supported.
	 * @return the removed element.
	 * @see <a href="https://redis.io/commands/brpop">Redis Documentation: BRPOP</a>
	 */
	Mono<V> rightPop(K key, Duration timeout);
	/**
	 * Remove the last element from list at {@code sourceKey}, append it to {@code destinationKey} and return its value.
	 *
	 * @param sourceKey must not be {@literal null}.
	 * @param destinationKey must not be {@literal null}.
	 * @return the moved element.
	 * @see <a href="https://redis.io/commands/rpoplpush">Redis Documentation: RPOPLPUSH</a>
	 */
	Mono<V> rightPopAndLeftPush(K sourceKey, K destinationKey);
	/**
	 * Remove the last element from list at {@code sourceKey}, append it to {@code destinationKey} and return its
	 * value.<br>
	 * <b>Results return once an element available or {@code timeout} reached.</b>
	 *
	 * @param sourceKey must not be {@literal null}.
	 * @param destinationKey must not be {@literal null}.
	 * @param timeout maximal duration to wait until an entry in the list at {@code sourceKey} is available. Must be
	 *          either {@link Duration#ZERO} or greater than 1 second, must not be {@literal null}. A timeout of zero
	 *          can be used to wait indefinitely. Durations between zero and one second are not supported.
	 * @return the moved element.
	 * @see <a href="https://redis.io/commands/brpoplpush">Redis Documentation: BRPOPLPUSH</a>
	 */
	Mono<V> rightPopAndLeftPush(K sourceKey, K destinationKey, Duration timeout);
	/**
	 * Removes the given {@literal key}.
	 *
	 * @param key must not be {@literal null}.
	 * @return {@literal true} when the key was removed.
	 */
	Mono<Boolean> delete(K key);
}
| 4,546 |
49,076 | /*
* Copyright 2002-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.test.context.junit.jupiter.web;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.context.ApplicationContextInitializer;
import org.springframework.core.annotation.AliasFor;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.test.context.web.WebAppConfiguration;
/**
* {@code @SpringJUnitWebConfig} is a <em>composed annotation</em> that combines
* {@link ExtendWith @ExtendWith(SpringExtension.class)} from JUnit Jupiter with
* {@link ContextConfiguration @ContextConfiguration} and
* {@link WebAppConfiguration @WebAppConfiguration} from the <em>Spring TestContext
* Framework</em>.
*
* @author <NAME>
* @since 5.0
* @see ExtendWith
* @see SpringExtension
* @see ContextConfiguration
* @see WebAppConfiguration
* @see org.springframework.test.context.junit.jupiter.SpringJUnitConfig
*/
@ExtendWith(SpringExtension.class)
@ContextConfiguration
@WebAppConfiguration
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SpringJUnitWebConfig {
	/**
	 * Alias for {@link ContextConfiguration#classes}.
	 * <p>This attribute may be used instead of {@link #classes}; both aliases
	 * target the same underlying attribute.
	 */
	@AliasFor(annotation = ContextConfiguration.class, attribute = "classes")
	Class<?>[] value() default {};
	/**
	 * Alias for {@link ContextConfiguration#classes}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	Class<?>[] classes() default {};
	/**
	 * Alias for {@link ContextConfiguration#locations}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	String[] locations() default {};
	/**
	 * Alias for {@link ContextConfiguration#initializers}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	Class<? extends ApplicationContextInitializer<?>>[] initializers() default {};
	/**
	 * Alias for {@link ContextConfiguration#inheritLocations}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	boolean inheritLocations() default true;
	/**
	 * Alias for {@link ContextConfiguration#inheritInitializers}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	boolean inheritInitializers() default true;
	/**
	 * Alias for {@link ContextConfiguration#name}.
	 */
	@AliasFor(annotation = ContextConfiguration.class)
	String name() default "";
	/**
	 * Alias for {@link WebAppConfiguration#value}.
	 * <p>The default resource path matches the standard Maven webapp layout.
	 */
	@AliasFor(annotation = WebAppConfiguration.class, attribute = "value")
	String resourcePath() default "src/main/webapp";
}
| 965 |
647 | <reponame>whodarewin/copycat
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.atomix.copycat.protocol;
import io.atomix.catalyst.buffer.BufferInput;
import io.atomix.catalyst.buffer.BufferOutput;
import io.atomix.catalyst.serializer.Serializer;
import io.atomix.catalyst.util.Assert;
import io.atomix.copycat.Operation;
import io.atomix.copycat.Query;
import java.util.Objects;
/**
* Client query request.
* <p>
* Query requests are submitted by clients to the Copycat cluster to commit {@link Query}s to
* the replicated state machine. Each query request must be associated with a registered
* {@link #session()} and have a unique {@link #sequence()} number within that session. Queries will
* be applied in the cluster in the order defined by the provided sequence number. Thus, sequence numbers
* should never be skipped. In the event of a failure of a query request, the request should be resent
* with the same sequence number. Queries are guaranteed to be applied in sequence order.
* <p>
* Query requests should always be submitted to the server to which the client is connected. The provided
* query's {@link Query#consistency() consistency level} will be used to determine how the query should be
* handled. If the query is received by a follower, it may be evaluated on that node if the consistency level
* is {@link Query.ConsistencyLevel#SEQUENTIAL}, otherwise it will be forwarded to the cluster leader.
* Queries are always guaranteed to see state progress monotonically within a single {@link #session()}
* even when switching servers.
*
* @author <a href="http://github.com/kuujo"><NAME></a>
*/
public class QueryRequest extends OperationRequest {

  /**
   * Returns a new query request builder.
   *
   * @return A new query request builder.
   */
  public static Builder builder() {
    return new Builder(new QueryRequest());
  }

  /**
   * Returns a query request builder for an existing request.
   *
   * @param request The request to build.
   * @return The query request builder.
   * @throws IllegalStateException if request is null
   */
  public static Builder builder(QueryRequest request) {
    return new Builder(request);
  }

  private long index;
  private Query query;

  /**
   * Returns the query index.
   *
   * @return The query index.
   */
  public long index() {
    return index;
  }

  /**
   * Returns the query.
   *
   * @return The query.
   */
  public Query query() {
    return query;
  }

  @Override
  public Operation operation() {
    return query;
  }

  /**
   * Deserializes the request: base fields, then the index and the query.
   */
  @Override
  public void readObject(BufferInput<?> buffer, Serializer serializer) {
    super.readObject(buffer, serializer);
    index = buffer.readLong();
    query = serializer.readObject(buffer);
  }

  /**
   * Serializes the request in the same order {@link #readObject} reads it.
   */
  @Override
  public void writeObject(BufferOutput<?> buffer, Serializer serializer) {
    super.writeObject(buffer, serializer);
    buffer.writeLong(index);
    serializer.writeObject(query, buffer);
  }

  @Override
  public int hashCode() {
    return Objects.hash(getClass(), session, sequence, index, query);
  }

  @Override
  public boolean equals(Object object) {
    if (object instanceof QueryRequest) {
      QueryRequest request = (QueryRequest) object;
      // The index must participate in equals(): hashCode() already includes it,
      // and the Object contract requires equal objects to have equal hash codes.
      return request.session == session
        && request.sequence == sequence
        && request.index == index
        && request.query.equals(query);
    }
    return false;
  }

  @Override
  public String toString() {
    return String.format("%s[session=%d, sequence=%d, index=%d, query=%s]", getClass().getSimpleName(), session, sequence, index, query);
  }

  /**
   * Query request builder.
   */
  public static class Builder extends OperationRequest.Builder<Builder, QueryRequest> {
    protected Builder(QueryRequest request) {
      super(request);
    }

    /**
     * Sets the request index.
     *
     * @param index The request index.
     * @return The request builder.
     * @throws IllegalArgumentException if {@code index} is less than {@code 0}
     */
    public Builder withIndex(long index) {
      request.index = Assert.argNot(index, index < 0, "index cannot be less than 0");
      return this;
    }

    /**
     * Sets the request query.
     *
     * @param query The request query.
     * @return The request builder.
     * @throws NullPointerException if {@code query} is null
     */
    public Builder withQuery(Query query) {
      request.query = Assert.notNull(query, "query");
      return this;
    }

    /**
     * @throws IllegalStateException if {@code query} is null
     */
    @Override
    public QueryRequest build() {
      super.build();
      Assert.stateNot(request.index < 0, "index cannot be less than 0");
      Assert.stateNot(request.query == null, "query cannot be null");
      return request;
    }
  }
}
| 1,631 |
505 | package de.rieckpil.blog.then;
public class UserService {

    private final UserRepository userRepository;

    /**
     * @param userRepository repository used to persist users.
     */
    public UserService(UserRepository userRepository) {
        this.userRepository = userRepository;
    }

    /**
     * Validates and stores a new user.
     *
     * @param username the desired user name; must be non-null and longer than three characters.
     * @return the id of the newly stored user.
     * @throws IllegalArgumentException if {@code username} is {@literal null} or too short.
     */
    public Long storeNewUser(String username) {
        // Explicit null guard: without it a null username would surface as an
        // undocumented NullPointerException from length().
        if (username == null || username.length() <= 3) {
            throw new IllegalArgumentException("Username is too short");
        }
        User user = new User();
        user.setName(username);
        user = userRepository.save(user);
        return user.getId();
    }
}
| 162 |
2,761 | #include "StyleLexers.h"
// ----------------------------------------------------------------------------
// Keyword set 0 for the Scintilla Pascal lexer: Free Pascal / Delphi reserved
// words and directives. Note "end." (with the period) is listed separately
// because it terminates a program block. The second set is unused (NULL).
KEYWORDLIST KeyWords_PAS =
{
    "absolute abstract alias and array as asm assembler begin break case cdecl class const constructor continue "
    "cppdecl default destructor dispose div do downto else end end. except exit export exports external false "
    "far far16 file finalization finally for forward function goto if implementation in index inherited "
    "initialization inline interface is label library local message mod name near new nil nostackframe not "
    "object of oldfpccall on operator or out overload override packed pascal private procedure program "
    "property protected public published raise read record register reintroduce repeat resourcestring safecall "
    "self set shl shr softfloat stdcall stored string then threadvar to true try type unit until uses var "
    "virtual while with write xor",
    NULL,
};
// Editor lexer definition for Pascal source: binds SCLEX_PASCAL, the file
// extensions handled, the keyword list above, and the mapping from Scintilla
// Pascal style IDs (SCE_PAS_*) to display styles (color/weight strings).
EDITLEXER lexPAS =
{
    SCLEX_PASCAL, "pascal", IDS_LEX_PASCAL_SRC, L"Pascal Source Code", L"pas; dpr; dpk; dfm; pp; lfm; lpr; fpd", L"",
    &KeyWords_PAS, {
        { {STYLE_DEFAULT}, IDS_LEX_STR_63126, L"Default", L"", L"" },
        //{ {SCE_PAS_DEFAULT}, IDS_LEX_STR_63126, L"Default", L"", L"" },
        { {MULTI_STYLE(SCE_PAS_COMMENT,SCE_PAS_COMMENT2,SCE_PAS_COMMENTLINE,0)}, IDS_LEX_STR_63127, L"Comment", L"fore:#646464", L"" },
        { {SCE_PAS_WORD}, IDS_LEX_STR_63128, L"Keyword", L"bold; fore:#800080", L"" },
        { {SCE_PAS_IDENTIFIER}, IDS_LEX_STR_63129, L"Identifier", L"", L"" },
        { {MULTI_STYLE(SCE_PAS_STRING,SCE_PAS_CHARACTER,SCE_PAS_STRINGEOL,0)}, IDS_LEX_STR_63131, L"String", L"fore:#008000", L"" },
        { {MULTI_STYLE(SCE_PAS_NUMBER,SCE_PAS_HEXNUMBER,0,0)}, IDS_LEX_STR_63130, L"Number", L"fore:#FF0000", L"" },
        { {SCE_PAS_OPERATOR}, IDS_LEX_STR_63132, L"Operator", L"bold", L"" },
        { {SCE_PAS_ASM}, IDS_LEX_STR_63205, L"Inline Asm", L"fore:#0000FF", L"" },
        { {MULTI_STYLE(SCE_PAS_PREPROCESSOR,SCE_PAS_PREPROCESSOR2,0,0)}, IDS_LEX_STR_63133, L"Preprocessor", L"fore:#FF00FF", L"" },
        EDITLEXER_SENTINEL
    }
};
| 887 |
10,491 | <gh_stars>1000+
// Place your settings in this file to overwrite the default settings
{
"telemetry.enableCrashReporter": false,
"telemetry.enableTelemetry": false,
"go.goroot": "/usr/local/go",
"go.gopath": "/Users/bill/code/go",
"go.enableCodeLens": {"references": false, "runtest": false},
"go.addTags": {},
"go.alternateTools": {},
"go.toolsManagement.autoUpdate": true,
"go.useLanguageServer": true,
"[go]": {
"editor.snippetSuggestions": "none",
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
}
},
"gopls": {
"staticcheck": true,
"usePlaceholders": true, // add parameter placeholders when completing a function
"completeUnimported": true, // autocomplete unimported packages
"deepCompletion": true // enable deep completion
},
"go.lintFlags": [
"-checks=all"
],
"editor.fontSize": 14,
"editor.formatOnType": true,
"editor.mouseWheelZoom": false,
"editor.rulers": [80,120],
"editor.trimAutoWhitespace": true,
"editor.minimap.enabled": false,
"files.exclude": {
"**/.git": true,
"**/.DS_Store": true,
"**/dist/public": true,
"**/tmp": true,
"**/tags": true,
"**/public/dist": true,
"**/.vscode": true
},
"files.trimTrailingWhitespace": false,
"editor.wordWrap": "off",
"workbench.colorCustomizations": {
"editorLineNumber.foreground": "#9F9F9F"
},
"extensions.ignoreRecommendations": true,
"liveshare.featureSet": "insiders",
"workbench.colorTheme": "Zenburn",
"C_Cpp.updateChannel": "Insiders",
"go.coverOnTestPackage": false,
"liveshare.anonymousGuestApproval": "accept",
"editor.folding": false,
"explorer.compactFolders": false,
"files.associations": {
"*.go2": "go"
},
"diffEditor.ignoreTrimWhitespace": false,
"window.zoomLevel": 2,
"terminal.integrated.tabs.enabled": true,
} | 888 |
2,474 | <reponame>joemclo/serverless-next.js
{
"/[blog]/[id]": "pages/[blog]/[id].js",
"/customers/[customer]": "pages/customers/[customer].js",
"/api/[blog]/[id]": "pages/api/[blog]/[id].js",
"/api/customers/[customer]": "pages/api/customers/[customer].js"
}
| 121 |
1,781 | <reponame>acety23/algorithms-sedgewick-wayne<gh_stars>1000+
package chapter1.section1;
import edu.princeton.cs.algs4.StdOut;
/**
* Created by <NAME>
*/
public class Exercise24 {

    // Parameters example: 90 20
    /**
     * Demonstrates Euclid's algorithm: computes the gcd of a fixed pair,
     * of the two command-line arguments, and of a larger fixed pair,
     * printing the intermediate (p, q) values of each recursive step.
     */
    public static void main(String[] args) {
        int gcd = gcd(105, 24);
        // Fixed: messages previously read "GDC", a typo for "GCD".
        StdOut.println("GCD 1: " + gcd);
        StdOut.println();

        int argument1 = Integer.parseInt(args[0]);
        int argument2 = Integer.parseInt(args[1]);
        int gcd2 = gcd(argument1, argument2);
        StdOut.println("GCD 2: " + gcd2);
        StdOut.println();

        int gcd3 = gcd(1111111, 1234567);
        StdOut.println("GCD 3: " + gcd3);
    }

    /**
     * Euclid's algorithm, printing each recursive step.
     *
     * @param p first operand
     * @param q second operand
     * @return the greatest common divisor of p and q
     */
    private static int gcd(int p, int q) {
        StdOut.println("p: " + p + " - q: " + q);
        if (q == 0) {
            return p;
        }
        return gcd(q, p % q);
    }
}
| 357 |
390 | /* Copyright 2013-2019 Barefoot Networks, Inc.
* Copyright 2019 VMware, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* <NAME>
*
*/
#include <bm/bm_sim/expressions.h>
#include <bm/bm_sim/stacks.h>
#include <bm/bm_sim/phv.h>
#include <bm/bm_sim/stateful.h>
#include <stack>
#include <string>
#include <vector>
#include <algorithm> // for std::max
#include <cassert>
namespace bm {
// Builds the one-time lookup table from JSON opcode names (as emitted by the
// compiler) to their ExprOpcode enum values.
ExprOpcodesMap::ExprOpcodesMap() {
  opcodes_map = {
    {"load_field", ExprOpcode::LOAD_FIELD},
    {"load_header", ExprOpcode::LOAD_HEADER},
    {"load_header_stack", ExprOpcode::LOAD_HEADER_STACK},
    {"load_last_header_stack_field", ExprOpcode::LOAD_LAST_HEADER_STACK_FIELD},
    {"load_union", ExprOpcode::LOAD_UNION},
    {"load_union_stack", ExprOpcode::LOAD_UNION_STACK},
    {"load_bool", ExprOpcode::LOAD_BOOL},
    {"load_const", ExprOpcode::LOAD_CONST},
    {"load_local", ExprOpcode::LOAD_LOCAL},
    {"load_register_ref", ExprOpcode::LOAD_REGISTER_REF},
    {"load_register_gen", ExprOpcode::LOAD_REGISTER_GEN},
    {"+", ExprOpcode::ADD},
    {"-", ExprOpcode::SUB},
    {"%", ExprOpcode::MOD},
    {"/", ExprOpcode::DIV},
    {"*", ExprOpcode::MUL},
    {"<<", ExprOpcode::SHIFT_LEFT},
    {">>", ExprOpcode::SHIFT_RIGHT},
    {"==", ExprOpcode::EQ_DATA},
    {"!=", ExprOpcode::NEQ_DATA},
    // suffixed comparison names distinguish operand kinds: h = header,
    // u = union, b = bool; the bare forms compare Data values
    {"==h", ExprOpcode::EQ_HEADER},
    {"!=h", ExprOpcode::NEQ_HEADER},
    {"==u", ExprOpcode::EQ_UNION},
    {"!=u", ExprOpcode::NEQ_UNION},
    {"==b", ExprOpcode::EQ_BOOL},
    {"!=b", ExprOpcode::NEQ_BOOL},
    {">", ExprOpcode::GT_DATA},
    {"<", ExprOpcode::LT_DATA},
    {">=", ExprOpcode::GET_DATA},
    {"<=", ExprOpcode::LET_DATA},
    {"and", ExprOpcode::AND},
    {"or", ExprOpcode::OR},
    {"not", ExprOpcode::NOT},
    {"&", ExprOpcode::BIT_AND},
    {"|", ExprOpcode::BIT_OR},
    {"^", ExprOpcode::BIT_XOR},
    {"~", ExprOpcode::BIT_NEG},
    {"valid", ExprOpcode::VALID_HEADER},
    {"valid_union", ExprOpcode::VALID_UNION},
    {"?", ExprOpcode::TERNARY_OP},
    {"two_comp_mod", ExprOpcode::TWO_COMP_MOD},
    {"usat_cast", ExprOpcode::USAT_CAST},
    {"sat_cast", ExprOpcode::SAT_CAST},
    {"d2b", ExprOpcode::DATA_TO_BOOL},
    {"b2d", ExprOpcode::BOOL_TO_DATA},
    // backward-compatibility
    // dereference_stack and dereference_header_stack are equivalent
    {"dereference_stack", ExprOpcode::DEREFERENCE_HEADER_STACK},
    {"dereference_header_stack", ExprOpcode::DEREFERENCE_HEADER_STACK},
    {"dereference_union_stack", ExprOpcode::DEREFERENCE_UNION_STACK},
    {"last_stack_index", ExprOpcode::LAST_STACK_INDEX},
    {"size_stack", ExprOpcode::SIZE_STACK},
    {"access_field", ExprOpcode::ACCESS_FIELD},
    {"access_union_header", ExprOpcode::ACCESS_UNION_HEADER},
  };
}
// Meyers singleton: the map is constructed once, on first use.
ExprOpcodesMap *
ExprOpcodesMap::get_instance() {
  static ExprOpcodesMap singleton;
  return &singleton;
}
// Translates a JSON opcode name into its ExprOpcode value.
//
// Uses find() rather than operator[]: operator[] default-inserts an entry
// into the shared singleton map for every unknown name, silently mutating
// global state and returning a value-initialized opcode. Unknown names now
// trip an assert in debug builds; release builds keep the old fallback of
// returning a value-initialized ExprOpcode without polluting the map.
ExprOpcode
ExprOpcodesMap::get_opcode(std::string expr_name) {
  ExprOpcodesMap *instance = get_instance();
  auto it = instance->opcodes_map.find(expr_name);
  assert(it != instance->opcodes_map.end() && "unknown expression opcode");
  return (it != instance->opcodes_map.end()) ? it->second : ExprOpcode();
}
// Maps an operand type to the equality opcode that compares two values of
// that type. Types with no equality comparison are a programming error.
/* static */ ExprOpcode
ExprOpcodesUtils::get_eq_opcode(ExprType expr_type) {
  switch (expr_type) {
    case ExprType::DATA:   return ExprOpcode::EQ_DATA;
    case ExprType::HEADER: return ExprOpcode::EQ_HEADER;
    case ExprType::BOOL:   return ExprOpcode::EQ_BOOL;
    case ExprType::UNION:  return ExprOpcode::EQ_UNION;
    default:               break;
  }
  assert(0);
  return ExprOpcode::EQ_DATA;
}
// Maps an operand type to the inequality opcode for that type; mirror image
// of get_eq_opcode above.
/* static */ ExprOpcode
ExprOpcodesUtils::get_neq_opcode(ExprType expr_type) {
  switch (expr_type) {
    case ExprType::DATA:   return ExprOpcode::NEQ_DATA;
    case ExprType::HEADER: return ExprOpcode::NEQ_HEADER;
    case ExprType::BOOL:   return ExprOpcode::NEQ_BOOL;
    case ExprType::UNION:  return ExprOpcode::NEQ_UNION;
    default:               break;
  }
  assert(0);
  return ExprOpcode::NEQ_DATA;
}
// Returns the type of value each opcode leaves on the evaluation stack
// (see ExpressionTemps: one stack per ExprType).
/* static */
ExprType
ExprOpcodesUtils::get_opcode_type(ExprOpcode opcode) {
  switch (opcode) {
    // opcodes producing a Data value
    case ExprOpcode::LOAD_FIELD:
    case ExprOpcode::LOAD_CONST:
    case ExprOpcode::LOAD_LOCAL:
    case ExprOpcode::LOAD_REGISTER_REF:
    case ExprOpcode::LOAD_REGISTER_GEN:
    case ExprOpcode::LOAD_LAST_HEADER_STACK_FIELD:
    case ExprOpcode::ADD:
    case ExprOpcode::SUB:
    case ExprOpcode::MOD:
    case ExprOpcode::DIV:
    case ExprOpcode::MUL:
    case ExprOpcode::SHIFT_LEFT:
    case ExprOpcode::SHIFT_RIGHT:
    case ExprOpcode::BIT_AND:
    case ExprOpcode::BIT_OR:
    case ExprOpcode::BIT_XOR:
    case ExprOpcode::BIT_NEG:
    case ExprOpcode::TWO_COMP_MOD:
    case ExprOpcode::USAT_CAST:
    case ExprOpcode::SAT_CAST:
    case ExprOpcode::BOOL_TO_DATA:
    case ExprOpcode::LAST_STACK_INDEX:
    case ExprOpcode::SIZE_STACK:
    case ExprOpcode::ACCESS_FIELD:
      return ExprType::DATA;
    // opcodes producing a boolean
    case ExprOpcode::LOAD_BOOL:
    case ExprOpcode::EQ_DATA:
    case ExprOpcode::NEQ_DATA:
    case ExprOpcode::GT_DATA:
    case ExprOpcode::LT_DATA:
    case ExprOpcode::GET_DATA:
    case ExprOpcode::LET_DATA:
    case ExprOpcode::EQ_HEADER:
    case ExprOpcode::NEQ_HEADER:
    case ExprOpcode::EQ_UNION:
    case ExprOpcode::NEQ_UNION:
    case ExprOpcode::EQ_BOOL:
    case ExprOpcode::NEQ_BOOL:
    case ExprOpcode::AND:
    case ExprOpcode::OR:
    case ExprOpcode::NOT:
    case ExprOpcode::VALID_HEADER:
    case ExprOpcode::VALID_UNION:
    case ExprOpcode::DATA_TO_BOOL:
      return ExprType::BOOL;
    // opcodes producing a header reference
    case ExprOpcode::LOAD_HEADER:
    case ExprOpcode::DEREFERENCE_HEADER_STACK:
    case ExprOpcode::ACCESS_UNION_HEADER:
      return ExprType::HEADER;
    case ExprOpcode::LOAD_HEADER_STACK:
      return ExprType::HEADER_STACK;
    case ExprOpcode::LOAD_UNION:
    case ExprOpcode::DEREFERENCE_UNION_STACK:
      return ExprType::UNION;
    case ExprOpcode::LOAD_UNION_STACK:
      return ExprType::UNION_STACK;
    // TERNARY_OP's result type depends on its branch expressions
    case ExprOpcode::TERNARY_OP:
      return ExprType::UNKNOWN;
    // SKIP is pure control flow and produces no value
    case ExprOpcode::SKIP:
      break;
  }
  assert(0);
  return ExprType::UNKNOWN;
}
Expression::Expression() {
  // trick so that empty expressions can still be executed
  build();
}

// Number of ops in this expression's program; used by push_back_ternary_op
// to compute SKIP distances.
size_t
Expression::get_num_ops() const {
  return ops.size();
}
// The push_back_* methods below each append one Op to the expression's
// program; they are invoked by the JSON loader while building expressions.

// Append an op that pushes a reference to the given header field.
void
Expression::push_back_load_field(header_id_t header, int field_offset) {
  Op op;
  op.opcode = ExprOpcode::LOAD_FIELD;
  op.field = {header, field_offset};
  ops.push_back(op);
}

// Append an op that pushes a boolean literal.
void
Expression::push_back_load_bool(bool value) {
  Op op;
  op.opcode = ExprOpcode::LOAD_BOOL;
  op.bool_value = value;
  ops.push_back(op);
}

// Append an op that pushes a reference to a header.
void
Expression::push_back_load_header(header_id_t header) {
  Op op;
  op.opcode = ExprOpcode::LOAD_HEADER;
  op.header = header;
  ops.push_back(op);
}

// Append an op that pushes a reference to a header stack.
void
Expression::push_back_load_header_stack(header_stack_id_t header_stack) {
  Op op;
  op.opcode = ExprOpcode::LOAD_HEADER_STACK;
  op.header_stack = header_stack;
  ops.push_back(op);
}

// Append an op that pushes a field of the topmost valid header of a stack.
void
Expression::push_back_load_last_header_stack_field(
    header_stack_id_t header_stack, int field_offset) {
  Op op;
  op.opcode = ExprOpcode::LOAD_LAST_HEADER_STACK_FIELD;
  op.stack_field = {header_stack, field_offset};
  ops.push_back(op);
}

// Append an op that pushes a reference to a header union.
void
Expression::push_back_load_header_union(header_union_id_t header_union) {
  Op op;
  op.opcode = ExprOpcode::LOAD_UNION;
  op.header_union = header_union;
  ops.push_back(op);
}

// Append an op that pushes a reference to a header union stack.
void
Expression::push_back_load_header_union_stack(
    header_union_stack_id_t header_union_stack) {
  Op op;
  op.opcode = ExprOpcode::LOAD_UNION_STACK;
  op.header_union_stack = header_union_stack;
  ops.push_back(op);
}

// Append an op that pushes a constant; the value itself is stored in
// const_values and the op records its index.
void
Expression::push_back_load_const(const Data &data) {
  const_values.push_back(data);
  Op op;
  op.opcode = ExprOpcode::LOAD_CONST;
  op.const_offset = const_values.size() - 1;
  ops.push_back(op);
}

// Append an op that pushes one of the caller-supplied "locals" (see eval_).
void
Expression::push_back_load_local(const int offset) {
  Op op;
  op.opcode = ExprOpcode::LOAD_LOCAL;
  op.local_offset = offset;
  ops.push_back(op);
}

// Append an op that pushes a fixed cell of a register array.
void
Expression::push_back_load_register_ref(RegisterArray *register_array,
                                        unsigned int idx) {
  Op op;
  op.opcode = ExprOpcode::LOAD_REGISTER_REF;
  op.register_ref.array = register_array;
  op.register_ref.idx = idx;
  ops.push_back(op);
}

// Append an op that pushes a register cell whose index is computed at
// evaluation time (popped from the data stack).
void
Expression::push_back_load_register_gen(RegisterArray *register_array) {
  Op op;
  op.opcode = ExprOpcode::LOAD_REGISTER_GEN;
  op.register_array = register_array;
  ops.push_back(op);
}

// Append an operand-less op (arithmetic / logical / comparison opcodes).
void
Expression::push_back_op(ExprOpcode opcode) {
  Op op;
  op.opcode = opcode;
  ops.push_back(op);
}
// Concatenates e's program after ours. LOAD_CONST ops in e index into e's
// own constant pool, so their offsets are re-based onto the end of ours
// before the two pools are merged.
void
Expression::append_expression(const Expression &e) {
  int offset_consts = const_values.size();
  for (const auto &appended_op : e.ops) {
    ops.push_back(appended_op);
    if (appended_op.opcode == ExprOpcode::LOAD_CONST)
      ops.back().const_offset += offset_consts;
  }
  const_values.insert(const_values.end(),
                      e.const_values.begin(), e.const_values.end());
}
// A note on the implementation of the ternary operator:
// The difficulty here is that the second and third expression are conditionally
// evaluated based on the result of the first expression (which evaluates to a
// boolean).
// I considered many different solutions, but in the end I decided to flatten
// the second and third expression ops into the main ops vector. For this, I had
// to introduce the special SKIP opcode. SKIP lets the action egine skip a
// pre-determined number of operations. For each ternary op, 2 SKIP ops are
// inserted, one before the second expression op sequence, and one before the
// third expression op sequence. When the condition evaluates to true, we leap
// over the first SKIP, execute all of the second expression ops, then reach the
// second SKIP which makes us skip all of the third expression ops. On the other
// hand, when the condition evaluates to false, we skip all of the second
// expression ops to go directly to the third expression ops.
void
Expression::push_back_ternary_op(const Expression &e1, const Expression &e2) {
  Op op;
  op.opcode = ExprOpcode::TERNARY_OP;
  ops.push_back(op);
  op.opcode = ExprOpcode::SKIP;
  // when the condition is false: skip e1's ops plus the second SKIP
  op.skip_num = e1.get_num_ops() + 1;
  ops.push_back(op);
  append_expression(e1);
  // after e1 has been evaluated: skip all of e2's ops
  op.skip_num = e2.get_num_ops();
  ops.push_back(op);
  append_expression(e2);
}
// Append an op that replaces the header on top of the stack with one of its
// fields (selected by offset).
void
Expression::push_back_access_field(int field_offset) {
  Op op;
  op.opcode = ExprOpcode::ACCESS_FIELD;
  op.field_offset = field_offset;
  ops.push_back(op);
}

// Append an op that replaces the union on top of the stack with one of its
// member headers (selected by offset).
void
Expression::push_back_access_union_header(int header_offset) {
  Op op;
  op.opcode = ExprOpcode::ACCESS_UNION_HEADER;
  op.header_offset = header_offset;
  ops.push_back(op);
}
// Finalizes the expression: pre-computes how many scratch Data registers
// eval_ will need and marks the expression as ready to execute.
void
Expression::build() {
  data_registers_cnt = assign_dest_registers();
  built = true;
}
// Registers every RegisterArray referenced by this expression with the given
// RegisterSync, so accesses can be synchronized during evaluation.
void
Expression::grab_register_accesses(RegisterSync *register_sync) const {
  for (auto &op : ops) {
    switch (op.opcode) {
      case ExprOpcode::LOAD_REGISTER_REF:
      case ExprOpcode::LOAD_REGISTER_GEN:
        // NOTE(review): for LOAD_REGISTER_REF the op was built through
        // op.register_ref.array, yet op.register_array is read here; this
        // presumably relies on the two being overlapping union members with
        // the array pointer first — TODO confirm against the Op declaration.
        register_sync->add_register_array(op.register_array);
        break;
      default:
        continue;
    }
  }
}
// Per-thread scratch storage used by Expression::eval_. Holds one value
// stack per operand type plus a pool of Data temporaries that arithmetic
// ops write their results into.
struct ExpressionTemps {
  ExpressionTemps()
      : data_temps_size(4), data_temps(data_temps_size) { }

  // Grow the Data temporary pool to at least data_registers_cnt entries and
  // reset all the operand stacks before an evaluation run.
  void prepare(int data_registers_cnt) {
    while (data_temps_size < data_registers_cnt) {
      data_temps.emplace_back();
      data_temps_size++;
    }
    bool_temps_stack.clear();
    data_temps_stack.clear();
    header_temps_stack.clear();
    stack_temps_stack.clear();
    union_temps_stack.clear();
  }

  void push_bool(bool b) {
    bool_temps_stack.push_back(b);
  }

  bool pop_bool() {
    auto r = bool_temps_stack.back();
    bool_temps_stack.pop_back();
    return r;
  }

  void push_data(const Data *data) {
    data_temps_stack.push_back(data);
  }

  const Data *pop_data() {
    auto *r = data_temps_stack.back();
    data_temps_stack.pop_back();
    return r;
  }

  void push_header(const Header *hdr) {
    header_temps_stack.push_back(hdr);
  }

  const Header *pop_header() {
    auto *r = header_temps_stack.back();
    header_temps_stack.pop_back();
    return r;
  }

  // Header stacks and union stacks share one stack of StackIface pointers;
  // the typed pop_* helpers below downcast based on the opcode being run.
  void push_stack(const StackIface *stack) {
    stack_temps_stack.push_back(stack);
  }

  const StackIface *pop_stack() {
    auto *r = stack_temps_stack.back();
    stack_temps_stack.pop_back();
    return r;
  }

  const HeaderStack *pop_header_stack() {
    return static_cast<const HeaderStack *>(pop_stack());
  }

  const HeaderUnionStack *pop_union_stack() {
    return static_cast<const HeaderUnionStack *>(pop_stack());
  }

  void push_union(const HeaderUnion *hdr_union) {
    union_temps_stack.push_back(hdr_union);
  }

  const HeaderUnion *pop_union() {
    auto *r = union_temps_stack.back();
    union_temps_stack.pop_back();
    return r;
  }

  static ExpressionTemps &get_instance() {
    // using a static thread-local variable to avoid allocation new memory every
    // time an expression needs to be evaluated. An alternative could be a
    // custom stack allocator.
    static thread_local ExpressionTemps instance;
    return instance;
  }

  int data_temps_size;
  std::vector<Data> data_temps;
  // Logically, I am using these as stacks but experiments showed that using
  // vectors directly was more efficient.
  std::vector<bool> bool_temps_stack;
  std::vector<const Data *> data_temps_stack;
  std::vector<const Header *> header_temps_stack;
  std::vector<const StackIface *> stack_temps_stack;
  std::vector<const HeaderUnion *> union_temps_stack;
};
// Stack-machine interpreter for the expression's ops. Each opcode pops its
// operands from the typed stacks in `temps` and pushes its result; arithmetic
// results are materialized in the data_temps register pool (slot chosen at
// build time by assign_dest_registers). `locals` supplies the values read by
// LOAD_LOCAL. The caller pops the final result from the appropriate stack.
void
Expression::eval_(const PHV &phv,
                  const std::vector<Data> &locals,
                  ExpressionTemps *temps) const {
  assert(built);

  temps->prepare(data_registers_cnt);
  auto &data_temps = temps->data_temps;

  // left/right operands for each type ('l' / 'r' prefixes)
  bool lb, rb;
  const Data *ld, *rd;
  const Header *lh, *rh;
  const HeaderUnion *lu, *ru;
  const HeaderStack *hs;
  const HeaderUnionStack *hus;

  for (size_t i = 0; i < ops.size(); i++) {
    const auto &op = ops[i];
    switch (op.opcode) {
      case ExprOpcode::LOAD_FIELD:
        temps->push_data(
            &phv.get_field(op.field.header, op.field.field_offset));
        break;
      case ExprOpcode::LOAD_HEADER:
        temps->push_header(&phv.get_header(op.header));
        break;
      case ExprOpcode::LOAD_HEADER_STACK:
        temps->push_stack(&phv.get_header_stack(op.header_stack));
        break;
      case ExprOpcode::LOAD_LAST_HEADER_STACK_FIELD:
        temps->push_data(
            &phv.get_header_stack(op.stack_field.header_stack).get_last()
            .get_field(op.stack_field.field_offset));
        break;
      case ExprOpcode::LOAD_UNION:
        temps->push_union(&phv.get_header_union(op.header_union));
        break;
      case ExprOpcode::LOAD_UNION_STACK:
        temps->push_stack(&phv.get_header_union_stack(op.header_union_stack));
        break;
      case ExprOpcode::LOAD_BOOL:
        temps->push_bool(op.bool_value);
        break;
      case ExprOpcode::LOAD_CONST:
        temps->push_data(&const_values[op.const_offset]);
        break;
      case ExprOpcode::LOAD_LOCAL:
        temps->push_data(&locals[op.local_offset]);
        break;
      case ExprOpcode::LOAD_REGISTER_REF:
        temps->push_data(&op.register_ref.array->at(op.register_ref.idx));
        break;
      case ExprOpcode::LOAD_REGISTER_GEN:
        // the register index is computed at run time (popped from the stack)
        rd = temps->pop_data();
        temps->push_data(&op.register_array->at(rd->get<size_t>()));
        break;
      case ExprOpcode::ACCESS_FIELD:
        rh = temps->pop_header();
        temps->push_data(&rh->get_field(op.field_offset));
        break;
      case ExprOpcode::ACCESS_UNION_HEADER:
        ru = temps->pop_union();
        temps->push_header(&ru->at(op.header_offset));
        break;
      case ExprOpcode::ADD:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].add(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::SUB:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].sub(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::MOD:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].mod(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::DIV:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].divide(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::MUL:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].multiply(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::SHIFT_LEFT:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].shift_left(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::SHIFT_RIGHT:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].shift_right(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::EQ_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld == *rd);
        break;
      case ExprOpcode::NEQ_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld != *rd);
        break;
      case ExprOpcode::GT_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld > *rd);
        break;
      case ExprOpcode::LT_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld < *rd);
        break;
      case ExprOpcode::GET_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld >= *rd);
        break;
      case ExprOpcode::LET_DATA:
        rd = temps->pop_data();
        ld = temps->pop_data();
        temps->push_bool(*ld <= *rd);
        break;
      case ExprOpcode::EQ_HEADER:
        rh = temps->pop_header();
        lh = temps->pop_header();
        temps->push_bool(lh->cmp(*rh));
        break;
      case ExprOpcode::NEQ_HEADER:
        rh = temps->pop_header();
        lh = temps->pop_header();
        temps->push_bool(!lh->cmp(*rh));
        break;
      case ExprOpcode::EQ_UNION:
        ru = temps->pop_union();
        lu = temps->pop_union();
        temps->push_bool(lu->cmp(*ru));
        break;
      case ExprOpcode::NEQ_UNION:
        ru = temps->pop_union();
        lu = temps->pop_union();
        temps->push_bool(!lu->cmp(*ru));
        break;
      case ExprOpcode::EQ_BOOL:
        rb = temps->pop_bool();
        lb = temps->pop_bool();
        temps->push_bool(lb == rb);
        break;
      case ExprOpcode::NEQ_BOOL:
        rb = temps->pop_bool();
        lb = temps->pop_bool();
        temps->push_bool(lb != rb);
        break;
      case ExprOpcode::AND:
        // note: both operands were already evaluated; no short-circuiting
        rb = temps->pop_bool();
        lb = temps->pop_bool();
        temps->push_bool(lb && rb);
        break;
      case ExprOpcode::OR:
        rb = temps->pop_bool();
        lb = temps->pop_bool();
        temps->push_bool(lb || rb);
        break;
      case ExprOpcode::NOT:
        rb = temps->pop_bool();
        temps->push_bool(!rb);
        break;
      case ExprOpcode::BIT_AND:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].bit_and(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::BIT_OR:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].bit_or(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::BIT_XOR:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].bit_xor(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::BIT_NEG:
        rd = temps->pop_data();
        data_temps[op.data_dest_index].bit_neg(*rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::VALID_HEADER:
        rh = temps->pop_header();
        temps->push_bool(rh->is_valid());
        break;
      case ExprOpcode::VALID_UNION:
        ru = temps->pop_union();
        temps->push_bool(ru->is_valid());
        break;
      case ExprOpcode::TERNARY_OP:
        // condition true: step over the first SKIP so the "then" ops run;
        // condition false: fall into the SKIP, which jumps to the "else" ops
        // (see the layout comment above push_back_ternary_op)
        rb = temps->pop_bool();
        if (rb) i += 1;
        break;
      case ExprOpcode::SKIP:
        i += op.skip_num;
        break;
      case ExprOpcode::TWO_COMP_MOD:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].two_comp_mod(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::USAT_CAST:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].usat_cast(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::SAT_CAST:
        rd = temps->pop_data();
        ld = temps->pop_data();
        data_temps[op.data_dest_index].sat_cast(*ld, *rd);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::DATA_TO_BOOL:
        rd = temps->pop_data();
        temps->push_bool(!rd->test_eq(0));
        break;
      case ExprOpcode::BOOL_TO_DATA:
        rb = temps->pop_bool();
        data_temps[op.data_dest_index].set(static_cast<int>(rb));
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::DEREFERENCE_HEADER_STACK:
        rd = temps->pop_data();
        hs = temps->pop_header_stack();
        temps->push_header(&hs->at(rd->get<size_t>()));
        break;
      // LAST_STACK_INDEX seems a little redundant given SIZE_STACK, but I don't
      // exclude in the future to do some sanity checking for LAST_STACK_INDEX
      case ExprOpcode::LAST_STACK_INDEX:
        hs = temps->pop_header_stack();
        data_temps[op.data_dest_index].set(hs->get_count() - 1);
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::SIZE_STACK:
        hs = temps->pop_header_stack();
        data_temps[op.data_dest_index].set(hs->get_count());
        temps->push_data(&data_temps[op.data_dest_index]);
        break;
      case ExprOpcode::DEREFERENCE_UNION_STACK:
        rd = temps->pop_data();
        hus = temps->pop_union_stack();
        temps->push_union(&hus->at(rd->get<size_t>()));
        break;
      default:
        assert(0 && "invalid operand");
        break;
    }
  }
}
// Evaluates the expression and returns its boolean result; an empty
// expression evaluates to false.
bool
Expression::eval_bool(const PHV &phv, const std::vector<Data> &locals) const {
  // special case, where the expression is empty
  // not sure if this is the best way to handle this case, maybe the compiler
  // should make sure this never happens instead and we should treat this as
  // an error
  if (ops.empty()) return false;
  auto &temps = ExpressionTemps::get_instance();
  eval_(phv, locals, &temps);
  return temps.pop_bool();
}

// Evaluates the expression and returns its arithmetic result by value; an
// empty expression evaluates to 0.
Data
Expression::eval_arith(const PHV &phv, const std::vector<Data> &locals) const {
  if (ops.empty()) return Data(0);
  auto &temps = ExpressionTemps::get_instance();
  eval_(phv, locals, &temps);
  return *temps.pop_data();
}

// Same as above, but writes the result into a caller-provided Data object,
// avoiding a temporary.
void
Expression::eval_arith(const PHV &phv, Data *data,
                       const std::vector<Data> &locals) const {
  if (ops.empty()) {
    data->set(0);
    return;
  }
  auto &temps = ExpressionTemps::get_instance();
  eval_(phv, locals, &temps);
  data->set(*temps.pop_data());
}
// Unfortunately all the methods use a const_cast. I wanted to avoid having to
// change the interface for existing methods (eval_arith and eval_bool), for
// which the expectation is that the PHV will not be modified during
// evaluation. This meant that changing the private eval_ method was
// difficult. I haven't found a good solution, which doesn't make the code more
// complex. yet.

// Evaluates to a mutable reference to a Data lvalue (e.g. a field or
// register cell). The expression must not be empty.
Data &
Expression::eval_arith_lvalue(PHV *phv, const std::vector<Data> &locals) const {
  assert(!ops.empty());
  auto &temps = ExpressionTemps::get_instance();
  eval_(*phv, locals, &temps);
  return const_cast<Data &>(*temps.pop_data());
}

// Evaluates to a mutable reference to a Header in the PHV.
Header &
Expression::eval_header(PHV *phv, const std::vector<Data> &locals) const {
  assert(!ops.empty());
  auto &temps = ExpressionTemps::get_instance();
  eval_(*phv, locals, &temps);
  return const_cast<Header &>(*temps.pop_header());
}

// Evaluates to a mutable reference to a HeaderStack in the PHV.
HeaderStack &
Expression::eval_header_stack(PHV *phv, const std::vector<Data> &locals) const {
  assert(!ops.empty());
  auto &temps = ExpressionTemps::get_instance();
  eval_(*phv, locals, &temps);
  return const_cast<HeaderStack &>(*temps.pop_header_stack());
}

// Evaluates to a mutable reference to a HeaderUnion in the PHV.
HeaderUnion &
Expression::eval_header_union(PHV *phv, const std::vector<Data> &locals) const {
  assert(!ops.empty());
  auto &temps = ExpressionTemps::get_instance();
  eval_(*phv, locals, &temps);
  return const_cast<HeaderUnion &>(*temps.pop_union());
}

// Evaluates to a mutable reference to a HeaderUnionStack in the PHV.
HeaderUnionStack &
Expression::eval_header_union_stack(
    PHV *phv, const std::vector<Data> &locals) const {
  assert(!ops.empty());
  auto &temps = ExpressionTemps::get_instance();
  eval_(*phv, locals, &temps);
  return const_cast<HeaderUnionStack &>(*temps.pop_union_stack());
}
// TODO(antonin): If there is a ternary op, we will over-estimate this number,
// see if there is an easy fix

// Walks the program once, simulating the operand stack, and assigns each
// result-producing op a destination slot in the data_temps register pool.
// `new_registers` tracks, per stack entry, whether that entry occupies a
// fresh register (1) or merely points at existing storage (0);
// `registers_curr` is the number of registers live at this point. Returns
// the peak register count, used to size the pool in build().
int
Expression::assign_dest_registers() {
  int registers_cnt = 0;
  int registers_curr = 0;
  std::stack<int> new_registers;
  for (auto &op : ops) {
    switch (op.opcode) {
      // binary data ops: consume two operands, produce one fresh register
      case ExprOpcode::ADD:
      case ExprOpcode::SUB:
      case ExprOpcode::MOD:
      case ExprOpcode::DIV:
      case ExprOpcode::MUL:
      case ExprOpcode::SHIFT_LEFT:
      case ExprOpcode::SHIFT_RIGHT:
      case ExprOpcode::BIT_AND:
      case ExprOpcode::BIT_OR:
      case ExprOpcode::BIT_XOR:
      case ExprOpcode::TWO_COMP_MOD:
      case ExprOpcode::USAT_CAST:
      case ExprOpcode::SAT_CAST:
        registers_curr -= new_registers.top();
        new_registers.pop();
        registers_curr -= new_registers.top();
        new_registers.pop();
        op.data_dest_index = registers_curr;
        new_registers.push(1);
        registers_curr += 1;
        break;
      // unary data op: consume one operand, produce one fresh register
      case ExprOpcode::BIT_NEG:
        registers_curr -= new_registers.top();
        new_registers.pop();
        op.data_dest_index = registers_curr;
        new_registers.push(1);
        registers_curr += 1;
        break;
      // produce a fresh register without consuming a data operand
      case ExprOpcode::BOOL_TO_DATA:
      case ExprOpcode::LAST_STACK_INDEX:
      case ExprOpcode::SIZE_STACK:
        op.data_dest_index = registers_curr;
        new_registers.push(1);
        registers_curr += 1;
        break;
      // added recently; not necessary but could decrease number of registers
      // comparisons consume two data operands and produce a bool (no register)
      case ExprOpcode::EQ_DATA:
      case ExprOpcode::NEQ_DATA:
      case ExprOpcode::GT_DATA:
      case ExprOpcode::LT_DATA:
      case ExprOpcode::GET_DATA:
      case ExprOpcode::LET_DATA:
        registers_curr -= new_registers.top();
        new_registers.pop();
        registers_curr -= new_registers.top();
        new_registers.pop();
        break;
      // consume one data operand, produce no register
      case ExprOpcode::DATA_TO_BOOL:
      case ExprOpcode::DEREFERENCE_HEADER_STACK:
      case ExprOpcode::DEREFERENCE_UNION_STACK:
        registers_curr -= new_registers.top();
        new_registers.pop();
        break;
      // loads push references to existing storage: no register needed
      case ExprOpcode::LOAD_CONST:
      case ExprOpcode::LOAD_LOCAL:
      case ExprOpcode::LOAD_FIELD:
      case ExprOpcode::LOAD_LAST_HEADER_STACK_FIELD:
      case ExprOpcode::LOAD_REGISTER_REF:
      case ExprOpcode::ACCESS_FIELD:
        new_registers.push(0);
        break;
      // consumes the index operand, pushes a reference to existing storage
      case ExprOpcode::LOAD_REGISTER_GEN:
        registers_curr -= new_registers.top();
        new_registers.pop();
        new_registers.push(0);
        break;
      // here to emphasize the fact that with my skip implementation choice,
      // nothing special needs to be done here
      case ExprOpcode::TERNARY_OP:
        break;
      default:
        break;
    }
    registers_cnt = std::max(registers_cnt, registers_curr);
  }
  return registers_cnt;
}
// True if the expression has no ops (the "always false / zero" expression).
bool
Expression::empty() const {
  return ops.empty();
}
// Wraps an expression whose LOAD_LOCAL placeholders stand for fields of a
// not-yet-known header; records their offsets so resolve() can patch them.
VLHeaderExpression::VLHeaderExpression(const ArithExpression &expr)
    : expr(expr) {
  for (const Op &placeholder : expr.ops) {
    if (placeholder.opcode != ExprOpcode::LOAD_LOCAL) continue;
    offsets.push_back(placeholder.local_offset);
  }
}
// Offsets of the LOAD_LOCAL placeholders collected by the constructor.
const std::vector<int> &
VLHeaderExpression::get_input_offsets() const {
  return offsets;
}
// Returns a copy of the wrapped expression in which every LOAD_LOCAL
// placeholder has been rewritten into a LOAD_FIELD on the given header.
// The expression must have been built before resolution.
ArithExpression
VLHeaderExpression::resolve(header_id_t header_id) {
  assert(expr.built);
  ArithExpression new_expr = expr;
  // range-for instead of the previous index loop: the index was never used
  for (auto &op : new_expr.ops) {
    if (op.opcode == ExprOpcode::LOAD_LOCAL) {
      op.opcode = ExprOpcode::LOAD_FIELD;
      // keep this statement order: local_offset must be read before the
      // field members are written
      op.field.field_offset = op.local_offset;
      op.field.header = header_id;
    }
  }
  return new_expr;
}
} // namespace bm
| 13,068 |
6,240 | /*
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Rhino code, released
* May 6, 1999.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1997-1999
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* <NAME>
* Google Inc.
*
* Alternatively, the contents of this file may be used under the terms of
* the GNU General Public License Version 2 or later (the "GPL"), in which
* case the provisions of the GPL are applicable instead of those above. If
* you wish to allow use of your version of this file only under the terms of
* the GPL and not to allow others to use your version of this file under the
* MPL, indicate your decision by deleting the provisions above and replacing
* them with the notice and other provisions required by the GPL. If you do
* not delete the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* ***** END LICENSE BLOCK ***** */
package com.google.javascript.rhino;
import com.google.common.collect.ImmutableMap;
import javax.annotation.Nullable;
/**
* Enum of primitive functions that the compiler recognizes
*
* <p>These correspond to the @closurePrimitive tag in code; in order to parse new primitives, add
* any entry to the list in parsing/ParserConfig.properties, then map it to an enum member in the
* idToEnum map.
*
* <p>After typechecking is done, all calls to one of these primitive types should have their
* FunctionType annotated with the corresponding enum member. This is intended to make identifying
* these calls more accurate than previous methods of finding primitives by qualified name.
*/
public enum ClosurePrimitive {
  // A function that always throws an error.
  ASSERTS_FAIL,
  // A function that asserts its first parameter matches the return type.
  ASSERTS_MATCHES_RETURN,
  // A function that asserts its first parameter is truthy and returns the param.
  ASSERTS_TRUTHY;

  /**
   * Maps human-readable ids to enum members.
   *
   * <p>The expected mapping (although not enforced) of keys -> values is that the enum member maps
   * to a lowercase string with "_" replaced with "."
   */
  private static final ImmutableMap<String, ClosurePrimitive> ID_TO_ENUM =
      ImmutableMap.of(
          "asserts.fail", ASSERTS_FAIL,
          "asserts.truthy", ASSERTS_TRUTHY,
          "asserts.matchesReturn", ASSERTS_MATCHES_RETURN);

  /**
   * Returns the ClosurePrimitive corresponding to the given string id.
   *
   * <p>String ids are kept separate from the enum names so that JSDoc can use the friendlier
   * dotted form (e.g. {@code closurePrimitive {asserts.fail}}) instead of the raw enum name.
   *
   * @param id a string id that normalized to an enum member, or null
   * @throws IllegalArgumentException if the id is non-null but does not match an enum member
   * @return null if the argument is null, otherwise the corresponding enum member
   */
  @Nullable
  public static ClosurePrimitive fromStringId(@Nullable String id) {
    if (id == null) {
      return null;
    }
    ClosurePrimitive primitive = ID_TO_ENUM.get(id);
    if (primitive != null) {
      return primitive;
    }
    throw new IllegalArgumentException(
        "String id " + id + " does not match any ClosurePrimitive");
  }
}
| 1,169 |
411 | <filename>code/ConvertLayer_ncnn.py
"""
Copyright (c) 2017-present, starime.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import math
import numpy as np
class LayerParameter_ncnn(object):
    """Plain container for one converted ncnn layer.

    Holds the ncnn layer type string, the textual parameter list (a list of
    strings, or a keyed dict for layers using ncnn's "-233xx" param form),
    and the raw weight blobs (numpy arrays) in ncnn storage order.
    """

    def __init__(self):
        self.weights = []
        self.param = []
        self.type = ''
def CopyTuple(param):
    """Normalize a scalar-or-pair layer parameter to a 2-tuple.

    Tuples are returned unchanged; a single int is duplicated into a pair.

    Fix: the original ended with ``assert type(param)``, which is always
    truthy (a type object), so unsupported inputs silently returned None.
    Now an unsupported type fails loudly.
    """
    if isinstance(param, tuple):
        return param
    if isinstance(param, int):
        return param, param
    raise TypeError('Expected int or tuple, got %s' % type(param).__name__)
def ty(ncnn_type):
    """Return a converter producing a bare ncnn layer of the given type.

    Used for parameter-less layers (e.g. Sigmoid, TanH) where the pytorch
    node carries no information beyond its presence.
    """
    def make_layer(_unused_pytorch_layer):
        result = LayerParameter_ncnn()
        result.type = ncnn_type
        return result
    return make_layer
def data(inputs):
    """Build the ncnn 'Input' layer from the network input blob.

    Emits one size per non-batch dimension; -233 is ncnn's sentinel for a
    dimension that is not present.

    Fix: the guard was ``dim - 1 < len(input_shape)``, which reads one
    element past the end for inputs with fewer than 4 dimensions
    (e.g. a 3-d shape raised IndexError at dim == 3). Use ``dim < len``.
    """
    layer = LayerParameter_ncnn()
    layer.type = 'Input'
    input_shape = inputs.data.numpy().shape
    for dim in range(1, 4):
        if dim < len(input_shape):
            size = input_shape[dim]
        else:
            size = -233
        layer.param.append('%ld' % size)
    return layer
def Slice(pytorch_layer):
    """Convert a slice node to an ncnn Slice layer.

    Uses ncnn's keyed param form: key '-23300' holds the per-slice sizes
    (count first, then each size, -233 meaning "everything remaining"),
    and key '1' holds the axis shifted down by one (batch dim dropped).
    """
    result = LayerParameter_ncnn()
    result.type = 'Slice'
    result.param = {}
    points = pytorch_layer.slice_point
    pieces = ['%d' % (len(points) + 1)]
    previous = 0
    for point in points:
        pieces.append('%d' % (point - previous))
        previous = point
    pieces.append('%d' % -233)  # last slice takes the remainder
    result.param['-23300'] = ','.join(pieces)
    result.param['1'] = ('%d' % (pytorch_layer.axis - 1))
    return result
def Split(pytorch_layer):
    """A pytorch MultiCopy becomes a bare ncnn Split layer (no params)."""
    branch = LayerParameter_ncnn()
    branch.type = 'Split'
    return branch
def permute(pytorch_layer):
    """Convert a 4-d permutation (batch dim fixed) to an ncnn Permute layer.

    The (c, h, w) source positions are encoded into ncnn's order_type code;
    see src/layer/permute.cpp for the encoding table.
    """
    result = LayerParameter_ncnn()
    result.type = 'Permute'
    indices = pytorch_layer.rev_dim_indices
    assert len(indices) == 4, len(indices)
    assert indices[0] == 0, indices[0]
    # Keyed by (c, h, w); all six permutations of {1,2,3} are covered, so the
    # default 0 only applies to degenerate input (matching the original chain).
    order_codes = {
        (1, 2, 3): 0,
        (1, 3, 2): 1,
        (2, 1, 3): 2,
        (2, 3, 1): 3,
        (3, 1, 2): 4,
        (3, 2, 1): 5,
    }
    key = (indices[3], indices[1], indices[2])  # (c, h, w)
    result.param.append('%d' % order_codes.get(key, 0))
    return result
def flatten(pytorch_layer):
    """Convert a view() node, supporting only the flatten shape.

    The target second dimension must equal the product of the old sizes
    (or be -1, pytorch's "infer" marker).
    """
    expected = 1
    for extent in pytorch_layer.old_size:
        expected *= extent
    target = pytorch_layer.new_sizes[1]
    assert (target == expected) or (target == -1)
    result = LayerParameter_ncnn()
    result.type = "Flatten"
    return result
def inner_product(pytorch_layer):
    """Convert an Addmm autograd node to an ncnn InnerProduct layer.

    The weight matrix is read from the autograd graph at
    next_functions[2][0] (the mm branch), the optional bias from
    next_functions[0][0]. Params are appended in ncnn's fixed order:
    num_output, bias_term, weight_data_size.
    """
    layer = LayerParameter_ncnn()
    layer.type = 'InnerProduct'
    blobs_weight = pytorch_layer.next_functions[2][0].next_functions[0][0].variable.data.numpy()
    num_output = pytorch_layer.next_functions[2][0].next_functions[0][0].variable.size(0)
    layer.param.append('%d' % num_output)
    if pytorch_layer.next_functions[0][0]:
        # Bias branch present in the graph.
        layer.param.append('%d' % True)
        bias = pytorch_layer.next_functions[0][0].variable.data.numpy()
        layer.param.append('%d' % blobs_weight.size)
        # Leading [0.] appears to be ncnn's weight-storage tag (float32);
        # blob order (tag, weights, bias) must not change.
        layer.weights.append(np.array([0.]))
        layer.weights.append(blobs_weight)
        layer.weights.append(bias)
    else:
        layer.param.append('%d' % False)
        layer.param.append('%d' % blobs_weight.size)
        layer.weights.append(np.array([0.]))
        layer.weights.append(blobs_weight)
    return layer
def concat(pytorch_layer):
    """Convert a concat node to an ncnn Concat layer.

    Axis 1 (channels) is ncnn's default and emits no param; any other axis
    is emitted shifted down by one to drop the batch dimension.
    """
    result = LayerParameter_ncnn()
    result.type = 'Concat'
    axis = int(pytorch_layer.dim)
    if axis != 1:
        shifted = axis - 1 if axis >= 1 else 0
        result.param.append('%d' % shifted)
    return result
def spatial_convolution(pytorch_layer):
    """Convert a ConvNd autograd node to an ncnn (De)Convolution layer.

    The weight tensor comes from next_functions[1], the optional bias from
    next_functions[2]. Params are appended in ncnn's fixed order:
    num_output, kernel, dilation, stride, pad, bias_term, weight_data_size
    (plus group last for depthwise variants).
    """
    layer = LayerParameter_ncnn()
    blobs_weight = pytorch_layer.next_functions[1][0].variable.data.numpy()
    assert len(blobs_weight.shape) == 4, blobs_weight.shape
    (nOutputPlane, nInputPlane, kH, kW) = blobs_weight.shape
    padH = pytorch_layer.padding[0]
    padW = pytorch_layer.padding[1]
    dH = pytorch_layer.stride[0]
    dW = pytorch_layer.stride[1]
    dilation = pytorch_layer.dilation[0]
    groups = pytorch_layer.groups
    if pytorch_layer.transposed:
        layer.type = 'Deconvolution'
        layer.param.append('%d' % nInputPlane)
        """ ncnn: Need to swap input dim and output dim """
        blobs_weight = np.swapaxes(blobs_weight, 0, 1)
    else:
        layer.type = 'Convolution'
        layer.param.append('%d' % nOutputPlane)
    # Only square kernels/strides/padding are representable here.
    assert kH == kW, [kH, kW]
    assert dH == dW, [dH, dW]
    assert padH == padW, [padH, padW]
    layer.param.append('%d' % kH)
    layer.param.append('%d' % dilation)
    layer.param.append('%d' % dH)
    layer.param.append('%d' % padH)
    if pytorch_layer.next_functions[2][0]:
        # Bias branch present.
        layer.param.append('%d' % True)
        bias = pytorch_layer.next_functions[2][0].variable.data.numpy()
        layer.param.append('%d' % blobs_weight.size)
        # Leading [0.] appears to be ncnn's weight-storage tag; blob order
        # (tag, weights, bias) must not change.
        layer.weights.append(np.array([0.]))
        layer.weights.append(blobs_weight)
        layer.weights.append(bias)
    else:
        layer.param.append('%d' % False)
        layer.param.append('%d' % blobs_weight.size)
        layer.weights.append(np.array([0.]))
        layer.weights.append(blobs_weight)
    if groups != 1:
        # Grouped convolution maps to ncnn's DepthWise layer variants.
        layer.param.append('%d' % groups)
        layer.type += "DepthWise"
    return layer
def FillBilinear(ch, k):
    """Build per-channel bilinear upsampling kernels, shape (ch, 1, k, k).

    Every channel gets the same k x k bilinear interpolation kernel, the
    standard initialization for learned-upsampling deconvolutions.
    """
    scale = (k + 1) // 2
    center = scale - 1 if k % 2 == 1 else scale - 0.5
    kernel = np.zeros([k, k], dtype=np.float32)
    for row in range(k):
        for col in range(k):
            kernel[row, col] = (1 - abs(row - center) / scale) * (1 - abs(col - center) / scale)
    blob = np.zeros(shape=(ch, 1, k, k))
    for channel in range(ch):
        blob[channel, 0, :, :] = kernel
    return blob
def UpsampleBilinear(pytorch_layer):
    """Convert bilinear upsampling into an equivalent ncnn Deconvolution.

    A stride-`factor` deconvolution with a fixed bilinear kernel reproduces
    bilinear upsampling. Only square, integer scale factors are supported.
    Params follow ncnn Deconvolution order: num_output, kernel, dilation,
    stride, pad, bias_term, weight_data_size.
    """
    layer = LayerParameter_ncnn()
    layer.type = 'Deconvolution'
    assert pytorch_layer.scale_factor[0] == pytorch_layer.scale_factor[1]
    factor = int(pytorch_layer.scale_factor[0])
    c = int(pytorch_layer.input_size[1])
    # Standard kernel size for a bilinear deconvolution of this factor.
    k = 2 * factor - factor % 2
    num_output = c
    kernel_size = k
    stride = factor
    pad = int(math.ceil((factor - 1) / 2.))
    dilation = 1
    # group = c
    # weight_filler = 'bilinear'
    bias_term = False
    layer.param.append('%d' % num_output)
    layer.param.append('%d' % kernel_size)
    layer.param.append('%d' % dilation)
    layer.param.append('%d' % stride)
    layer.param.append('%d' % pad)
    layer.param.append('%d' % bias_term)
    # learning_param = pb2.ParamSpec()
    # learning_param.lr_mult = 0
    # learning_param.decay_mult = 0
    # layer.param.extend([learning_param])
    """ init weight blob of filter kernel """
    blobs_weight = FillBilinear(c, k)
    layer.param.append('%d' % blobs_weight.size)
    # Leading [0.] appears to be ncnn's weight-storage tag.
    layer.weights.append(np.array([0.]))
    layer.weights.append(blobs_weight)
    return layer
def CopyPoolingParameter(pytorch_layer, layer):
    """Append the shared pooling params (kernel, stride, pad, global flag)
    to `layer` in ncnn's fixed order. The pooling-type param must already
    have been appended by the caller."""
    padH, padW = CopyTuple(pytorch_layer.padding)
    kH, kW = CopyTuple(pytorch_layer.kernel_size)
    dH, dW = CopyTuple(pytorch_layer.stride)
    # Only square kernels/strides/padding are representable.
    assert kH == kW, [kH, kW]
    assert dH == dW, [dH, dW]
    assert padH == padW, [padH, padW]
    layer.param.append('%d' % kH)
    layer.param.append('%d' % dH)
    # if pytorch_layer.ceil_mode is True:
    layer.param.append('%d' % padH)
    """ TODO: global_pooling? """
    layer.param.append('%d' % 0)
def MaxPooling(pytorch_layer):
    """Max pooling: ncnn Pooling layer with pooling type 0."""
    pool = LayerParameter_ncnn()
    pool.type = 'Pooling'
    pool.param.append('%d' % 0)
    CopyPoolingParameter(pytorch_layer, pool)
    return pool
def AvgPooling(pytorch_layer):
    """Average pooling: ncnn Pooling layer with pooling type 1."""
    pool = LayerParameter_ncnn()
    pool.type = 'Pooling'
    pool.param.append('%d' % 1)
    CopyPoolingParameter(pytorch_layer, pool)
    return pool
def dropout(pytorch_layer):
    """Convert Dropout to ncnn; the scale param is only emitted when
    p differs from 0.5 (ncnn's implicit default)."""
    result = LayerParameter_ncnn()
    result.type = 'Dropout'
    ratio = float(pytorch_layer.p)
    if abs(ratio - 0.5) >= 1e-3:
        result.param.append('%f' % (1.0 - ratio))
    return result
def elu(pytorch_layer):
    """Convert ELU; the single param is alpha."""
    result = LayerParameter_ncnn()
    result.type = 'ELU'
    alpha_value = pytorch_layer.additional_args[0]
    result.param.append('%f' % alpha_value)
    return result
def ReLU(pytorch_layer):
    """Plain ReLU is an ncnn ReLU with zero negative slope."""
    result = LayerParameter_ncnn()
    result.type = 'ReLU'
    result.param.append('%f' % 0.0)
    return result
def leaky_ReLU(pytorch_layer):
    """LeakyReLU maps to an ncnn ReLU carrying the negative slope."""
    result = LayerParameter_ncnn()
    result.type = 'ReLU'
    slope = float(pytorch_layer.additional_args[0])
    result.param.append('%f' % slope)
    return result
def PReLU(pytorch_layer):
    """Convert PReLU: the single param is the number of slope coefficients,
    and the learned slopes (from the autograd graph) are the only weight
    blob."""
    layer = LayerParameter_ncnn()
    layer.type = 'PReLU'
    blobs_weight = pytorch_layer.next_functions[1][0].variable.data.numpy()
    layer.param.append('%d' % blobs_weight.size)
    layer.weights.append(blobs_weight)
    return layer
def MulConst(pytorch_layer):
    """Multiply by a constant via ncnn's Power layer
    (params appear to be power=1, scale=constant, shift=0)."""
    result = LayerParameter_ncnn()
    result.type = 'Power'
    result.param.append('%f' % 1)
    result.param.append('%f' % float(pytorch_layer.constant))
    result.param.append('%f' % 0)
    return result
def AddConst(pytorch_layer):
    """Add a constant via ncnn's Power layer (power=1, scale=1).

    The shift is emitted as inf on purpose: the constant is not visible in
    autograd, so it must be filled in by hand afterwards.
    """
    result = LayerParameter_ncnn()
    result.type = 'Power'
    result.param.append('%f' % 1)
    result.param.append('%f' % 1)
    result.param.append('%f' % float('inf'))
    return result
def softmax(pytorch_layer):
    """Softmax along axis 0 (TODO: derive the axis from the pytorch node)."""
    result = LayerParameter_ncnn()
    result.type = 'Softmax'
    result.param.append('%d' % 0)
    return result
def eltwise(pytorch_layer):
    """Element-wise add (ncnn Eltwise op code 1; 0=mul 1=add 2=max).
    TODO: per-input coefficients are not emitted."""
    result = LayerParameter_ncnn()
    result.type = 'Eltwise'
    result.param.append('%d' % 1)
    return result
def eltwise_max(pytorch_layer):
    """Element-wise max (ncnn Eltwise op code 2; 0=mul 1=add 2=max).
    TODO: per-input coefficients are not emitted."""
    result = LayerParameter_ncnn()
    result.type = 'Eltwise'
    result.param.append('%d' % 2)
    return result
def negate(pytorch_layer):
    """Negation via ncnn UnaryOp (Operation_NEG == 1, see
    src/layer/unaryop.h for the op codes)."""
    result = LayerParameter_ncnn()
    result.type = 'UnaryOp'
    result.param.append('%d' % 1)
    return result
def batchnorm(pytorch_layer):
    """Convert BatchNorm into an ncnn BatchNorm + Scale layer pair.

    The BatchNorm layer gets identity slope/zero bias with the running
    statistics; the learned gamma/beta go into a separate Scale layer.
    Weight blob order within each layer must not change.
    """
    layer_bn = LayerParameter_ncnn()
    layer_bn.type = 'BatchNorm'
    layer_bn.param.append('%d' % pytorch_layer.running_mean.numpy().size)
    # Blob order: slope (ones), mean, var, bias (zeros).
    layer_bn.weights.append(np.ones(pytorch_layer.running_mean.numpy().shape))
    layer_bn.weights.append(pytorch_layer.running_mean.numpy())
    """ Add eps by hand for running_var in ncnn """
    running_var = pytorch_layer.running_var.numpy()
    running_var = running_var + pytorch_layer.eps
    layer_bn.weights.append(running_var)
    layer_bn.weights.append(np.zeros(pytorch_layer.running_mean.numpy().shape))
    layer_scale = LayerParameter_ncnn()
    layer_scale.type = 'Scale'
    # Learned gamma from the autograd graph; beta (bias) is optional.
    blobs_weight = pytorch_layer.next_functions[1][0].variable.data.numpy()
    if pytorch_layer.next_functions[2][0]:
        layer_scale.param.append('%d' % blobs_weight.size)
        layer_scale.param.append('%d' % True)
        bias = pytorch_layer.next_functions[2][0].variable.data.numpy()
        layer_scale.weights.append(blobs_weight)
        layer_scale.weights.append(bias)
    else:
        layer_scale.param.append('%d' % blobs_weight.size)
        layer_scale.param.append('%d' % False)
        layer_scale.weights.append(blobs_weight)
    return [layer_bn, layer_scale]
def build_converter(opts):
    """Build the dispatch table mapping pytorch autograd function names to
    ncnn converter callables."""
    table = {
        # network input
        'data': data,
        # layers with weights
        'Addmm': inner_product,
        'ConvNd': spatial_convolution,
        'BatchNorm': batchnorm,
        'PReLU': PReLU,
        # pooling
        'MaxPool2d': MaxPooling,
        'AvgPool2d': AvgPooling,
        # element-wise / arithmetic
        'Add': eltwise,
        'Cmax': eltwise_max,
        'MulConstant': MulConst,
        'AddConstant': AddConst,
        'Negate': negate,
        # activations
        'Threshold': ReLU,
        'LeakyReLU': leaky_ReLU,
        'ELU': elu,
        'Sigmoid': ty('Sigmoid'),
        'Tanh': ty('TanH'),
        'Softmax': softmax,
        # shape / topology
        'Concat': concat,
        'Dropout': dropout,
        'UpsamplingBilinear2d': UpsampleBilinear,
        'Slice': Slice,
        'MultiCopy': Split,
        'Permute': permute,
        'View': flatten,
    }
    return table
def convert_ncnn(opts, typename, pytorch_layer):
    """Dispatch one pytorch autograd node to its ncnn converter.

    Raises ValueError for layer types with no registered converter.
    """
    table = build_converter(opts)
    try:
        handler = table[typename]
    except KeyError:
        raise ValueError("Unknown layer type: {}, known types: {}".format(
            typename, table.keys()))
    return handler(pytorch_layer)
| 5,767 |
728 | <reponame>santoshkumarkannur/sirix
package org.sirix.axis;
import org.sirix.api.NodeCursor;
import org.sirix.api.NodeReadOnlyTrx;
import org.sirix.api.NodeTrx;
import org.sirix.api.ResourceManager;
import com.google.common.collect.AbstractIterator;
/**
* TemporalAxis abstract class.
*
* @author <NAME>
*
*/
public abstract class AbstractTemporalAxis<R extends NodeReadOnlyTrx & NodeCursor, W extends NodeTrx & NodeCursor>
    extends AbstractIterator<R> {
  /** Returns the resource manager this temporal axis iterates over. */
  public abstract ResourceManager<R, W> getResourceManager();
}
| 184 |
3,037 | package com.jeremyliao.liveeventbus;
import android.arch.lifecycle.Lifecycle;
import android.arch.lifecycle.LifecycleOwner;
import android.arch.lifecycle.Observer;
import android.os.Handler;
import android.os.Looper;
import android.support.annotation.NonNull;
import com.jeremyliao.liveeventbus.liveevent.LiveEvent;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* Created by hailiangliao on 2018/7/4.
*/
public final class LiveEventBus {
    // One LiveData-backed channel per string key; entries are created lazily
    // by with() and removed when a channel loses its last observer.
    private final Map<String, BusLiveEvent<Object>> bus;
    private LiveEventBus() {
        bus = new HashMap<>();
    }
    // Initialization-on-demand holder idiom: thread-safe lazy singleton.
    private static class SingletonHolder {
        private static final LiveEventBus DEFAULT_BUS = new LiveEventBus();
    }
    /** Returns the process-wide bus instance. */
    public static LiveEventBus get() {
        return SingletonHolder.DEFAULT_BUS;
    }
    // When true, lifecycle observers are active from CREATED onward; when
    // false, only from STARTED (see observerActiveLevel() below).
    private boolean lifecycleObserverAlwaysActive = true;
    /** Returns (creating if needed) the typed channel for the given key. */
    public synchronized <T> Observable<T> with(String key, Class<T> type) {
        if (!bus.containsKey(key)) {
            bus.put(key, new BusLiveEvent<>(key));
        }
        return (Observable<T>) bus.get(key);
    }
    /** Untyped convenience overload of {@link #with(String, Class)}. */
    public Observable<Object> with(String key) {
        return with(key, Object.class);
    }
    /** Configures the active-level policy for all channels. */
    public void lifecycleObserverAlwaysActive(boolean active) {
        lifecycleObserverAlwaysActive = active;
    }
    // Public surface of one event channel. setValue must run on the main
    // thread; the postValue* variants defer delivery to the main thread.
    public interface Observable<T> {
        void setValue(T value);
        void postValue(T value);
        void postValueDelay(T value, long delay);
        void postValueDelay(T value, long delay, TimeUnit unit);
        void observe(@NonNull LifecycleOwner owner, @NonNull Observer<T> observer);
        void observeSticky(@NonNull LifecycleOwner owner, @NonNull Observer<T> observer);
        void observeForever(@NonNull Observer<T> observer);
        void observeStickyForever(@NonNull Observer<T> observer);
        void removeObserver(@NonNull Observer<T> observer);
    }
    private class BusLiveEvent<T> extends LiveEvent<T> implements Observable<T> {
        // Delivers a deferred value on the main-looper thread.
        private class PostValueTask implements Runnable {
            private Object newValue;
            public PostValueTask(@NonNull Object newValue) {
                this.newValue = newValue;
            }
            @Override
            public void run() {
                setValue((T) newValue);
            }
        }
        @NonNull
        private final String key;
        private Handler mainHandler = new Handler(Looper.getMainLooper());
        private BusLiveEvent(String key) {
            this.key = key;
        }
        @Override
        protected Lifecycle.State observerActiveLevel() {
            // CREATED keeps observers active over the whole lifecycle span.
            return lifecycleObserverAlwaysActive ? Lifecycle.State.CREATED : Lifecycle.State.STARTED;
        }
        @Override
        public void postValueDelay(T value, long delay) {
            mainHandler.postDelayed(new PostValueTask(value), delay);
        }
        @Override
        public void postValueDelay(T value, long delay, TimeUnit unit) {
            postValueDelay(value, TimeUnit.MILLISECONDS.convert(delay, unit));
        }
        @Override
        public void removeObserver(@NonNull Observer<T> observer) {
            super.removeObserver(observer);
            // Drop the channel once nobody listens so sticky values do not
            // leak. NOTE(review): this mutates `bus` without the
            // synchronization used by with() — confirm thread-safety.
            if (!hasObservers()) {
                LiveEventBus.get().bus.remove(key);
            }
        }
    }
}
| 1,354 |
419 | <filename>Code/System/Core/Memory/Memory.h
#pragma once
#include "System/Core/_Module/API.h"
#include "System/Core/Types/IntegralTypes.h"
#include <algorithm>
#include <malloc.h>
//-------------------------------------------------------------------------
#define KRG_USE_CUSTOM_ALLOCATOR 1
#define KRG_DEFAULT_ALIGNMENT 8
//-------------------------------------------------------------------------
#ifdef _WIN32
#define KRG_STACK_ALLOC(x) alloca( x )
#define KRG_STACK_ARRAY_ALLOC(type, numElements) reinterpret_cast<type*>( alloca( sizeof(type) * numElements ) );
#else
#define KRG_STACK_ALLOC(x)
#define KRG_STACK_ARRAY_ALLOC(type, numElements)
#endif
//-------------------------------------------------------------------------
namespace KRG
{
    namespace Memory
    {
        // Global allocator lifecycle; must bracket all Alloc/Free usage.
        KRG_SYSTEM_CORE_API void Initialize();
        KRG_SYSTEM_CORE_API void Shutdown();
        // Per-thread heap lifecycle; call from each thread that allocates.
        KRG_SYSTEM_CORE_API void InitializeThreadHeap();
        KRG_SYSTEM_CORE_API void ShutdownThreadHeap();
        //-------------------------------------------------------------------------
        // Zero a raw byte range.
        KRG_FORCE_INLINE void MemsetZero( void* ptr, size_t size )
        {
            memset( ptr, 0, size );
        }
        // Zero a single object of type T (byte-wise; only safe for trivially
        // copyable types).
        template <typename T>
        KRG_FORCE_INLINE void MemsetZero( T* ptr )
        {
            memset( ptr, 0, sizeof( T ) );
        }
        //-------------------------------------------------------------------------
        // True if pointer p is aligned to an n-byte boundary.
        inline bool IsAligned( void const* p, size_t n )
        {
            return ( reinterpret_cast<uintptr_t>( p ) % n ) == 0;
        }
        // True if pointer p meets T's natural alignment.
        template <typename T>
        inline bool IsAligned( T const* p )
        {
            return ( reinterpret_cast<uintptr_t>( p ) % alignof( T ) ) == 0;
        }
        // Bytes of padding needed after addressOffset to reach the next
        // requiredAlignment boundary (0 if already aligned).
        KRG_FORCE_INLINE size_t CalculatePaddingForAlignment( uintptr_t addressOffset, size_t requiredAlignment )
        {
            return ( requiredAlignment - ( addressOffset % requiredAlignment ) ) % requiredAlignment;
        }
        KRG_FORCE_INLINE size_t CalculatePaddingForAlignment( void* address, size_t requiredAlignment )
        {
            return CalculatePaddingForAlignment( reinterpret_cast<uintptr_t>( address ), requiredAlignment );
        }
        //-------------------------------------------------------------------------
        // Allocation statistics (tracked by the custom allocator).
        KRG_SYSTEM_CORE_API size_t GetTotalRequestedMemory();
        KRG_SYSTEM_CORE_API size_t GetTotalAllocatedMemory();
    }
//-------------------------------------------------------------------------
// Global Memory Management Functions
//-------------------------------------------------------------------------
    // Aligned allocation API backing the New/Delete helpers below.
    [[nodiscard]] KRG_SYSTEM_CORE_API void* Alloc( size_t size, size_t alignment = KRG_DEFAULT_ALIGNMENT );
    [[nodiscard]] KRG_SYSTEM_CORE_API void* Realloc( void* pMemory, size_t newSize, size_t originalAlignment = KRG_DEFAULT_ALIGNMENT );
    // Frees the allocation and nulls the caller's pointer.
    KRG_SYSTEM_CORE_API void Free( void*& pMemory );
//-------------------------------------------------------------------------
template< typename T, typename ... ConstructorParams >
[[nodiscard]] KRG_FORCE_INLINE T* New( ConstructorParams&&... params )
{
void* pMemory = Alloc( sizeof( T ), alignof( T ) );
KRG_ASSERT( pMemory != nullptr );
return new( pMemory ) T( std::forward<ConstructorParams>( params )... );
}
template< typename T >
KRG_FORCE_INLINE void Delete( T*& pType )
{
if ( pType != nullptr )
{
pType->~T();
Free( (void*&) pType );
}
}
    // Typed convenience overload: forwards to Free( void*& ), which frees the
    // allocation and nulls the caller's pointer. Does NOT run destructors —
    // use Delete() for constructed objects.
    template< typename T >
    KRG_FORCE_INLINE void Free( T*& pType )
    {
        Free( (void*&) pType );
    }
//-------------------------------------------------------------------------
template< typename T, typename ... ConstructorParams >
[[nodiscard]] KRG_FORCE_INLINE T* NewArray( size_t const numElements )
{
size_t const requiredAlignment = std::max( alignof( T ), size_t( 16 ) );
size_t const requiredExtraMemory = std::max( requiredAlignment, size_t( 4 ) );
size_t const requiredMemory = sizeof( T ) * numElements + requiredExtraMemory;
Byte* pOriginalAddress = pOriginalAddress = (Byte*) Alloc( requiredMemory, requiredAlignment );
KRG_ASSERT( pOriginalAddress != nullptr );
// Call required type constructors
T* pArrayAddress = reinterpret_cast<T*>( pOriginalAddress + requiredExtraMemory );
for ( size_t i = 0; i < numElements; i++ )
{
new( &pArrayAddress[i] ) T( std::forward<ConstructorParams>( params )... );
}
// Record the number of array elements
uint32* pNumElements = reinterpret_cast<uint32*>( pArrayAddress ) - 1;
*pNumElements = uint32( numElements );
return pArrayAddress;
}
template< typename T >
[[nodiscard]] KRG_FORCE_INLINE T* NewArray( size_t const numElements, T const& value )
{
size_t const requiredAlignment = std::max( alignof( T ), size_t( 16 ) );
size_t const requiredExtraMemory = std::max( requiredAlignment, size_t( 4 ) );
size_t const requiredMemory = sizeof( T ) * numElements + requiredExtraMemory;
Byte* pOriginalAddress = pOriginalAddress = (Byte*) Alloc( requiredMemory, requiredAlignment );
KRG_ASSERT( pOriginalAddress != nullptr );
// Call required type constructors
T* pArrayAddress = reinterpret_cast<T*>( pOriginalAddress + requiredExtraMemory );
for ( size_t i = 0; i < numElements; i++ )
{
new( &pArrayAddress[i] ) T( value );
}
// Record the number of array elements
uint32* pNumElements = reinterpret_cast<uint32*>( pArrayAddress ) - 1;
*pNumElements = uint32( numElements );
return pArrayAddress;
}
    // Destroy and free an array created by NewArray. The extra-memory offset
    // computation must mirror NewArray exactly, since the element count is
    // read from the 4 bytes immediately before the array.
    template< typename T >
    KRG_FORCE_INLINE void DeleteArray( T*& pArray )
    {
        size_t const requiredAlignment = std::max( alignof( T ), size_t( 16 ) );
        size_t const requiredExtraMemory = std::max( requiredAlignment, size_t( 4 ) );
        // Get number of elements in array and call destructor on each entity
        uint32 const numElements = *( reinterpret_cast<uint32*>( pArray ) - 1 );
        for ( uint32 i = 0; i < numElements; i++ )
        {
            pArray[i].~T();
        }
        // Rewind to the address originally returned by Alloc before freeing.
        Byte* pOriginalAddress = reinterpret_cast<Byte*>( pArray ) - requiredExtraMemory;
        Free( (void*&) pOriginalAddress );
        pArray = nullptr;
    }
} | 2,704 |
663 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from subprocess import PIPE, STDOUT, Popen
from datadog_checks.base.utils.common import ensure_bytes
from datadog_checks.base.utils.http import RequestsWrapper
from datadog_checks.dev.errors import SubprocessError
from datadog_checks.dev.structures import LazyFunction
from datadog_checks.voltdb.client import Client
from datadog_checks.voltdb.types import Instance
from . import common
class CreateSchema(LazyFunction):
    """Loads a SQL schema into VoltDB by piping it to sqlcmd in the container."""

    def __init__(self, compose_file, schema, container_name):
        # type: (str, str, str) -> None
        # See: https://docs.voltdb.com/UsingVoltDB/ChapDesignSchema.php
        cmd = [
            'docker',
            'exec',
            '-i',
            container_name,
            'sqlcmd',
            '--user=admin',
            '--password=<PASSWORD>',
        ]
        if common.TLS_ENABLED:
            # See: https://docs.voltdb.com/UsingVoltDB/SecuritySSL.php#SecuritySSLCli
            cmd.append('--ssl=/tmp/certs/localcert.properties')
        self._command = cmd
        self._schema = schema

    def __call__(self):
        # type: () -> None
        # Simulate manually typing the schema in: sqlcmd reads it from stdin
        # (--query="file /path/to/schema.sql" does not work here).
        process = Popen(self._command, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        process.communicate(ensure_bytes(self._schema))
        process.wait()
        if process.returncode != 0:
            raise SubprocessError('Command: {}\nExit code: {}'.format(self._command, process.returncode))
class EnsureExpectedMetricsShowUp(LazyFunction):
    """
    Call procedures to ensure that all expected metrics will be reported by VoltDB.
    """

    def __init__(self, instance):
        # type: (Instance) -> None
        http = RequestsWrapper(instance, {})
        self._client = Client(url=instance['url'], http_get=http.get, username='admin', password='<PASSWORD>')

    def __call__(self):
        # type: () -> None
        # Call procedures to make PROCEDURE and PROCEDUREDETAIL metrics show up...
        # Built-in procedure.
        r = self._client.request('Hero.insert', parameters=[0, 'Bits'])
        assert r.status_code == 200
        # status == 1 is VoltDB's SUCCESS code in the JSON response.
        assert r.json()["status"] == 1
        # Custom procedure.
        r = self._client.request('LookUpHero', parameters=[0])
        assert r.status_code == 200
        data = r.json()
        assert data["status"] == 1
        # Round-trip check: the row inserted above must come back verbatim.
        rows = data["results"][0]["data"]
        assert rows == [[0, "Bits"]]
        # Create a snapshot to make SNAPSHOTSTATUS metrics appear.
        # See: https://docs.voltdb.com/UsingVoltDB/sysprocsave.php
        block_transactions = 0  # We don't really care, but this is required.
        r = self._client.request('@SnapshotSave', parameters=['/tmp/voltdb/backup/', 'heroes', block_transactions])
        assert r.status_code == 200
        assert r.json()["status"] == 1
| 1,253 |
1,602 | //===--- MisExpect.cpp - Check the use of llvm.expect with PGO data -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit warnings for potentially incorrect usage of the
// llvm.expect intrinsic. This utility extracts the threshold values from
// metadata associated with the instrumented Branch or Switch instruction. The
// threshold values are then used to determine if a warning should be emmited.
//
// MisExpect metadata is generated when llvm.expect intrinsics are lowered see
// LowerExpectIntrinsic.cpp
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/MisExpect.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include <cstdint>
#include <functional>
#include <numeric>
#define DEBUG_TYPE "misexpect"
using namespace llvm;
using namespace misexpect;
namespace llvm {
// Command line option to enable/disable the warning when profile data suggests
// a mismatch with the use of the llvm.expect intrinsic
static cl::opt<bool> PGOWarnMisExpect(
"pgo-warn-misexpect", cl::init(false), cl::Hidden,
cl::desc("Use this option to turn on/off "
"warnings about incorrect usage of llvm.expect intrinsics."));
} // namespace llvm
namespace {
Instruction *getOprndOrInst(Instruction *I) {
  assert(I != nullptr && "MisExpect target Instruction cannot be nullptr");
  // For branches, prefer reporting the location of the condition computation
  // when it is itself an instruction — that usually points closer to the
  // llvm.expect annotation in the source.
  if (auto *Branch = dyn_cast<BranchInst>(I)) {
    if (auto *CondInst = dyn_cast<Instruction>(Branch->getCondition()))
      return CondInst;
  }
  // TODO: Find a way to resolve condition location for switches. Using the
  // switch's condition often resolves to an earlier point in the program
  // (the computation of the condition) rather than the switch's own location,
  // which would degrade diagnostic output (e.g. the caret). Until that is
  // solved, switches — and branches whose condition is not an instruction —
  // fall through and report their own location.
  return I;
}
// Emit the misexpect diagnostics for instruction I: an optimization remark
// always, and a compiler warning additionally when -pgo-warn-misexpect is set.
// ProfCount is how often the annotated branch was taken; TotalCount is the
// total profiled executions.
void emitMisexpectDiagnostic(Instruction *I, LLVMContext &Ctx,
                             uint64_t ProfCount, uint64_t TotalCount) {
  double PercentageCorrect = (double)ProfCount / TotalCount;
  auto PerString =
      formatv("{0:P} ({1} / {2})", PercentageCorrect, ProfCount, TotalCount);
  auto RemStr = formatv(
      "Potential performance regression from use of the llvm.expect intrinsic: "
      "Annotation was correct on {0} of profiled executions.",
      PerString);
  Twine Msg(PerString);
  // Attach the diagnostic to the branch condition where possible for a more
  // useful source location.
  Instruction *Cond = getOprndOrInst(I);
  if (PGOWarnMisExpect)
    Ctx.diagnose(DiagnosticInfoMisExpect(Cond, Msg));
  OptimizationRemarkEmitter ORE(I->getParent()->getParent());
  ORE.emit(OptimizationRemark(DEBUG_TYPE, "misexpect", Cond) << RemStr.str());
}
} // namespace
namespace llvm {
namespace misexpect {
// Compare the profiled branch weights against the llvm.expect-derived
// thresholds stored in !misexpect metadata, and emit a diagnostic when the
// annotated target was taken less often than the scaled threshold.
// Metadata layout (from LowerExpectIntrinsic): operand 0 = "misexpect" tag,
// 1 = index of the likely target, 2 = likely weight, 3 = unlikely weight.
void verifyMisExpect(Instruction *I, const SmallVector<uint32_t, 4> &Weights,
                     LLVMContext &Ctx) {
  if (auto *MisExpectData = I->getMetadata(LLVMContext::MD_misexpect)) {
    auto *MisExpectDataName = dyn_cast<MDString>(MisExpectData->getOperand(0));
    if (MisExpectDataName &&
        MisExpectDataName->getString().equals("misexpect")) {
      LLVM_DEBUG(llvm::dbgs() << "------------------\n");
      LLVM_DEBUG(llvm::dbgs()
                 << "Function: " << I->getFunction()->getName() << "\n");
      LLVM_DEBUG(llvm::dbgs() << "Instruction: " << *I << ":\n");
      LLVM_DEBUG(for (int Idx = 0, Size = Weights.size(); Idx < Size; ++Idx) {
        llvm::dbgs() << "Weights[" << Idx << "] = " << Weights[Idx] << "\n";
      });
      // extract values from misexpect metadata
      const auto *IndexCint =
          mdconst::dyn_extract<ConstantInt>(MisExpectData->getOperand(1));
      const auto *LikelyCInt =
          mdconst::dyn_extract<ConstantInt>(MisExpectData->getOperand(2));
      const auto *UnlikelyCInt =
          mdconst::dyn_extract<ConstantInt>(MisExpectData->getOperand(3));
      // Malformed metadata: silently skip rather than crash.
      if (!IndexCint || !LikelyCInt || !UnlikelyCInt)
        return;
      const uint64_t Index = IndexCint->getZExtValue();
      const uint64_t LikelyBranchWeight = LikelyCInt->getZExtValue();
      const uint64_t UnlikelyBranchWeight = UnlikelyCInt->getZExtValue();
      const uint64_t ProfileCount = Weights[Index];
      const uint64_t CaseTotal = std::accumulate(
          Weights.begin(), Weights.end(), (uint64_t)0, std::plus<uint64_t>());
      const uint64_t NumUnlikelyTargets = Weights.size() - 1;
      // Scale the expect-derived likely/unlikely ratio onto the actual
      // profiled total to obtain the comparison threshold.
      const uint64_t TotalBranchWeight =
          LikelyBranchWeight + (UnlikelyBranchWeight * NumUnlikelyTargets);
      const llvm::BranchProbability LikelyThreshold(LikelyBranchWeight,
                                                    TotalBranchWeight);
      uint64_t ScaledThreshold = LikelyThreshold.scale(CaseTotal);
      LLVM_DEBUG(llvm::dbgs()
                 << "Unlikely Targets: " << NumUnlikelyTargets << ":\n");
      LLVM_DEBUG(llvm::dbgs() << "Profile Count: " << ProfileCount << ":\n");
      LLVM_DEBUG(llvm::dbgs()
                 << "Scaled Threshold: " << ScaledThreshold << ":\n");
      LLVM_DEBUG(llvm::dbgs() << "------------------\n");
      if (ProfileCount < ScaledThreshold)
        emitMisexpectDiagnostic(I, Ctx, ProfileCount, CaseTotal);
    }
  }
}
// Entry point for frontend (clang) instrumentation-based profiling: reads the
// !prof branch_weights attached to I and, when present and well-formed, runs
// the misexpect verification against them.
//
// Fixes: operand 0 was retrieved with cast<MDString>, which asserts (or is UB
// in release builds) for !prof payloads that are not strings; use dyn_cast.
// Also, mdconst::dyn_extract may return null for malformed weight operands,
// which was dereferenced unchecked — now bails out instead.
void checkFrontendInstrumentation(Instruction &I) {
  if (auto *MD = I.getMetadata(LLVMContext::MD_prof)) {
    unsigned NOps = MD->getNumOperands();
    // Only emit misexpect diagnostics if at least 2 branch weights are present.
    // Less than 2 branch weights means that the profiling metadata is:
    // 1) incorrect/corrupted
    // 2) not branch weight metadata
    // 3) completely deterministic
    // In these cases we should not emit any diagnostic related to misexpect.
    if (NOps < 3)
      return;
    // Operand 0 is a string tag "branch_weights"
    if (MDString *Tag = dyn_cast<MDString>(MD->getOperand(0))) {
      if (Tag->getString().equals("branch_weights")) {
        SmallVector<uint32_t, 4> RealWeights(NOps - 1);
        for (unsigned i = 1; i < NOps; i++) {
          ConstantInt *Value =
              mdconst::dyn_extract<ConstantInt>(MD->getOperand(i));
          if (!Value)
            return; // malformed weight operand — skip verification
          RealWeights[i - 1] = Value->getZExtValue();
        }
        verifyMisExpect(&I, RealWeights, I.getContext());
      }
    }
  }
}
} // namespace misexpect
} // namespace llvm
#undef DEBUG_TYPE
| 2,658 |
14,668 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_CLIENT_HINTS_BROWSER_CLIENT_HINTS_H_
#define COMPONENTS_CLIENT_HINTS_BROWSER_CLIENT_HINTS_H_
#include <memory>
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "components/content_settings/core/browser/cookie_settings.h"
#include "components/keyed_service/core/keyed_service.h"
#include "content/public/browser/client_hints_controller_delegate.h"
class GURL;
class HostContentSettingsMap;
class PrefService;
namespace blink {
struct UserAgentMetadata;
class EnabledClientHints;
} // namespace blink
namespace client_hints {
// KeyedService that implements content::ClientHintsControllerDelegate:
// supplies //content with client-hint persistence, JavaScript/cookie policy
// checks, and user-agent metadata for a single BrowserContext.
class ClientHints : public KeyedService,
                    public content::ClientHintsControllerDelegate {
 public:
  ClientHints(content::BrowserContext* context,
              network::NetworkQualityTracker* network_quality_tracker,
              HostContentSettingsMap* settings_map,
              scoped_refptr<content_settings::CookieSettings> cookie_settings,
              PrefService* pref_service);

  ClientHints(const ClientHints&) = delete;
  ClientHints& operator=(const ClientHints&) = delete;

  ~ClientHints() override;

  // content::ClientHintsControllerDelegate:
  network::NetworkQualityTracker* GetNetworkQualityTracker() override;

  void GetAllowedClientHintsFromSource(
      const GURL& url,
      blink::EnabledClientHints* client_hints) override;

  bool IsJavaScriptAllowed(const GURL& url) override;

  bool AreThirdPartyCookiesBlocked(const GURL& url) override;

  blink::UserAgentMetadata GetUserAgentMetadata() override;

  void PersistClientHints(const url::Origin& primary_origin,
                          const std::vector<network::mojom::WebClientHintsType>&
                              client_hints) override;

  void SetAdditionalClientHints(
      const std::vector<network::mojom::WebClientHintsType>&) override;

  void ClearAdditionalClientHints() override;

 private:
  // Non-owning; all outlive this KeyedService per the factory dependencies.
  raw_ptr<content::BrowserContext> context_ = nullptr;
  raw_ptr<network::NetworkQualityTracker> network_quality_tracker_ = nullptr;
  raw_ptr<HostContentSettingsMap> settings_map_ = nullptr;
  scoped_refptr<content_settings::CookieSettings> cookie_settings_;
  // Hints added via SetAdditionalClientHints, merged into lookups until
  // ClearAdditionalClientHints is called.
  std::vector<network::mojom::WebClientHintsType> additional_hints_;
  raw_ptr<PrefService> pref_service_;
};
} // namespace client_hints
#endif // COMPONENTS_CLIENT_HINTS_BROWSER_CLIENT_HINTS_H_
| 887 |
809 | <reponame>nikitavlaev/embox<filename>third-party/bsp/multiclet/libs/source/wdt/wdt_set_cycle.c<gh_stars>100-1000
#include "wdt.h"
/*
 * Program the watchdog timer's counter and (re)arm it.
 *
 * NOTE(review): the `period` argument is never used -- the counter is
 * always loaded with the fixed value 0x000FFFFF. Presumably the intent
 * was WDT_SET_CNT(period); confirm against the MultiClet WDT register
 * documentation before changing behavior.
 */
void wdt_set_cycle(int period)
{
	WDT_OFF;                 /* stop the watchdog while reconfiguring */
	WDT_SET_CNT_EN;          /* enable writes to the counter register */
	WDT_SET_CNT(0x000FFFFF); /* load the (hard-coded) counter value */
	WDT_ON;                  /* restart the watchdog */
}
| 119 |
1,779 | //
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ZETASQL_LOCAL_SERVICE_STATE_H_
#define ZETASQL_LOCAL_SERVICE_STATE_H_
#include <stddef.h>
#include <cstdint>
#include <map>
#include <memory>
#include <type_traits>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "zetasql/base/map_util.h"
namespace zetasql {
namespace local_service {
class GenericState;
// A thread-safe pool of saved state objects, each addressed by an int64_t
// id, that can be shared by multiple statements. The state class T must
// extend GenericState and must itself be thread safe.
template<class T>
class SharedStatePool {
 public:
  SharedStatePool() : next_id_(0) {}
  SharedStatePool(const SharedStatePool&) = delete;
  SharedStatePool& operator=(const SharedStatePool&) = delete;

  // Adds <state> to the pool, which takes ownership, and returns the id
  // assigned to it. Returns -1 when <state> is null or was already
  // registered somewhere.
  int64_t Register(std::shared_ptr<T> state) {
    if (state == nullptr) {
      return -1;
    }
    absl::MutexLock lock(&mutex_);
    const int64_t assigned_id = next_id_++;
    if (!state->SetId(assigned_id)) {
      return -1;
    }
    saved_states_[assigned_id] = state;
    return assigned_id;
  }

  // Raw-pointer overload of Register(). Deliberately not implemented in
  // terms of the shared_ptr overload: wrapping <state> in a shared_ptr
  // before the SetId() check would deallocate an already-registered object
  // on the error path instead of just returning -1.
  int64_t Register(T* state) {
    if (state == nullptr) {
      return -1;
    }
    absl::MutexLock lock(&mutex_);
    const int64_t assigned_id = next_id_++;
    if (!state->SetId(assigned_id)) {
      return -1;
    }
    saved_states_[assigned_id].reset(state);
    return assigned_id;
  }

  // Returns true when a state with <id> is currently registered.
  bool Has(int64_t id) const {
    absl::MutexLock lock(&mutex_);
    return saved_states_.find(id) != saved_states_.end();
  }

  // Returns the state with <id>, or nullptr when absent. Ownership is
  // shared by the pool and every caller still holding the returned pointer.
  std::shared_ptr<T> Get(int64_t id) {
    absl::MutexLock lock(&mutex_);
    auto it = saved_states_.find(id);
    return it == saved_states_.end() ? nullptr : it->second;
  }

  // Drops the pool's reference to the state with <id>; the object is
  // destroyed once the last outstanding reference is released. Returns
  // false when no such state exists.
  bool Delete(int64_t id) {
    absl::MutexLock lock(&mutex_);
    return saved_states_.erase(id) > 0;
  }

  size_t NumSavedStates() {
    absl::MutexLock lock(&mutex_);
    return saved_states_.size();
  }

 private:
  mutable absl::Mutex mutex_;
  int64_t next_id_ ABSL_GUARDED_BY(mutex_);
  std::map<int64_t, std::shared_ptr<T>> saved_states_ ABSL_GUARDED_BY(mutex_);
  static_assert(
      std::is_base_of<GenericState, T>::value,
      "SharedStatePool only works with subclass of GenericState");
};
// Base class of saved states with an int64_t id. The id starts out as -1
// ("not registered") and can be assigned exactly once, by the
// SharedStatePool that takes ownership of the object.
class GenericState {
 public:
  GenericState() = default;
  virtual ~GenericState() {}
  // Returns the pool-assigned id, or -1 when not registered yet.
  int64_t GetId() const { return id_; }
  // True once a SharedStatePool has assigned an id via SetId().
  bool IsRegistered() { return id_ != -1; }
 private:
  int64_t id_ = -1;
  // Should only be called by SharedStatePool. Assigns the id exactly once;
  // returns false when an id was already set (i.e. double registration).
  bool SetId(int64_t id) {
    if (id_ == -1) {
      id_ = id;
      return true;
    }
    return false;
  }
  template<class T> friend class SharedStatePool;
  GenericState(const GenericState&) = delete;
  GenericState& operator=(const GenericState&) = delete;
};
} // namespace local_service
} // namespace zetasql
#endif // ZETASQL_LOCAL_SERVICE_STATE_H_
| 1,526 |
361 | #include "Graph/FlowGraphUtils.h"
#include "Asset/FlowAssetEditor.h"
#include "Graph/FlowGraph.h"
#include "FlowAsset.h"
#include "Toolkits/ToolkitManager.h"
// Returns the open FFlowAssetEditor for the flow asset that owns
// ObjectToFocusOn (expected to be a UFlowGraph), or an invalid pointer when
// no editor is open or the object is not a flow graph.
TSharedPtr<FFlowAssetEditor> FFlowGraphUtils::GetFlowAssetEditor(const UObject* ObjectToFocusOn)
{
	check(ObjectToFocusOn);

	TSharedPtr<FFlowAssetEditor> FlowAssetEditor;

	// Fix: Cast<> returns nullptr when ObjectToFocusOn is not a UFlowGraph;
	// the previous code dereferenced the cast result unconditionally.
	if (const UFlowGraph* FlowGraph = Cast<const UFlowGraph>(ObjectToFocusOn))
	{
		if (UFlowAsset* FlowAsset = FlowGraph->GetFlowAsset())
		{
			const TSharedPtr<IToolkit> FoundAssetEditor = FToolkitManager::Get().FindEditorForAsset(FlowAsset);
			if (FoundAssetEditor.IsValid())
			{
				FlowAssetEditor = StaticCastSharedPtr<FFlowAssetEditor>(FoundAssetEditor);
			}
		}
	}
	return FlowAssetEditor;
}
| 238 |
3,670 | <gh_stars>1000+
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Module for log_prob transformation."""
from jax import core as jax_core
from jax import random
from jax import tree_util
from oryx.core import trace_util
from oryx.core.interpreters import inverse
from oryx.core.interpreters import propagate
# Public names exported via `from ... import *`.
__all__ = [
    'LogProbRules',
    'log_prob'
]
# Convenience aliases into jax/oryx internals used throughout this module.
safe_map = jax_core.safe_map
InverseAndILDJ = inverse.core.InverseAndILDJ
ildj_registry = inverse.core.ildj_registry
class LogProbRules(dict):
  """Default dictionary for log_prob propagation rules.

  By default, the rules for LogProb propagation are just the InverseAndILDJ
  rules, but instead of raising a NotImplementedError, LogProb will silently
  fail. This default dict-like class implements this behavior, but also allows
  primitives to register custom propagation rules.
  """
  def __missing__(self, prim):
    # Build, cache, and return a fallback rule the first time `prim` is
    # looked up; subsequent lookups hit the cached entry directly.
    self[prim] = rule = make_default_rule(prim)
    return rule
log_prob_rules = LogProbRules()
# The log_prob_registry is used to compute log_prob values from samples after
# propagation is done.
log_prob_registry = set()
def log_prob(f):
  """LogProb function transformation.

  Returns a function that, given an output `sample` of `f` (plus the
  remaining positional/keyword arguments of `f`), computes the log
  probability of that sample by propagating InverseAndILDJ cells through
  `f`'s jaxpr (see `log_prob_jaxpr`).
  """
  def wrapped(sample, *args, **kwargs):
    """Function wrapper that takes in log_prob arguments."""
    # Trace the function using a random seed; the seed value is irrelevant,
    # it only provides an abstract input for staging out the jaxpr.
    dummy_seed = random.PRNGKey(0)
    jaxpr, _ = trace_util.stage(f, dynamic=False)(dummy_seed, *args, **kwargs)
    flat_outargs, _ = tree_util.tree_flatten(sample)
    flat_inargs, _ = tree_util.tree_flatten(args)
    constcells = [InverseAndILDJ.new(val) for val in jaxpr.literals]
    # The seed input is marked 'unknown'; every other input and the observed
    # outputs are known cells that propagation works backwards/forwards from.
    flat_incells = [
        InverseAndILDJ.unknown(trace_util.get_shaped_aval(dummy_seed))
    ] + [InverseAndILDJ.new(val) for val in flat_inargs]
    flat_outcells = [InverseAndILDJ.new(a) for a in flat_outargs]
    return log_prob_jaxpr(jaxpr.jaxpr, constcells, flat_incells, flat_outcells)
  return wrapped
failed_log_prob = object()  # sentinel for being unable to compute a log_prob
def log_prob_jaxpr(jaxpr, constcells, flat_incells, flat_outcells):
  """Runs log_prob propagation on a Jaxpr.

  Args:
    jaxpr: the jaxpr to propagate cells through.
    constcells: `InverseAndILDJ` cells for the jaxpr's literals.
    flat_incells: cells for the flattened inputs (the seed cell is unknown).
    flat_outcells: cells for the flattened observed outputs.

  Returns:
    The accumulated log probability.

  Raises:
    ValueError: if a log_prob could not be computed for the function.
  """
  def reducer(env, eqn, curr_log_prob, new_log_prob):
    # Once any equation fails, the whole computation stays failed.
    if curr_log_prob is failed_log_prob or new_log_prob is failed_log_prob:
      # If `curr_log_prob` is `None` that means we were unable to compute
      # a log_prob elsewhere, so the propagate failed.
      return failed_log_prob
    if eqn.primitive in log_prob_registry and new_log_prob is None:
      # We are unable to compute a log_prob for this primitive.
      return failed_log_prob
    if new_log_prob is not None:
      # Add the change-of-variables correction: the sum of ILDJ terms over
      # the equation's known output cells.
      cells = [env.read(var) for var in eqn.outvars]
      ildjs = sum([cell.ildj.sum() for cell in cells if cell.top()])
      return curr_log_prob + new_log_prob + ildjs
    return curr_log_prob
  # Re-use the InverseAndILDJ propagation but silently fail instead of
  # erroring when we hit a primitive we can't invert. We accumulate the log
  # probability values using the propagater state.
  _, final_log_prob = propagate.propagate(
      InverseAndILDJ,
      log_prob_rules,
      jaxpr,
      constcells,
      flat_incells,
      flat_outcells,
      reducer=reducer,
      initial_state=0.)
  if final_log_prob is failed_log_prob:
    raise ValueError('Cannot compute log_prob of function.')
  return final_log_prob
def make_default_rule(prim):
  """Builds the fallback log_prob rule for a primitive.

  The fallback simply delegates to the registered InverseAndILDJ rule and
  reports "no log_prob contribution" (None) when no inverse exists instead
  of raising.
  """
  def rule(incells, outcells, **params):
    """Delegates to the inverse rule, silently failing when unimplemented."""
    try:
      result = ildj_registry[prim](incells, outcells, **params)
    except NotImplementedError:
      result = (incells, outcells, None)
    return result
  return rule
| 1,510 |
2,039 | <gh_stars>1000+
package org.nd4j.linalg.api.ops.impl.transforms;
import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.linalg.api.ops.DynamicCustomOp;
import java.util.Arrays;
import java.util.List;
/**
 * Dynamic custom op named {@code matrix_diag_part}; corresponds to
 * TensorFlow's {@code MatrixDiagPart} op (see {@link #tensorflowName()}).
 */
public class MatrixDiagPart extends DynamicCustomOp {
    public MatrixDiagPart() {
        // Intentionally empty default constructor.
    }
    @Override
    public String opName() {
        return "matrix_diag_part";
    }
    @Override
    public String tensorflowName() {
        return "MatrixDiagPart";
    }
    @Override
    public List<SDVariable> doDiff(List<SDVariable> i_v) {
        // Gradient has not been implemented for this op yet.
        throw new UnsupportedOperationException("Not implemented yet");
    }
}
| 258 |
425 | <gh_stars>100-1000
{"Sections":[{"Errors":[],"SectionType":"entitySection","Id":"entitySection_test","Body":"","Name":"test","Type":"/hrf-[0-9]{6}","SynonymsOrPhraseList":[],"Range":{"Start":{"Line":1,"Character":0},"End":{"Line":1,"Character":19}}},{"Errors":[],"SectionType":"simpleIntentSection","Id":"simpleIntentSection_test","Body":"- this is a {test=one} utterance","UtteranceAndEntitiesMap":[{"utterance":"this is a one utterance","entities":[{"type":"entities","entity":"test","role":"","startPos":10,"endPos":12}],"errorMsgs":[],"contextText":"- this is a {test=one} utterance","range":{"Start":{"Line":3,"Character":0},"End":{"Line":3,"Character":32}}}],"Entities":[],"Name":"test","IntentNameLine":"# test","Range":{"Start":{"Line":2,"Character":0},"End":{"Line":3,"Character":32}}}],"Content":"$test:/hrf-[0-9]{6}\n# test\n- this is a {test=one} utterance","Errors":[]} | 277 |
3,372 | /*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codecommit.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Information about conflicts in a merge operation.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/Conflict" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class Conflict implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* Metadata about a conflict in a merge operation.
* </p>
*/
private ConflictMetadata conflictMetadata;
/**
* <p>
* A list of hunks that contain the differences between files or lines causing the conflict.
* </p>
*/
private java.util.List<MergeHunk> mergeHunks;
/**
* <p>
* Metadata about a conflict in a merge operation.
* </p>
*
* @param conflictMetadata
* Metadata about a conflict in a merge operation.
*/
public void setConflictMetadata(ConflictMetadata conflictMetadata) {
this.conflictMetadata = conflictMetadata;
}
/**
* <p>
* Metadata about a conflict in a merge operation.
* </p>
*
* @return Metadata about a conflict in a merge operation.
*/
public ConflictMetadata getConflictMetadata() {
return this.conflictMetadata;
}
/**
* <p>
* Metadata about a conflict in a merge operation.
* </p>
*
* @param conflictMetadata
* Metadata about a conflict in a merge operation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Conflict withConflictMetadata(ConflictMetadata conflictMetadata) {
setConflictMetadata(conflictMetadata);
return this;
}
/**
* <p>
* A list of hunks that contain the differences between files or lines causing the conflict.
* </p>
*
* @return A list of hunks that contain the differences between files or lines causing the conflict.
*/
public java.util.List<MergeHunk> getMergeHunks() {
return mergeHunks;
}
/**
* <p>
* A list of hunks that contain the differences between files or lines causing the conflict.
* </p>
*
* @param mergeHunks
* A list of hunks that contain the differences between files or lines causing the conflict.
*/
public void setMergeHunks(java.util.Collection<MergeHunk> mergeHunks) {
if (mergeHunks == null) {
this.mergeHunks = null;
return;
}
this.mergeHunks = new java.util.ArrayList<MergeHunk>(mergeHunks);
}
/**
* <p>
* A list of hunks that contain the differences between files or lines causing the conflict.
* </p>
* <p>
* <b>NOTE:</b> This method appends the values to the existing list (if any). Use
* {@link #setMergeHunks(java.util.Collection)} or {@link #withMergeHunks(java.util.Collection)} if you want to
* override the existing values.
* </p>
*
* @param mergeHunks
* A list of hunks that contain the differences between files or lines causing the conflict.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Conflict withMergeHunks(MergeHunk... mergeHunks) {
if (this.mergeHunks == null) {
setMergeHunks(new java.util.ArrayList<MergeHunk>(mergeHunks.length));
}
for (MergeHunk ele : mergeHunks) {
this.mergeHunks.add(ele);
}
return this;
}
/**
* <p>
* A list of hunks that contain the differences between files or lines causing the conflict.
* </p>
*
* @param mergeHunks
* A list of hunks that contain the differences between files or lines causing the conflict.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Conflict withMergeHunks(java.util.Collection<MergeHunk> mergeHunks) {
setMergeHunks(mergeHunks);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getConflictMetadata() != null)
sb.append("ConflictMetadata: ").append(getConflictMetadata()).append(",");
if (getMergeHunks() != null)
sb.append("MergeHunks: ").append(getMergeHunks());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof Conflict == false)
return false;
Conflict other = (Conflict) obj;
if (other.getConflictMetadata() == null ^ this.getConflictMetadata() == null)
return false;
if (other.getConflictMetadata() != null && other.getConflictMetadata().equals(this.getConflictMetadata()) == false)
return false;
if (other.getMergeHunks() == null ^ this.getMergeHunks() == null)
return false;
if (other.getMergeHunks() != null && other.getMergeHunks().equals(this.getMergeHunks()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getConflictMetadata() == null) ? 0 : getConflictMetadata().hashCode());
hashCode = prime * hashCode + ((getMergeHunks() == null) ? 0 : getMergeHunks().hashCode());
return hashCode;
}
@Override
public Conflict clone() {
try {
return (Conflict) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.codecommit.model.transform.ConflictMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
| 2,784 |
2,561 |
# coding: utf-8
# In[1]:
"""Notebook-derived script: filters FAO production / price / land-use data
down to Malaysian target crops for 2012-2018.

Side effects: changes the working directory to H:/ and reads four CSV
files from there.
"""
import os
os.chdir('H:/')

import pandas as pd

# Raw FAO datasets (the source files are latin-1 encoded).
prod = pd.read_csv('Production_Crops_E_All_Data_(Normalized).csv',
                   encoding='latin-1')
prix = pd.read_csv('Prices_E_All_Data_(Normalized).csv',
                   encoding='latin-1')
land = pd.read_csv('Inputs_LandUse_E_All_Data_(Normalized).csv',
                   encoding='latin-1')

# Analysis window; range() is half-open, so this covers 2012..2018.
# (The original `global` statement was a no-op at module scope.)
beginyear = 2012
endyear = 2019

# Item-code mapping that attaches class/subclass metadata to each crop.
mapping = pd.read_csv('mapping.csv')

# Select Malaysia, 2012-2018.
# Fix: the original chained boolean indexers (df[m1][m2]...) computed the
# later masks on the *full* frame and applied them to the already-filtered
# frame, relying on deprecated mask reindexing; combining all conditions
# with & is the supported, warning-free form and selects the same rows.
malay_land = land[land['Year'].isin(range(beginyear, endyear))
                  & (land['Area'] == 'Malaysia')
                  & land['Element'].isin(['Area'])
                  & land['Item'].isin(['Cropland'])]
malay_prod = prod[prod['Year'].isin(range(beginyear, endyear))
                  & (prod['Area'] == 'Malaysia')
                  & prod['Element'].isin(['Area harvested', 'Production'])]
malay_prod = malay_prod.merge(mapping, on=['Item Code', 'Item'], how='left')

# Drop bookkeeping columns that are not needed downstream.
malay_prod = malay_prod.drop(columns=[
    'Area Code', 'Element Code', 'Year Code',
    'Flag', 'COMMODITY', 'Item Code',
    'subclass code', 'class code',
    'DEFINITIONS, COVERAGE, REMARKS',
])

# Keep only crops that have BOTH harvested-area and production data.
a = set(malay_prod['Item'][malay_prod['Element'] == 'Area harvested'])
b = set(malay_prod['Item'][malay_prod['Element'] == 'Production'])
target_crops = a.intersection(b)

# Exclude aggregates and crops with land usage < 1% that have no price data.
exclude = ['Areca nuts',
           'Bastfibres, other',
           'Cashew nuts, with shell',
           'Cereals, Total',
           'Chillies and peppers, dry',
           'Citrus Fruit, Total',
           'Cloves',
           'Coarse Grain, Total',
           'Coffee, green',
           'Coir',
           'Fibre Crops Primary',
           'Fruit Primary',
           'Fruit, citrus nes',
           'Fruit, fresh nes',
           'Fruit, tropical fresh nes',
           'Groundnuts, with shell',
           'Manila fibre (abaca)',
           'Nutmeg, mace and cardamoms',
           'Oilcrops',
           'Oilcrops, Cake Equivalent',
           'Oilcrops, Oil Equivalent',
           'Roots and Tubers, Total',
           'Roots and tubers nes',
           'Soybeans',
           'Spices nes',
           'Tea',
           'Treenuts, Total',
           'Vegetables Primary',
           'Vegetables, fresh nes']

# Finalize the target crop list.
targets = [i for i in target_crops if i not in exclude]

# Keep only the target crops.
malay_crops = malay_prod[malay_prod['Item'].isin(targets)]

# Rows without class metadata are subtotal/aggregate items.
total = malay_prod[malay_prod['class'].isnull()]

# Compare sum of area by crop with sum of area by subclass.
sss = total[total['Element'] == 'Area harvested'].groupby(['Item', 'Year']).sum()
ttt = malay_crops[malay_crops['Element'] == 'Area harvested'].groupby(['class', 'Year']).sum()

# Notebook-style inspection expressions; they have no visible effect when run
# as a plain script but are kept for parity with the original notebook cells.
malay_crops[malay_crops['Element'] == 'Area harvested'].groupby(['Year']).sum()
malay_land
| 1,120 |
603 | <filename>sympy/matrices/expressions/matmul.py
from sympy import Number
from sympy.core import Mul, Basic, sympify, S
from sympy.core.mul import mul
from sympy.functions import adjoint
from sympy.strategies import (rm_id, unpack, typed, flatten, exhaust,
do_one, new)
from sympy.matrices.common import ShapeError, NonInvertibleMatrixError
from sympy.matrices.matrices import MatrixBase
from .inverse import Inverse
from .matexpr import MatrixExpr
from .matpow import MatPow
from .transpose import transpose
from .permutation import PermutationMatrix
from .special import ZeroMatrix, Identity, GenericIdentity, OneMatrix
# XXX: MatMul should perhaps not subclass directly from Mul
class MatMul(MatrixExpr, Mul):
"""
A product of matrix expressions
Examples
========
>>> from sympy import MatMul, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 4)
>>> B = MatrixSymbol('B', 4, 3)
>>> C = MatrixSymbol('C', 3, 6)
>>> MatMul(A, B, C)
A*B*C
"""
is_MatMul = True
identity = GenericIdentity()
def __new__(cls, *args, evaluate=False, check=True, _sympify=True):
if not args:
return cls.identity
# This must be removed aggressively in the constructor to avoid
# TypeErrors from GenericIdentity().shape
args = list(filter(lambda i: cls.identity != i, args))
if _sympify:
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
factor, matrices = obj.as_coeff_matrices()
if check:
validate(*matrices)
if not matrices:
# Should it be
#
# return Basic.__neq__(cls, factor, GenericIdentity()) ?
return factor
if evaluate:
return canonicalize(obj)
return obj
@property
def shape(self):
matrices = [arg for arg in self.args if arg.is_Matrix]
return (matrices[0].rows, matrices[-1].cols)
def _entry(self, i, j, expand=True, **kwargs):
from sympy import Dummy, Sum, Mul, ImmutableMatrix, Integer
coeff, matrices = self.as_coeff_matrices()
if len(matrices) == 1: # situation like 2*X, matmul is just X
return coeff * matrices[0][i, j]
indices = [None]*(len(matrices) + 1)
ind_ranges = [None]*(len(matrices) - 1)
indices[0] = i
indices[-1] = j
def f():
counter = 1
while True:
yield Dummy("i_%i" % counter)
counter += 1
dummy_generator = kwargs.get("dummy_generator", f())
for i in range(1, len(matrices)):
indices[i] = next(dummy_generator)
for i, arg in enumerate(matrices[:-1]):
ind_ranges[i] = arg.shape[1] - 1
matrices = [arg._entry(indices[i], indices[i+1], dummy_generator=dummy_generator) for i, arg in enumerate(matrices)]
expr_in_sum = Mul.fromiter(matrices)
if any(v.has(ImmutableMatrix) for v in matrices):
expand = True
result = coeff*Sum(
expr_in_sum,
*zip(indices[1:-1], [0]*len(ind_ranges), ind_ranges)
)
# Don't waste time in result.doit() if the sum bounds are symbolic
if not any(isinstance(v, (Integer, int)) for v in ind_ranges):
expand = False
return result.doit() if expand else result
def as_coeff_matrices(self):
scalars = [x for x in self.args if not x.is_Matrix]
matrices = [x for x in self.args if x.is_Matrix]
coeff = Mul(*scalars)
if coeff.is_commutative is False:
raise NotImplementedError("noncommutative scalars in MatMul are not supported.")
return coeff, matrices
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, MatMul(*matrices)
def _eval_transpose(self):
"""Transposition of matrix multiplication.
Notes
=====
The following rules are applied.
Transposition for matrix multiplied with another matrix:
`\\left(A B\\right)^{T} = B^{T} A^{T}`
Transposition for matrix multiplied with scalar:
`\\left(c A\\right)^{T} = c A^{T}`
References
==========
.. [1] https://en.wikipedia.org/wiki/Transpose
"""
coeff, matrices = self.as_coeff_matrices()
return MatMul(
coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()
def _eval_adjoint(self):
return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()
def _eval_trace(self):
factor, mmul = self.as_coeff_mmul()
if factor != 1:
from .trace import trace
return factor * trace(mmul.doit())
else:
raise NotImplementedError("Can't simplify any further")
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import Determinant
factor, matrices = self.as_coeff_matrices()
square_matrices = only_squares(*matrices)
return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))
def _eval_inverse(self):
try:
return MatMul(*[
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]]).doit()
except ShapeError:
return Inverse(self)
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
# treat scalar*MatrixSymbol or scalar*MatPow separately
expr = canonicalize(MatMul(*args))
return expr
# Needed for partial compatibility with Mul
def args_cnc(self, **kwargs):
coeff_c = [x for x in self.args if x.is_commutative]
coeff_nc = [x for x in self.args if not x.is_commutative]
return [coeff_c, coeff_nc]
def _eval_derivative_matrix_lines(self, x):
from .transpose import Transpose
with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
lines = []
for ind in with_x_ind:
left_args = self.args[:ind]
right_args = self.args[ind+1:]
if right_args:
right_mat = MatMul.fromiter(right_args)
else:
right_mat = Identity(self.shape[1])
if left_args:
left_rev = MatMul.fromiter([Transpose(i).doit() if i.is_Matrix else i for i in reversed(left_args)])
else:
left_rev = Identity(self.shape[0])
d = self.args[ind]._eval_derivative_matrix_lines(x)
for i in d:
i.append_first(left_rev)
i.append_second(right_mat)
lines.append(i)
return lines
mul.register_handlerclass((Mul, MatMul), MatMul)
def validate(*matrices):
    """Checks that consecutive factors of a MatMul have compatible shapes."""
    for left, right in zip(matrices[:-1], matrices[1:]):
        if left.cols != right.rows:
            raise ShapeError("Matrices %s and %s are not aligned"%(left, right))
# Rules
def newmul(*args):
    # Rebuild a MatMul without re-running canonicalization, dropping a
    # leading scalar coefficient of 1 since it carries no information.
    if args[0] == 1:
        args = args[1:]
    return new(MatMul, *args)
def any_zeros(mul):
    """Collapse ``mul`` to an appropriately shaped ZeroMatrix when any of
    its factors is zero (a scalar zero or a ZeroMatrix); otherwise return
    ``mul`` unchanged.
    """
    # Generator expression instead of a list: avoids materializing all
    # results and short-circuits on the first zero factor.
    if any(arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
           for arg in mul.args):
        # The product's shape is rows of the first matrix by cols of the last.
        matrices = [arg for arg in mul.args if arg.is_Matrix]
        return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
    return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
[1 1] [1 2]
A*[ ]*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[4 6]
A*[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, (MatrixBase, Number)) and isinstance(last, (MatrixBase, Number)):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
This is necesssary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
========
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
    # Canonical form: collect all scalar coefficients into a single leading
    # factor, e.g. A*2*B -> 2*A*B. Returns `mul` unchanged when the
    # combined coefficient is 1.
    factor, matrices = mul.as_coeff_matrices()
    if factor != 1:
        return newmul(factor, *matrices)
    return mul
def combine_powers(mul):
"""Combine consecutive powers with the same base into one
e.g. A*A**2 -> A**3
This also cancels out the possible matrix inverses using the
knowledgebase of ``Inverse``.
e.g. Y * X * X.I -> Y
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for B in args[1:]:
A = new_args[-1]
if A.is_square == False or B.is_square == False:
new_args.append(B)
continue
if isinstance(A, MatPow):
A_base, A_exp = A.args
else:
A_base, A_exp = A, S.One
if isinstance(B, MatPow):
B_base, B_exp = B.args
else:
B_base, B_exp = B, S.One
if A_base == B_base:
new_exp = A_exp + B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
elif not isinstance(B_base, MatrixBase):
try:
B_base_inv = B_base.inverse()
except NonInvertibleMatrixError:
B_base_inv = None
if B_base_inv is not None and A_base == B_base_inv:
new_exp = A_exp - B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
new_args.append(B)
return newmul(factor, *new_args)
def combine_permutations(mul):
"""Refine products of permutation matrices as the products of cycles.
"""
args = mul.args
l = len(args)
if l < 2:
return mul
result = [args[0]]
for i in range(1, l):
A = result[-1]
B = args[i]
if isinstance(A, PermutationMatrix) and \
isinstance(B, PermutationMatrix):
cycle_1 = A.args[0]
cycle_2 = B.args[0]
result[-1] = PermutationMatrix(cycle_1 * cycle_2)
else:
result.append(B)
return MatMul(*result)
def combine_one_matrices(mul):
"""
Combine products of OneMatrix
e.g. OneMatrix(2, 3) * OneMatrix(3, 4) -> 3 * OneMatrix(2, 4)
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for B in args[1:]:
A = new_args[-1]
if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):
new_args.append(B)
continue
new_args.pop()
new_args.append(OneMatrix(A.shape[0], B.shape[1]))
factor *= A.shape[1]
return newmul(factor, *new_args)
def distribute_monom(mul):
"""
Simplify MatMul expressions but distributing
rational term to MatMul.
e.g. 2*(A+B) -> 2*A + 2*B
"""
args = mul.args
if len(args) == 2:
from .matadd import MatAdd
if args[0].is_MatAdd and args[1].is_Rational:
return MatAdd(*[MatMul(mat, args[1]).doit() for mat in args[0].args])
if args[1].is_MatAdd and args[0].is_Rational:
return MatAdd(*[MatMul(args[0], mat).doit() for mat in args[1].args])
return mul
rules = (
distribute_monom, any_zeros, remove_ids, combine_one_matrices, combine_powers, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten, combine_permutations)
canonicalize = exhaust(typed({MatMul: do_one(*rules)}))
def only_squares(*matrices):
    """factor matrices only if they are square

    Splits the (shape-compatible) chain of matrices into maximal square
    sub-products: whenever the running product closes back to a square
    shape, it is emitted as a single MatMul factor.
    """
    if matrices[0].rows != matrices[-1].cols:
        raise RuntimeError("Invalid matrices being multiplied")
    out = []
    start = 0
    for i, M in enumerate(matrices):
        # The sub-product matrices[start..i] is square exactly when its
        # column count matches the row count it started from.
        if M.cols == matrices[start].rows:
            out.append(MatMul(*matrices[start:i+1]).doit())
            start = i+1
    return out
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_MatMul(expr, assumptions):
    """
    >>> from sympy import MatrixSymbol, Q, assuming, refine
    >>> X = MatrixSymbol('X', 2, 2)
    >>> expr = X * X.T
    >>> print(expr)
    X*X.T
    >>> with assuming(Q.orthogonal(X)):
    ...     print(refine(expr))
    I
    """
    newargs = []
    exprargs = []

    # Separate scalar factors from matrix factors; only matrix factors can
    # take part in the orthogonal/unitary cancellation below.
    for args in expr.args:
        if args.is_Matrix:
            exprargs.append(args)
        else:
            newargs.append(args)

    # Robustness fix: with no matrix factors the original code raised
    # IndexError on exprargs[0]; there is nothing to refine in that case.
    if not exprargs:
        return MatMul(*newargs)

    last = exprargs[0]
    for arg in exprargs[1:]:
        # X * X.T -> I for orthogonal X; X * conjugate(X) -> I for unitary X.
        if arg == last.T and ask(Q.orthogonal(arg), assumptions):
            last = Identity(arg.shape[0])
        elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
            last = Identity(arg.shape[0])
        else:
            newargs.append(last)
            last = arg
    newargs.append(last)

    return MatMul(*newargs)
handlers_dict['MatMul'] = refine_MatMul
| 6,575 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "android_webview/browser/page_load_metrics/aw_page_load_metrics_memory_tracker_factory.h"
#include "base/memory/singleton.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
#include "components/keyed_service/content/browser_context_keyed_service_factory.h"
#include "components/page_load_metrics/browser/page_load_metrics_memory_tracker.h"
namespace android_webview {
// Returns the PageLoadMetricsMemoryTracker attached to `context`, creating
// it on first use (the `true` argument requests creation).
page_load_metrics::PageLoadMetricsMemoryTracker*
AwPageLoadMetricsMemoryTrackerFactory::GetForBrowserContext(
    content::BrowserContext* context) {
  return static_cast<page_load_metrics::PageLoadMetricsMemoryTracker*>(
      GetInstance()->GetServiceForBrowserContext(context, true));
}

// Lazily-created process-wide singleton of the factory itself.
AwPageLoadMetricsMemoryTrackerFactory*
AwPageLoadMetricsMemoryTrackerFactory::GetInstance() {
  return base::Singleton<AwPageLoadMetricsMemoryTrackerFactory>::get();
}

AwPageLoadMetricsMemoryTrackerFactory::AwPageLoadMetricsMemoryTrackerFactory()
    : BrowserContextKeyedServiceFactory(
          "PageLoadMetricsMemoryTracker",
          BrowserContextDependencyManager::GetInstance()) {}

// Create the service eagerly with the context only when per-frame V8 memory
// monitoring is enabled. NOTE(review): neither base/feature_list.h nor the
// header declaring features::kV8PerFrameMemoryMonitoring is among the
// includes visible in this chunk — confirm the full file includes them.
bool AwPageLoadMetricsMemoryTrackerFactory::ServiceIsCreatedWithBrowserContext()
    const {
  return base::FeatureList::IsEnabled(features::kV8PerFrameMemoryMonitoring);
}

// Ownership of the returned KeyedService passes to the keyed-service
// infrastructure.
KeyedService* AwPageLoadMetricsMemoryTrackerFactory::BuildServiceInstanceFor(
    content::BrowserContext* context) const {
  return new page_load_metrics::PageLoadMetricsMemoryTracker();
}

// One tracker per context: no redirection to an "original" (non-incognito)
// context is performed.
content::BrowserContext*
AwPageLoadMetricsMemoryTrackerFactory::GetBrowserContextToUse(
    content::BrowserContext* context) const {
  return context;
}
} // namespace android_webview
| 545 |
690 | package com.artemis;
import com.artemis.annotations.Fluid;
/**
* Needs to be in same folder as maven plugin.
*
* @author <NAME>
*/
public class MavenFluidGeneratorPreferences extends FluidGeneratorPreferences {
    // Intentionally empty marker subclass: inherits all defaults from
    // FluidGeneratorPreferences. It exists only so the fluid Maven plugin
    // can locate preferences in this package (see class Javadoc above).
}
| 71 |
5,937 | <filename>src/Microsoft.DotNet.Wpf/src/WpfGfx/shared/debug/DebugLib/DebugLib.cxx
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//------------------------------------------------------------------------------
//
//
// File: DebugLib.cxx
// Contents: Interface to debugging .dll (if available)
//------------------------------------------------------------------------------
#include "Always.h"
#include "strsafe.h"
//------------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
// A trace tag entry: owning component name, human-readable description, and
// whether output for the tag is currently enabled.
struct TAGINFO
{
    CHAR * pchOwner;
    CHAR * pchDesc;
    BOOL fEnabled;
};

// Static fallback tag table used when PresentationDebug.dll is unavailable.
// Indices must line up with the local_tag* constants defined below.
TAGINFO g_rgtaginfo[] =
{
    { "Debug", "General debug output", TRUE }, // 0: tagDefault
    { "Trace", "Errors", TRUE }, // 1: tagError
    { "Trace", "Warnings", FALSE }, // 2: tagWarning
    { "Thread", "Thread related tracing", FALSE }, // 3: tagThread
    { "Assert", "Exit on asserts", FALSE }, // 4: tagAssertExit
    { "Assert", "Stacktraces on asserts", TRUE }, // 5: tagAssertStacks
    { "Memory", "Use VMem for MemAlloc", FALSE }, // 6: tagMemoryStrict
    { "Memory", "Use VMem for CoTaskMemAlloc", FALSE }, // 7: tagCoMemoryStrict
    { "Memory", "Use VMem strict at end (vs beginning)", FALSE }, // 8: tagMemoryStrictTail
    { "Memory", "VMem pad to quadword at end", FALSE }, // 9: tagMemoryStrictAlign
    { "Trace", "All calls to OCX interfaces", FALSE }, // 10: tagOLEWatch
    { "FALSE", "FALSE", FALSE }, // 11: tagFALSE -- NOTE(review): this is array
    // index 11, but local_tagFALSE below is defined as 12, one past the end of
    // this table. _DbgExIsTagEnabled(12) therefore always reports FALSE (which
    // matches the tag's meaning), but confirm the off-by-one is intentional.
};
#define TAG_NONAME 0x01
#define TAG_NONEWLINE 0x02
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#define local_tagDefault ((TRACETAG)0)
#define local_tagError ((TRACETAG)1)
#define local_tagWarning ((TRACETAG)2)
#define local_tagThread ((TRACETAG)3)
#define local_tagAssertExit ((TRACETAG)4)
#define local_tagAssertStacks ((TRACETAG)5)
#define local_tagMemoryStrict ((TRACETAG)6)
#define local_tagCoMemoryStrict ((TRACETAG)7)
#define local_tagMemoryStrictTail ((TRACETAG)8)
#define local_tagMemoryStrictAlign ((TRACETAG)9)
#define local_tagOLEWatch ((TRACETAG)10)
#define local_tagFALSE ((TRACETAG)12)
// Data ------------------------------------------------------------------------
HINSTANCE g_hInstDbg = NULL;
HINSTANCE g_hInstLeak = NULL;
//------------------------------------------------------------------------------
// Returns a pointer to the file-name component (no directory) of the
// module's full path. The result points into a static buffer: it is
// overwritten by the next call and is not thread-safe (unchanged contract).
char * GetModuleName(HINSTANCE hInst)
{
    static char achMod[MAX_PATH];
    achMod[0] = 0;
    GetModuleFileNameA(hInst, achMod, sizeof(achMod));
    // Scan backwards to the last path separator.
    // BUG FIX: the original compared against the multi-character literal
    // '//' (an int constant, never equal to any char), so forward-slash
    // separators were silently ignored; compare against '/' instead.
    char * psz = &achMod[lstrlenA(achMod)];
    while (psz > achMod && *psz != '\\' && *psz != '/') --psz;
    if (*psz == '\\' || *psz == '/') ++psz;
    return(psz);
}
// Appends one formatted line to c:\leakdump.txt, prefixed with the leak
// module's file name. NOTE(review): pvArg is cast to va_list and handed to
// StringCchVPrintfA — this only works on ABIs where va_list is a plain
// pointer (x86); callers may pass at most one pointer-sized argument.
void LeakDumpAppend(__in PSTR pszMsg, void * pvArg = NULL)
{
    HANDLE hFile;
    char ach[1024];
    char *pEnd = NULL;
    size_t iRemaining = 0;
    DWORD dw;
    // Build "<module>: <formatted message>" into ach; pEnd/iRemaining track
    // the write position after the prefix.
    StringCchCopyA(ach, ARRAY_SIZE(ach), GetModuleName(g_hInstLeak));
    StringCchCatExA(ach, ARRAY_SIZE(ach), ": ", &pEnd, &iRemaining, 0);
    StringCchVPrintfA(pEnd, iRemaining, pszMsg, (va_list)pvArg);
    // Open (creating if absent), seek to end, write the line with CRLF.
    // Failures to open are silently ignored — best-effort diagnostics.
    hFile = CreateFileA("c:\\leakdump.txt", GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (hFile != INVALID_HANDLE_VALUE)
    {
        SetFilePointer(hFile, 0, NULL, FILE_END);
        WriteFile(hFile, ach, lstrlenA(ach), &dw, NULL);
        WriteFile(hFile, "\r\n", 2, &dw, NULL);
        CloseHandle(hFile);
    }
}
// Many stub ignore their parameters.
#pragma warning(push)
#pragma warning(disable:4100) /* unreferenced formal parameter */
DWORD WINAPI _DbgExGetVersion()
{
return(AVALON_DEBUG_API_VERSION);
}
BOOL WINAPI _DbgExIsFullDebug()
{
return(FALSE);
}
void WINAPI _DbgExAddRefDebugLibrary()
{
}
void WINAPI _DbgExReleaseDebugLibrary()
{
}
void WINAPI _DbgExSetDllMain(HANDLE hDllHandle, BOOL (WINAPI * pfnDllMain)(HANDLE, DWORD, LPVOID))
{
}
void WINAPI _DbgExDoTracePointsDialog(BOOL fWait)
{
}
void WINAPI _DbgExRestoreDefaultDebugState()
{
}
// Enables or disables a tag in the static table and returns its previous
// state. Tag 0 (tagDefault) and the final slot are excluded by the bounds
// check (presumably intentional), so they cannot be toggled here; for those,
// and for any out-of-range tag, FALSE is returned and nothing changes.
BOOL WINAPI _DbgExEnableTag(TRACETAG tag, BOOL fEnable)
{
    BOOL fOld = FALSE;
    if (tag > 0 && tag < ARRAY_SIZE(g_rgtaginfo) - 1)
    {
        fOld = g_rgtaginfo[tag].fEnabled;
        g_rgtaginfo[tag].fEnabled = fEnable;
    }
    return(fOld);
}
BOOL WINAPI _DbgExSetDiskFlag(TRACETAG tag, BOOL fSendToDisk)
{
return(FALSE);
}
BOOL WINAPI _DbgExSetBreakFlag(TRACETAG tag, BOOL fBreak)
{
return(FALSE);
}
// True when `tag` indexes the static table and that entry is enabled;
// out-of-range tags (including local_tagFALSE == 12) always read FALSE.
BOOL WINAPI _DbgExIsTagEnabled(TRACETAG tag)
{
    return(tag >= 0 && tag < ARRAY_SIZE(g_rgtaginfo) && g_rgtaginfo[tag].fEnabled);
}

// Looks up a tag by its description string (case-insensitive). Returns
// local_tagFALSE — an always-disabled sentinel — when nothing matches.
TRACETAG WINAPI _DbgExFindTag(__in PCSTR szTagDesc)
{
    TAGINFO * pti = g_rgtaginfo;
    TRACETAG tag;
    for (tag = 0; tag < ARRAY_SIZE(g_rgtaginfo); ++tag, ++pti)
    {
        if (!lstrcmpiA(pti->pchDesc, szTagDesc))
        {
            return(tag);
        }
    }
    return(local_tagFALSE);
}
TRACETAG WINAPI _DbgExTagError()
{
return(local_tagError);
}
TRACETAG WINAPI _DbgExTagWarning()
{
return(local_tagWarning);
}
TRACETAG WINAPI _DbgExTagThread()
{
return(local_tagThread);
}
TRACETAG WINAPI _DbgExTagAssertExit()
{
return(local_tagAssertExit);
}
TRACETAG WINAPI _DbgExTagAssertStacks()
{
return(local_tagAssertStacks);
}
TRACETAG WINAPI _DbgExTagMemoryStrict()
{
return(local_tagMemoryStrict);
}
TRACETAG WINAPI _DbgExTagCoMemoryStrict()
{
return(local_tagCoMemoryStrict);
}
TRACETAG WINAPI _DbgExTagMemoryStrictTail()
{
return(local_tagMemoryStrictTail);
}
TRACETAG WINAPI _DbgExTagMemoryStrictAlign()
{
return(local_tagMemoryStrictAlign);
}
TRACETAG WINAPI _DbgExTagOLEWatch()
{
return(local_tagOLEWatch);
}
// Fallback "registration": never adds a new entry. Returns an existing tag
// whose owner AND description both match (case-insensitive), otherwise the
// always-disabled local_tagFALSE sentinel. szTag and fEnabled are ignored in
// this stub build (the real implementation lives in PresentationDebug.dll).
TRACETAG WINAPI _DbgExTagRegisterTrace(__in PCSTR szTag, __in PCSTR szOwner, __in PCSTR szDescrip, BOOL fEnabled)
{
    TAGINFO * pti = g_rgtaginfo;
    TRACETAG tag;
    for (tag = 0; tag < ARRAY_SIZE(g_rgtaginfo) - 1; ++tag, ++pti)
    {
        if (!lstrcmpiA(pti->pchDesc, szDescrip) && !lstrcmpiA(pti->pchOwner, szOwner))
        {
            return(tag);
        }
    }
    return(local_tagFALSE);
}
// Formats and emits one trace line via OutputDebugStringA when `tag` is
// enabled. Supports a private "%hr" format extension, rewritten to "%lX" so
// HRESULTs print in hex. usFlags: TAG_NONAME suppresses the "WPF: " prefix,
// TAG_NONEWLINE suppresses the trailing CRLF. Always returns FALSE
// (unchanged contract of this stub).
BOOL WINAPI _DbgExTaggedTraceListEx(TRACETAG tag, USHORT usFlags, __in PCSTR szFmt, va_list valMarker)
{
    if (DbgExIsTagEnabled(tag))
    {
        CHAR achDup[512], *pch;
        CHAR achBuf[1024];
        CHAR *pStart = achBuf;
        size_t cch = ARRAY_SIZE(achBuf);
        achBuf[0] = '\0';
        // Rewrite "%hr" -> "%lX" in a local copy of the format string;
        // "%%" is skipped so an escaped percent never starts a conversion.
        StringCchCopyA(achDup, ARRAY_SIZE(achDup), szFmt);
        for (pch = achDup; *pch; ++pch)
        {
            if (*pch == '%')
            {
                if (pch[1] == '%')
                {
                    ++pch;
                    continue;
                }
                if (pch[1] == 'h' && pch[2] == 'r')
                {
                    pch[1] = 'l';
                    pch[2] = 'X';
                    continue;
                }
            }
        }
        if (!(usFlags & TAG_NONAME))
        {
            StringCchCopyExA(achBuf, ARRAY_SIZE(achBuf), "WPF: ", &pStart, &cch, 0);
        }
        // BUG FIX: format with the rewritten copy. The original passed szFmt
        // here, silently discarding the %hr -> %lX translation done above.
        StringCchVPrintfA(pStart, cch, achDup, valMarker);
        if (!(usFlags & TAG_NONEWLINE))
        {
            StringCchCatA(achBuf, ARRAY_SIZE(achBuf), "\r\n");
        }
        OutputDebugStringA(achBuf);
    }
    return(FALSE);
}
void WINAPI _DbgExTaggedTraceCallers(TRACETAG tag, int iStart, int cTotal)
{
}
void WINAPI _DbgExAssertThreadDisable(BOOL fDisable)
{
}
size_t WINAPI _DbgExPreAlloc(size_t cbRequest, PERFMETERTAG mt)
{
return(cbRequest);
}
void * WINAPI _DbgExPostAlloc(void *pv)
{
return(pv);
}
void * WINAPI _DbgExPreFree(void *pv)
{
if (g_hInstDbg)
{
LeakDumpAppend("DbgExPreFree: freeing memory at %08lX", pv);
pv = NULL;
}
return(pv);
}
void WINAPI _DbgExPostFree()
{
}
size_t WINAPI _DbgExPreRealloc(void *pvRequest, size_t cbRequest, void **ppv, PERFMETERTAG mt)
{
*ppv = pvRequest;
return(cbRequest);
}
void * WINAPI _DbgExPostRealloc(void *pv)
{
return(pv);
}
void * WINAPI _DbgExPreGetSize(void *pvRequest)
{
return(pvRequest);
}
size_t WINAPI _DbgExPostGetSize(size_t cb)
{
return(cb);
}
size_t WINAPI _DbgExMtPreAlloc(size_t cbRequest, PERFMETERTAG mt)
{
return(cbRequest);
}
void * WINAPI _DbgExMtPostAlloc(void *pv)
{
return(pv);
}
void * WINAPI _DbgExMtPreFree(void *pv)
{
if (g_hInstDbg)
{
LeakDumpAppend("DbgExMtPreFree: freeing memory at %08lX", pv);
pv = NULL;
}
return(pv);
}
void WINAPI _DbgExMtPostFree()
{
}
size_t WINAPI _DbgExMtPreRealloc(void *pvRequest, size_t cbRequest, void **ppv, PERFMETERTAG mt)
{
*ppv = pvRequest;
return(cbRequest);
}
void * WINAPI _DbgExMtPostRealloc(void *pv)
{
return(pv);
}
void * WINAPI _DbgExMtPreGetSize(void *pvRequest)
{
return(pvRequest);
}
size_t WINAPI _DbgExMtPostGetSize(size_t cb)
{
return(cb);
}
void WINAPI _DbgExMemoryTrackDisable(BOOL fDisable)
{
}
void WINAPI _DbgExCoMemoryTrackDisable(BOOL fDisable)
{
}
void WINAPI _DbgExMemoryBlockTrackDisable(void * pv)
{
}
void * WINAPI _DbgExGetMallocSpy()
{
return(NULL);
}
void WINAPI _DbgExTraceMemoryLeaks()
{
}
BOOL WINAPI _DbgExValidateKnownAllocations()
{
return(TRUE);
}
LONG_PTR WINAPI _DbgExTraceFailL(LONG_PTR errExpr, LONG_PTR errTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line)
{
return(errExpr);
}
LONG_PTR WINAPI _DbgExTraceWin32L(LONG_PTR errExpr, LONG_PTR errTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line)
{
return(errExpr);
}
HRESULT WINAPI _DbgExTraceHR(HRESULT hrTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line)
{
return(hrTest);
}
HRESULT WINAPI _DbgExTraceOLE(HRESULT hrTest, BOOL fIgnore, __in LPSTR pstrExpr, __in LPSTR pstrFile, int line, LPVOID lpsite)
{
return(hrTest);
}
void WINAPI _DbgExSetSimFailCounts(int firstFailure, int cInterval)
{
}
void WINAPI _DbgExShowSimFailDlg()
{
}
BOOL WINAPI _DbgExFFail()
{
return(FALSE);
}
int WINAPI _DbgExGetFailCount()
{
return(INT_MIN);
}
void WINAPI _DbgExOpenMemoryMonitor()
{
}
void WINAPI _DbgExOpenLogFile(LPCSTR szFName)
{
}
void WINAPI _DbgExDumpProcessHeaps()
{
}
PERFMETERTAG WINAPI _DbgExMtRegister(__in PCSTR szTag, __in PCSTR szOwner, __in PCSTR szDescrip, DWORD dwFlags)
{
return(0);
}
void WINAPI _DbgExMtAdd(PERFMETERTAG mt, LONG lCnt, LONG lVal)
{
}
void WINAPI _DbgExMtSet(PERFMETERTAG mt, LONG lCnt, LONG lVal)
{
}
char * WINAPI _DbgExMtGetName(PERFMETERTAG mt)
{
return("");
}
char * WINAPI _DbgExMtGetDesc(PERFMETERTAG mt)
{
return("");
}
PERFMETERTAG WINAPI _DbgExMtGetParent(PERFMETERTAG mt)
{
return NULL;
}
DWORD WINAPI _DbgExMtGetFlags(PERFMETERTAG mt)
{
return 0;
}
void WINAPI _DbgExMtSetFlags(PERFMETERTAG mt, DWORD dwFlags)
{
return;
}
BOOL WINAPI _DbgExMtSimulateOutOfMemory(PERFMETERTAG mt, LONG lNewValue)
{
return(0);
}
void WINAPI _DbgExMtOpenMonitor()
{
}
void WINAPI _DbgExMtLogDump(__in PCSTR pchFile)
{
}
PERFMETERTAG WINAPI _DbgExMtLookupMeter(__in PCSTR szTag)
{
return 0;
}
long WINAPI _DbgExMtGetMeterCnt(PERFMETERTAG mt, BOOL fExclusive)
{
return 0;
}
long WINAPI _DbgExMtGetMeterVal(PERFMETERTAG mt, BOOL fExclusive)
{
return 0;
}
PERFMETERTAG WINAPI _DbgExMtGetDefaultMeter()
{
return NULL;
}
PERFMETERTAG WINAPI _DbgExMtSetDefaultMeter(PERFMETERTAG mtDefault)
{
return NULL;
}
void WINAPI _DbgExSetTopUrl(__in LPWSTR pstrUrl)
{
}
void WINAPI _DbgExGetSymbolFromAddress(void * pvAddr, __out_ecount(cchBuf) char * pszBuf, DWORD cchBuf)
{
pszBuf[0] = 0;
}
void WINAPI _DbgExGetStackAddresses(void ** ppvAddr, int iStart, int cTotal)
{
memset( ppvAddr, 0, sizeof(void *) * cTotal );
}
#pragma warning(pop) /* reenable disabled warnings */
// Reads the stack-fill configuration from the [chkstk] section of
// avalndbg.ini. *pdwFill receives the fill pattern (default 0xCCCCCCCC);
// returns TRUE unless the ini sets "disable" to a nonzero value.
BOOL WINAPI _DbgExGetChkStkFill(DWORD * pdwFill)
{
    *pdwFill = GetPrivateProfileIntA("chkstk", "fill", 0xCCCCCCCC, "avalndbg.ini");
    return(!GetPrivateProfileIntA("chkstk", "disable", FALSE, "avalndbg.ini"));
}
// cdecl function "wrappers" to their va_list equivalent ----------------------
BOOL __cdecl
DbgExTaggedTrace(TRACETAG tag, __in PCSTR szFmt, ...)
{
va_list va;
BOOL f;
va_start(va, szFmt);
f = DbgExTaggedTraceListEx(tag, 0, szFmt, va);
va_end(va);
return f;
}
BOOL __cdecl
DbgExTaggedTraceEx(TRACETAG tag, USHORT usFlags, __in PCSTR szFmt, ...)
{
va_list va;
BOOL f;
va_start(va, szFmt);
f = DbgExTaggedTraceListEx(tag, usFlags, szFmt, va);
va_end(va);
return f;
}
// InitDebugLib ---------------------------------------------------------------
#define DBGEXFUNCTIONS() \
DBGEXWRAP (DWORD, DbgExGetVersion, (), ()) \
DBGEXWRAP (BOOL, DbgExIsFullDebug, (), ()) \
DBGEXWRAP_(void, DbgExAddRefDebugLibrary, (), ()) \
DBGEXWRAP_(void, DbgExReleaseDebugLibrary, (), ()) \
DBGEXWRAP_(void, DbgExSetDllMain, (HANDLE hDllHandle, BOOL (WINAPI * pfnDllMain)(HANDLE, DWORD, LPVOID)), (hDllHandle, pfnDllMain)) \
DBGEXWRAP_(void, DbgExDoTracePointsDialog, (BOOL fWait), (fWait)) \
DBGEXWRAP_(void, DbgExRestoreDefaultDebugState, (), ()) \
DBGEXWRAP (BOOL, DbgExEnableTag, (TRACETAG tag, BOOL fEnable), (tag, fEnable)) \
DBGEXWRAP (BOOL, DbgExSetDiskFlag, (TRACETAG tag, BOOL fSendToDisk), (tag, fSendToDisk)) \
DBGEXWRAP (BOOL, DbgExSetBreakFlag, (TRACETAG tag, BOOL fBreak), (tag, fBreak)) \
DBGEXWRAP (BOOL, DbgExIsTagEnabled, (TRACETAG tag), (tag)) \
DBGEXWRAP (TRACETAG, DbgExFindTag, (__in PCSTR szTagDesc), (szTagDesc)) \
DBGEXWRAP (TRACETAG, DbgExTagError, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagWarning, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagThread, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagAssertExit, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagAssertStacks, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagMemoryStrict, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagCoMemoryStrict, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagMemoryStrictTail, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagMemoryStrictAlign, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagOLEWatch, (), ()) \
DBGEXWRAP (TRACETAG, DbgExTagRegisterTrace, (__in PCSTR szTag, __in PCSTR szOwner, __in PCSTR szDescrip, BOOL fEnabled), (szTag, szOwner, szDescrip, fEnabled)) \
DBGEXWRAP (BOOL, DbgExTaggedTraceListEx, (TRACETAG tag, USHORT usFlags, __in PCSTR szFmt, __in va_list valMarker), (tag, usFlags, szFmt, valMarker)) \
DBGEXWRAP_(void, DbgExTaggedTraceCallers, (TRACETAG tag, int iStart, int cTotal), (tag, iStart, cTotal)) \
DBGEXWRAP_(void, DbgExAssertThreadDisable, (BOOL fDisable), (fDisable)) \
DBGEXWRAP (size_t, DbgExPreAlloc, (size_t cbRequest, PERFMETERTAG mt), (cbRequest, mt)) \
DBGEXWRAP (void *, DbgExPostAlloc, (void *pv), (pv)) \
DBGEXWRAP (void *, DbgExPreFree, (void *pv), (pv)) \
DBGEXWRAP_(void, DbgExPostFree, (), ()) \
DBGEXWRAP (size_t, DbgExPreRealloc, (void *pvRequest, size_t cbRequest, void **ppv, PERFMETERTAG mt), (pvRequest, cbRequest, ppv, mt)) \
DBGEXWRAP (void *, DbgExPostRealloc, (void *pv), (pv)) \
DBGEXWRAP (void *, DbgExPreGetSize, (void *pvRequest), (pvRequest)) \
DBGEXWRAP (size_t, DbgExPostGetSize, (size_t cb), (cb)) \
DBGEXWRAP (size_t, DbgExMtPreAlloc, (size_t cbRequest, PERFMETERTAG mt), (cbRequest, mt)) \
DBGEXWRAP (void *, DbgExMtPostAlloc, (void *pv), (pv)) \
DBGEXWRAP (void *, DbgExMtPreFree, (void *pv), (pv)) \
DBGEXWRAP_(void, DbgExMtPostFree, (), ()) \
DBGEXWRAP (size_t, DbgExMtPreRealloc, (void *pvRequest, size_t cbRequest, void **ppv, PERFMETERTAG mt), (pvRequest, cbRequest, ppv, mt)) \
DBGEXWRAP (void *, DbgExMtPostRealloc, (void *pv), (pv)) \
DBGEXWRAP (void *, DbgExMtPreGetSize, (void *pvRequest), (pvRequest)) \
DBGEXWRAP (size_t, DbgExMtPostGetSize, (size_t cb), (cb)) \
DBGEXWRAP_(void, DbgExMemoryTrackDisable, (BOOL fDisable), (fDisable)) \
DBGEXWRAP_(void, DbgExCoMemoryTrackDisable, (BOOL fDisable), (fDisable)) \
DBGEXWRAP_(void, DbgExMemoryBlockTrackDisable, (void * pv), (pv)) \
DBGEXWRAP_(void, DbgExTraceMemoryLeaks, (), ()) \
DBGEXWRAP (BOOL, DbgExValidateKnownAllocations, (), ()) \
DBGEXWRAP (LONG_PTR, DbgExTraceFailL, (LONG_PTR errExpr, LONG_PTR errTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line), (errExpr, errTest, fIgnore, pstrExpr, pstrFile, line)) \
DBGEXWRAP (LONG_PTR, DbgExTraceWin32L, (LONG_PTR errExpr, LONG_PTR errTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line), (errExpr, errTest, fIgnore, pstrExpr, pstrFile, line)) \
DBGEXWRAP (HRESULT, DbgExTraceHR, (HRESULT hrTest, BOOL fIgnore, __in PCSTR pstrExpr, __in PCSTR pstrFile, int line), (hrTest, fIgnore, pstrExpr, pstrFile, line)) \
DBGEXWRAP_(void, DbgExSetSimFailCounts, (int firstFailure, int cInterval), (firstFailure, cInterval)) \
DBGEXWRAP_(void, DbgExShowSimFailDlg, (), ()) \
DBGEXWRAP (BOOL, DbgExFFail, (), ()) \
DBGEXWRAP (int, DbgExGetFailCount, (), ()) \
DBGEXWRAP_(void, DbgExOpenMemoryMonitor, (), ()) \
DBGEXWRAP_(void, DbgExOpenLogFile, (LPCSTR szFName), (szFName)) \
DBGEXWRAP_(void, DbgExDumpProcessHeaps, (), ()) \
DBGEXWRAP(PERFMETERTAG, DbgExMtRegister, (__in PCSTR szTag, __in PCSTR szOwner, __in PCSTR szDescrip, DWORD dwFlags), (szTag, szOwner, szDescrip, dwFlags)) \
DBGEXWRAP_(void, DbgExMtAdd, (PERFMETERTAG mt, LONG lCnt, LONG lVal), (mt, lCnt, lVal)) \
DBGEXWRAP_(void, DbgExMtSet, (PERFMETERTAG mt, LONG lCnt, LONG lVal), (mt, lCnt, lVal)) \
DBGEXWRAP (char *, DbgExMtGetName, (PERFMETERTAG mt), (mt)) \
DBGEXWRAP (char *, DbgExMtGetDesc, (PERFMETERTAG mt), (mt)) \
DBGEXWRAP (PERFMETERTAG, DbgExMtGetParent, (PERFMETERTAG mt), (mt)) \
DBGEXWRAP (DWORD, DbgExMtGetFlags, (PERFMETERTAG mt), (mt)) \
DBGEXWRAP_(void, DbgExMtSetFlags, (PERFMETERTAG mt, DWORD dwFlags), (mt, dwFlags)) \
DBGEXWRAP (BOOL, DbgExMtSimulateOutOfMemory, (PERFMETERTAG mt, LONG lNewValue), (mt, lNewValue)) \
DBGEXWRAP_(void, DbgExMtOpenMonitor, (), ()) \
DBGEXWRAP_(void, DbgExMtLogDump, (__in PCSTR pchFile), (pchFile)) \
DBGEXWRAP (PERFMETERTAG, DbgExMtLookupMeter, (__in PCSTR szTag), (szTag)) \
DBGEXWRAP (long, DbgExMtGetMeterCnt, (PERFMETERTAG mt, BOOL fExclusive), (mt, fExclusive)) \
DBGEXWRAP (long, DbgExMtGetMeterVal, (PERFMETERTAG mt, BOOL fExclusive), (mt, fExclusive)) \
DBGEXWRAP (PERFMETERTAG, DbgExMtGetDefaultMeter, (), ()) \
DBGEXWRAP (PERFMETERTAG, DbgExMtSetDefaultMeter, (PERFMETERTAG mtDefault), (mtDefault)) \
DBGEXWRAP_(void, DbgExGetStackAddresses, (void ** ppvAddr, int iStart, int cTotal), (ppvAddr, iStart, cTotal)) \
DBGEXWRAP (BOOL, DbgExGetChkStkFill, (DWORD * pdwFill), (pdwFill)) \
#undef DBGEXWRAP
#undef DBGEXWRAP_
#define DBGEXWRAP(ret, fn, formals, params) ret (WINAPI * g_##fn) formals = _##fn;
#define DBGEXWRAP_(ret, fn, formals, params) ret (WINAPI * g_##fn) formals = _##fn;
#pragma prefast (suppress: __WARNING_ENCODE_GLOBAL_FUNCTION_POINTER)
DBGEXFUNCTIONS()
#undef DBGEXWRAP
#undef DBGEXWRAP_
#define DBGEXWRAP(ret, fn, formals, params) ret WINAPI fn formals { return(g_##fn params); }
#define DBGEXWRAP_(ret, fn, formals, params) ret WINAPI fn formals { g_##fn params; }
DBGEXFUNCTIONS()
// Resolves one PresentationDebug.dll export into *ppv. On failure, logs the
// missing entrypoint name and returns FALSE so InitDebugLib can fall back to
// the local stub implementations.
BOOL InitDebugProcedure(void ** ppv, __in PSTR pchFn)
{
    *ppv = (void *)GetProcAddress(g_hInstDbg, pchFn);
    if (*ppv == NULL)
    {
        char ach[512];
        // BUG FIX: the original called StringCchVPrintfA here, passing pchFn
        // (a PSTR) where a va_list is expected -- undefined behavior on any
        // ABI where va_list is not a plain char pointer. Use the variadic
        // StringCchPrintfA, which takes pchFn as a normal %s argument.
        StringCchPrintfA(ach, ARRAY_SIZE(ach), "InitDebugLib: Can't find PresentationDebug.dll entrypoint %s\r\n", pchFn);
        OutputDebugStringA(ach);
        return(FALSE);
    }
    return(TRUE);
}
void InitDebugStubs()
{
#undef DBGEXWRAP
#undef DBGEXWRAP_
#define DBGEXWRAP(ret, fn, formals, params) g_##fn = _##fn;
#define DBGEXWRAP_(ret, fn, formals, params) g_##fn = _##fn;
DBGEXFUNCTIONS()
}
// Loads PresentationDebug.dll and wires every DbgEx* entry in
// DBGEXFUNCTIONS() to the real export via the g_* function pointers. If the
// DLL is missing, any export cannot be resolved, or the DLL's API version
// does not match, all pointers are (re)bound to the local _DbgEx* stubs.
// hDllHandle/pfnDllMain are accepted for API compatibility but unused (see
// the URT note below).
void InitDebugLib(
    __in_ecount_opt(1) HANDLE hDllHandle,
    __in_ecount_opt(1) BOOL (WINAPI * pfnDllMain)(HANDLE, DWORD, LPVOID),
    BOOL fExe
    )
{
    UNREFERENCED_PARAMETER(hDllHandle);
    UNREFERENCED_PARAMETER(pfnDllMain);
    g_hInstDbg = LoadLibraryA("PresentationDebug.dll");
    if (g_hInstDbg == NULL)
    {
        // OutputDebugStringA("InitDebugLib: Can't find PresentationDebug.dll. Only partial debug support available.\r\n");
        goto dostubs;
    }
// Re-purpose the X-macro table: each entry resolves one export, bailing out
// to the stub path on the first failure.
#undef DBGEXWRAP
#undef DBGEXWRAP_
#define DBGEXWRAP(ret, fn, formals, params) if (!InitDebugProcedure((void **)&g_##fn, #fn)) goto dostubs;
#define DBGEXWRAP_(ret, fn, formals, params) if (!InitDebugProcedure((void **)&g_##fn, #fn)) goto dostubs;
    DBGEXFUNCTIONS()
    // All exports resolved; reject the DLL if it speaks a different version
    // of the debug API.
    if (DbgExGetVersion() != AVALON_DEBUG_API_VERSION)
    {
        char ach[512];
        StringCchPrintfA(ach, ARRAY_SIZE(ach), "InitDebugLib: Version mismatch for PresentationDebug.DLL. Expected %ld but found %ld.\r\n",
                AVALON_DEBUG_API_VERSION, DbgExGetVersion());
        OutputDebugStringA(ach);
        FreeLibrary(g_hInstDbg);
        g_hInstDbg = NULL;
        goto dostubs;
    }
    else
    {
        // DLLs keep the debug library referenced for their whole lifetime;
        // an EXE host skips the extra reference.
        if (!fExe)
            DbgExAddRefDebugLibrary();
        //
        // This doesn't work on dll used by the URT because it holds a reference to our dll and the
        // callback is no longer safe to do.
        //
        // DbgExSetDllMain(hDllHandle, pfnDllMain);
    }
    return;
dostubs:
    // Rebind every g_* pointer to the in-process stub implementations.
    InitDebugStubs();
}
// Tears down the connection to PresentationDebug.dll. Only the final call
// (fFinal == TRUE) releases the library reference and unloads the DLL; the
// non-final path is a no-op (see the URT note). Safe to call when the DLL
// was never loaded. Note the g_* pointers are NOT rebound to stubs here, so
// DbgEx* calls after a final TermDebugLib would use a stale module.
void TermDebugLib(__in_ecount(1) HANDLE hDllHandle, BOOL fFinal)
{
    UNREFERENCED_PARAMETER(hDllHandle);
    if (g_hInstDbg == NULL)
        return;
    if (fFinal)
    {
        DbgExReleaseDebugLibrary();
        FreeLibrary(g_hInstDbg);
        g_hInstDbg = NULL;
    }
    else
    {
        //
        // This doesn't work on dll used by the URT because it holds a reference to our dll and the
        // callback is no longer safe to do.
        //
        // DbgExSetDllMain(hDllHandle, NULL);
    }
}
#ifdef __cplusplus
}
#endif
| 10,581 |
5,447 | <gh_stars>1000+
"""GluonCV Model Zoo"""
# pylint: disable=wildcard-import
from ..resnetv1b import *
| 39 |
4,071 | # Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_dist_tree_api', [dirname(__file__)])
except ImportError:
import _dist_tree_api
return _dist_tree_api
if fp is not None:
try:
_mod = imp.load_module('_dist_tree_api', fp, pathname, description)
finally:
fp.close()
return _mod
_dist_tree_api = swig_import_helper()
del swig_import_helper
else:
import _dist_tree_api
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG-generated attribute setter. "thisown"/"this" are SWIG's pointer
    # ownership/handle slots and are routed specially; other names go through
    # the class's __swig_setmethods__ table. With static=1 (the default),
    # writes to names the wrapper does not know about raise AttributeError,
    # mimicking a non-dynamic (fixed-slot) object.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # Dynamic mode: fall back to plain instance-attribute assignment.
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def cdata(ptr, nelements=1):
    # NOTE(review): this def is immediately shadowed by the rebinding on the
    # next line (standard SWIG boilerplate); only _dist_tree_api.cdata is
    # ever callable under this name.
    return _dist_tree_api.cdata(ptr, nelements)
cdata = _dist_tree_api.cdata

def memmove(data, indata):
    # NOTE(review): shadowed by the rebinding below, same pattern as cdata.
    return _dist_tree_api.memmove(data, indata)
memmove = _dist_tree_api.memmove
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _dist_tree_api.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _dist_tree_api.SwigPyIterator_value(self)
def incr(self, n=1):
return _dist_tree_api.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _dist_tree_api.SwigPyIterator_decr(self, n)
def distance(self, x):
return _dist_tree_api.SwigPyIterator_distance(self, x)
def equal(self, x):
return _dist_tree_api.SwigPyIterator_equal(self, x)
def copy(self):
return _dist_tree_api.SwigPyIterator_copy(self)
def next(self):
return _dist_tree_api.SwigPyIterator_next(self)
def __next__(self):
return _dist_tree_api.SwigPyIterator___next__(self)
def previous(self):
return _dist_tree_api.SwigPyIterator_previous(self)
def advance(self, n):
return _dist_tree_api.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _dist_tree_api.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _dist_tree_api.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _dist_tree_api.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _dist_tree_api.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _dist_tree_api.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _dist_tree_api.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _dist_tree_api.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class _string_list(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, _string_list, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, _string_list, name)
__repr__ = _swig_repr
def iterator(self):
return _dist_tree_api._string_list_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _dist_tree_api._string_list___nonzero__(self)
def __bool__(self):
return _dist_tree_api._string_list___bool__(self)
def __len__(self):
return _dist_tree_api._string_list___len__(self)
def __getslice__(self, i, j):
return _dist_tree_api._string_list___getslice__(self, i, j)
def __setslice__(self, *args):
return _dist_tree_api._string_list___setslice__(self, *args)
def __delslice__(self, i, j):
return _dist_tree_api._string_list___delslice__(self, i, j)
def __delitem__(self, *args):
return _dist_tree_api._string_list___delitem__(self, *args)
def __getitem__(self, *args):
return _dist_tree_api._string_list___getitem__(self, *args)
def __setitem__(self, *args):
return _dist_tree_api._string_list___setitem__(self, *args)
def pop(self):
return _dist_tree_api._string_list_pop(self)
def append(self, x):
return _dist_tree_api._string_list_append(self, x)
def empty(self):
return _dist_tree_api._string_list_empty(self)
def size(self):
return _dist_tree_api._string_list_size(self)
def swap(self, v):
return _dist_tree_api._string_list_swap(self, v)
def begin(self):
return _dist_tree_api._string_list_begin(self)
def end(self):
return _dist_tree_api._string_list_end(self)
def rbegin(self):
return _dist_tree_api._string_list_rbegin(self)
def rend(self):
return _dist_tree_api._string_list_rend(self)
def clear(self):
return _dist_tree_api._string_list_clear(self)
def get_allocator(self):
return _dist_tree_api._string_list_get_allocator(self)
def pop_back(self):
return _dist_tree_api._string_list_pop_back(self)
def erase(self, *args):
return _dist_tree_api._string_list_erase(self, *args)
def __init__(self, *args):
this = _dist_tree_api.new__string_list(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _dist_tree_api._string_list_push_back(self, x)
def front(self):
return _dist_tree_api._string_list_front(self)
def back(self):
return _dist_tree_api._string_list_back(self)
def assign(self, n, x):
return _dist_tree_api._string_list_assign(self, n, x)
def resize(self, *args):
return _dist_tree_api._string_list_resize(self, *args)
def insert(self, *args):
return _dist_tree_api._string_list_insert(self, *args)
def reserve(self, n):
return _dist_tree_api._string_list_reserve(self, n)
def capacity(self):
return _dist_tree_api._string_list_capacity(self)
__swig_destroy__ = _dist_tree_api.delete__string_list
__del__ = lambda self: None
_string_list_swigregister = _dist_tree_api._string_list_swigregister
_string_list_swigregister(_string_list)
# Flat C-API aliases for the distributed-tree handle. Each def below is SWIG
# boilerplate that is immediately replaced by a direct binding to the
# extension-module function on the following line.
def DIST_TREE__API_new():
    return _dist_tree_api.DIST_TREE__API_new()
DIST_TREE__API_new = _dist_tree_api.DIST_TREE__API_new

def DIST_TREE__API_set_prefix(handler, prefix):
    return _dist_tree_api.DIST_TREE__API_set_prefix(handler, prefix)
DIST_TREE__API_set_prefix = _dist_tree_api.DIST_TREE__API_set_prefix

def DIST_TREE__API_set_store(handler, store):
    return _dist_tree_api.DIST_TREE__API_set_store(handler, store)
DIST_TREE__API_set_store = _dist_tree_api.DIST_TREE__API_set_store

def DIST_TREE__API_set_branch(handler, branch):
    return _dist_tree_api.DIST_TREE__API_set_branch(handler, branch)
DIST_TREE__API_set_branch = _dist_tree_api.DIST_TREE__API_set_branch

def DIST_TREE__API_load(handler):
    return _dist_tree_api.DIST_TREE__API_load(handler)
DIST_TREE__API_load = _dist_tree_api.DIST_TREE__API_load
# This file is compatible with both classic and new-style classes.
| 4,120 |
2,338 | <gh_stars>1000+
//===--- AttrDocTable.cpp - implements Attr::getDocumentation() -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains out-of-line methods for Attr classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
#include "llvm/ADT/StringRef.h"
#include "AttrDocTable.inc"
// Documentation strings indexed by clang::attr::Kind. The AttrDoc_##NAME
// constants are defined in the generated AttrDocTable.inc included above,
// and the AttrList.inc X-macro expansion keeps this table in Kind order.
static const llvm::StringRef AttrDoc[] = {
#define ATTR(NAME) AttrDoc_##NAME,
#include "clang/Basic/AttrList.inc"
};
// Returns the generated documentation text for an attribute kind; kinds
// outside the table yield an empty string.
llvm::StringRef clang::Attr::getDocumentation(clang::attr::Kind K) {
  const bool InRange = K < llvm::array_lengthof(AttrDoc);
  return InRange ? AttrDoc[K] : llvm::StringRef("");
}
| 290 |
335 | {
"word": "Inshore",
"definitions": [
"Towards or closer to the shore."
],
"parts-of-speech": "Adverb"
} | 61 |
473 | #include "cgc_stdlib.h"
/* printf-style error output with an automatic trailing newline. Relies on
   the GNU "##__VA_ARGS__" extension so ERR("msg") compiles with no extra
   arguments. `x` must be a string literal (it is pasted with "\n"). */
#define ERR(x, ...) cgc_printf(x "\n", ##__VA_ARGS__)
| 40 |
1,844 | <reponame>jjmata/robosat<gh_stars>1000+
import os
import sys
import time
import argparse
import concurrent.futures as futures
import requests
from PIL import Image
from tqdm import tqdm
from robosat.tiles import tiles_from_csv, fetch_image
def add_parser(subparser):
    # Registers the "download" subcommand with its CLI arguments. The actual
    # download happens in main(), wired up via parser.set_defaults(func=main).
    parser = subparser.add_parser(
        "download", help="downloads images from Mapbox Maps API", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("url", type=str, help="endpoint with {z}/{x}/{y} variables to fetch image tiles from")
    parser.add_argument("--ext", type=str, default="webp", help="file format to save images in")
    parser.add_argument("--rate", type=int, default=10, help="rate limit in max. requests per second")
    parser.add_argument("tiles", type=str, help="path to .csv tiles file")
    parser.add_argument("out", type=str, help="path to slippy map directory for storing tiles")
    parser.set_defaults(func=main)
def main(args):
    """Downloads map tiles listed in a csv file from a {z}/{x}/{y} endpoint.

    Tiles already present on disk are skipped; failed downloads are reported
    on stderr but do not abort the run. Downloads are rate limited to
    `args.rate` requests per second using `args.rate` concurrent workers.
    """
    tiles = list(tiles_from_csv(args.tiles))

    with requests.Session() as session:
        num_workers = args.rate

        # tqdm has problems with concurrent.futures.ThreadPoolExecutor; explicitly call `.update`
        # https://github.com/tqdm/tqdm/issues/97
        progress = tqdm(total=len(tiles), ascii=True, unit="image")

        with futures.ThreadPoolExecutor(num_workers) as executor:

            def worker(tile):
                """Fetches and saves one tile; returns (tile, success)."""
                tick = time.monotonic()

                x, y, z = map(str, [tile.x, tile.y, tile.z])
                os.makedirs(os.path.join(args.out, z, x), exist_ok=True)
                path = os.path.join(args.out, z, x, "{}.{}".format(y, args.ext))

                # Tile already downloaded on a previous run: skip the request.
                if os.path.isfile(path):
                    return tile, True

                url = args.url.format(x=tile.x, y=tile.y, z=tile.z)
                res = fetch_image(session, url)
                if not res:
                    return tile, False

                try:
                    image = Image.open(res)
                    image.save(path, optimize=True)
                except OSError:
                    return tile, False

                # Rate limiting: with `num_workers` concurrent workers, each
                # request must take at least num_workers / rate seconds for
                # the overall throughput to stay at `rate` requests/second.
                tock = time.monotonic()
                time_for_req = tock - tick
                time_per_worker = num_workers / args.rate
                if time_for_req < time_per_worker:
                    time.sleep(time_per_worker - time_for_req)

                return tile, True

            for tile, ok in executor.map(worker, tiles):
                # Bug fix: advance the bar for every processed tile. The
                # original only called `.update()` for fresh successful
                # downloads, so skipped (cached) and failed tiles left the
                # bar short of its total.
                progress.update()
                if not ok:
                    print("Warning: {} failed, skipping".format(tile), file=sys.stderr)

        progress.close()
| 1,206 |
7,353 | <reponame>LK26/outline-client<filename>third_party/badvpn/udevmonitor/NCDUdevCache.h
/**
* @file NCDUdevCache.h
* @author <NAME> <<EMAIL>>
*
* @section LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BADVPN_UDEVMONITOR_NCDUDEVCACHE_H
#define BADVPN_UDEVMONITOR_NCDUDEVCACHE_H
#include <misc/debug.h>
#include <structure/BAVL.h>
#include <structure/LinkedList1.h>
#include <base/DebugObject.h>
#include <stringmap/BStringMap.h>
/* One cached udev device: its property map plus the devpath it is indexed
 * under. The union holds the link node for whichever container the device
 * currently occupies -- presumably the devices tree while active and the
 * cleaned-devices list after a clean cycle (confirm against NCDUdevCache.c). */
struct NCDUdevCache_device {
    BStringMap map;
    const char *devpath;
    int is_cleaned;
    union {
        BAVLNode devices_tree_node;
        LinkedList1Node cleaned_devices_list_node;
    };
    int is_refreshed;
};
/* Cache of udev devices: a tree of devices plus a list collected by the
 * StartClean/FinishClean cycle. */
typedef struct {
    BAVL devices_tree;
    LinkedList1 cleaned_devices_list;
    DebugObject d_obj;
} NCDUdevCache;
/* Initializes the cache object. */
void NCDUdevCache_Init (NCDUdevCache *o);
/* Frees the cache object. */
void NCDUdevCache_Free (NCDUdevCache *o);
/* Looks up a device's property map by devpath. */
const BStringMap * NCDUdevCache_Query (NCDUdevCache *o, const char *devpath);
/* Feeds a udev event (property map taken by value) into the cache.
 * Return value must be checked (WARN_UNUSED). */
int NCDUdevCache_Event (NCDUdevCache *o, BStringMap map) WARN_UNUSED;
/* Begins a clean cycle. */
void NCDUdevCache_StartClean (NCDUdevCache *o);
/* Finishes a clean cycle. */
void NCDUdevCache_FinishClean (NCDUdevCache *o);
/* Retrieves one cleaned device, writing its map to *out_map. */
int NCDUdevCache_GetCleanedDevice (NCDUdevCache *o, BStringMap *out_map);
/* Key iteration: first cached key... */
const char * NCDUdevCache_First (NCDUdevCache *o);
/* ...and the key following `key`. */
const char * NCDUdevCache_Next (NCDUdevCache *o, const char *key);
#endif
| 943 |
3,457 | Matrix3d m = Matrix3d::Random();
cout << "Here is the matrix m:" << endl << m << endl;
// rowwise().prod() reduces each row to the product of its coefficients,
// yielding one value per row.
cout << "Here is the product of each row:" << endl << m.rowwise().prod() << endl;
| 61 |
12,278 | <reponame>Harshitha91/Tmdb-react-native-node
/*=============================================================================
Copyright (c) 2012 <NAME>
match.h
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef BOOST_HOF_GUARD_FUNCTION_OVERLOAD_H
#define BOOST_HOF_GUARD_FUNCTION_OVERLOAD_H
/// match
/// =====
///
/// Description
/// -----------
///
/// The `match` function adaptor combines several functions together and
/// resolves which one should be called by using C++ overload resolution. This
/// is different than the [`first_of`](/include/boost/hof/conditional) adaptor which resolves
/// them based on order.
///
/// Synopsis
/// --------
///
/// template<class... Fs>
/// constexpr match_adaptor<Fs...> match(Fs...fs);
///
/// Requirements
/// ------------
///
/// Fs must be:
///
/// * [ConstInvocable](ConstInvocable)
/// * MoveConstructible
///
/// Example
/// -------
///
/// #include <boost/hof.hpp>
/// using namespace boost::hof;
///
/// struct int_class
/// {
/// int operator()(int) const
/// {
/// return 1;
/// }
/// };
///
/// struct foo
/// {};
///
/// struct foo_class
/// {
/// foo operator()(foo) const
/// {
/// return foo();
/// }
/// };
///
/// typedef match_adaptor<int_class, foo_class> fun;
///
/// static_assert(std::is_same<int, decltype(fun()(1))>::value, "Failed match");
/// static_assert(std::is_same<foo, decltype(fun()(foo()))>::value, "Failed match");
///
/// int main() {}
///
/// References
/// ----------
///
/// * [POO51](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0051r2.pdf) - Proposal for C++
/// Proposal for C++ generic overload function
///
#include <boost/hof/reveal.hpp>
#include <boost/hof/detail/callable_base.hpp>
#include <boost/hof/detail/delegate.hpp>
#include <boost/hof/detail/move.hpp>
#include <boost/hof/detail/make.hpp>
#include <boost/hof/detail/static_const_var.hpp>
namespace boost { namespace hof {
template<class...Fs> struct match_adaptor;
// Recursive case: inherits operator() from the first callable and, through
// `base`, from all remaining ones; ordinary C++ overload resolution then
// selects the unique best match among them.
template<class F, class...Fs>
struct match_adaptor<F, Fs...> : detail::callable_base<F>, match_adaptor<Fs...>
{
    typedef match_adaptor<Fs...> base;
    typedef match_adaptor fit_rewritable_tag;
    // Aggregated failure type used by boost::hof::reveal for diagnostics.
    struct failure
    : failure_for<detail::callable_base<F>, Fs...>
    {};
    BOOST_HOF_INHERIT_DEFAULT(match_adaptor, detail::callable_base<F>, base);
    // Constructs from one callable per slot, forwarding the first to this
    // level and the rest to the tail adaptor.
    template<class X, class... Xs, BOOST_HOF_ENABLE_IF_CONVERTIBLE(X, detail::callable_base<F>), BOOST_HOF_ENABLE_IF_CONSTRUCTIBLE(base, Xs...)>
    constexpr match_adaptor(X&& f1, Xs&& ... fs)
    : detail::callable_base<F>(BOOST_HOF_FORWARD(X)(f1)), base(BOOST_HOF_FORWARD(Xs)(fs)...)
    {}
    using F::operator();
    using base::operator();
};
// Base case: a single callable.
template<class F>
struct match_adaptor<F> : detail::callable_base<F>
{
    typedef detail::callable_base<F> base;
    typedef match_adaptor fit_rewritable_tag;
    using F::operator();
    BOOST_HOF_INHERIT_CONSTRUCTOR(match_adaptor, detail::callable_base<F>);
};
// The `match` function object: match(f1, f2, ...) builds a match_adaptor.
BOOST_HOF_DECLARE_STATIC_VAR(match, detail::make<match_adaptor>);
}} // namespace boost::hof
#endif
| 1,318 |
677 | /*
* Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeKills.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "BytecodeUseDef.h"
#include "CodeBlock.h"
#include "FullBytecodeLiveness.h"
#include "HeapInlines.h"
#include "InterpreterInlines.h"
#include "PreciseJumpTargets.h"
namespace JSC {
// Builds the bytecode basic-block graph for the code block and immediately
// runs the liveness fixpoint over it (see compute()).
BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock)
    : m_graph(codeBlock, codeBlock->instructions())
{
    compute();
}
// Thin forwarding wrapper: the unnamed FastBitVector parameter is unused and
// kept only for signature compatibility; the work happens in the free
// function JSC::computeDefsForBytecodeOffset.
template<typename Functor>
void BytecodeLivenessAnalysis::computeDefsForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
{
    JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
}
// Thin forwarding wrapper, mirroring computeDefsForBytecodeOffset: the
// unnamed FastBitVector parameter is unused; the work happens in the free
// function JSC::computeUsesForBytecodeOffset.
template<typename Functor>
void BytecodeLivenessAnalysis::computeUsesForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
{
    JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
}
// Computes the set of locals live immediately before the instruction at
// bytecodeOffset, writing it into `result`. Must not be called for offsets
// inside the synthetic entry/exit blocks.
void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
{
    BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
    ASSERT(block);
    ASSERT(!block->isEntryBlock());
    ASSERT(!block->isExitBlock());
    // Size the result to the block's live-out vector, then let the helper
    // step backwards from the block's out-set to the requested offset.
    result.resize(block->out().numBits());
    computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result);
}
bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
{
if (operandIsAlwaysLive(operand))
return true;
FastBitVector result;
getLivenessInfoAtBytecodeOffset(bytecodeOffset, result);
return operandThatIsNotAlwaysLiveIsLive(result, operand);
}
// Convenience overload: forwards to the out-parameter variant and returns
// the liveness bit vector by value.
FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
    FastBitVector liveness;
    getLivenessInfoAtBytecodeOffset(bytecodeOffset, liveness);
    return liveness;
}
// Fills `result` with, for every bytecode offset, the liveness vector before
// that instruction. Processes blocks in reverse order, stepping backwards
// through each block from its live-out set.
void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
{
    FastBitVector out;
    CodeBlock* codeBlock = m_graph.codeBlock();
    result.m_map.resize(codeBlock->instructions().size());
    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        out = block->out();
        // Walk the block's instructions last-to-first; after each step `out`
        // holds the liveness before that instruction.
        for (unsigned i = block->offsets().size(); i--;) {
            unsigned bytecodeOffset = block->offsets()[i];
            stepOverInstruction(m_graph, bytecodeOffset, out);
            result.m_map[bytecodeOffset] = out;
        }
    }
}
// Computes, for each bytecode offset, the set of locals "killed" there:
// locals whose value is used at that instruction for the last time. Walks
// each block backwards from its live-out set.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    FastBitVector out;
    CodeBlock* codeBlock = m_graph.codeBlock();
    result.m_codeBlock = codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        out = block->out();
        for (unsigned i = block->offsets().size(); i--;) {
            unsigned bytecodeOffset = block->offsets()[i];
            stepOverInstruction(
                m_graph, bytecodeOffset, out,
                [&] (unsigned index) {
                    // This is for uses. A use of an index that is dead in
                    // `out` means this is its last use, i.e. a kill.
                    if (out[index])
                        return;
                    result.m_killSets[bytecodeOffset].add(index);
                    out[index] = true;
                },
                [&] (unsigned index) {
                    // This is for defs.
                    out[index] = false;
                });
        }
    }
}
// Debug helper: prints the basic-block structure (predecessor/successor
// sets) and per-instruction liveness of the code block to the data log.
void BytecodeLivenessAnalysis::dumpResults()
{
    CodeBlock* codeBlock = m_graph.codeBlock();
    dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n");
    Interpreter* interpreter = codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = codeBlock->instructions().begin();
    unsigned i = 0;
    unsigned numberOfBlocks = m_graph.size();
    // Invert the successor edges to build a predecessor bit set per block.
    Vector<FastBitVector> predecessors(numberOfBlocks);
    for (BytecodeBasicBlock* block : m_graph)
        predecessors[block->index()].resize(numberOfBlocks);
    for (BytecodeBasicBlock* block : m_graph) {
        for (unsigned j = 0; j < block->successors().size(); j++) {
            unsigned blockIndex = block->index();
            unsigned successorIndex = block->successors()[j]->index();
            predecessors[successorIndex][blockIndex] = true;
        }
    }
    // Prints the indices of the set bits, space-separated.
    auto dumpBitVector = [] (FastBitVector& bits) {
        for (unsigned j = 0; j < bits.numBits(); j++) {
            if (bits[j])
                dataLogF(" %u", j);
        }
    };
    for (BytecodeBasicBlock* block : m_graph) {
        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i++, block, block->leaderOffset(), block->totalLength());
        dataLogF("Predecessors:");
        dumpBitVector(predecessors[block->index()]);
        dataLogF("\n");
        dataLogF("Successors:");
        FastBitVector successors;
        successors.resize(numberOfBlocks);
        for (unsigned j = 0; j < block->successors().size(); j++) {
            BytecodeBasicBlock* successor = block->successors()[j];
            successors[successor->index()] = true;
        }
        dumpBitVector(successors); // Dump in sorted order.
        dataLogF("\n");
        if (block->isEntryBlock()) {
            dataLogF("Entry block %p\n", block);
            continue;
        }
        if (block->isExitBlock()) {
            dataLogF("Exit block: %p\n", block);
            continue;
        }
        // Walk the block's instructions in order, printing the live-before
        // set ahead of each instruction's disassembly.
        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
            const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];
            dataLogF("Live variables:");
            FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
            dumpBitVector(liveBefore);
            dataLogF("\n");
            codeBlock->dumpBytecode(WTF::dataFile(), instructionsBegin, currentInstruction);
            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
            unsigned opcodeLength = opcodeLengths[opcodeID];
            bytecodeOffset += opcodeLength;
        }
        // Finally, the block's live-out set.
        dataLogF("Live variables:");
        FastBitVector liveAfter = block->out();
        dumpBitVector(liveAfter);
        dataLogF("\n");
    }
}
// Runs the global liveness fixpoint over the block graph; optionally dumps
// the results when the corresponding JSC option is enabled.
void BytecodeLivenessAnalysis::compute()
{
    runLivenessFixpoint(m_graph);
    if (Options::dumpBytecodeLivenessResults())
        dumpResults();
}
} // namespace JSC
| 3,180 |
553 | //
// GreyShader.cpp
// cocos2d_libs
//
// Created by Kirito on 10/22/14.
//
//
#include "GreyShader.h"
// Replaces the sprite's GL program state with a grey-scale fragment shader
// loaded from res/shader3D/greyScale.fsh, paired with the stock
// position/texture/color vertex shader.
void GreyShader::setGreyShader(Sprite * s)
{
    auto fileUtils = FileUtils::getInstance();
    const auto fragPath = fileUtils->fullPathForFilename("res/shader3D/greyScale.fsh");
    const auto fragSource = fileUtils->getStringFromFile(fragPath);

    auto program = cocos2d::GLProgram::createWithByteArrays(
        ccPositionTextureColor_noMVP_vert, fragSource.c_str());
    s->setGLProgramState(cocos2d::GLProgramState::getOrCreateWithGLProgram(program));
}
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sd.hxx"
#ifdef SD_DLLIMPLEMENTATION
#undef SD_DLLIMPLEMENTATION
#endif
#include <com/sun/star/presentation/ClickAction.hpp>
#include <vcl/svapp.hxx>
#include <vos/mutex.hxx>
#include <vcl/msgbox.hxx>
#include <vcl/lstbox.hxx>
#include <vcl/combobox.hxx>
#include <sfx2/doctempl.hxx>
#include <svl/lstner.hxx>
#include <sfx2/objsh.hxx>
#include <svtools/ehdl.hxx>
#include <svtools/sfxecode.hxx>
#include <tools/urlobj.hxx>
#include <com/sun/star/presentation/FadeEffect.hpp>
#include <fadedef.h>
#include <sfx2/sfxsids.hrc>
#include <svl/undo.hxx>
#include "DrawDocShell.hxx"
#include <vcl/gdimtf.hxx>
#include <tools/wintypes.hxx>
#include "docprev.hxx"
#include <sfx2/app.hxx>
#include <sfx2/docfile.hxx>
#include <sfx2/dispatch.hxx>
#include <sfx2/request.hxx>
#include <com/sun/star/sdbc/XResultSet.hpp>
#include <com/sun/star/lang/XComponent.hpp>
#include <com/sun/star/util/XCloseable.hpp>
#include <com/sun/star/uno/RuntimeException.hpp>
#include <com/sun/star/frame/XModuleManager.hpp>
#include <com/sun/star/ucb/XSimpleFileAccess.hpp>
#include <com/sun/star/ui/XModuleUIConfigurationManagerSupplier.hpp>
#include <com/sun/star/ui/XImageManager.hpp>
#include <unotools/historyoptions.hxx>
#include <tools/urlobj.hxx>
#include <osl/file.hxx>
#include <sfx2/filedlghelper.hxx>
#include "sdpage.hxx"
#include "helpids.h"
#include "assclass.hxx"
#include "dlgass.hrc"
#include "dlgass.hxx"
#include "dlgctrls.hxx"
#ifndef _SD_CFGID_HXX
#include "strings.hrc"
#endif
#ifndef _DATETIMEITEM_HXX
#include "dlgassim.hxx"
#endif
#include "TemplateScanner.hxx"
#include "WindowUpdater.hxx"
#include <comphelper/processfactory.hxx>
using namespace ::com::sun::star;
using namespace ::com::sun::star::uno;
using namespace ::sd;
// Smooth-scales the bitmap shown by pBitmap to the control's current pixel
// size and writes the scaled bitmap back into the control.
void InterpolateFixedBitmap( FixedBitmap * pBitmap )
{
    Bitmap aScaled( pBitmap->GetBitmap() );
    aScaled.Scale( pBitmap->GetSizePixel(), BMP_SCALE_INTERPOLATE );
    pBitmap->SetBitmap( aScaled );
}
// ====================================================================
// ====================================================================
// Help IDs for the five wizard pages, in page order (page N at index N-1).
const char* PageHelpIds[] =
{
    HID_SD_AUTOPILOT_PAGE1,
    HID_SD_AUTOPILOT_PAGE2,
    HID_SD_AUTOPILOT_PAGE3,
    HID_SD_AUTOPILOT_PAGE4,
    HID_SD_AUTOPILOT_PAGE5
};
// ====================================================================
// A remembered document password together with the path it belongs to.
class PasswordEntry
{
public:
    String maPassword; // the password itself
    String maPath;     // path/URL of the document it unlocks
};
DECLARE_LIST( PasswordEntryList, PasswordEntry * )
// ====================================================================
/** A simple wrapper that looks like a PushButton and is used to force the
broadcasting of focus events primarily for accessibility tools.
Forcing focus events is achieved by using two identical PushButtons
which, when the focus event is requested, are exchanged and play focus
ping-pong by moving the focus from one to the other.
*/
class NextButton
{
public:
    NextButton (::Window* pParent, const ResId& rResId);
    // Moves focus from the currently active button to its twin, forcing a
    // focus event to be broadcast (see the class comment above).
    void ForceFocusEventBroadcast (void);
    void SetClickHdl (const Link& rLink);
    bool IsEnabled (void);
    void Enable (bool bEnable);
private:
    // Two identical buttons between which the focus is bounced.
    PushButton maNextButton1;
    PushButton maNextButton2;
    // Tracks which of the two buttons currently plays the visible role.
    bool mbIsFirstButtonActive;
};
// ====================================================================
/** Implementation of the Impress presentation wizard ("AutoPilot") dialog:
    owns all page controls, drives template/recent-file scanning, the
    document preview, and the page flow. */
class AssistentDlgImpl : public SfxListener
{
public:
    AssistentDlgImpl( ::Window* pWindow, const Link& rFinishLink, sal_Bool bAutoPilot );
    ~AssistentDlgImpl();
    /// Local mutex used to serialize concurrent method calls.
    ::osl::Mutex maMutex;
    SfxObjectShellLock GetDocument();
    /** closes the current preview docshell */
    void CloseDocShell();
    /** Extract from the history list of recently used files the impress
        files and insert them into a listbox.
    */
    void ScanDocmenu (void);
    /** Flag that is set to sal_True after the recently used files have been
        scanned.
    */
    sal_Bool mbRecentDocumentsReady;
    /** When the list of templates has not been scanned already this is done
        when this method is called. That includes requesting the whole list
        of templates from UCB and extracting from that list the impress
        templates and layouts and storing them for later use in
        <member>maPresentList</member>. Note that the first call to this
        method after installing a new Office may take some time.
    */
    void ProvideTemplates (void);
    /** This method transfers the template folders from the template scanner
        to the internal structures of this class. On termination it sets
        the flag <member>mbTemplatesReady</member> to <TRUE/> to indicate
        that the templates are available.
        @param rTemplateFolders
            This is a list of template folders. This method takes ownership
            of the supplied entries by removing them from the list and
            transferring them to an internal structure.
    */
    void TemplateScanDone (std::vector<TemplateDir*>& rTemplateFolders);
    /** Flag that is set to sal_True after the impress templates have been
        scanned.
    */
    sal_Bool mbTemplatesReady;
    /** Flag used to prevent nested or concurrent calls to the
        <member>UpdatePreview</member> method. A <TRUE/> value indicates
        that a preview update is currently active.
    */
    sal_Bool mbPreviewUpdating;
    ::Window* mpWindow;
    // Per-document password bookkeeping for protected templates/documents.
    void SavePassword( SfxObjectShellLock xDoc, const String& rPath );
    void RestorePassword( SfxItemSet* pSet, const String& rPath );
    String GetPassword( const String rPath );
    void DeletePassords();
    PasswordEntryList maPasswordList;
    String maDocFile;
    String maLayoutFile;
    String GetDocFileName();
    String GetLayoutFileName();
    /// List of URLs of recently used impress files.
    std::vector<String*> maOpenFilesList;
    /// List of folders containing data about impress templates.
    std::vector<TemplateDir*> maPresentList;
    /// Currently selected template folder.
    TemplateDir* mpTemplateRegion;
    /// Currently selected layout folder.
    TemplateDir* mpLayoutRegion;
    // preview
    sal_Bool mbUserDataDirty;
    Timer maPrevTimer;
    Timer maEffectPrevTimer;
    Timer maUpdatePageListTimer;
    Timer maStartScanTimer;
    SfxObjectShellLock xDocShell;
    ::std::auto_ptr<WindowUpdater> mpWindowUpdater;
    sal_Bool mbPreview;
    sal_uInt16 mnShowPage;
    sal_Bool mbDocPreview;
    sal_uLong mnTemplate;
    String maPageListFile;
    void UpdatePreview( sal_Bool bDocPreview );
    void UpdatePageList();
    void UpdateUserData();
    sal_Bool IsOwnFormat( const String& rPath );
    // dialog status / page flow
    void EndDialog( long nResult = 0 );
    void SetStartType( StartType eType );
    StartType GetStartType();
    void SelectTemplateRegion( const String& rRegion );
    void SelectLayoutRegion( const String& rRegion );
    void UpdatePage();
    void ChangePage();
    void LeavePage();
    String GetUiTextForCommand (const ::rtl::OUString& aCommandURL);
    Image GetUiIconForCommand (const ::rtl::OUString& aCommandURL);
    // Event handlers (VCL link callbacks).
    DECL_LINK( StartScanHdl, void * );
    DECL_LINK( SelectFileHdl, ListBox * );
    DECL_LINK( SelectRegionHdl, ListBox * );
    DECL_LINK( UpdatePreviewHdl, void * );
    DECL_LINK( UpdatePageListHdl, void * );
    DECL_LINK( StartTypeHdl, RadioButton * );
    DECL_LINK( SelectTemplateHdl, ListBox * );
    DECL_LINK( NextPageHdl, PushButton * );
    DECL_LINK( LastPageHdl, PushButton * );
    DECL_LINK( PreviewFlagHdl, CheckBox * );
    DECL_LINK( EffectPreviewHdl, Button * );
    DECL_LINK( SelectLayoutHdl, ListBox * );
    DECL_LINK( PageSelectHdl, Control * );
    DECL_LINK( PresTypeHdl, RadioButton * );
    DECL_LINK( UpdateUserDataHdl, Edit* );
    DECL_LINK( SelectEffectHdl, void* );
    DECL_LINK( OpenButtonHdl, Button * );
    // Common controls shared by all pages
    Assistent maAssistentFunc;
    CheckBox maPreviewFlag;
    CheckBox maStartWithFlag;
    HelpButton maHelpButton;
    CancelButton maCancelButton;
    PushButton maLastPageButton;
    NextButton maNextPageButton;
    OKButton maFinishButton;
    SdDocPreviewWin maPreview;
    String maCreateStr;
    String maOpenStr;
    // Page 1
    FixedBitmap* mpPage1FB;
    FixedLine* mpPage1ArtFL;
    RadioButton* mpPage1EmptyRB;
    RadioButton* mpPage1TemplateRB;
    ListBox* mpPage1RegionLB;
    ListBox* mpPage1TemplateLB;
    RadioButton* mpPage1OpenRB;
    ListBox* mpPage1OpenLB;
    PushButton* mpPage1OpenPB;
    // Page 2
    FixedBitmap* mpPage2FB;
    FixedLine* mpPage2LayoutFL;
    ListBox* mpPage2RegionLB;
    ListBox* mpPage2LayoutLB;
    FixedLine* mpPage2OutTypesFL;
    RadioButton* mpPage2Medium1RB;
    RadioButton* mpPage2Medium2RB;
    RadioButton* mpPage2Medium3RB;
    RadioButton* mpPage2Medium4RB;
    RadioButton* mpPage2Medium5RB;
    // Page 3
    FixedBitmap* mpPage3FB;
    FixedLine* mpPage3EffectFL;
    FixedText* mpPage3EffectFT;
    FadeEffectLB* mpPage3EffectLB;
    FixedText* mpPage3SpeedFT;
    ListBox* mpPage3SpeedLB;
    FixedLine* mpPage3PresTypeFL;
    RadioButton* mpPage3PresTypeLiveRB;
    RadioButton* mpPage3PresTypeKioskRB;
    FixedText* mpPage3PresTimeFT;
    TimeField* mpPage3PresTimeTMF;
    FixedText* mpPage3BreakFT;
    TimeField* mpPage3BreakTMF;
    CheckBox* mpPage3LogoCB;
    // Page 4
    FixedBitmap* mpPage4FB;
    FixedLine* mpPage4PersonalFL;
    FixedText* mpPage4AskNameFT;
    Edit* mpPage4AskNameEDT;
    FixedText* mpPage4AskTopicFT;
    Edit* mpPage4AskTopicEDT;
    FixedText* mpPage4AskInfoFT;
    MultiLineEdit* mpPage4AskInfoEDT;
    // Page 5
    FixedBitmap* mpPage5FB;
    FixedText* mpPage5PageListFT;
    SdPageListControl* mpPage5PageListCT;
    CheckBox* mpPage5SummaryCB;
};
// ====================================================================
AssistentDlgImpl::AssistentDlgImpl( ::Window* pWindow, const Link& rFinishLink, sal_Bool bAutoPilot ) :
mpTemplateRegion(NULL),
mpLayoutRegion(NULL),
mbUserDataDirty(sal_False),
xDocShell (NULL),
mpWindowUpdater (new WindowUpdater()),
mbPreview(sal_True),
mnShowPage(0),
mbDocPreview(sal_False),
maAssistentFunc(5),
maPreviewFlag(pWindow,SdResId(CB_PREVIEW)),
maStartWithFlag(pWindow,SdResId(CB_STARTWITH)),
maHelpButton(pWindow,SdResId(BUT_HELP)),
maCancelButton(pWindow,SdResId(BUT_CANCEL)),
maLastPageButton(pWindow,SdResId(BUT_LAST)),
maNextPageButton(pWindow,SdResId(BUT_NEXT)),
maFinishButton(pWindow,SdResId(BUT_FINISH)),
maPreview(pWindow,SdResId(CT_PREVIEW)),
maCreateStr(SdResId(STR_CREATE)),
maOpenStr(SdResId(STR_OPEN))
{
maPageListFile += sal_Unicode('?'),
mbRecentDocumentsReady = sal_False;
mbTemplatesReady = sal_False;
mbPreviewUpdating = sal_False;
mpWindow = pWindow;
if(bAutoPilot)
maStartWithFlag.Hide();
else
maAssistentFunc.InsertControl(1, &maStartWithFlag );
//page1 wird initialisiert und an die Assistentenfunktionalitaet
//uebergeben
maAssistentFunc.InsertControl(1, &maPreview );
maAssistentFunc.InsertControl(1, &maPreviewFlag );
maAssistentFunc.InsertControl(1,
mpPage1FB = new FixedBitmap(pWindow,SdResId(FB_PAGE1)));
maAssistentFunc.InsertControl(1,
mpPage1ArtFL = new FixedLine(pWindow,SdResId(FL_PAGE1_ARTGROUP)));
maAssistentFunc.InsertControl(1,
mpPage1EmptyRB=new RadioButton(pWindow,SdResId(RB_PAGE1_EMPTY)));
maAssistentFunc.InsertControl(1,
mpPage1TemplateRB=new RadioButton(pWindow,SdResId(RB_PAGE1_TEMPLATE)));
maAssistentFunc.InsertControl(1,
mpPage1OpenRB=new RadioButton(pWindow,SdResId(RB_PAGE1_OPEN)));
maAssistentFunc.InsertControl(1,
mpPage1RegionLB = new ListBox(pWindow,SdResId(LB_PAGE1_REGION)));
maAssistentFunc.InsertControl(1,
mpPage1TemplateLB=new ListBox(pWindow,SdResId(LB_PAGE1_TEMPLATES)));
maAssistentFunc.InsertControl(1,
mpPage1OpenPB=new PushButton(pWindow,SdResId(PB_PAGE1_OPEN)));
maAssistentFunc.InsertControl(1,
mpPage1OpenLB=new ListBox(pWindow,SdResId(LB_PAGE1_OPEN)));
// Align the button and list box displayed for the "open existing file"
// radio button with the text of that radio button.
{
RadioButton aEmptyRB (mpWindow);
sal_Int32 nIndent (aEmptyRB.CalcMinimumSize(0).Width());
sal_Int32 nLeft (mpPage1OpenRB->GetPosPixel().X() + nIndent);
sal_Int32 nWidth (mpPage1OpenRB->GetSizePixel().Width() - nIndent);
mpPage1OpenPB->SetPosSizePixel(
Point(nLeft, mpPage1OpenPB->GetPosPixel().Y()),
Size(mpPage1OpenPB->GetSizePixel()));
mpPage1OpenLB->SetPosSizePixel(
Point(nLeft, mpPage1OpenLB->GetPosPixel().Y()),
Size(nWidth, mpPage1OpenLB->GetSizePixel().Height()));
}
// Set text and icon of the 'Open...' button.
{
String sText (GetUiTextForCommand(::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM(".uno:Open"))));
// Remove the mnemonic and add a leading space so that icon and text
// are not too close together.
sText.SearchAndReplaceAll(String(RTL_CONSTASCII_STRINGPARAM("~")),String());
sText.Insert(String(RTL_CONSTASCII_STRINGPARAM(" ")),0);
mpPage1OpenPB->SetText(sText);
// Place icon left of text and both centered in the button.
mpPage1OpenPB->SetModeImage(
GetUiIconForCommand(::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM(".uno:Open"))),
BMP_COLOR_NORMAL);
mpPage1OpenPB->EnableImageDisplay(sal_True);
mpPage1OpenPB->EnableTextDisplay(sal_True);
mpPage1OpenPB->SetImageAlign(IMAGEALIGN_LEFT);
mpPage1OpenPB->SetStyle(mpPage1OpenPB->GetStyle() | WB_CENTER);
}
// links&handler
mpPage1RegionLB->SetSelectHdl(LINK(this,AssistentDlgImpl,SelectRegionHdl));
mpPage1RegionLB->SetDropDownLineCount( 6 );
mpPage1TemplateLB->SetSelectHdl(LINK(this,AssistentDlgImpl,SelectTemplateHdl));
mpPage1TemplateLB->InsertEntry(String(SdResId(STR_ISLOADING)));
mpPage1EmptyRB->SetClickHdl(LINK(this,AssistentDlgImpl,StartTypeHdl));
mpPage1TemplateRB->SetClickHdl(LINK(this,AssistentDlgImpl,StartTypeHdl));
mpPage1OpenRB->SetClickHdl(LINK(this,AssistentDlgImpl,StartTypeHdl));
mpPage1OpenLB->SetSelectHdl(LINK(this,AssistentDlgImpl,SelectFileHdl));
mpPage1OpenLB->SetDoubleClickHdl(rFinishLink);
mpPage1OpenPB->SetClickHdl(LINK(this,AssistentDlgImpl,OpenButtonHdl));
// mpPage1OpenLB->InsertEntry(String(SdResId(STR_WIZARD_POSITION)));
// Seite 2
maAssistentFunc.InsertControl(2, &maPreview );
maAssistentFunc.InsertControl(2, &maPreviewFlag );
maAssistentFunc.InsertControl(2,
mpPage2FB = new FixedBitmap(pWindow,SdResId(FB_PAGE2)));
maAssistentFunc.InsertControl(2,
mpPage2LayoutFL = new FixedLine( pWindow, SdResId(FL_PAGE2_LAYOUT) ));
maAssistentFunc.InsertControl(2,
mpPage2RegionLB = new ListBox(pWindow,SdResId(LB_PAGE2_REGION) ));
maAssistentFunc.InsertControl(2,
mpPage2LayoutLB = new ListBox(pWindow,SdResId(LB_PAGE2_LAYOUT) ));
maAssistentFunc.InsertControl(2,
mpPage2OutTypesFL = new FixedLine( pWindow, SdResId(FL_PAGE2_OUTPUTTYPES) ));
maAssistentFunc.InsertControl(2,
mpPage2Medium5RB = new RadioButton( pWindow, SdResId(RB_PAGE2_MEDIUM5) ));
maAssistentFunc.InsertControl(2,
mpPage2Medium3RB = new RadioButton( pWindow, SdResId(RB_PAGE2_MEDIUM3) ));
maAssistentFunc.InsertControl(2,
mpPage2Medium4RB = new RadioButton( pWindow, SdResId(RB_PAGE2_MEDIUM4) ));
maAssistentFunc.InsertControl(2,
mpPage2Medium1RB = new RadioButton( pWindow, SdResId(RB_PAGE2_MEDIUM1) ));
maAssistentFunc.InsertControl(2,
mpPage2Medium2RB = new RadioButton( pWindow, SdResId(RB_PAGE2_MEDIUM2) ));
mpPage2Medium5RB->Check();
mpPage2RegionLB->SetSelectHdl(LINK(this,AssistentDlgImpl,SelectRegionHdl));
mpPage2RegionLB->SetDropDownLineCount( 6 );
mpPage2LayoutLB->SetSelectHdl(LINK(this,AssistentDlgImpl,SelectLayoutHdl));
mpPage2LayoutLB->InsertEntry(String(SdResId(STR_ISLOADING)));
// Seite 3
maAssistentFunc.InsertControl(3, &maPreview );
maAssistentFunc.InsertControl(3, &maPreviewFlag );
maAssistentFunc.InsertControl(3,
mpPage3FB = new FixedBitmap(pWindow,SdResId(FB_PAGE3)));
maAssistentFunc.InsertControl(3,
mpPage3EffectFL = new FixedLine( pWindow, SdResId(FL_PAGE3_EFFECT) ));
maAssistentFunc.InsertControl(3,
mpPage3EffectFT = new FixedText( pWindow, SdResId(FT_PAGE3_EFFECT) ));
maAssistentFunc.InsertControl(3,
mpPage3EffectLB = new FadeEffectLB( pWindow, SdResId(LB_PAGE3_EFFECT) ));
maAssistentFunc.InsertControl(3,
mpPage3SpeedFT = new FixedText( pWindow, SdResId(FT_PAGE3_SPEED) ));
maAssistentFunc.InsertControl(3,
mpPage3SpeedLB = new FadeEffectLB( pWindow, SdResId(LB_PAGE3_SPEED) ));
maAssistentFunc.InsertControl(3,
mpPage3PresTypeFL = new FixedLine( pWindow, SdResId(FL_PAGE3_PRESTYPE) ));
maAssistentFunc.InsertControl(3,
mpPage3PresTypeLiveRB = new RadioButton( pWindow, SdResId(RB_PAGE3_LIVE) ));
maAssistentFunc.InsertControl(3,
mpPage3PresTypeKioskRB = new RadioButton( pWindow, SdResId(RB_PAGE3_KIOSK) ));
maAssistentFunc.InsertControl(3,
mpPage3PresTimeFT = new FixedText( pWindow, SdResId( FT_PAGE3_TIME) ));
maAssistentFunc.InsertControl(3,
mpPage3PresTimeTMF = new TimeField( pWindow, SdResId( TMF_PAGE3_TIME) ));
maAssistentFunc.InsertControl(3,
mpPage3BreakFT = new FixedText( pWindow, SdResId( FT_PAGE3_BREAK) ));
maAssistentFunc.InsertControl(3,
mpPage3BreakTMF = new TimeField( pWindow, SdResId( TMF_PAGE3_BREAK) ));
maAssistentFunc.InsertControl(3,
mpPage3LogoCB = new CheckBox( pWindow, SdResId( CB_PAGE3_LOGO) ));
mpPage3EffectLB->Fill();
// mpPage3EffectLB->SelectEffect( presentation::FadeEffect_NONE );
mpPage3EffectLB->SetSelectHdl( LINK(this,AssistentDlgImpl,SelectEffectHdl ));
mpPage3EffectLB->SetDropDownLineCount( 12 );
mpPage3SpeedLB->InsertEntry( String( SdResId(STR_SLOW) ));
mpPage3SpeedLB->InsertEntry( String( SdResId(STR_MEDIUM) ));
mpPage3SpeedLB->InsertEntry( String( SdResId(STR_FAST) ));
mpPage3SpeedLB->SetDropDownLineCount( 3 );
mpPage3SpeedLB->SetSelectHdl( LINK(this,AssistentDlgImpl,SelectEffectHdl ));
mpPage3SpeedLB->SelectEntryPos( 1 );
mpPage3PresTypeLiveRB->Check();
mpPage3PresTypeLiveRB->SetClickHdl( LINK(this,AssistentDlgImpl, PresTypeHdl ));
mpPage3PresTypeKioskRB->SetClickHdl( LINK(this,AssistentDlgImpl, PresTypeHdl ));
mpPage3PresTimeTMF->SetFormat( TIMEF_SEC );
mpPage3PresTimeTMF->SetTime( Time( 0, 0, 10 ) );
mpPage3BreakTMF->SetFormat( TIMEF_SEC );
mpPage3BreakTMF->SetTime( Time( 0, 0, 10 ) );
mpPage3LogoCB->Check();
// set cursor in timefield
Edit *pEditPage3PresTimeTMF = mpPage3PresTimeTMF->GetField();
Edit *pEditPage3BreakTMF = mpPage3BreakTMF->GetField();
Selection aSel1( pEditPage3PresTimeTMF->GetMaxTextLen(), pEditPage3PresTimeTMF->GetMaxTextLen() );
Selection aSel2( pEditPage3BreakTMF->GetMaxTextLen(), pEditPage3BreakTMF->GetMaxTextLen() );
pEditPage3PresTimeTMF->SetSelection( aSel1 );
pEditPage3BreakTMF->SetSelection( aSel2 );
// Seite 4
maAssistentFunc.InsertControl(4,
mpPage4FB = new FixedBitmap(pWindow,SdResId(FB_PAGE4)));
maAssistentFunc.InsertControl(4,
mpPage4PersonalFL = new FixedLine( pWindow, SdResId(FL_PAGE4_PERSONAL) ));
maAssistentFunc.InsertControl(4,
mpPage4AskNameFT = new FixedText( pWindow, SdResId(FT_PAGE4_ASKNAME) ));
maAssistentFunc.InsertControl(4,
mpPage4AskNameEDT = new Edit( pWindow, SdResId(EDT_PAGE4_ASKNAME) ));
maAssistentFunc.InsertControl(4,
mpPage4AskTopicFT= new FixedText( pWindow, SdResId(FT_PAGE4_ASKTOPIC) ));
maAssistentFunc.InsertControl(4,
mpPage4AskTopicEDT = new Edit( pWindow, SdResId(EDT_PAGE4_ASKTOPIC) ));
maAssistentFunc.InsertControl(4,
mpPage4AskInfoFT = new FixedText( pWindow, SdResId(FT_PAGE4_ASKINFORMATION) ));
maAssistentFunc.InsertControl(4,
mpPage4AskInfoEDT = new MultiLineEdit( pWindow, SdResId(EDT_PAGE4_ASKINFORMATION) ));
mpPage4AskNameEDT->SetModifyHdl(LINK(this,AssistentDlgImpl,UpdateUserDataHdl));
mpPage4AskTopicEDT->SetModifyHdl(LINK(this,AssistentDlgImpl,UpdateUserDataHdl));
mpPage4AskInfoEDT->SetModifyHdl(LINK(this,AssistentDlgImpl,UpdateUserDataHdl));
// page 5
maAssistentFunc.InsertControl(5, &maPreview );
maAssistentFunc.InsertControl(5, &maPreviewFlag );
maAssistentFunc.InsertControl(5,
mpPage5FB = new FixedBitmap(pWindow,SdResId(FB_PAGE5)));
maAssistentFunc.InsertControl(5,
mpPage5PageListFT = new FixedText( pWindow, SdResId( FT_PAGE5_PAGELIST ) ));
maAssistentFunc.InsertControl(5,
mpPage5PageListCT = new SdPageListControl( pWindow, SdResId( CT_PAGE5_PAGELIST ) ));
maAssistentFunc.InsertControl(5,
mpPage5SummaryCB = new CheckBox( pWindow, SdResId( CB_PAGE5_SUMMARY ) ));
mpPage5PageListCT->SetSelectHdl(LINK(this,AssistentDlgImpl, PageSelectHdl));
// generell
InterpolateFixedBitmap( mpPage1FB );
InterpolateFixedBitmap( mpPage2FB );
InterpolateFixedBitmap( mpPage3FB );
InterpolateFixedBitmap( mpPage4FB );
InterpolateFixedBitmap( mpPage5FB );
maLastPageButton.SetClickHdl(LINK(this,AssistentDlgImpl, LastPageHdl ));
maNextPageButton.SetClickHdl(LINK(this,AssistentDlgImpl, NextPageHdl ));
maPreviewFlag.Check( mbPreview );
maPreviewFlag.SetClickHdl(LINK(this, AssistentDlgImpl, PreviewFlagHdl ));
maPreview.SetClickHdl(LINK(this,AssistentDlgImpl, EffectPreviewHdl ));
//setzt die Ausgangsseite
maAssistentFunc.GotoPage(1);
maLastPageButton.Disable();
maPrevTimer.SetTimeout( 200 );
maPrevTimer.SetTimeoutHdl( LINK( this, AssistentDlgImpl, UpdatePreviewHdl));
maEffectPrevTimer.SetTimeout( 50 );
maEffectPrevTimer.SetTimeoutHdl( LINK( this, AssistentDlgImpl, EffectPreviewHdl ));
maUpdatePageListTimer.SetTimeout( 50 );
maUpdatePageListTimer.SetTimeoutHdl( LINK( this, AssistentDlgImpl, UpdatePageListHdl));
SetStartType( ST_EMPTY );
ChangePage();
mpWindowUpdater->RegisterWindow (&maPreview);
UpdatePreview( sal_True );
//check whether we should start with a template document initialy and preselect it
const ::rtl::OUString aServiceName( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.presentation.PresentationDocument" ) );
String aStandardTemplate( SfxObjectFactory::GetStandardTemplate( aServiceName ) );
if( aStandardTemplate.Len() )
{
ProvideTemplates();
//find aStandardTemplate in maPresentList
TemplateDir* pStandardTemplateDir = 0;
TemplateEntry* pStandardTemplateEntry = 0;
std::vector<TemplateDir*>::iterator I;
for (I=maPresentList.begin(); I!=maPresentList.end(); I++)
{
TemplateDir* pDir = *I;
std::vector<TemplateEntry*>::iterator J;
for (J=pDir->maEntries.begin(); J!=pDir->maEntries.end(); J++)
{
TemplateEntry* pEntry = *J;
if(pEntry->msPath == aStandardTemplate)
{
pStandardTemplateDir = pDir;
pStandardTemplateEntry = pEntry;
break;
}
}
if(pStandardTemplateDir)
break;
}
//preselect template
if( pStandardTemplateDir && pStandardTemplateEntry )
{
mpPage1RegionLB->SelectEntry( pStandardTemplateDir->msRegion );
SelectTemplateRegion( pStandardTemplateDir->msRegion );
mpPage1TemplateLB->SelectEntry( pStandardTemplateEntry->msTitle );
SelectTemplateHdl(mpPage1TemplateLB);
}
}
}
// Tear down the wizard implementation: close the preview document,
// free cached passwords, the scanned template data, all owned page
// controls and the recent-files list.
AssistentDlgImpl::~AssistentDlgImpl()
{
    // Close the (hidden) preview document shell first, before the
    // controls that display it are deleted below.
    CloseDocShell();

    DeletePassords();

    // Delete the template file infos (each directory owns its entries).
    std::vector<TemplateDir*>::iterator I;
    std::vector<TemplateEntry*>::iterator J;
    for (I=maPresentList.begin(); I!=maPresentList.end(); I++)
    {
        for (J=(*I)->maEntries.begin(); J!=(*I)->maEntries.end(); J++)
            delete (*J);
        delete (*I);
    }

    // Page 1 controls.
    delete mpPage1FB;
    delete mpPage1ArtFL;
    delete mpPage1EmptyRB;
    delete mpPage1TemplateRB;
    delete mpPage1TemplateLB;
    delete mpPage1RegionLB;
    delete mpPage1OpenRB;
    delete mpPage1OpenLB;
    delete mpPage1OpenPB;

    // Page 2 controls.
    delete mpPage2FB;
    delete mpPage2LayoutFL;
    delete mpPage2RegionLB;
    delete mpPage2LayoutLB;
    delete mpPage2OutTypesFL;
    delete mpPage2Medium1RB;
    delete mpPage2Medium2RB;
    delete mpPage2Medium3RB;
    delete mpPage2Medium4RB;
    delete mpPage2Medium5RB;

    // Page 3 controls.
    delete mpPage3FB;
    delete mpPage3EffectFL;
    delete mpPage3EffectFT;
    delete mpPage3EffectLB;
    delete mpPage3SpeedFT;
    delete mpPage3SpeedLB;
    delete mpPage3PresTypeFL;
    delete mpPage3PresTypeLiveRB;
    delete mpPage3PresTypeKioskRB;
    delete mpPage3PresTimeFT;
    delete mpPage3PresTimeTMF;
    delete mpPage3BreakFT;
    delete mpPage3BreakTMF;
    delete mpPage3LogoCB;

    // Page 4 controls.
    delete mpPage4FB;
    delete mpPage4PersonalFL;
    delete mpPage4AskNameFT;
    delete mpPage4AskNameEDT;
    delete mpPage4AskTopicFT;
    delete mpPage4AskTopicEDT;
    delete mpPage4AskInfoFT;
    delete mpPage4AskInfoEDT;

    // Page 5 controls.
    delete mpPage5FB;
    delete mpPage5PageListFT;
    delete mpPage5PageListCT;
    delete mpPage5SummaryCB;

    // Delete the file history list (owned String copies, see ScanDocmenu).
    std::vector<String*>::iterator I2;
    for (I2=maOpenFilesList.begin(); I2!=maOpenFilesList.end(); I2++)
        delete *I2;
}
// Release the preview document shell, preferring the UNO closing
// protocol (which notifies views and close listeners) and falling back
// to the plain SFX close when the model is not closeable.
void AssistentDlgImpl::CloseDocShell()
{
    // Nothing to do when no preview document is currently held.
    if( !xDocShell.Is() )
        return;

    uno::Reference< util::XCloseable > xCloseable( xDocShell->GetModel(), uno::UNO_QUERY );
    if( xCloseable.is() )
        xCloseable->close( sal_True );
    else
        xDocShell->DoClose();

    xDocShell = NULL;
}
// Called when the dialog ends: drop the window pointer so subsequent
// timer callbacks see that no UI is available anymore.
void AssistentDlgImpl::EndDialog( long )
{
    mpWindow = NULL;
}
// Lazily populate the recent-documents list box on page 1 with every
// Impress document found in the application's pick list (history).
// Runs at most once; mbRecentDocumentsReady guards re-entry.
void AssistentDlgImpl::ScanDocmenu (void)
{
    if( mbRecentDocumentsReady )
        return;

    // The pick list: one property sequence (URL/filter/title/password)
    // per recently used document.
    uno::Sequence<uno::Sequence<beans::PropertyValue> > aHistory =
        SvtHistoryOptions().GetList (ePICKLIST);

    uno::Reference< lang::XMultiServiceFactory > xFactory( ::comphelper::getProcessServiceFactory() );
    uno::Reference< container::XNameAccess > xFilterFactory( xFactory->createInstance( rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "com.sun.star.document.FilterFactory" ) ) ), uno::UNO_QUERY );

    // Used below to skip history entries whose files no longer exist.
    Reference< ::com::sun::star::ucb::XSimpleFileAccess > xFileAccess(
        xFactory->createInstance(
            ::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM("com.sun.star.ucb.SimpleFileAccess"))),
        UNO_QUERY_THROW);

    sal_uInt32 nCount = aHistory.getLength();
    for (sal_uInt32 nItem=0; nItem<nCount; ++nItem)
    {
        // Get the current history item's properties.
        uno::Sequence<beans::PropertyValue> aPropertySet = aHistory[nItem];
        rtl::OUString sURL;
        rtl::OUString sFilter;
        rtl::OUString sTitle;
        rtl::OUString sPassword;
        sal_uInt32 nPropertyCount = aPropertySet.getLength();
        for (sal_uInt32 nProperty=0; nProperty<nPropertyCount; ++nProperty)
            if (aPropertySet[nProperty].Name == HISTORY_PROPERTYNAME_URL)
                aPropertySet[nProperty].Value >>= sURL;
            else if (aPropertySet[nProperty].Name == HISTORY_PROPERTYNAME_FILTER)
                aPropertySet[nProperty].Value >>= sFilter;
            else if (aPropertySet[nProperty].Name == HISTORY_PROPERTYNAME_TITLE)
                aPropertySet[nProperty].Value >>= sTitle;
            else if (aPropertySet[nProperty].Name == HISTORY_PROPERTYNAME_PASSWORD)
                aPropertySet[nProperty].Value >>= sPassword;

        // If the entry is an impress file then insert it into the
        // history list and the list box.  The document type is derived
        // from the filter's "DocumentService" property.
        uno::Sequence< beans::PropertyValue > lProps;
        if (xFilterFactory->hasByName(sFilter))
        {
            uno::Any aFilterPropSet = xFilterFactory->getByName( sFilter );
            aFilterPropSet >>= lProps;
        }
        sal_Int32 nPropCount = lProps.getLength();
        rtl::OUString sFactoryName;
        for( sal_Int32 i=0; i<nPropCount; ++i )
        {
            if( lProps[i].Name.compareToAscii( "DocumentService" ) == 0 &&
                (lProps[i].Value >>= sFactoryName) &&
                sFactoryName.compareToAscii( "com.sun.star.presentation.PresentationDocument" ) == 0 )
            {
                // yes, it's an impress document
                INetURLObject aURL;

                // Do not include the file if it does not exist.
                if (xFileAccess.is() && ! xFileAccess->exists(sURL))
                    continue;

                aURL.SetSmartURL (sURL);
                // The password is set only when it is not empty.
                if (sPassword.getLength() > 0)
                    aURL.SetPass (sPassword);
                // maOpenFilesList owns the String; freed in the destructor.
                maOpenFilesList.push_back (new String (aURL.GetMainURL( INetURLObject::NO_DECODE )));
                mpPage1OpenLB->InsertEntry (sTitle);
                break;
            }
        }
    }
    mbRecentDocumentsReady = sal_True;
    try
    {
        UpdatePreview(sal_True);
    }
    catch (uno::RuntimeException& )
    {
        // Ignore all exceptions.
    }
}
void AssistentDlgImpl::ProvideTemplates (void)
{
if ( ! mbTemplatesReady)
{
TemplateScanner aScanner;
aScanner.Scan ();
TemplateScanDone (aScanner.GetFolderList());
try
{
UpdatePreview(sal_True);
}
catch (uno::RuntimeException& e)
{
// Ignore all exceptions.
(void) e;
}
}
}
void AssistentDlgImpl::TemplateScanDone (
std::vector<TemplateDir*>& rTemplateFolder)
{
// This method is called from a thread. Therefore we get the solar mutex.
::vos::OGuard aGuard (Application::GetSolarMutex());
// Copy the contents of the given template folders to a local list.
maPresentList.swap (rTemplateFolder);
// Fill in the list box on the first page.
int nFirstEntry = 0;
mpPage1RegionLB->Clear();
std::vector<TemplateDir*>::iterator I;
int i;
for (i=0,I=maPresentList.begin(); I!=maPresentList.end(); I++,i++)
{
TemplateDir* pDir = *I;
if (pDir == NULL)
continue;
// HACK! presnt directory is always initially selected.
// We have to look at the first entry to get a URL.
if (!pDir->maEntries.empty() )
{
TemplateEntry* pEntry = pDir->maEntries.front();
if (pEntry != NULL)
if (pEntry->msPath.SearchAscii("presnt") != STRING_NOTFOUND)
nFirstEntry = i;
}
mpPage1RegionLB->InsertEntry (pDir->msRegion);
}
mpPage1RegionLB->SelectEntryPos ((sal_uInt16)nFirstEntry);
mpPage1RegionLB->Update();
SelectTemplateRegion (mpPage1RegionLB->GetSelectEntry());
// Fill in the list box on the second page.
nFirstEntry = 0;
mpPage2RegionLB->Clear();
for (i=0,I=maPresentList.begin(); I!=maPresentList.end(); I++,i++)
{
TemplateDir* pDir = *I;
if (pDir == NULL)
continue;
// HACK! layout directory is always initially selected.
// We have to look at the first entry to get a URL.
if (!pDir->maEntries.empty() )
{
TemplateEntry* pEntry = pDir->maEntries.front();
if (pEntry != NULL)
if (pEntry->msPath.SearchAscii("layout") != STRING_NOTFOUND)
nFirstEntry = i;
}
mpPage2RegionLB->InsertEntry (pDir->msRegion);
}
mpPage2RegionLB->SelectEntryPos ((sal_uInt16)nFirstEntry);
mpPage2RegionLB->Update();
SelectLayoutRegion (mpPage2RegionLB->GetSelectEntry());
// Make the changes visible.
mbTemplatesReady = sal_True;
if (mpWindow)
UpdatePage();
}
// ********************************************************************
// Status methods
// ********************************************************************
// Reflect the chosen start mode in the page-1 controls: check the
// matching radio button, show only the controls belonging to the mode,
// and relabel the finish button ("Open" vs. "Create").
void AssistentDlgImpl::SetStartType( StartType eType )
{
    mpPage1EmptyRB->SetState( eType == ST_EMPTY );
    mpPage1TemplateRB->SetState( eType == ST_TEMPLATE );
    mpPage1OpenRB->SetState( eType == ST_OPEN );

    const sal_Bool bTemplateMode = (eType == ST_TEMPLATE);
    const sal_Bool bOpenMode = (eType == ST_OPEN);

    // "Open existing document" skips the remaining wizard pages.
    maNextPageButton.Enable( !bOpenMode );

    mpPage1RegionLB->Show( bTemplateMode );
    mpPage1TemplateLB->Show( bTemplateMode );
    mpPage1OpenLB->Show( bOpenMode );
    mpPage1OpenPB->Show( bOpenMode );

    if (bOpenMode)
        maFinishButton.SetText(maOpenStr);
    else
        maFinishButton.SetText(maCreateStr);
}
// Derive the current start mode from whichever page-1 radio button is
// checked; ST_OPEN is the fallback when neither "empty" nor "template"
// is active.
StartType AssistentDlgImpl::GetStartType()
{
    StartType eType = ST_OPEN;
    if( mpPage1EmptyRB->IsChecked() )
        eType = ST_EMPTY;
    else if( mpPage1TemplateRB->IsChecked() )
        eType = ST_TEMPLATE;
    return eType;
}
// Return the file to load for the current start type and refresh the
// dialog title.  ST_TEMPLATE: path of the selected template, whose
// title is appended to the window title in parentheses.  ST_OPEN: path
// from the recent-files list.  Empty string: start with an empty
// document.
String AssistentDlgImpl::GetDocFileName()
{
    String aTitle;
    if(mpWindow)
    {
        aTitle = mpWindow->GetText();
        // Strip a previously appended " (<template>)" suffix.
        // NOTE(review): if '(' were at position 0, nPos-1 would wrap
        // around as sal_uInt16 -- presumably the title never starts
        // with '('; verify.
        sal_uInt16 nPos = aTitle.Search(sal_Unicode('('));
        if(nPos != STRING_NOTFOUND)
            aTitle.Erase( nPos-1 );
    }
    String aDocFile;
    if( GetStartType() == ST_TEMPLATE )
    {
        const sal_uInt16 nEntry = mpPage1TemplateLB->GetSelectEntryPos();
        TemplateEntry* pEntry = NULL;
        if(nEntry != (sal_uInt16)-1)
            pEntry = mpTemplateRegion->maEntries[nEntry];
        if(pEntry)
        {
            aDocFile = pEntry->msPath;
            // Append " (<template title>)" to the window title.
            aTitle.AppendAscii( RTL_CONSTASCII_STRINGPARAM( " (" ) );
            aTitle.Append( pEntry->msTitle );
            aTitle.Append( sal_Unicode(')') );
        }
    }
    else if( GetStartType() == ST_OPEN )
    {
        const sal_uInt16 nEntry = mpPage1OpenLB->GetSelectEntryPos();
        if(nEntry != (sal_uInt16)-1 )
            aDocFile = *maOpenFilesList[nEntry];
    }
    if(mpWindow)
        mpWindow->SetText(aTitle);
    return aDocFile;
}
// Return the path of the layout template selected on page 2.  Entry 0
// is the "original layout" pseudo entry, so the template vector is
// offset by one; entry 0 and "no selection" ((sal_uInt16)-1) both
// yield an empty path.
String AssistentDlgImpl::GetLayoutFileName()
{
    String aFile;
    const sal_uInt16 nPos = mpPage2LayoutLB->GetSelectEntryPos();
    if( nPos != (sal_uInt16)-1 && nPos > 0 )
    {
        TemplateEntry* pSelected = mpLayoutRegion->maEntries[nPos-1];
        if( pSelected )
            aFile = pSelected->msPath;
    }
    return aFile;
}
// Finalize the wizard's document and transfer ownership to the caller.
// Applies the selected transition/speed to every page checked on page
// 5, deletes the unchecked pages, and configures kiosk mode settings
// when the automatic presentation type was chosen.
SfxObjectShellLock AssistentDlgImpl::GetDocument()
{
    // mbPreview = sal_False; // do not show the document
    UpdatePreview(sal_False); // but load it completely
    UpdatePageList();
    SfxObjectShell* pShell = xDocShell;
    ::sd::DrawDocShell* pDocShell = PTR_CAST(::sd::DrawDocShell,pShell);
    SdDrawDocument* pDoc = pDocShell?pDocShell->GetDoc():NULL;
    if(pDoc)
    {
        const sal_uInt16 nPageCount = pDoc->GetSdPageCount(PK_STANDARD);
        sal_Bool bKiosk = mpPage3PresTypeKioskRB->IsChecked();
        // Per-page display time in seconds (the time field stores ms).
        sal_uInt32 nNewTime = (sal_uInt32)mpPage3PresTimeTMF->GetTime().GetMSFromTime() / 1000;
        if(bKiosk)
        {
            PresentationSettings& rSettings = pDoc->getPresentationSettings();
            rSettings.mbEndless = bKiosk;
            rSettings.mnPauseTimeout = (sal_Int32)mpPage3BreakTMF->GetTime().GetMSFromTime() / 1000;
            rSettings.mbShowPauseLogo = mpPage3LogoCB->IsChecked();
        }
        // nPgAbsNum walks the original pages (matching the check boxes
        // on page 5); nPgRelNum tracks the shrinking document while
        // unchecked pages are being deleted.
        sal_uInt16 nPgAbsNum = 0;
        sal_uInt16 nPgRelNum = 0;
        while( nPgAbsNum < nPageCount )
        {
            SdPage* pPage = pDoc->GetSdPage( nPgRelNum, PK_STANDARD );
            if( mpPage5PageListCT->IsPageChecked(nPgAbsNum) )
            {
                mpPage3EffectLB->applySelected(pPage);
                const sal_uInt16 nPos = mpPage3SpeedLB->GetSelectEntryPos();
                // Speed list: 0 = slow (3s), 1 = medium (2s), else fast (1s).
                pPage->setTransitionDuration( (nPos == 0) ? 3.0 : (nPos == 1) ? 2.0 : 1.0 );
                if(bKiosk)
                {
                    pPage->SetPresChange( PRESCHANGE_AUTO );
                    pPage->SetTime(nNewTime);
                }
                nPgRelNum++;
            }
            else
            {
                // Remove this page together with its notes page.
                pDoc->DeletePage( (nPgRelNum << 1) + 2 ); // delete notes page
                pDoc->DeletePage( (nPgRelNum << 1) + 1 ); // delete page
            }
            nPgAbsNum++;
        }
    }
    else
    {
        DBG_ERROR("sd::AssistentDlgImpl::GetDocument(), no template?");
    }
    // Hand over the shell; the wizard no longer owns the document.
    SfxObjectShellLock xRet = xDocShell;
    xDocShell = NULL;
    return xRet;
}
// Called just before the wizard switches pages.  Leaving the user-data
// page (4) with modified data schedules a preview refresh so the
// entered name/topic/info shows up in the document.
void AssistentDlgImpl::LeavePage()
{
    const int nCurrentPage = maAssistentFunc.GetCurrentPage();
    if( nCurrentPage == 4 && mbUserDataDirty )
        maPrevTimer.Start();
}
void AssistentDlgImpl::ChangePage()
{
maNextPageButton.Enable(!maAssistentFunc.IsLastPage());
maLastPageButton.Enable(!maAssistentFunc.IsFirstPage());
sal_uInt16 nPage = (sal_uInt16)maAssistentFunc.GetCurrentPage();
if( mpWindow )
{
mpWindow->SetHelpId( PageHelpIds[nPage-1]);
}
UpdatePage();
if( maNextPageButton.IsEnabled() )
{
maNextPageButton.ForceFocusEventBroadcast();
}
else
maFinishButton.GrabFocus();
}
void AssistentDlgImpl::UpdatePage()
{
sal_uInt16 nPage = (sal_uInt16)maAssistentFunc.GetCurrentPage();
switch(nPage)
{
case 1:
{
// Elemente auf der ersten Seite abhaengig vom Starttype Zeigen
SetStartType( GetStartType() );
mpPage1TemplateRB->Enable(sal_True /*mbTemplatesReady*/);
break;
}
case 2:
{
mpPage2RegionLB->Enable(mbTemplatesReady);
mpPage2LayoutLB->Enable(mbTemplatesReady);
if( GetStartType() != ST_EMPTY )
{
mpPage2Medium5RB->Enable( sal_True );
}
else
{
mpPage2Medium5RB->Enable( sal_False );
if(mpPage2Medium5RB->IsChecked())
mpPage2Medium1RB->Check();
}
break;
}
case 5:
{
if(mbDocPreview || maPageListFile != maDocFile)
mpPage5PageListCT->Clear();
maUpdatePageListTimer.Start();
break;
}
case 3:
{
if(GetStartType() != ST_TEMPLATE)
maNextPageButton.Enable(false);
sal_Bool bKiosk = mpPage3PresTypeKioskRB->IsChecked();
mpPage3PresTimeFT->Enable(bKiosk);
mpPage3BreakFT->Enable(bKiosk);
mpPage3PresTimeTMF->Enable(bKiosk);
mpPage3BreakTMF->Enable(bKiosk);
mpPage3LogoCB->Enable(bKiosk);
break;
}
}
}
// ********************************************************************
// UI handlers
// ********************************************************************
// A region list box changed: page 1's box switches the template list
// (and forces template mode), page 2's box switches the layout list.
IMPL_LINK( AssistentDlgImpl, SelectRegionHdl, ListBox *, pLB )
{
    if( pLB != mpPage1RegionLB )
    {
        SelectLayoutRegion( pLB->GetSelectEntry() );
    }
    else
    {
        SelectTemplateRegion( pLB->GetSelectEntry() );
        SetStartType( ST_TEMPLATE );
        mpPage2Medium5RB->Check();
    }
    return 0;
}
// Effect or speed selection changed: debounce via the short preview
// timer instead of updating the preview synchronously.
IMPL_LINK( AssistentDlgImpl, SelectEffectHdl, void*, EMPTYARG )
{
    maEffectPrevTimer.Start();
    return 0;
}
// "Open..." push button on page 1: behaves like double-clicking in the
// recent-files list with nothing selected (which triggers the file
// dialog in the double-click handler).
IMPL_LINK( AssistentDlgImpl, OpenButtonHdl, Button*, pButton )
{
    // Clear the selection and forward the call.
    mpPage1OpenLB->SetNoSelection();
    return mpPage1OpenLB->GetDoubleClickHdl().Call(pButton);
}
// Play the selected transition effect in the preview window, applying
// it to the currently shown page first.
IMPL_LINK( AssistentDlgImpl, EffectPreviewHdl, Button *, EMPTYARG )
{
    // Without an active preview or a loaded document there is nothing to do.
    if( !mbPreview || !xDocShell.Is() )
        return 0;

    SfxObjectShell* pObjectShell = xDocShell;
    DrawDocShell* pDrawDocShell = dynamic_cast< DrawDocShell * >(pObjectShell);
    SdDrawDocument* pDocument = pDrawDocShell ? pDrawDocShell->GetDoc() : NULL;
    if( pDocument )
    {
        SdPage* pPage = pDocument->GetSdPage( mnShowPage, PK_STANDARD );
        if( pPage )
            mpPage3EffectLB->applySelected(pPage);
    }
    maPreview.startPreview();
    return 0;
}
// The "preview" check box was toggled: sync the flag and refresh the
// preview only on an actual state change.
IMPL_LINK( AssistentDlgImpl, PreviewFlagHdl, CheckBox *, EMPTYARG )
{
    const sal_Bool bNewState = maPreviewFlag.IsChecked();
    if( bNewState != mbPreview )
    {
        mbPreview = bNewState;
        UpdatePreview(sal_True);
    }
    return 0;
}
// A template was picked on page 1: switch to template mode, reset the
// page-2 layout selection to "original", and schedule a preview update.
IMPL_LINK( AssistentDlgImpl, SelectTemplateHdl, ListBox *, EMPTYARG )
{
    SetStartType( ST_TEMPLATE );
    mpPage2Medium5RB->Check();
    mpPage2LayoutLB->SelectEntryPos(0);
    maPrevTimer.Start();
    return 0;
}
// A layout was picked on page 2: schedule a preview update.
IMPL_LINK( AssistentDlgImpl, SelectLayoutHdl, ListBox *, EMPTYARG )
{
    maPrevTimer.Start();
    return 0;
}
// A recent file was picked on page 1: switch to open mode and schedule
// a preview update.
IMPL_LINK( AssistentDlgImpl, SelectFileHdl, ListBox *, EMPTYARG )
{
    SetStartType( ST_OPEN );
    maPrevTimer.Start();
    return 0;
}
// Selection in the page list on page 5 changed: repaint the preview,
// but only when a different page was selected.
IMPL_LINK( AssistentDlgImpl, PageSelectHdl, Control *, EMPTYARG )
{
    const sal_uInt16 nSelectedPage = mpPage5PageListCT->GetSelectedPage();
    if( nSelectedPage != mnShowPage )
    {
        mnShowPage = nSelectedPage;
        UpdatePreview(sal_False);
    }
    return 0;
}
// Timer callback: rebuild the page list on page 5.
IMPL_LINK( AssistentDlgImpl, UpdatePageListHdl, void *, EMPTYARG )
{
    UpdatePageList();
    return 0;
}
// Timer callback: refresh the preview (in document-preview mode).
IMPL_LINK( AssistentDlgImpl, UpdatePreviewHdl, void *, EMPTYARG )
{
    UpdatePreview( sal_True );
    return 0;
}
// One of the three start-type radio buttons on page 1 was clicked:
// lazily gather the data the chosen mode needs, update the controls,
// preselect a default entry and schedule a preview update.
IMPL_LINK( AssistentDlgImpl, StartTypeHdl, RadioButton *, pButton )
{
    // Map the clicked button to its start mode.
    StartType eType;
    if( pButton == mpPage1EmptyRB )
        eType = ST_EMPTY;
    else if( pButton == mpPage1TemplateRB )
        eType = ST_TEMPLATE;
    else
        eType = ST_OPEN;

    // Lazily provide the data the mode requires.
    if(eType == ST_TEMPLATE)
        ProvideTemplates();
    else if(eType == ST_OPEN)
        ScanDocmenu();

    SetStartType( eType );

    // Preselect a sensible default entry.
    if(eType == ST_TEMPLATE)
    {
        mpPage1TemplateLB->SelectEntryPos(0);
        mpPage2Medium5RB->Check();
    }
    else if(eType == ST_OPEN)
        mpPage1OpenLB->SelectEntryPos(0);

    maPrevTimer.Start();
    return 0;
}
// "Next" button: advance the wizard by one page.
IMPL_LINK( AssistentDlgImpl, NextPageHdl, PushButton *, EMPTYARG )
{
    // When changing from the first to the second page make sure that the
    // templates are present.
    if (maAssistentFunc.GetCurrentPage() == 1)
        ProvideTemplates();
    // Change to the next page.
    LeavePage();
    maAssistentFunc.NextPage();
    ChangePage();
    return 0;
}
// "Back" button: go back one wizard page (no template scan is needed
// in this direction).
IMPL_LINK( AssistentDlgImpl, LastPageHdl, PushButton *, EMPTYARG )
{
    LeavePage();
    maAssistentFunc.PreviousPage();
    ChangePage();
    return 0;
}
// Presentation type (live vs. kiosk) changed on page 3: the timing
// controls only apply to the automatic (kiosk) show.
IMPL_LINK( AssistentDlgImpl, PresTypeHdl, RadioButton*, EMPTYARG )
{
    // Without a document the wizard cannot continue past this page.
    if(maDocFile.Len() == 0)
        maNextPageButton.Enable(false);

    const sal_Bool bKioskMode = mpPage3PresTypeKioskRB->IsChecked();
    mpPage3PresTimeFT->Enable(bKioskMode);
    mpPage3BreakFT->Enable(bKioskMode);
    mpPage3PresTimeTMF->Enable(bKioskMode);
    mpPage3BreakTMF->Enable(bKioskMode);
    mpPage3LogoCB->Enable(bKioskMode);
    return 0;
}
// One of the page-4 user-data fields was edited: mark the data dirty;
// when all three fields are empty, forget the remembered document file
// so the preview is rebuilt from scratch.
IMPL_LINK( AssistentDlgImpl, UpdateUserDataHdl, Edit*, EMPTYARG )
{
    mbUserDataDirty = sal_True;

    String aTopicText = mpPage4AskTopicEDT->GetText();
    String aNameText = mpPage4AskNameEDT->GetText();
    String aInfoText = mpPage4AskInfoEDT->GetText();
    const bool bAllEmpty =
        aTopicText.Len() == 0 && aNameText.Len() == 0 && aInfoText.Len() == 0;
    if( bAllEmpty )
        maDocFile.Erase();
    return 0;
}
// ********************************************************************
// ********************************************************************
// Fill the template list box on page 1 with the entries of the
// template directory named rRegion and, when in template mode,
// preselect its first entry.
void AssistentDlgImpl::SelectTemplateRegion( const String& rRegion )
{
    mpPage1TemplateLB->Clear();
    std::vector<TemplateDir*>::iterator I;
    for (I=maPresentList.begin(); I!=maPresentList.end(); I++)
    {
        TemplateDir * pDir = *I;
        // NOTE(review): mpTemplateRegion is overwritten on every
        // iteration; after a match it points at the matching directory,
        // but when rRegion is not found it ends up at the last entry --
        // presumably the region name always exists; verify.
        mpTemplateRegion = *I;
        if (pDir->msRegion.Equals( rRegion ) )
        {
            std::vector<TemplateEntry*>::iterator J;
            for (J=pDir->maEntries.begin(); J!=pDir->maEntries.end(); J++)
                mpPage1TemplateLB->InsertEntry ((*J)->msTitle);
            mpPage1TemplateLB->Update();
            if(GetStartType() == ST_TEMPLATE)
            {
                mpPage1TemplateLB->SelectEntryPos( 0 );
                SelectTemplateHdl(NULL);
            }
            break;
        }
    }
}
// Fill the layout list box on page 2 with the entries of the template
// directory named rRegion, after the fixed "original" entry at pos 0.
void AssistentDlgImpl::SelectLayoutRegion( const String& rRegion )
{
    mpPage2LayoutLB->Clear();
    // Entry 0 always offers the document's original layout.
    mpPage2LayoutLB->InsertEntry(String(SdResId(STR_WIZARD_ORIGINAL)));
    std::vector<TemplateDir*>::iterator I;
    for (I=maPresentList.begin(); I!=maPresentList.end(); I++)
    {
        TemplateDir * pDir = *I;
        // NOTE(review): mpLayoutRegion is overwritten on every
        // iteration; after a match it points at the matching directory,
        // otherwise at the last one -- presumably rRegion always
        // exists; verify.
        mpLayoutRegion = *I;
        if (pDir->msRegion.Equals (rRegion))
        {
            std::vector<TemplateEntry*>::iterator J;
            for (J=pDir->maEntries.begin(); J!=pDir->maEntries.end(); J++)
                mpPage2LayoutLB->InsertEntry ((*J)->msTitle);
            mpPage2LayoutLB->Update();
            break;
        }
    }
}
// Write the name/topic/info entered on page 4 into the title and
// outline (or text) placeholders of the document's first page.
void AssistentDlgImpl::UpdateUserData()
{
    String aTopic = mpPage4AskTopicEDT->GetText();
    String aName = mpPage4AskNameEDT->GetText();
    String aInfo = mpPage4AskInfoEDT->GetText();
    SfxObjectShell* pShell = xDocShell;
    DrawDocShell* pDocShell = PTR_CAST(DrawDocShell,pShell);
    SdDrawDocument* pDoc = pDocShell?pDocShell->GetDoc():NULL;
    SdPage* pPage = pDoc?pDoc->GetSdPage(0, PK_STANDARD):NULL;
    if(pPage && ( aTopic.Len() != 0 || aName.Len() != 0 || aInfo.Len() != 0 ) )
    {
        // Make sure the first page has at least a title placeholder.
        if( pPage->GetAutoLayout() == AUTOLAYOUT_NONE )
            pPage->SetAutoLayout(AUTOLAYOUT_TITLE, sal_True);
        SdrTextObj* pObj;
        String aEmptyString; // note: unused; kept as-is
        // The topic goes into the title object.
        if( aTopic.Len() )
        {
            pObj = dynamic_cast<SdrTextObj*>( pPage->GetPresObj( PRESOBJ_TITLE ) );
            if( pObj )
            {
                pPage->SetObjText( pObj, NULL, PRESOBJ_TITLE, aTopic );
                pObj->NbcSetStyleSheet( pPage->GetStyleSheetForPresObj( PRESOBJ_TITLE ), sal_True );
                pObj->SetEmptyPresObj(sal_False);
            }
        }
        // Name and info are combined as "name\n\ninfo" and placed into
        // the outline object, falling back to the plain text object.
        if ( aName.Len() || aInfo.Len() )
        {
            String aStrTmp( aName );
            if( aName.Len() )
                aStrTmp.AppendAscii( RTL_CONSTASCII_STRINGPARAM( "\n\n" ) );
            aStrTmp.Append( aInfo );
            pObj = dynamic_cast<SdrTextObj*>( pPage->GetPresObj( PRESOBJ_OUTLINE ) );
            if( pObj )
            {
                pPage->SetObjText( pObj, NULL, PRESOBJ_OUTLINE, aStrTmp );
                pObj->NbcSetStyleSheet( pPage->GetStyleSheetForPresObj( PRESOBJ_OUTLINE ), sal_True );
                pObj->SetEmptyPresObj(sal_False);
            }
            else
            {
                pObj = dynamic_cast<SdrTextObj*>( pPage->GetPresObj( PRESOBJ_TEXT ) );
                if( pObj )
                {
                    pPage->SetObjText( pObj, NULL, PRESOBJ_TEXT, aStrTmp );
                    pObj->NbcSetStyleSheet( pPage->GetStyleSheetForPresObj( PRESOBJ_TEXT ), sal_True );
                    pObj->SetEmptyPresObj(sal_False);
                }
            }
        }
    }
    mbUserDataDirty = sal_False;
}
// Rebuild the page list control on page 5.  Loads the full (non
// preview) document first when necessary; returns early when the list
// was already built from the currently loaded file.
void AssistentDlgImpl::UpdatePageList()
{
    if(mbDocPreview || !mbPreview)
        UpdatePreview(sal_False);
    else if(maPageListFile == maDocFile)
        return;
    maPageListFile = maDocFile;
    SfxObjectShell* pShell = xDocShell;
    DrawDocShell* pDocShell = PTR_CAST(DrawDocShell,pShell);
    SdDrawDocument* pDoc = pDocShell?pDocShell->GetDoc():NULL;
    mpPage5PageListCT->Clear();
    if(pDoc)
        mpPage5PageListCT->Fill(pDoc);
}
// (Re)load the document shown in the preview control.  bDocPreview:
// request preview-mode loading of the document.  Reuses the currently
// loaded document when possible, optionally applies the selected
// layout template as master page, and finally pushes the document
// into the preview control.
void AssistentDlgImpl::UpdatePreview( sal_Bool bDocPreview )
{
    // Guard against multiple concurrent execution to this method caused either
    // by calls from different threads or recursion.
    ::osl::MutexGuard aGuard (maMutex);
    if (mbPreviewUpdating)
        return;
    mbPreviewUpdating = sal_True;
    // With the preview switched off a preview-load request only blanks
    // the preview control.
    if(!mbPreview && bDocPreview)
    {
        maPreview.Invalidate();
        maPreview.SetObjectShell(0);
        mbPreviewUpdating = sal_False;
        return;
    }
    String aDocFile = GetDocFileName();
    String aLayoutFile = GetLayoutFileName();
    String aEmptyStr;
    SfxApplication *pSfxApp = SFX_APP();
    sal_uLong lErr;
    sal_Bool bChangeMaster = aLayoutFile.Len() != 0;
    if( aDocFile.Len() == 0 )
    {
        // No file selected: create a fresh empty document unless an
        // empty one is already loaded (and the layout did not just
        // get cleared).
        if( !xDocShell.Is() || maDocFile.Len() != 0 ||
            (maDocFile.Len() == 0 && maLayoutFile.Len() != 0 && aLayoutFile.Len() == 0 ))
        {
            CloseDocShell();
            DrawDocShell* pNewDocSh;
            xDocShell = pNewDocSh = new DrawDocShell(SFX_CREATE_MODE_STANDARD, sal_False);
            pNewDocSh->DoInitNew(NULL);
            SdDrawDocument* pDoc = pNewDocSh->GetDoc();
            pDoc->CreateFirstPages();
            pDoc->StopWorkStartupDelay();
            mbDocPreview = sal_False;
            maDocFile = aDocFile;
            mbUserDataDirty = sal_True;
        }
        else
            bChangeMaster = (aLayoutFile.Len() != 0) && (maLayoutFile != aLayoutFile);
    }
    else if( aDocFile == maDocFile && ( mbDocPreview == bDocPreview || bDocPreview ) )
    {
        // Same document already loaded; when the layout changed, undo
        // the previously applied master page change first.
        if( aLayoutFile != maLayoutFile )
        {
            SfxObjectShell* pShell = xDocShell;
            DrawDocShell* pDocShell = PTR_CAST(DrawDocShell,pShell);
            ::svl::IUndoManager* pUndoMgr = pDocShell?pDocShell->GetUndoManager():NULL;
            if(pUndoMgr)
                pUndoMgr->Undo();
            mbUserDataDirty = sal_True;
        }
        else
            bChangeMaster = sal_False;
    }
    else
    {
        // Different document (or a full load is required): load it,
        // either as an own-format template or via the generic open slot.
        CloseDocShell();
        ::Window *pParent = Application::GetDefDialogParent();
        Application::SetDefDialogParent( mpWindow );
        SfxErrorContext eEC(ERRCTX_SFX_LOADTEMPLATE,mpWindow);
        SfxItemSet* pSet = new SfxAllItemSet( pSfxApp->GetPool() );
        if(IsOwnFormat(aDocFile))
        {
            pSet->Put( SfxBoolItem( SID_TEMPLATE, sal_True ) );
            if(bDocPreview)
                pSet->Put( SfxBoolItem( SID_PREVIEW, sal_True ) );
            RestorePassword( pSet, aDocFile );
            if( (lErr = pSfxApp->LoadTemplate( xDocShell, aDocFile, sal_True, pSet )) != 0 )
                ErrorHandler::HandleError(lErr);
            else
                SavePassword( xDocShell, aDocFile );
        }
        else
        {
            // Foreign format (e.g. PowerPoint): open hidden through the
            // SID_OPENDOC slot.
            const String aTargetStr( RTL_CONSTASCII_USTRINGPARAM("_default") );
            SfxRequest aReq( SID_OPENDOC, SFX_CALLMODE_SYNCHRON, SFX_APP()->GetPool() );
            aReq.AppendItem( SfxStringItem( SID_FILE_NAME, aDocFile ));
            aReq.AppendItem( SfxStringItem( SID_REFERER, aEmptyStr ) );
            aReq.AppendItem( SfxStringItem( SID_TARGETNAME, aTargetStr ) );
            aReq.AppendItem( SfxBoolItem( SID_HIDDEN, sal_True ) );
            aReq.AppendItem( SfxBoolItem( SID_PREVIEW, bDocPreview ) );
            const SfxViewFrameItem* pRet = PTR_CAST( SfxViewFrameItem, SFX_APP()->ExecuteSlot( aReq ) );
            if ( pRet && pRet->GetFrame() && pRet->GetFrame()->GetObjectShell() )
                xDocShell = pRet->GetFrame()->GetObjectShell();
        }
        Application::SetDefDialogParent( pParent );
        mnShowPage = 0;
        mbDocPreview = bDocPreview;
        maDocFile = aDocFile;
        mbUserDataDirty = sal_True;
    }
    // Apply the selected layout template as the document's master page.
    if(bChangeMaster && (aLayoutFile != maDocFile))
    {
        // Load the layout template.
        SfxObjectShellLock xLayoutDocShell;
        SfxErrorContext eEC(ERRCTX_SFX_LOADTEMPLATE,mpWindow);
        SfxItemSet* pSet = new SfxAllItemSet( pSfxApp->GetPool() );
        ::Window *pParent = Application::GetDefDialogParent();
        Application::SetDefDialogParent( mpWindow );
        if(IsOwnFormat(aLayoutFile))
        {
            pSet->Put( SfxBoolItem( SID_TEMPLATE, sal_True ) );
            pSet->Put( SfxBoolItem( SID_PREVIEW, sal_True ) );
            RestorePassword( pSet, aLayoutFile );
            if( (lErr = pSfxApp->LoadTemplate( xLayoutDocShell, aLayoutFile, sal_True, pSet )) != 0 )
                ErrorHandler::HandleError(lErr);
            SavePassword( xLayoutDocShell, aLayoutFile );
        }
        Application::SetDefDialogParent( pParent );
        // Get the documents behind the two doc shells.
        SfxObjectShell* pShell = xDocShell;
        DrawDocShell* pDocShell = PTR_CAST(DrawDocShell,pShell);
        SdDrawDocument* pDoc = pDocShell?pDocShell->GetDoc():NULL;
        pShell = xLayoutDocShell;
        pDocShell = PTR_CAST(DrawDocShell,pShell);
        SdDrawDocument* pLayoutDoc = pDocShell?pDocShell->GetDoc():NULL;
        if( pDoc && pLayoutDoc )
        {
            pDoc->SetMasterPage(0, aEmptyStr, pLayoutDoc, sal_True, sal_False );
        }
        else
        {
            DBG_ERROR("sd::AssistentDlgImpl::UpdatePreview(), no document for preview?");
        }
        mbUserDataDirty = sal_True;
    }
    maLayoutFile = aLayoutFile;
    if(mbUserDataDirty)
        UpdateUserData();
    // Finally push (or clear) the document in the preview control.
    if ( !xDocShell.Is() || !mbPreview )
        maPreview.SetObjectShell( 0 );
    else
    {
        maPreview.SetObjectShell( xDocShell, mnShowPage );
    }
    mbPreviewUpdating = sal_False;
}
// Remember the password of the given document so the wizard can reload
// it later without asking the user again (see RestorePassword()).
void AssistentDlgImpl::SavePassword( SfxObjectShellLock xDoc, const String& rPath )
{
    if(xDoc.Is())
    {
        SfxMedium * pMedium = xDoc->GetMedium();
        if(pMedium && pMedium->IsStorage())
        {
            SfxItemSet * pSet = pMedium->GetItemSet();
            const SfxPoolItem *pItem = 0;
            if( pSet->GetItemState(SID_PASSWORD, sal_True, &pItem) == SFX_ITEM_SET )
            {
                //TODO/MBA: testing
                String aPass( ((const SfxStringItem*)pItem)->GetValue());
                if(aPass.Len() == 0)
                    return;
                // Look for an existing entry for this path ...
                PasswordEntry* pEntry = maPasswordList.First();
                while(pEntry)
                {
                    if(pEntry->maPath == rPath)
                        break;
                    pEntry = maPasswordList.Next();
                }
                // ... or create a new one.
                if(pEntry == NULL)
                {
                    pEntry = new PasswordEntry();
                    pEntry->maPath = rPath;
                    maPasswordList.Insert( pEntry );
                }
                if(pEntry)
                    pEntry->maPassword = aPass;
            }
        }
    }
}
// Re-attach a password previously remembered (via SavePassword()) for
// this path to the given load item set; does nothing when no password
// is known.
void AssistentDlgImpl::RestorePassword( SfxItemSet* pSet, const String& rPath )
{
    const String aPassword( GetPassword( rPath ) );
    if( aPassword.Len() > 0 )
        pSet->Put( SfxStringItem( SID_PASSWORD, aPassword ) );
}
// Linear search of the cached password list; returns an empty string
// when no password is remembered for rPath.
String AssistentDlgImpl::GetPassword( const String rPath )
{
    for( PasswordEntry* pEntry = maPasswordList.First();
         pEntry;
         pEntry = maPasswordList.Next() )
    {
        if( pEntry->maPath == rPath )
            return pEntry->maPassword;
    }
    return String();
}
// Free all cached password entries.
// NOTE(review): the entries are deleted but the list still holds the
// now-dangling pointers; in this file it is only called from the
// destructor, where the list goes away right afterwards.
void AssistentDlgImpl::DeletePassords()
{
    PasswordEntry* pEntry = maPasswordList.First();
    while(pEntry)
    {
        delete pEntry;
        pEntry = maPasswordList.Next();
    }
}
// Decide by file extension whether the file is in the application's
// own format: everything except PowerPoint ("ppt") counts as own.
sal_Bool AssistentDlgImpl::IsOwnFormat( const String& rPath )
{
    const INetURLObject aURL( rPath );
    DBG_ASSERT( aURL.GetProtocol() != INET_PROT_NOT_VALID, "invalid URL" );
    const String aExtension( aURL.GetFileExtension() );
    return !aExtension.EqualsIgnoreCaseAscii( "ppt" );
}
// Look up the UI label registered for the given command URL in the
// Impress module's UI command description service.  Returns an empty
// string when the command (or any required service) is unavailable;
// all UNO exceptions are swallowed.
String AssistentDlgImpl::GetUiTextForCommand (const ::rtl::OUString& sCommandURL)
{
    String sLabel;
    Reference<container::XNameAccess> xUICommandLabels;
    try
    {
        do
        {
            if (sCommandURL.getLength() == 0)
                break;
            // Retrieve popup menu labels
            Reference<lang::XMultiServiceFactory> xFactory (
                ::comphelper::getProcessServiceFactory ());
            if ( ! xFactory.is())
                break;
            ::rtl::OUString sModuleIdentifier (
                RTL_CONSTASCII_USTRINGPARAM("com.sun.star.presentation.PresentationDocument"));
            Reference<container::XNameAccess> xNameAccess (
                xFactory->createInstance(::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM(
                    "com.sun.star.frame.UICommandDescription"))),
                UNO_QUERY);
            if ( ! xNameAccess.is())
                break;
            Any a = xNameAccess->getByName(sModuleIdentifier);
            a >>= xUICommandLabels;
            if ( ! xUICommandLabels.is())
                break;
            // The command description is a property sequence; extract
            // its "Label" property.
            ::rtl::OUString sString;
            Sequence<beans::PropertyValue> aProperties;
            Any aAny (xUICommandLabels->getByName(sCommandURL));
            if (aAny >>= aProperties)
            {
                sal_Int32 nCount (aProperties.getLength());
                for (sal_Int32 i=0; i<nCount; i++)
                {
                    ::rtl::OUString sPropertyName (aProperties[i].Name);
                    if (sPropertyName.equalsAscii("Label"))
                    {
                        aProperties[i].Value >>= sString;
                        break;
                    }
                }
            }
            sLabel = sString;
        }
        while(false);
    }
    catch (com::sun::star::uno::Exception& rException)
    {
        // Best effort: an empty label is returned on any failure.
        (void)rException;
    }
    return sLabel;
}
// Look up the icon registered for the given command URL in the Impress
// module's image manager.  Returns an empty Image when the command (or
// any required service) is unavailable; all UNO exceptions are
// swallowed.
Image AssistentDlgImpl::GetUiIconForCommand (const ::rtl::OUString& sCommandURL)
{
    Image aIcon;
    Reference<container::XNameAccess> xUICommandLabels;
    try
    {
        do
        {
            if (sCommandURL.getLength() == 0)
                break;
            // Retrieve popup menu labels
            Reference<lang::XMultiServiceFactory> xFactory (
                ::comphelper::getProcessServiceFactory ());
            if ( ! xFactory.is())
                break;
            ::rtl::OUString sModuleIdentifier (
                RTL_CONSTASCII_USTRINGPARAM("com.sun.star.presentation.PresentationDocument"));
            Reference<com::sun::star::ui::XModuleUIConfigurationManagerSupplier> xSupplier (
                xFactory->createInstance(::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM(
                    "com.sun.star.ui.ModuleUIConfigurationManagerSupplier"))),
                UNO_QUERY_THROW);
            Reference<com::sun::star::ui::XUIConfigurationManager> xManager (
                xSupplier->getUIConfigurationManager(sModuleIdentifier));
            if ( ! xManager.is())
                break;
            Reference<com::sun::star::ui::XImageManager> xImageManager (
                xManager->getImageManager(),
                UNO_QUERY_THROW);
            // Query the image for exactly this one command.
            Sequence<rtl::OUString> aCommandList(1);
            aCommandList[0] = sCommandURL;
            Sequence<Reference<graphic::XGraphic> > xIconList (
                xImageManager->getImages(0,aCommandList));
            if ( ! xIconList.hasElements())
                break;
            aIcon = Graphic(xIconList[0]).GetBitmapEx();
        }
        while(false);
    }
    catch (com::sun::star::uno::Exception& rException)
    {
        // Best effort: an empty icon is returned on any failure.
        (void)rException;
    }
    return aIcon;
}
//////////////////////////////////////////////
/** Create the assistant dialog.  All real functionality lives in
    AssistentDlgImpl; this wrapper only constructs it and wires up the
    finish button.
*/
AssistentDlg::AssistentDlg(Window* pParent, sal_Bool bAutoPilot) :
    ModalDialog(pParent,SdResId(DLG_ASS))
{
    Link aFinishLink = LINK(this,AssistentDlg, FinishHdl);
    mpImpl = new AssistentDlgImpl( this, aFinishLink, bAutoPilot );
    // Button assignment: route clicks on the finish button to FinishHdl.
    mpImpl->maFinishButton.SetClickHdl(LINK(this,AssistentDlg,FinishHdl));
    FreeResource();
}
/** Handler for the finish button.  For the "open existing document" start
    type it ensures a document is actually selected (asking the user via a
    file dialog if necessary) before closing the assistant with RET_OK.
    Returns 1 when the user cancelled the file dialog (dialog stays open),
    0 otherwise.
*/
IMPL_LINK( AssistentDlg, FinishHdl, OKButton *, EMPTYARG )
{
    if( GetStartType() == ST_OPEN )
    {
        // If we do not have a file yet, ask for one before ending the dialog.
        String aFileToOpen = GetDocPath();
        if(aFileToOpen.Len() == 0)
        {
            sfx2::FileDialogHelper aFileDlg( WB_OPEN, ::String::CreateFromAscii("simpress") );
            if ( aFileDlg.Execute() == ERRCODE_NONE )
                aFileToOpen = aFileDlg.GetPath();
            if( aFileToOpen.Len() == 0)
                return 1;   // user cancelled -> keep the assistant open
            else
            {
                //add the selected file to the recent-file-listbox and select the new entry
                //this is necessary for 'GetDocPath()' returning the selected file after end of dialog
                INetURLObject aURL;
                aURL.SetSmartURL(aFileToOpen);
                mpImpl->maOpenFilesList.push_back (new String (aURL.GetMainURL( INetURLObject::NO_DECODE )));
                sal_uInt16 nNewPos = mpImpl->mpPage1OpenLB->InsertEntry(aURL.getName());
                mpImpl->mpPage1OpenLB->SelectEntryPos(nNewPos);
            }
        }
    }
    // Done: close both the implementation object and the dialog itself.
    mpImpl->EndDialog(RET_OK);
    EndDialog(RET_OK);
    return 0;
}
/** Destroy the dialog and its implementation object. */
AssistentDlg::~AssistentDlg()
{
    delete mpImpl;
}
/** Forward to the implementation: return the document created/opened by the
    assistant.
*/
SfxObjectShellLock AssistentDlg::GetDocument()
{
    return mpImpl->GetDocument();
}
/** Map the radio button checked on page 2 to the corresponding output
    medium.  Falls back to OUTPUT_ORIGINAL when none of the four medium
    buttons is checked.
*/
OutputType AssistentDlg::GetOutputMedium() const
{
    // The buttons are mutually exclusive, so the first checked one wins.
    return mpImpl->mpPage2Medium1RB->IsChecked() ? OUTPUT_PRESENTATION
         : mpImpl->mpPage2Medium2RB->IsChecked() ? OUTPUT_SLIDE
         : mpImpl->mpPage2Medium3RB->IsChecked() ? OUTPUT_OVERHEAD
         : mpImpl->mpPage2Medium4RB->IsChecked() ? OUTPUT_PAGE
         : OUTPUT_ORIGINAL;
}
/** Return whether the "create summary page" checkbox on page 5 is checked. */
sal_Bool AssistentDlg::IsSummary() const
{
    return mpImpl->mpPage5SummaryCB->IsChecked();
}
/** Forward to the implementation: return the start type chosen on page 1. */
StartType AssistentDlg::GetStartType() const
{
    return mpImpl->GetStartType();
}
/** Forward to the implementation: return the path of the selected document. */
String AssistentDlg::GetDocPath() const
{
    return mpImpl->GetDocFileName();
}
/** Return the inverted state of the "start with" checkbox.
    NOTE(review): the negation suggests the checkbox expresses "do not show
    this assistant again" semantics -- confirm against the dialog resource.
*/
sal_Bool AssistentDlg::GetStartWithFlag() const
{
    return !mpImpl->maStartWithFlag.IsChecked();
}
/** Return sal_True when neither a document file nor a layout file has been
    selected, i.e. the assistant would produce an empty document.
*/
sal_Bool AssistentDlg::IsDocEmpty() const
{
    return mpImpl->GetDocFileName().Len() == 0 &&
           mpImpl->GetLayoutFileName().Len() == 0;
}
/** Return the password remembered for the currently selected document file
    (empty string when none was stored).
*/
String AssistentDlg::GetPassword()
{
    return mpImpl->GetPassword( mpImpl->maDocFile );
}
//===== NextButton ============================================================
/** Create the twin "next" buttons from the same resource; only one of the
    two is ever visible (see ForceFocusEventBroadcast for why two exist).
*/
NextButton::NextButton (::Window* pParent, const ResId& rResId)
    : maNextButton1(pParent, rResId),
      maNextButton2(pParent, rResId),
      mbIsFirstButtonActive(true)
{
    // Hide the unused button.
    maNextButton2.Hide();
}
/** Force a focus event to be broadcast by toggling between the two identical
    buttons: the currently visible one is hidden while its twin is shown and
    given the focus.  To the user "the" next button never changes.
*/
void NextButton::ForceFocusEventBroadcast (void)
{
    if ( ! mbIsFirstButtonActive)
    {
        // Second button is active: switch back to the first one.
        mbIsFirstButtonActive = true;
        maNextButton1.Show();
        maNextButton1.GrabFocus();
        maNextButton2.Hide();
    }
    else
    {
        // First button is active: switch over to the second one.
        mbIsFirstButtonActive = false;
        maNextButton2.Show();
        maNextButton2.GrabFocus();
        maNextButton1.Hide();
    }
}
/** Install the click handler on both twin buttons so that the handler fires
    no matter which of the two is currently visible.
*/
void NextButton::SetClickHdl (const Link& rLink)
{
    // Forward the setting of the click handler to the two buttons
    // regardless of which one is currently visible.
    maNextButton1.SetClickHdl(rLink);
    maNextButton2.SetClickHdl(rLink);
}
/** Return whether the (logical) next button is enabled. */
bool NextButton::IsEnabled (void)
{
    // Because the buttons are both either enabled or disabled, it is
    // sufficient to ask one to determine the state.
    return maNextButton1.IsEnabled();
}
/** Enable or disable the (logical) next button. */
void NextButton::Enable (bool bEnable)
{
    // Enable or disable both buttons but do not change visibility or focus.
    maNextButton1.Enable(bEnable);
    maNextButton2.Enable(bEnable);
}
| 24,741 |
443 | /*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.autofill.service.data;
import com.example.android.autofill.service.AutofillHints;
import com.example.android.autofill.service.model.AutofillDataset;
import com.example.android.autofill.service.model.DatasetWithFilledAutofillFields;
import com.example.android.autofill.service.model.FieldTypeWithHeuristics;
import com.example.android.autofill.service.model.FilledAutofillField;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.UUID;
/**
 * {@link AutofillDataBuilder} that generates fake autofill datasets.  Values
 * are produced by {@link AutofillHints#generateFakeField} from a fixed seed,
 * so repeated runs with the same seed yield the same data.
 */
public class FakeAutofillDataBuilder implements AutofillDataBuilder {
    // Field types (plus their matching heuristics) to generate values for.
    private final List<FieldTypeWithHeuristics> mFieldTypesWithHints;
    // Package name the generated datasets are associated with.
    private final String mPackageName;
    // Seed forwarded to the fake-field generator for deterministic output.
    private final int mSeed;

    public FakeAutofillDataBuilder(List<FieldTypeWithHeuristics> fieldTypesWithHints,
            String packageName, int seed) {
        mFieldTypesWithHints = fieldTypesWithHints;
        mSeed = seed;
        mPackageName = packageName;
    }

    /**
     * Builds one dataset per autofill partition, naming each
     * "dataset-&lt;datasetNumber&gt;.&lt;partition&gt;".  Partitions for which no
     * field produced a value are omitted from the returned list.
     */
    @Override
    public List<DatasetWithFilledAutofillFields> buildDatasetsByPartition(int datasetNumber) {
        ImmutableList.Builder<DatasetWithFilledAutofillFields> listBuilder =
                new ImmutableList.Builder<>();
        for (int partition : AutofillHints.PARTITIONS) {
            AutofillDataset autofillDataset = new AutofillDataset(UUID.randomUUID().toString(),
                    "dataset-" + datasetNumber + "." + partition, mPackageName);
            DatasetWithFilledAutofillFields datasetWithFilledAutofillFields =
                    buildCollectionForPartition(autofillDataset, partition);
            // Skip empty partitions so callers never see datasets without fields.
            if (datasetWithFilledAutofillFields != null &&
                    datasetWithFilledAutofillFields.filledAutofillFields != null &&
                    !datasetWithFilledAutofillFields.filledAutofillFields.isEmpty()) {
                listBuilder.add(datasetWithFilledAutofillFields);
            }
        }
        return listBuilder.build();
    }

    /**
     * Fills the given dataset with one fake field for every configured field
     * type that belongs to {@code partition}.
     */
    private DatasetWithFilledAutofillFields buildCollectionForPartition(
            AutofillDataset dataset, int partition) {
        DatasetWithFilledAutofillFields datasetWithFilledAutofillFields =
                new DatasetWithFilledAutofillFields();
        datasetWithFilledAutofillFields.autofillDataset = dataset;
        for (FieldTypeWithHeuristics fieldTypeWithHeuristics : mFieldTypesWithHints) {
            if (AutofillHints.matchesPartition(
                    fieldTypeWithHeuristics.getFieldType().getPartition(), partition)) {
                FilledAutofillField fakeField =
                        AutofillHints.generateFakeField(fieldTypeWithHeuristics, mPackageName,
                                mSeed, dataset.getId());
                datasetWithFilledAutofillFields.add(fakeField);
            }
        }
        return datasetWithFilledAutofillFields;
    }
}
| 1,355 |
805 | <reponame>gaoyouyadan/kaldi<filename>src/rnnlmbin/rnnlm-train.cc
// rnnlmbin/rnnlm-train.cc
// Copyright 2015-2017 Johns Hopkins University (author: <NAME>)
// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include "base/kaldi-common.h"
#include "util/common-utils.h"
#include "rnnlm/rnnlm-training.h"
#include "rnnlm/rnnlm-example-utils.h"
#include "nnet3/nnet-utils.h"
// Entry point of rnnlm-train: reads minibatches produced by rnnlm-get-egs and
// trains the RNNLM core network and (optionally) the embedding matrix.
int main(int argc, char *argv[]) {
  try {
    using namespace kaldi;
    using namespace kaldi::rnnlm;
    typedef kaldi::int32 int32;
    typedef kaldi::int64 int64;

    // rnnlm_rxfilename must be supplied, via --read-rnnlm option.
    std::string rnnlm_rxfilename;
    // For now, rnnlm_wxfilename must be supplied (later we could make it possible
    // to train the embedding matrix without training the RNNLM itself, if there
    // is a need).
    std::string rnnlm_wxfilename;
    // embedding_rxfilename must be supplied, via --read-embedding option.
    std::string embedding_rxfilename;
    // Optional: where to write the trained embedding; empty means the
    // embedding is not trained.
    std::string embedding_wxfilename;
    // Optional: sparse word-feature matrix (see --read-sparse-word-features).
    std::string word_features_rxfilename;
    // binary mode for writing output.
    bool binary = true;
    RnnlmCoreTrainerOptions core_config;
    RnnlmEmbeddingTrainerOptions embedding_config;
    RnnlmObjectiveOptions objective_config;
    const char *usage =
        "Train nnet3-based RNNLM language model (reads minibatches prepared\n"
        "by rnnlm-get-egs). Supports various modes depending which parameters\n"
        "we are training.\n"
        "Usage:\n"
        " rnnlm-train [options] <egs-rspecifier>\n"
        "e.g.:\n"
        " rnnlm-get-egs ... ark:- | \\\n"
        " rnnlm-train --read-rnnlm=foo/0.raw --write-rnnlm=foo/1.raw --read-embedding=foo/0.embedding \\\n"
        " --write-embedding=foo/1.embedding --read-sparse-word-features=foo/word_feats.txt ark:-\n"
        "See also: rnnlm-get-egs\n";
    std::string use_gpu = "yes";
    ParseOptions po(usage);
    po.Register("use-gpu", &use_gpu,
                "yes|no|optional|wait, only has effect if compiled with CUDA");
    po.Register("read-rnnlm", &rnnlm_rxfilename,
                "Read RNNLM from this location (e.g. 0.raw). Must be supplied.");
    po.Register("write-rnnlm", &rnnlm_wxfilename,
                "Write RNNLM to this location (e.g. 1.raw)."
                "If not supplied, the core RNNLM is not trained "
                "(but other parts of the model might be.");
    po.Register("read-embedding", &embedding_rxfilename,
                "Location to read dense (feature or word) embedding matrix, "
                "of dimension (num-words or num-features) by (embedding-dim).");
    po.Register("write-embedding", &embedding_wxfilename,
                "Location to write embedding matrix (c.f. --read-embedding). "
                "If not provided, the embedding will not be trained.");
    po.Register("read-sparse-word-features", &word_features_rxfilename,
                "Location to read sparse word-feature matrix, e.g. "
                "word_feats.txt. Format is lines like: '1 30 1.0 516 1.0':"
                "starting with word-index, then a list of pairs "
                "(feature-index, value) only including nonzero features. "
                "This will usually be determined in an ad-hoc way based on "
                "letters and other hand-built features; it's not trainable."
                " If present, the embedding matrix read via --read-embedding "
                "will be interpreted as a feature-embedding matrix.");
    po.Register("binary", &binary,
                "If true, write outputs in binary form.");
    objective_config.Register(&po);
    // register the core RNNLM training options options with the prefix "rnnlm",
    // so they will appear as --rnnlm.max-change and the like. This is done
    // with a prefix because later we may add a neural net to transform the word
    // embedding, and it would have options that would have a name conflict with
    // some of these options.
    ParseOptions core_opts("rnnlm", &po);
    core_config.Register(&core_opts);
    // ... and register the embedding options with the prefix "embedding".
    ParseOptions embedding_opts("embedding", &po);
    embedding_config.Register(&embedding_opts);
    po.Read(argc, argv);
    if (po.NumArgs() != 1) {
      po.PrintUsage();
      exit(1);
    }
    // NOTE(review): missing mandatory options are reported with KALDI_WARN
    // plus a manual exit(1) rather than KALDI_ERR; the effect is the same
    // (non-zero exit) but the log level differs.
    if (rnnlm_rxfilename == "" ||
        rnnlm_wxfilename == "" ||
        embedding_rxfilename == "") {
      KALDI_WARN << "--read-rnnlm, --write-rnnlm and --read-embedding "
          "options are required.";
      po.PrintUsage();
      exit(1);
    }
    std::string examples_rspecifier = po.GetArg(1);
#if HAVE_CUDA==1
    CuDevice::Instantiate().SelectGpuId(use_gpu);
    CuDevice::Instantiate().AllowMultithreading();
#endif
    kaldi::nnet3::Nnet rnnlm;
    ReadKaldiObject(rnnlm_rxfilename, &rnnlm);
    if (!IsSimpleNnet(rnnlm))
      KALDI_ERR << "Input RNNLM in " << rnnlm_rxfilename
                << " is not the type of neural net we were looking for; "
          "failed IsSimpleNnet().";
    CuMatrix<BaseFloat> embedding_mat;
    ReadKaldiObject(embedding_rxfilename, &embedding_mat);
    CuSparseMatrix<BaseFloat> word_feature_mat;
    if (word_features_rxfilename != "") {
      // binary mode is not supported here; it's a text format.
      Input input(word_features_rxfilename);
      // When sparse features are used, the embedding matrix maps features
      // (not words) to the embedding space, so its row count is the
      // feature dimension.
      int32 feature_dim = embedding_mat.NumRows();
      SparseMatrix<BaseFloat> cpu_word_feature_mat;
      ReadSparseWordFeatures(input.Stream(), feature_dim,
                             &cpu_word_feature_mat);
      word_feature_mat.Swap(&cpu_word_feature_mat);  // copy to GPU, if we have
                                                     // one.
    }
    {
      bool train_embedding = (embedding_wxfilename != "");
      RnnlmTrainer trainer(
          train_embedding, core_config, embedding_config, objective_config,
          (word_features_rxfilename != "" ? &word_feature_mat : NULL),
          &embedding_mat, &rnnlm);
      SequentialRnnlmExampleReader example_reader(examples_rspecifier);
      for (; !example_reader.Done(); example_reader.Next())
        trainer.Train(&(example_reader.Value()));
      if (trainer.NumMinibatchesProcessed() == 0)
        KALDI_ERR << "There was no data to train on.";
      // The destructor of 'trainer' trains on the last minibatch
      // and writes out anything we need to write out.
    }
    WriteKaldiObject(rnnlm, rnnlm_wxfilename, binary);
    KALDI_LOG << "Wrote RNNLM to "
              << PrintableWxfilename(rnnlm_wxfilename);
    if (embedding_wxfilename != "") {
      WriteKaldiObject(embedding_mat, embedding_wxfilename, binary);
      KALDI_LOG << "Wrote embedding matrix to "
                << PrintableWxfilename(embedding_wxfilename);
    }
#if HAVE_CUDA==1
    CuDevice::Instantiate().PrintProfile();
#endif
    return 0;
  } catch(const std::exception &e) {
    std::cerr << e.what() << '\n';
    return -1;
  }
}
| 3,109 |
495 | <filename>tiny_benchmark/maskrcnn_benchmark/modeling/rpn/cascade_fcos/inference.py
import torch
from ..inference import RPNPostProcessor
from ..utils import permute_and_flatten
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
import torch.nn.functional as F
import numpy as np
class FCOSPostProcessor(torch.nn.Module):
    """
    Performs post-processing on the outputs of the RetinaNet boxes.
    This is only used in the testing.
    """
    def __init__(
        self,
        pre_nms_thresh,
        pre_nms_top_n,
        nms_thresh,
        fpn_post_nms_top_n,
        min_size,
        num_classes,
        vis_labels
    ):
        """
        Arguments:
            pre_nms_thresh (float): score threshold applied before NMS
            pre_nms_top_n (int): max candidates kept per FPN level
            nms_thresh (float): IoU threshold used by per-class NMS
            fpn_post_nms_top_n (int): max detections kept per image
            min_size (int): minimum box side length kept
            num_classes (int): number of classes including background (class 0)
            vis_labels (bool): when True, score maps are visualised via
                BoxClsShower (debugging aid)
        """
        super(FCOSPostProcessor, self).__init__()
        self.pre_nms_thresh = pre_nms_thresh
        self.pre_nms_top_n = pre_nms_top_n
        self.nms_thresh = nms_thresh
        self.fpn_post_nms_top_n = fpn_post_nms_top_n
        self.min_size = min_size
        self.num_classes = num_classes
        self.vis_labels = vis_labels

    def forward_for_single_feature_map(
            self, locations, box_cls_set,
            box_regression, centerness,
            image_sizes, show_box_cls):
        """
        Decode detections for one FPN level.

        Arguments:
            locations: per-pixel anchor locations for this level
            box_cls_set: dict mapping cascade-head name -> class-score tensor
                of size N, C, H, W for this level
            box_regression: tensor of size N, 4, H, W
            centerness: unused -- overwritten with None below
            image_sizes: list of (h, w) per image
            show_box_cls: BoxClsShower instance or None
        """
        # NOTE(review): the [[2]] fancy-index keeps only the third cascade
        # head's scores, so the geometric mean below degenerates to that
        # single head.  This looks like leftover experiment code -- confirm
        # before relying on it.
        box_prob_set = []
        for _box_cls in np.array(list(box_cls_set.values()))[[2]]: #
            N, C, H, W = _box_cls.shape
            _box_cls = _box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
            box_prob_set.append(_box_cls.reshape(N, -1, C).sigmoid())
        # Geometric mean of the selected heads' probabilities.
        box_cls = torch.exp(torch.log(torch.stack(box_prob_set)).mean(dim=0))
        # max_score = box_prob_set[-1].max()
        # box_prob_set[:-1] = [box_prob / box_prob.max() * max_score for box_prob in box_prob_set[:-1]]
        # box_cls = torch.stack(box_prob_set).max(dim=0)[0]
        # NOTE(review): centerness is forcibly discarded here, so both
        # centerness branches below are dead code in this cascade variant.
        centerness = None
        # put in the same format as locations
        box_regression = box_regression.view(N, 4, H, W).permute(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        if centerness is not None:
            centerness = centerness.view(N, 1, H, W).permute(0, 2, 3, 1)
            centerness = centerness.reshape(N, -1).sigmoid()
        if self.vis_labels:
            # box_prob_set.extend([box_cls, centerness, centerness[:,:,None]*box_prob_set[-1]])
            show_box_cls(box_prob_set, N, H, W, C, self.pre_nms_thresh)
        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)
        # multiply the classification scores with centerness scores
        if centerness is not None:
            box_cls = (box_cls * centerness[:, :, None])
        results = []
        for i in range(N):
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]
            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            # class indices are shifted by one: label 0 is background
            per_class = per_candidate_nonzeros[:, 1] + 1
            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]
            per_pre_nms_top_n = pre_nms_top_n[i]
            # keep only the top-scoring candidates when over the per-level cap
            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]
            # decode (left, top, right, bottom) distances into xyxy boxes
            detections = torch.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ], dim=1)
            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist.add_field("det_locations", per_locations)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)
        return results

    def forward(self, locations, box_cls_set, box_regression, centerness, image_sizes, images=None, targets=None):
        """
        Arguments:
            anchors: list[list[BoxList]]
            box_cls: list[tensor]
            box_regression: list[tensor]
            image_sizes: list[(h, w)]
        Returns:
            boxlists (list[BoxList]): the post-processed anchors, after
                applying box decoding and NMS
        """
        cascade_num = len(box_cls_set)
        # Peek at any head to learn how many FPN levels there are.
        for box_cls in box_cls_set.values():
            fpn_level_num = len(box_cls)
            break
        # Re-group scores from {head -> per-level list} into a per-level
        # list of {head -> tensor} dicts.
        box_cls_set_level_first = [{} for _ in range(fpn_level_num)]
        for name, box_cls in box_cls_set.items():
            for lvl, cls in enumerate(box_cls):
                box_cls_set_level_first[lvl][name] = cls
        box_cls_set = box_cls_set_level_first
        if centerness is None:
            centerness = [None] * fpn_level_num
        show_box_cls = BoxClsShower(fpn_level_num, images=images, targets=targets) if self.vis_labels else None
        sampled_boxes = []
        for _, (l, o, b, c) in enumerate(zip(locations, box_cls_set, box_regression, centerness)):
            sampled_boxes.append(
                self.forward_for_single_feature_map(
                    l, o, b, c, image_sizes, show_box_cls
                )
            )
        # Transpose level-major results into image-major lists, then merge.
        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
        boxlists = self.select_over_all_levels(boxlists)
        return boxlists

    # TODO very similar to filter_results from PostProcessor
    # but filter_results is per image
    # TODO Yang: solve this issue in the future. No good solution
    # right now.
    def select_over_all_levels(self, boxlists):
        # Per-class NMS followed by a per-image cap on the detection count.
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            locations = boxlists[i].get_field("det_locations") # add here
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)
                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                locations_j = locations[inds]
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class.add_field("det_locations", locations_j) # add here
                boxlist_for_class = boxlist_nms(
                    boxlist_for_class, self.nms_thresh,
                    score_field="scores"
                )
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field(
                    "labels", torch.full((num_labels,), j,
                                         dtype=torch.int64,
                                         device=scores.device)
                )
                result.append(boxlist_for_class)
            result = cat_boxlist(result)
            number_of_detections = len(result)
            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                # kthvalue gives the score threshold that keeps exactly
                # fpn_post_nms_top_n detections (ties may keep a few more).
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
def make_fcos_postprocessor(config):
    """Construct a ``FCOSPostProcessor`` from the FCOS section of ``config``.

    Reads the inference score threshold, pre-/post-NMS caps, the NMS IoU
    threshold, the class count and the debug-visualisation flag; the minimum
    box size is fixed at 0.
    """
    fcos_cfg = config.MODEL.FCOS
    return FCOSPostProcessor(
        pre_nms_thresh=fcos_cfg.INFERENCE_TH,
        pre_nms_top_n=fcos_cfg.PRE_NMS_TOP_N,
        nms_thresh=fcos_cfg.NMS_TH,
        fpn_post_nms_top_n=config.TEST.DETECTIONS_PER_IMG,
        min_size=0,
        num_classes=fcos_cfg.NUM_CLASSES,
        vis_labels=fcos_cfg.DEBUG.VIS_LABELS,
    )
class BoxClsShower(object):
    """
    Debug visualiser that accumulates per-FPN-level score maps and, once all
    levels have been seen, renders them with matplotlib.

    map0-4: [0.125, 0.25, 0.5, 0.75, 1.0] ** 2 area range
    map5: centerness
    """
    def __init__(self, fpn_level=5, scatter_topk=10, EPS=1e-8, images=None, targets=None):
        # Number of FPN levels to accumulate before rendering.
        self.fpn_level = fpn_level
        # How many levels have been fed in so far (reset after each render).
        self.level_count = 0
        # Running per-map maxima, merged across levels by resize + max.
        self.box_probs = []
        # Number of local maxima to scatter-plot per map.
        self.scatter_topk = scatter_topk
        self.EPS = EPS
        # Layout: at most this many subplots per row.
        self.row_sub_fig = 4
        # Edge length (inches) of each subplot.
        self.single_fig_size = 4
        self.titles = None
        self.images = images
        self.targets = targets

    def find_local_max(self, box_prob):
        # Return (ys, xs, scores) of the top-k 3x3 local maxima of a
        # single-image, single-channel score map of shape (1, 1, H, W).
        B, C, H, W = box_prob.shape
        max_prob, idx = F.max_pool2d_with_indices(box_prob, 3, 1, 1, return_indices=True)
        max_prob = max_prob[0, 0]
        box_prob = box_prob[0, 0]
        # A pixel equal to the max of its 3x3 neighbourhood is a local max.
        is_local_max = torch.nonzero(box_prob == max_prob)
        y, x = is_local_max[:, 0], is_local_max[:, 1]
        idx = torch.argsort(-box_prob[y, x])
        k = self.scatter_topk
        y = y[idx[:k]]
        x = x[idx[:k]]
        return y.cpu().numpy(), x.cpu().numpy(), box_prob[y, x]

    def mask_to_image(self, box_prob, upsample=False):
        # Blend the score map with the first input image so high-score
        # regions show the underlying pixels.  The additive constants are
        # the dataset's per-channel BGR means used during preprocessing.
        img = self.images.tensors[0:1]
        if upsample:
            box_prob = F.upsample(box_prob[None, None, :, :], img.shape[2:], mode='bilinear')[0, 0]
        else:
            img = F.upsample(img, box_prob.shape, mode='bilinear')
        img = img[0].permute((1, 2, 0)).cpu() + torch.Tensor([102.9801, 115.9465, 122.7717])
        return img * box_prob[:, :, None] / 255

    def __call__(self, box_prob_set, N, H, W, C, th):
        # Fold one FPN level's score maps into the running accumulators;
        # when the last level arrives, render everything and reset.
        import matplotlib.pyplot as plt
        box_probs = []
        for i, box_prob in enumerate(box_prob_set):
            # Normalise to (1, 1, H, W): keep the first image only and take
            # the maximum over classes.
            if box_prob.numel() == N*H*W*C:
                box_prob = box_prob.reshape(-1, H, W, C)[:1]
            elif box_prob.numel() == N*H*W:
                box_prob = box_prob.reshape(-1, H, W, 1)[:1]
            box_prob = box_prob.max(dim=-1, keepdim=True)[0].permute((0, 3, 1, 2))
            box_probs.append(box_prob.cpu())
        # merge FPN multi-level score map to one map by resize add.
        if len(self.box_probs) == 0:
            self.box_probs = box_probs
        else:
            for i, p in enumerate(box_probs): # for each area th score map
                box_prob = self.box_probs[i]
                # Upsample whichever map is smaller, then keep the pixelwise max.
                if box_prob.numel() < p.numel():
                    box_prob = F.upsample(box_prob, p.shape[2:], mode='bilinear')
                else:
                    p = F.upsample(p, box_prob.shape[2:], mode='bilinear')
                self.box_probs[i] = torch.max(torch.stack([p, box_prob]), dim=0)[0]
        self.level_count += 1
        if self.level_count == self.fpn_level:
            # show each area th score map
            n_figs = len(self.box_probs)
            r = self.row_sub_fig if n_figs >= self.row_sub_fig else n_figs
            c = int(np.ceil((n_figs/r)))
            plt.figure(figsize=(r * self.single_fig_size, c * self.single_fig_size)) # (W, H)
            for i, box_prob in enumerate(self.box_probs):
                y, x, score = self.find_local_max(box_prob)
                box_prob = box_prob[0, 0]
                max_p = box_prob.max()
                std = box_prob.std()
                # Normalise to [0, 1] for display.
                box_prob /= max_p
                if self.images is not None:
                    box_prob = self.mask_to_image(box_prob)
                box_prob = box_prob.numpy()
                plt.subplot(c, r, i+1)
                plt.imshow(box_prob)
                plt.scatter(x, y, color='r', s=20 * score)
                if self.titles is not None:
                    title = self.titles[i]
                else:
                    title = 'map {}'.format(i)
                plt.title("{}, max:{:.2f}, std: {:.2f}".format(title, float(max_p), float(std)))
            plt.show()
            # Reset accumulators for the next image/batch.
            self.level_count = 0
            del self.box_probs
            self.box_probs = []
| 7,181 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package complex.storages;
import java.net.URI;
import java.io.File;
import java.io.FileInputStream;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipEntry;
import com.sun.star.uno.XInterface;
import com.sun.star.lang.XMultiServiceFactory;
import com.sun.star.lang.XSingleServiceFactory;
import com.sun.star.bridge.XUnoUrlResolver;
import com.sun.star.uno.UnoRuntime;
import com.sun.star.uno.XInterface;
import com.sun.star.io.XStream;
import com.sun.star.io.XInputStream;
import com.sun.star.embed.*;
import share.LogWriter;
import complex.storages.TestHelper;
import complex.storages.StorageTest;
/**
 * Regression test for issue i61909: writes a package with substreams and a
 * substorage, commits it, reopens it to insert one more stream, and then
 * verifies via {@link ZipInputStream} that the file on disk contains exactly
 * the expected number of zip entries.
 */
public class RegressionTest_i61909 implements StorageTest {

    XMultiServiceFactory m_xMSF;
    XSingleServiceFactory m_xStorageFactory;
    TestHelper m_aTestHelper;

    public RegressionTest_i61909( XMultiServiceFactory xMSF, XSingleServiceFactory xStorageFactory, LogWriter aLogWriter )
    {
        m_xMSF = xMSF;
        m_xStorageFactory = xStorageFactory;
        m_aTestHelper = new TestHelper( aLogWriter, "RegressionTest_i61909: " );
    }

    /**
     * Runs the regression scenario.
     *
     * @return true when the test passed, false on any failure (errors are
     *         reported through the TestHelper).
     */
    public boolean test()
    {
        try
        {
            String sTempFileURL = m_aTestHelper.CreateTempFile( m_xMSF );
            // BUGFIX: the original used 'sTempFileURL == ""', which compares
            // object references and is practically never true for a freshly
            // constructed string, so an empty URL slipped through unnoticed.
            if ( sTempFileURL == null || sTempFileURL.length() == 0 )
            {
                m_aTestHelper.Error( "No valid temporary file was created!" );
                return false;
            }

            // create storage based on the temporary stream
            Object pArgs[] = new Object[2];
            pArgs[0] = (Object) sTempFileURL;
            pArgs[1] = new Integer( ElementModes.WRITE );

            Object oTempStorage = m_xStorageFactory.createInstanceWithArguments( pArgs );
            XStorage xTempStorage = (XStorage) UnoRuntime.queryInterface( XStorage.class, oTempStorage );
            if ( xTempStorage == null )
            {
                m_aTestHelper.Error( "Can't create temporary storage representation!" );
                return false;
            }

            // Payload written into every substream.
            byte pBytes[] = new byte[36000];
            for ( int nInd = 0; nInd < 36000; nInd++ )
                pBytes[nInd] = (byte)( nInd % 128 );

            // open a new substream, set "MediaType" and "Compressed" properties to it and write some bytes
            if ( !m_aTestHelper.WriteBytesToSubstream( xTempStorage, "SubStream1", "MediaType1", true, pBytes ) )
                return false;

            // open a new substorage
            XStorage xTempSubStorage = m_aTestHelper.openSubStorage( xTempStorage,
                                                                     "SubStorage1",
                                                                     ElementModes.WRITE );
            if ( xTempSubStorage == null )
            {
                m_aTestHelper.Error( "Can't create substorage!" );
                return false;
            }

            // open a new substream, set "MediaType" and "Compressed" properties to it and write some bytes
            if ( !m_aTestHelper.WriteBytesToSubstream( xTempSubStorage, "SubStream2", "MediaType2", true, pBytes ) )
                return false;

            // set "MediaType" property for storages and check that "IsRoot" and "OpenMode" properties are set correctly
            if ( !m_aTestHelper.setStorageTypeAndCheckProps( xTempStorage,
                                                             "MediaType3",
                                                             true,
                                                             ElementModes.WRITE ) )
                return false;

            // set "MediaType" property for storages and check that "IsRoot" and "OpenMode" properties are set correctly
            if ( !m_aTestHelper.setStorageTypeAndCheckProps( xTempSubStorage,
                                                             "MediaType4",
                                                             false,
                                                             ElementModes.WRITE ) )
                return false;

            // commit substorage first
            if ( !m_aTestHelper.commitStorage( xTempSubStorage ) )
                return false;

            // commit the root storage so the contents must be stored now
            if ( !m_aTestHelper.commitStorage( xTempStorage ) )
                return false;

            // dispose used storage to free resources
            if ( !m_aTestHelper.disposeStorage( xTempStorage ) )
                return false;

            // ================================================
            // now reopen the storage, and insert a new stream
            // ================================================

            Object oStep2TempStorage = m_xStorageFactory.createInstanceWithArguments( pArgs );
            XStorage xStep2TempStorage = (XStorage) UnoRuntime.queryInterface( XStorage.class, oStep2TempStorage );
            if ( xStep2TempStorage == null )
            {
                m_aTestHelper.Error( "Can't create temporary storage representation!" );
                return false;
            }

            // open a new substream, set "MediaType" and "Compressed" properties to it and write some bytes
            if ( !m_aTestHelper.WriteBytesToSubstream( xStep2TempStorage, "SubStream3", "MediaType5", true, pBytes ) )
                return false;

            // commit the root storage so the contents must be stored now
            if ( !m_aTestHelper.commitStorage( xStep2TempStorage ) )
                return false;

            // dispose used storage to free resources
            if ( !m_aTestHelper.disposeStorage( xStep2TempStorage ) )
                return false;

            // ================================================
            // now access the stream using ZipInputStream
            // ================================================

            URI aUri = new URI( sTempFileURL );
            File aFile = new File( aUri );
            FileInputStream aFileStream = new FileInputStream( aFile );
            ZipInputStream aZipStream = new ZipInputStream( aFileStream );
            ZipEntry aEntry;
            int nNumber = 0;

            m_aTestHelper.Message( "Available entries:" );
            while ( ( aEntry = aZipStream.getNextEntry() ) != null )
            {
                nNumber++;
                m_aTestHelper.Message( aEntry.getName() );
            }

            // 3 substreams + substorage structure + manifest/mimetype parts.
            if ( nNumber != 6 )
            {
                m_aTestHelper.Error( "Wrong number of entries: " + nNumber + ", Expected: 6" );
                return false;
            }

            return true;
        }
        catch( Exception e )
        {
            m_aTestHelper.Error( "Exception: " + e );
            return false;
        }
    }
}
| 2,214 |
465 | <gh_stars>100-1000
/*
* Copyright (c) 2018 Uber Technologies, Inc.
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
package com.uber.marmaray.common.configuration;
import com.uber.marmaray.utilities.ConfigUtil;
import org.hibernate.validator.constraints.NotEmpty;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
/**
 * {@link LockManagerConfiguration} defines the configuration for taking locks on jobs via ZooKeeper.
 *
 * All properties start with {@link #LOCK_MANAGER_PREFIX}.
 */
@Slf4j
public class LockManagerConfiguration {

    public static final String LOCK_MANAGER_PREFIX = Configuration.MARMARAY_PREFIX + "lock_manager.";

    /** Whether job-level locking is enabled at all. */
    public static final String IS_ENABLED = LOCK_MANAGER_PREFIX + "is_enabled";
    public static final boolean DEFAULT_IS_ENABLED = true;

    /** ZooKeeper base path under which lock nodes live; mandatory when locking is enabled. */
    public static final String ZK_BASE_PATH = LOCK_MANAGER_PREFIX + "zk_base_path";

    public static final String ZK_SESSION_TIMEOUT_MS = LOCK_MANAGER_PREFIX + "zk_session_timeout_ms";
    public static final int DEFAULT_ZK_SESSION_TIMEOUT_MS = 60 * 1000;

    public static final String ZK_CONNECTION_TIMEOUT_MS = LOCK_MANAGER_PREFIX + "zk_connection_timeout_ms";
    public static final int DEFAULT_ZK_CONNECTION_TIMEOUT_MS = 15 * 1000;

    public static final String ACQUIRE_LOCK_TIME_MS = LOCK_MANAGER_PREFIX + "acquire_lock_time_ms";
    public static final int DEFAULT_ACQUIRE_LOCK_TIME_MS = 60 * 1000;

    @Getter
    private final Configuration conf;

    @Getter
    private final boolean isEnabled;

    /** Normalized ZooKeeper base path; {@code null} when the lock manager is disabled. */
    @Getter
    private final String zkBasePath;

    @Getter
    private final int zkSessionTimeoutMs;

    @Getter
    private final int zkConnectionTimeoutMs;

    @Getter
    private final int acquireLockTimeMs;

    /**
     * Reads the lock manager settings from the given {@link Configuration}.
     *
     * @param conf source configuration; must contain {@link #ZK_BASE_PATH}
     *             when {@link #IS_ENABLED} resolves to {@code true}
     */
    public LockManagerConfiguration(@NonNull final Configuration conf) {
        this.conf = conf;
        this.isEnabled = this.getConf().getBooleanProperty(IS_ENABLED, DEFAULT_IS_ENABLED);
        if (this.isEnabled()) {
            // The ZooKeeper base path is only mandatory when locking is actually used.
            ConfigUtil.checkMandatoryProperties(conf, getMandatoryProperties());
            this.zkBasePath = cleanZkBasePath(this.getConf().getProperty(ZK_BASE_PATH).get());
        } else {
            this.zkBasePath = null;
        }
        this.zkSessionTimeoutMs = this.getConf().getIntProperty(ZK_SESSION_TIMEOUT_MS, DEFAULT_ZK_SESSION_TIMEOUT_MS);
        this.zkConnectionTimeoutMs = this.getConf().getIntProperty(ZK_CONNECTION_TIMEOUT_MS,
                DEFAULT_ZK_CONNECTION_TIMEOUT_MS);
        this.acquireLockTimeMs = this.getConf().getIntProperty(ACQUIRE_LOCK_TIME_MS, DEFAULT_ACQUIRE_LOCK_TIME_MS);
    }

    /**
     * Collapses runs of slashes to a single slash and strips leading/trailing
     * slashes, so the path can be appended to a ZooKeeper root uniformly.
     */
    private String cleanZkBasePath(@NotEmpty final String orig) {
        return orig.replaceAll("//*", "/").replaceAll("^/", "").replaceAll("/$", "");
    }

    private static List<String> getMandatoryProperties() {
        // Collections.singletonList is already immutable; no unmodifiable wrapper needed.
        return Collections.singletonList(ZK_BASE_PATH);
    }
}
| 1,387 |
5,823 | // Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <gtest/gtest.h>
#include <string>
#include "flutter/fml/task_runner.h"
#include "flutter/shell/common/thread_host.h"
#include "fml/make_copyable.h"
#include "fml/message_loop.h"
#include "fml/synchronization/waitable_event.h"
#include "fml/time/time_delta.h"
#include "fml/time/time_point.h"
#include "vsync_waiter.h"
namespace flutter_runner {
TEST(VSyncWaiterFuchsia, FrameScheduledForStartTime) {
  using flutter::ThreadHost;
  std::string prefix = "vsync_waiter_test";

  // The current thread acts as the platform thread for the task runners.
  fml::MessageLoop::EnsureInitializedForCurrentThread();
  auto platform_task_runner = fml::MessageLoop::GetCurrent().GetTaskRunner();

  // Spin up dedicated raster/UI/IO threads for the TaskRunners bundle.
  ThreadHost thread_host =
      ThreadHost(prefix, flutter::ThreadHost::Type::RASTER |
                             flutter::ThreadHost::Type::UI |
                             flutter::ThreadHost::Type::IO);
  const flutter::TaskRunners task_runners(
      prefix,                                      // Dart thread labels
      platform_task_runner,                        // platform
      thread_host.raster_thread->GetTaskRunner(),  // raster
      thread_host.ui_thread->GetTaskRunner(),      // ui
      thread_host.io_thread->GetTaskRunner()       // io
  );

  // await vsync will invoke the callback right away, but vsync waiter will post
  // the task for frame_start time, i.e. 20ms in the future.
  VsyncWaiter vsync_waiter(
      [](FireCallbackCallback callback) {
        const auto now = fml::TimePoint::Now();
        const auto frame_start = now + fml::TimeDelta::FromMilliseconds(20);
        const auto frame_end = now + fml::TimeDelta::FromMilliseconds(36);
        callback(frame_start, frame_end);
      },
      /*secondary callback*/ nullptr, task_runners);

  fml::AutoResetWaitableEvent latch;
  task_runners.GetUITaskRunner()->PostTask([&]() {
    vsync_waiter.AsyncWaitForVsync(
        [&](std::unique_ptr<flutter::FrameTimingsRecorder> recorder) {
          // The callback must not run before the recorded vsync start time.
          const auto now = fml::TimePoint::Now();
          EXPECT_GT(now, recorder->GetVsyncStartTime());
          latch.Signal();
        });
  });

  // Block the test thread until the vsync callback fires on the UI thread.
  latch.Wait();
}
} // namespace flutter_runner
| 901 |
2,151 | //===- MSFStreamLayout.h - Describes the layout of a stream -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
#define LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
#include "llvm/Support/Endian.h"
#include <cstdint>
#include <vector>
namespace llvm {
namespace msf {
/// \brief Describes the layout of a stream in an MSF layout. A "stream" here
/// is defined as any logical unit of data which may be arranged inside the MSF
/// file as a sequence of (possibly discontiguous) blocks. When we want to read
/// from a particular MSF Stream, we fill out a stream layout structure and the
/// reader uses it to determine which blocks in the underlying MSF file contain
/// the data, so that it can be pieced together in the right order.
class MSFStreamLayout {
public:
  /// Length of the stream in bytes; the final block may be only partially used.
  uint32_t Length;
  /// Indices of the (possibly discontiguous) MSF blocks that hold the
  /// stream's data, in stream order, stored little-endian as on disk.
  std::vector<support::ulittle32_t> Blocks;
};
} // namespace msf
} // namespace llvm
#endif // LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
| 369 |
879 | package org.zstack.test.compute.zone;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.core.db.SimpleQuery;
import org.zstack.header.message.APIDeleteMessage;
import org.zstack.header.zone.APIDeleteZoneEvent;
import org.zstack.header.zone.APIDeleteZoneMsg;
import org.zstack.header.zone.ZoneInventory;
import org.zstack.header.zone.ZoneVO;
import org.zstack.test.*;
import java.util.List;
/**
 * Verifies that {@link PreventZoneDeleteExtensionPoint} is consulted when a
 * zone is deleted: a plain delete is vetoed (zone survives), while an
 * enforcing delete succeeds and fires both the before/after extension hooks.
 */
public class TestZoneDeleteExtensionPoint {
    Api api;
    ComponentLoader loader;
    DatabaseFacade dbf;
    PreventZoneDeleteExtensionPoint ext;

    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        BeanConstructor con = new WebBeanConstructor();
        /* This loads spring application context */
        loader = con.addXml("PortalForUnitTest.xml").addXml("ZoneManager.xml").addXml("ZoneUnitTest.xml").addXml("AccountManager.xml").build();
        dbf = loader.getComponent(DatabaseFacade.class);
        ext = loader.getComponent(PreventZoneDeleteExtensionPoint.class);
        api = new Api();
        api.startServer();
    }

    @Test
    public void test() throws ApiSenderException {
        try {
            List<ZoneInventory> zones = api.createZones(1);
            ZoneInventory zone = zones.get(0);
            APIDeleteZoneMsg msg = new APIDeleteZoneMsg(zone.getUuid());
            msg.setSession(api.getAdminSession());
            // Non-enforcing delete: the extension point vetoes it, so no event
            // success is expected and the zone must still be in the database.
            api.getApiSender().send(msg, APIDeleteZoneEvent.class, false);
            SimpleQuery<ZoneVO> query = dbf.createQuery(ZoneVO.class);
            long count = query.count();
            Assert.assertEquals(1, count);
            // Enforcing delete bypasses the veto and must remove the zone.
            msg.setDeletionMode(APIDeleteMessage.DeletionMode.Enforcing);
            api.getApiSender().send(msg, APIDeleteZoneEvent.class);
            count = query.count();
            Assert.assertEquals(0, count);
            // Both extension hooks must have been invoked during the deletion.
            Assert.assertTrue(ext.isAfterCalled());
            Assert.assertTrue(ext.isBeforeCalled());
        } finally {
            api.stopServer();
        }
    }
}
| 889 |
531 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.karaf.shell.ssh.keygenerator;
import static java.util.Collections.singleton;
import java.io.IOException;
import java.io.InputStream;
import java.io.InvalidClassException;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.NoSuchAlgorithmException;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;
import java.util.Base64;
import java.util.HashSet;
import java.util.Set;
import org.apache.sshd.common.keyprovider.AbstractKeyPairProvider;
import org.apache.sshd.common.session.SessionContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Key pair provider for the Karaf SSH server that lazily loads the host key
 * from PEM files, generating and persisting a fresh key pair when none
 * exists yet. Three parse strategies are tried in order: PKCS8 private key,
 * private+public PEM pair, and the legacy Java-serialized {@link KeyPair}
 * format (which is converted to PEM on the fly). The result is cached.
 */
public class OpenSSHKeyPairProvider extends AbstractKeyPairProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenSSHKeyPairProvider.class);
    private Path privateKeyPath;
    private Path publicKeyPath;
    // Password used to decrypt the private key, if it is encrypted.
    private String password;
    // Lazily populated; once set, loadKeys() returns it without touching disk.
    private KeyPair cachedKey;
    private String algorithm;
    private int keySize;

    /**
     * @param privateKeyPath location of the (possibly not yet existing) private key file
     * @param publicKeyPath  location of the matching public key file
     * @param algorithm      key algorithm name used for generation and parsing (e.g. RSA)
     * @param keySize        key size in bits used when generating a new key
     * @param password       password for an encrypted private key, or {@code null}
     */
    public OpenSSHKeyPairProvider(Path privateKeyPath, Path publicKeyPath, String algorithm, int keySize, String password) {
        this.privateKeyPath = privateKeyPath;
        this.publicKeyPath = publicKeyPath;
        this.algorithm = algorithm;
        this.keySize = keySize;
        this.password = password;
    }

    /**
     * Returns the server host key pair, generating it on first use.
     * Synchronized so concurrent sessions do not generate/parse twice.
     */
    @Override
    public synchronized Iterable<KeyPair> loadKeys(SessionContext sessionContext) throws IOException, GeneralSecurityException {
        if (cachedKey != null) {
            return singleton(cachedKey);
        }
        if (!privateKeyPath.toFile().exists()) {
            createServerKey();
        }
        // 1. Try to read the PKCS8 private key. If it is RSA or DSA we can infer the public key directly from the
        // private key, so there is no need to load the public key.
        try (InputStream is = Files.newInputStream(privateKeyPath)) {
            KeyPair kp = KeyPairLoader.getKeyPair(is, password);
            cachedKey = kp;
            return singleton(kp);
        } catch (Exception e) {
            // 2. Failed to parse PKCS8 private key. Try to parse it directly and use the public key to create a KeyPair
            // This is what will happen if it is an elliptic curve key for example
            LOGGER.warn("Failed to parse keypair in {}. Attempting to parse it 'directly'", privateKeyPath);
            try {
                KeyPair kp = getKeyPairUsingPublicKeyFile();
                LOGGER.info("Successfully loaded key pair");
                cachedKey = kp;
                return singleton(cachedKey);
            } catch (IOException | NoSuchAlgorithmException | InvalidKeySpecException | IllegalArgumentException e1) {
                // 3. On a failure, see if we are dealing with a "legacy" keypair.
                LOGGER.warn("Failed to parse keypair in {}. Attempting to parse it as a legacy 'simple' key", privateKeyPath);
                try {
                    KeyPair kp = convertLegacyKey(privateKeyPath);
                    LOGGER.info("Successfully loaded legacy simple key. Converted to PEM format");
                    cachedKey = kp;
                    return singleton(kp);
                } catch (Exception nested) {
                    LOGGER.warn(privateKeyPath + " is not a 'simple' key either", nested);
                }
            }
            // All three strategies failed; surface the original parse error.
            throw new RuntimeException(e);
        }
    }

    /**
     * Deserializes a legacy Java-serialized {@link KeyPair} (guarded against
     * arbitrary-class deserialization) and rewrites it as PEM files.
     */
    private KeyPair convertLegacyKey(Path privateKeyPath) throws GeneralSecurityException, IOException {
        KeyPair keypair = null;
        try (ObjectInputStream r = new KeyPairObjectInputStream(Files.newInputStream(privateKeyPath))) {
            keypair = (KeyPair)r.readObject();
        }
        catch (ClassNotFoundException e) {
            throw new InvalidKeySpecException("Missing classes: " + e.getMessage(), e);
        }
        new PemWriter(privateKeyPath, publicKeyPath).writeKeyPair(algorithm, keypair);
        return keypair;
    }

    /**
     * Builds a {@link KeyPair} by reading both PEM files and decoding them as
     * PKCS8 (private) and X.509 (public) key specs. Used when the public key
     * cannot be derived from the private key alone (e.g. EC keys).
     */
    private KeyPair getKeyPairUsingPublicKeyFile() throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
        KeyFactory keyFactory = KeyFactory.getInstance(algorithm);

        // Read private key
        String content = new String(Files.readAllBytes(privateKeyPath), StandardCharsets.UTF_8);
        content = content.replace("-----BEGIN PRIVATE KEY-----", "");
        content = content.replace("-----END PRIVATE KEY-----", "");
        PKCS8EncodedKeySpec encodedKeySpec = new PKCS8EncodedKeySpec(Base64.getMimeDecoder().decode(content));
        PrivateKey privateKey = keyFactory.generatePrivate(encodedKeySpec);

        // Read public key
        content = new String(Files.readAllBytes(publicKeyPath), StandardCharsets.UTF_8);
        content = content.replace("-----BEGIN PUBLIC KEY-----", "");
        content = content.replace("-----END PUBLIC KEY-----", "");
        X509EncodedKeySpec encodedX509KeySpec = new X509EncodedKeySpec(Base64.getMimeDecoder().decode(content));
        PublicKey publicKey = keyFactory.generatePublic(encodedX509KeySpec);

        return new KeyPair(publicKey, privateKey);
    }

    /**
     * Generates a new host key pair, writes it as PEM, and restricts the key
     * files to owner read/write (best effort; ignored on non-POSIX systems).
     */
    private KeyPair createServerKey() {
        try {
            LOGGER.info("Creating ssh server private key at " + privateKeyPath);
            KeyPair kp = new OpenSSHKeyPairGenerator(algorithm, keySize).generate();
            new PemWriter(privateKeyPath, publicKeyPath).writeKeyPair(algorithm, kp);
            LOGGER.debug("Changing key files permissions");
            Set<PosixFilePermission> permissions = new HashSet<>();
            permissions.add(PosixFilePermission.OWNER_READ);
            permissions.add(PosixFilePermission.OWNER_WRITE);
            try {
                Files.setPosixFilePermissions(privateKeyPath, permissions);
                Files.setPosixFilePermissions(publicKeyPath, permissions);
            } catch (Exception e) {
                LOGGER.debug("Can't change file permissions", e);
            }
            return kp;
        } catch (Exception e) {
            throw new RuntimeException("Key file generation failed", e);
        }
    }

    /**
     * Check the first Object that is resolved is a KeyPair instance
     */
    private static class KeyPairObjectInputStream extends ObjectInputStream {
        // Set once the first resolved class has been verified to be KeyPair;
        // classes resolved after that (the key pair's internals) are allowed.
        private boolean valid;

        public KeyPairObjectInputStream(InputStream is) throws IOException {
            super(is);
        }

        @Override
        protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
            if (!valid) {
                if (!desc.getName().equals(KeyPair.class.getName())) {
                    throw new InvalidClassException("Unauthorized deserialization attempt", desc.getName());
                }
                valid = true;
            }
            return super.resolveClass(desc);
        }
    }
}
| 3,040 |
1,144 | <gh_stars>1000+
package de.metas.dunning.api.impl;
/*
* #%L
* de.metas.dunning
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import java.util.Date;
import java.util.Properties;
import org.adempiere.ad.trx.api.ITrxRunConfig;
import org.compiere.util.TimeUtil;
import de.metas.dunning.api.IDunningConfig;
import de.metas.dunning.api.IDunningContext;
import de.metas.dunning.interfaces.I_C_DunningLevel;
import de.metas.util.Check;
/**
 * Immutable {@link IDunningContext} implementation bundling the session
 * {@link Properties}, the dunning configuration, an optional dunning level,
 * the (day-truncated) dunning date and the transaction settings.
 *
 * @author tsa
 */
public class DunningContext extends AbstractDunningContext
{
	private final IDunningConfig dunningConfig;
	private final Properties ctx;
	private final Date dunningDate;
	private final I_C_DunningLevel dunningLevel;
	private final String trxName;
	private final ITrxRunConfig trxRunnerConfig;

	/**
	 * Creates a fresh dunning context.
	 *
	 * @param dunningDate if not <code>null</code>, it is truncated to day precision
	 */
	public DunningContext(final Properties ctx, final IDunningConfig config, final I_C_DunningLevel dunningLevel, final Date dunningDate, final ITrxRunConfig trxRunnerConfig, final String trxName)
	{
		super();

		Check.assume(ctx != null, "ctx is not null");
		Check.assume(config != null, "config is not null");

		this.ctx = ctx;
		this.dunningConfig = config;
		this.dunningLevel = dunningLevel;
		// normalize to day precision so that date comparisons ignore the time of day
		this.dunningDate = dunningDate != null ? TimeUtil.trunc(dunningDate, TimeUtil.TRUNC_DAY) : null;
		this.trxRunnerConfig = trxRunnerConfig;
		this.trxName = trxName;
	}

	/**
	 * Copy constructor: takes over everything from the given context except the transaction name.
	 */
	public DunningContext(final IDunningContext context, final String trxName)
	{
		super(context);

		this.ctx = context.getCtx();
		this.dunningConfig = context.getDunningConfig();
		this.dunningLevel = context.getC_DunningLevel();
		this.dunningDate = context.getDunningDate();
		this.trxRunnerConfig = context.getTrxRunnerConfig();
		this.trxName = trxName;
	}

	@Override
	public String toString()
	{
		final StringBuilder sb = new StringBuilder("DunningContext [");
		sb.append("dunningLevel=").append(dunningLevel);
		sb.append(", dunningDate=").append(dunningDate);
		sb.append(", trxName=").append(trxName);
		sb.append(", config=").append(dunningConfig);
		sb.append(", ctx=").append(ctx);
		sb.append("]");
		return sb.toString();
	}

	@Override
	public Properties getCtx()
	{
		return ctx;
	}

	@Override
	public String getTrxName()
	{
		return trxName;
	}

	@Override
	public ITrxRunConfig getTrxRunnerConfig()
	{
		return trxRunnerConfig;
	}

	@Override
	public I_C_DunningLevel getC_DunningLevel()
	{
		return dunningLevel;
	}

	@Override
	public IDunningConfig getDunningConfig()
	{
		return dunningConfig;
	}

	/**
	 * @return a defensive copy of the dunning date, or <code>null</code> if none was set
	 */
	@Override
	public Date getDunningDate()
	{
		return dunningDate != null ? (Date)dunningDate.clone() : null;
	}
}
| 1,105 |
2,023 | <reponame>tdiprima/code
#!/usr/bin/env python
"""Play sound files using the pygame mixer module."""
__program__ = "soundplay.py"
__author__ = "<NAME>"
__version__ = "1.1"
__revision__ = "$Rev: 136 $"
__date__ = "$Date: 2007-06-06 19:18:47 +0200 (Mi, 06 Jun 2007) $"
__copyright__ = "Public domain"
import sys
import pygame
# global constants
FREQ = 44100 # same as audio CD
BITSIZE = -16 # unsigned 16 bit
CHANNELS = 2 # 1 == mono, 2 == stereo
BUFFER = 1024 # audio buffer size in no. of samples
FRAMERATE = 30 # how often to check if playback has finished
def playsound(soundfile):
    """Load the whole sound file into memory and play it, blocking until done."""
    clock = pygame.time.Clock()
    sample = pygame.mixer.Sound(soundfile)
    sample.play()
    # Poll the mixer FRAMERATE times per second until all channels are quiet.
    while pygame.mixer.get_busy():
        clock.tick(FRAMERATE)
def playmusic(soundfile):
    """Stream the sound file from disk via mixer.music, blocking until playback ends."""
    pygame.mixer.music.load(soundfile)
    pygame.mixer.music.play()
    ticker = pygame.time.Clock()
    # music.get_busy() stays true while the streamed track is still playing.
    while pygame.mixer.music.get_busy():
        ticker.tick(FRAMERATE)
def playmusic2(soundfile):
    """Stream music with mixer.music module using the event module to wait
    until the playback has finished.

    This method doesn't use a busy/poll loop, but has the disadvantage that
    you need to initialize the video module to use the event module.

    Also, interrupting the playback with Ctrl-C does not work :-(

    Change the call to 'playmusic' in the 'main' function to 'playmusic2'
    to use this method.
    """
    # pygame.init() also initializes the video module, which the event
    # module requires.
    pygame.init()
    pygame.mixer.music.load(soundfile)
    # Ask the mixer to post USEREVENT when the track finishes...
    pygame.mixer.music.set_endevent(pygame.constants.USEREVENT)
    pygame.event.set_allowed(pygame.constants.USEREVENT)
    pygame.mixer.music.play()
    # ...and block here until that event arrives.
    pygame.event.wait()
def main(args):
    """Parse command-line args and play each given file; return exit status."""
    # look at command line: an optional leading '-s' selects streaming mode
    streaming = False
    if args and args[0] == '-s':
        streaming = True
        args.pop(0)

    if not args:
        print >>sys.stderr, "usage: soundplay [-s] FILE"
        print >>sys.stderr, " -s use streaming mode"
        return 2

    # initialize pygame.mixer module
    # if these settings do not work with your audio system
    # change the global constants accordingly
    try:
        pygame.mixer.init(FREQ, BITSIZE, CHANNELS, BUFFER)
    except pygame.error, exc:
        print >>sys.stderr, "Could not initialize sound system: %s" % exc
        return 1

    try:
        for soundfile in args:
            try:
                # play it!
                if streaming:
                    playmusic(soundfile)
                else:
                    playsound(soundfile)
            except pygame.error, exc:
                # skip unplayable files but keep going with the rest
                print >>sys.stderr, "Could not play sound file: %s" % soundfile
                print exc
                continue
    except KeyboardInterrupt:
        # if user hits Ctrl-C, exit gracefully
        pass

    return 0
# Run only when executed as a script (not when imported as a module).
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 1,276 |
14,425 | <reponame>WarpspeedSCP/hadoop
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.cosn;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.io.IOException;
import java.io.OutputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import com.qcloud.cos.model.PartETag;
import org.apache.hadoop.conf.Configuration;
/**
* The output stream for the COS blob store.
* Implement streaming upload to COS based on the multipart upload function.
* ( the maximum size of each part is 5GB)
* Support up to 40TB single file by multipart upload (each part is 5GB).
* Improve the upload performance of writing large files by using byte buffers
* and a fixed thread pool.
*/
public class CosNOutputStream extends OutputStream {
  private static final Logger LOG =
      LoggerFactory.getLogger(CosNOutputStream.class);

  private final Configuration conf;
  private final NativeFileSystemStore store;
  // MD5 digest of the block currently being written; null if MD5 is
  // unavailable in this JVM (see constructor fallback).
  private MessageDigest digest;
  // Effective part size, clamped to [MIN_PART_SIZE, MAX_PART_SIZE].
  private long blockSize;
  private String key;
  // 0-based index of the block being filled; COS part numbers are blockId + 1.
  private int currentBlockId = 0;
  private Set<ByteBufferWrapper> blockCacheBuffers = new HashSet<>();
  private ByteBufferWrapper currentBlockBuffer;
  private OutputStream currentBlockOutputStream;
  private String uploadId = null;
  private ListeningExecutorService executorService;
  // Futures of the asynchronously uploaded parts; awaited in close().
  private List<ListenableFuture<PartETag>> etagList = new LinkedList<>();
  // Number of bytes written into the current block so far.
  private int blockWritten = 0;
  private boolean closed = false;

  /**
   * Creates the stream, clamping the block size to COS part-size limits and
   * allocating the first block buffer from the shared {@link BufferPool}.
   *
   * @param conf job configuration
   * @param store backing COS store used for the (multipart) upload
   * @param key object key to write to
   * @param blockSize requested size of each multipart block in bytes
   * @param executorService pool used to upload completed parts in parallel
   * @throws IOException if no buffer of the requested size can be obtained
   */
  public CosNOutputStream(Configuration conf, NativeFileSystemStore store,
      String key, long blockSize, ExecutorService executorService)
      throws IOException {
    this.conf = conf;
    this.store = store;
    this.key = key;
    this.blockSize = blockSize;
    if (this.blockSize < Constants.MIN_PART_SIZE) {
      LOG.warn(
          String.format(
              "The minimum size of a single block is limited to %d.",
              Constants.MIN_PART_SIZE));
      this.blockSize = Constants.MIN_PART_SIZE;
    }
    if (this.blockSize > Constants.MAX_PART_SIZE) {
      LOG.warn(
          String.format(
              "The maximum size of a single block is limited to %d.",
              Constants.MAX_PART_SIZE));
      this.blockSize = Constants.MAX_PART_SIZE;
    }

    // Use a blocking thread pool with fair scheduling
    this.executorService = MoreExecutors.listeningDecorator(executorService);

    try {
      this.currentBlockBuffer =
          BufferPool.getInstance().getBuffer((int) this.blockSize);
    } catch (IOException e) {
      throw new IOException("Getting a buffer size: "
          + String.valueOf(this.blockSize)
          + " from buffer pool occurs an exception: ", e);
    }

    try {
      // Wrap the buffer stream in a DigestOutputStream so the MD5 of the
      // block is computed incrementally while writing.
      this.digest = MessageDigest.getInstance("MD5");
      this.currentBlockOutputStream = new DigestOutputStream(
          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer()),
          this.digest);
    } catch (NoSuchAlgorithmException e) {
      // MD5 not available: fall back to writing without a digest.
      this.digest = null;
      this.currentBlockOutputStream =
          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer());
    }
  }

  @Override
  public void flush() throws IOException {
    this.currentBlockOutputStream.flush();
  }

  /**
   * Finishes the upload: flushes the current block, then either stores the
   * object with a single put (only one block was ever filled) or uploads the
   * final part and completes the multipart upload. Idempotent.
   */
  @Override
  public synchronized void close() throws IOException {
    if (this.closed) {
      return;
    }
    this.currentBlockOutputStream.flush();
    this.currentBlockOutputStream.close();
    LOG.info("The output stream has been close, and "
        + "begin to upload the last block: [{}].", this.currentBlockId);
    this.blockCacheBuffers.add(this.currentBlockBuffer);
    if (this.blockCacheBuffers.size() == 1) {
      // Only one block was used: upload as a simple object, no multipart.
      byte[] md5Hash = this.digest == null ? null : this.digest.digest();
      store.storeFile(this.key,
          new ByteBufferInputStream(this.currentBlockBuffer.getByteBuffer()),
          md5Hash, this.currentBlockBuffer.getByteBuffer().remaining());
    } else {
      // Multipart path: upload the trailing partial block (if any)
      // synchronously, then wait for all previously submitted parts.
      PartETag partETag = null;
      if (this.blockWritten > 0) {
        LOG.info("Upload the last part..., blockId: [{}], written bytes: [{}]",
            this.currentBlockId, this.blockWritten);
        partETag = store.uploadPart(
            new ByteBufferInputStream(currentBlockBuffer.getByteBuffer()),
            key, uploadId, currentBlockId + 1,
            currentBlockBuffer.getByteBuffer().remaining());
      }
      final List<PartETag> futurePartETagList = this.waitForFinishPartUploads();
      if (null == futurePartETagList) {
        throw new IOException("Failed to multipart upload to cos, abort it.");
      }
      List<PartETag> tmpPartEtagList = new LinkedList<>(futurePartETagList);
      if (null != partETag) {
        tmpPartEtagList.add(partETag);
      }
      store.completeMultipartUpload(this.key, this.uploadId, tmpPartEtagList);
    }
    try {
      BufferPool.getInstance().returnBuffer(this.currentBlockBuffer);
    } catch (InterruptedException e) {
      LOG.error("An exception occurred "
          + "while returning the buffer to the buffer pool.", e);
    }
    LOG.info("The outputStream for key: [{}] has been uploaded.", key);
    this.blockWritten = 0;
    this.closed = true;
  }

  /**
   * Blocks until all asynchronously submitted part uploads finish.
   *
   * @return the collected part ETags, or {@code null} if interrupted
   * @throws IOException if any part upload failed (the multipart upload is
   *         aborted before rethrowing)
   */
  private List<PartETag> waitForFinishPartUploads() throws IOException {
    try {
      LOG.info("Wait for all parts to finish their uploading.");
      return Futures.allAsList(this.etagList).get();
    } catch (InterruptedException e) {
      LOG.error("Interrupt the part upload.", e);
      return null;
    } catch (ExecutionException e) {
      // One of the part uploads failed: cancel the rest and abort the
      // multipart upload so COS does not keep the orphaned parts.
      LOG.error("Cancelling futures.");
      for (ListenableFuture<PartETag> future : this.etagList) {
        future.cancel(true);
      }
      (store).abortMultipartUpload(this.key, this.uploadId);
      LOG.error("Multipart upload with id: [{}] to COS key: [{}]",
          this.uploadId, this.key, e);
      throw new IOException("Multipart upload with id: "
          + this.uploadId + " to " + this.key, e);
    }
  }

  /**
   * Seals the current block, submits it for asynchronous upload on the
   * executor, and allocates a fresh buffer/stream for the next block.
   * Lazily initiates the multipart upload on the first part.
   */
  private void uploadPart() throws IOException {
    this.currentBlockOutputStream.flush();
    this.currentBlockOutputStream.close();
    this.blockCacheBuffers.add(this.currentBlockBuffer);

    if (this.currentBlockId == 0) {
      uploadId = (store).getUploadId(key);
    }

    ListenableFuture<PartETag> partETagListenableFuture =
        this.executorService.submit(
            new Callable<PartETag>() {
              // Snapshot the mutable fields so the async task is unaffected
              // by the stream moving on to the next block.
              private final ByteBufferWrapper buf = currentBlockBuffer;
              private final String localKey = key;
              private final String localUploadId = uploadId;
              private final int blockId = currentBlockId;

              @Override
              public PartETag call() throws Exception {
                if (LOG.isDebugEnabled()) {
                  LOG.debug("{} is uploading a part.",
                      Thread.currentThread().getName());
                }
                PartETag partETag = (store).uploadPart(
                    new ByteBufferInputStream(this.buf.getByteBuffer()),
                    this.localKey, this.localUploadId,
                    this.blockId + 1, this.buf.getByteBuffer().remaining());
                // The buffer is handed back to the pool once uploaded.
                BufferPool.getInstance().returnBuffer(this.buf);
                return partETag;
              }
            });
    this.etagList.add(partETagListenableFuture);

    try {
      this.currentBlockBuffer =
          BufferPool.getInstance().getBuffer((int) this.blockSize);
    } catch (IOException e) {
      String errMsg = String.format("Getting a buffer [size:%d] from "
          + "the buffer pool failed.", this.blockSize);
      throw new IOException(errMsg, e);
    }
    this.currentBlockId++;
    if (null != this.digest) {
      this.digest.reset();
      this.currentBlockOutputStream = new DigestOutputStream(
          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer()),
          this.digest);
    } else {
      this.currentBlockOutputStream =
          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer());
    }
  }

  /**
   * Writes bytes, splitting across blocks and triggering a part upload each
   * time the current block fills up.
   */
  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    if (this.closed) {
      throw new IOException("block stream has been closed.");
    }

    while (len > 0) {
      long writeBytes;
      if (this.blockWritten + len > this.blockSize) {
        // Only fill up to the end of the current block this iteration.
        writeBytes = this.blockSize - this.blockWritten;
      } else {
        writeBytes = len;
      }

      this.currentBlockOutputStream.write(b, off, (int) writeBytes);
      this.blockWritten += writeBytes;
      if (this.blockWritten >= this.blockSize) {
        this.uploadPart();
        this.blockWritten = 0;
      }
      len -= writeBytes;
      off += writeBytes;
    }
  }

  @Override
  public void write(byte[] b) throws IOException {
    this.write(b, 0, b.length);
  }

  /** Single-byte write; same block-rollover behavior as the array variant. */
  @Override
  public void write(int b) throws IOException {
    if (this.closed) {
      throw new IOException("block stream has been closed.");
    }

    byte[] singleBytes = new byte[1];
    singleBytes[0] = (byte) b;
    this.currentBlockOutputStream.write(singleBytes, 0, 1);
    this.blockWritten += 1;
    if (this.blockWritten >= this.blockSize) {
      this.uploadPart();
      this.blockWritten = 0;
    }
  }
}
| 3,914 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.subversion.ui.wizards.urlpatternstep;
/**
*
* @author <NAME>
*/
public class URLPatternPanel extends javax.swing.JPanel {
// NOTE(review): Matisse/Form Editor generated panel for the Subversion
// "URL pattern" wizard step. Everything between the GEN-BEGIN/GEN-END
// markers is regenerated by the IDE and is deliberately left untouched;
// change the .form file, not this code, when the layout must evolve.
/**
 * Creates new form CheckoutPanel
 */
public URLPatternPanel() {
initComponents();
}
/** This method is called from within the constructor to
 * initialize the form.
 * WARNING: Do NOT modify this code. The content of this method is
 * always regenerated by the Form Editor.
 */
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
buttonGroup1 = new javax.swing.ButtonGroup();
jLabel3 = new javax.swing.JLabel();
jLabel4 = new javax.swing.JLabel();
jLabel1 = new javax.swing.JLabel();
jLabel2 = new javax.swing.JLabel();
java.util.ResourceBundle bundle = java.util.ResourceBundle.getBundle("org/netbeans/modules/subversion/ui/wizards/urlpatternstep/Bundle"); // NOI18N
setName(bundle.getString("CTL_Name")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(jLabel3, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_URLPattern_RepositoryHint")); // NOI18N
jLabel4.setLabelFor(repositoryPathTextField);
org.openide.awt.Mnemonics.setLocalizedText(jLabel4, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_URLPattern_RepositoryFolder")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(browseRepositoryButton, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_URLPattern_Browse")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(anyURLCheckBox, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_AnyUrl")); // NOI18N
anyURLCheckBox.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0));
anyURLCheckBox.setMargin(new java.awt.Insets(0, 0, 0, 0));
org.openide.awt.Mnemonics.setLocalizedText(jLabel1, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_FolderName")); // NOI18N
buttonGroup1.add(useFolderRadioButton);
useFolderRadioButton.setSelected(true);
org.openide.awt.Mnemonics.setLocalizedText(useFolderRadioButton, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_UseFolderName")); // NOI18N
useFolderRadioButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0));
useFolderRadioButton.setMargin(new java.awt.Insets(0, 0, 0, 0));
buttonGroup1.add(useSubfolderRadioButton);
org.openide.awt.Mnemonics.setLocalizedText(useSubfolderRadioButton, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_UseSubfolderName")); // NOI18N
useSubfolderRadioButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0));
useSubfolderRadioButton.setMargin(new java.awt.Insets(0, 0, 0, 0));
org.openide.awt.Mnemonics.setLocalizedText(jLabel2, org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "CTL_Preview")); // NOI18N
depthComboBox.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20" }));
org.openide.awt.Mnemonics.setLocalizedText(previewLabel, " ");
previewLabel.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Nb.ScrollPane.Border.color")));
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(anyURLCheckBox)
.addContainerGap())
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel1)
.addContainerGap())
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
.addComponent(jLabel2)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(previewLabel, javax.swing.GroupLayout.DEFAULT_SIZE, 526, Short.MAX_VALUE))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel3, javax.swing.GroupLayout.DEFAULT_SIZE, 494, Short.MAX_VALUE)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addComponent(jLabel4)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(repositoryPathTextField, javax.swing.GroupLayout.DEFAULT_SIZE, 343, Short.MAX_VALUE)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(browseRepositoryButton))
.addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(useFolderRadioButton)
.addGroup(layout.createSequentialGroup()
.addComponent(useSubfolderRadioButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(depthComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 286, Short.MAX_VALUE)))
.addGap(0, 0, 0))
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel3, javax.swing.GroupLayout.PREFERRED_SIZE, 15, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel4)
.addComponent(repositoryPathTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(browseRepositoryButton))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(anyURLCheckBox)
.addGap(32, 32, 32)
.addComponent(jLabel1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(useFolderRadioButton)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(useSubfolderRadioButton)
.addComponent(depthComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(14, 14, 14)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel2)
.addComponent(previewLabel))
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
jLabel4.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_RepositoryFolder")); // NOI18N
browseRepositoryButton.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_BrowseFolders")); // NOI18N
anyURLCheckBox.getAccessibleContext().setAccessibleName(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSN_AnyUrl")); // NOI18N
anyURLCheckBox.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_AnyUrl")); // NOI18N
useFolderRadioButton.getAccessibleContext().setAccessibleName(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSN_UseFolderName")); // NOI18N
useFolderRadioButton.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_UseFolderName")); // NOI18N
useSubfolderRadioButton.getAccessibleContext().setAccessibleName(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSN_UseSubfolderName")); // NOI18N
useSubfolderRadioButton.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_UseSubfolderName")); // NOI18N
depthComboBox.getAccessibleContext().setAccessibleName(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSN_DepthComboBox")); // NOI18N
depthComboBox.getAccessibleContext().setAccessibleDescription(org.openide.util.NbBundle.getMessage(URLPatternPanel.class, "ACSD_DepthComboBox")); // NOI18N
}// </editor-fold>//GEN-END:initComponents
// Fields are package-visible (final) so the owning wizard step can read
// user input directly; the GEN-delimited list below is IDE-managed.
// Variables declaration - do not modify
// Variables declaration - do not modify//GEN-BEGIN:variables
final javax.swing.JCheckBox anyURLCheckBox = new javax.swing.JCheckBox();
final javax.swing.JButton browseRepositoryButton = new javax.swing.JButton();
private javax.swing.ButtonGroup buttonGroup1;
final javax.swing.JComboBox depthComboBox = new javax.swing.JComboBox();
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
final javax.swing.JLabel previewLabel = new javax.swing.JLabel();
final javax.swing.JTextField repositoryPathTextField = new javax.swing.JTextField();
final javax.swing.JRadioButton useFolderRadioButton = new javax.swing.JRadioButton();
final javax.swing.JRadioButton useSubfolderRadioButton = new javax.swing.JRadioButton();
// End of variables declaration//GEN-END:variables
}
| 4,884 |
475 | <filename>include/ecst/make_inl.hpp
// Copyright (c) 2015-2016 <NAME>
// License: Academic Free License ("AFL") v. 3.0
// AFL License page: http://opensource.org/licenses/AFL-3.0
// http://vittorioromeo.info | <EMAIL>
#pragma once
#include "./make_inl/settings_make.inl"
#include "./make_inl/signature_component_make.inl"
#include "./make_inl/signature_system_make.inl"
| 148 |
686 | <filename>src/nb/deser/support/ClassDetails.java
package nb.deser.support;
import java.util.ArrayList;
/***********************************************************
* Support class for serialization data parsing that holds
* details of a single class to enable class data for that
* class to be read (classDescFlags, field descriptions).
*
* Written by <NAME> (@NickstaDB).
**********************************************************/
public class ClassDetails {
    /*******************
     * classDescFlags bit masks, as defined by the Java Object
     * Serialization Stream Protocol (see java.io.ObjectStreamConstants).
     * Named constants replace the magic numbers previously repeated in
     * each isSC_* accessor below.
     ******************/
    private static final byte SC_WRITE_METHOD = 0x01;
    private static final byte SC_SERIALIZABLE = 0x02;
    private static final byte SC_EXTERNALIZABLE = 0x04;
    private static final byte SC_BLOCK_DATA = 0x08;

    /*******************
     * Properties
     ******************/
    private final String _className;                        //The name of the class
    private int _refHandle;                                 //The reference handle for the class
    private byte _classDescFlags;                           //The classDescFlags value for the class
    private final ArrayList<ClassField> _fieldDescriptions; //The class field descriptions

    /*******************
     * Construct the ClassDetails object.
     *
     * @param className The name of the class.
     ******************/
    public ClassDetails(String className) {
        this._className = className;
        this._refHandle = -1;          //-1 marks "no handle assigned yet"
        this._classDescFlags = 0;
        this._fieldDescriptions = new ArrayList<ClassField>();
    }

    /*******************
     * Get the class name.
     *
     * @return The class name.
     ******************/
    public String getClassName() {
        return this._className;
    }

    /*******************
     * Set the reference handle of the class.
     *
     * @param handle The reference handle value.
     ******************/
    public void setHandle(int handle) {
        this._refHandle = handle;
    }

    /*******************
     * Get the reference handle.
     *
     * @return The reference handle value for this class, or -1 if none
     *         has been assigned.
     ******************/
    public int getHandle() {
        return this._refHandle;
    }

    /*******************
     * Set the classDescFlags property.
     *
     * @param classDescFlags The classDescFlags value.
     ******************/
    public void setClassDescFlags(byte classDescFlags) {
        this._classDescFlags = classDescFlags;
    }

    /*******************
     * Check whether the class is SC_SERIALIZABLE.
     *
     * @return True if the classDescFlags includes SC_SERIALIZABLE.
     ******************/
    public boolean isSC_SERIALIZABLE() {
        return (this._classDescFlags & SC_SERIALIZABLE) == SC_SERIALIZABLE;
    }

    /*******************
     * Check whether the class is SC_EXTERNALIZABLE.
     *
     * @return True if the classDescFlags includes SC_EXTERNALIZABLE.
     ******************/
    public boolean isSC_EXTERNALIZABLE() {
        return (this._classDescFlags & SC_EXTERNALIZABLE) == SC_EXTERNALIZABLE;
    }

    /*******************
     * Check whether the class is SC_WRITE_METHOD.
     *
     * @return True if the classDescFlags includes SC_WRITE_METHOD.
     ******************/
    public boolean isSC_WRITE_METHOD() {
        return (this._classDescFlags & SC_WRITE_METHOD) == SC_WRITE_METHOD;
    }

    /*******************
     * Check whether the class is SC_BLOCKDATA.
     *
     * @return True if the classDescFlags includes SC_BLOCKDATA.
     ******************/
    public boolean isSC_BLOCKDATA() {
        return (this._classDescFlags & SC_BLOCK_DATA) == SC_BLOCK_DATA;
    }

    /*******************
     * Add a field description to the class details object.
     *
     * @param cf The ClassField object describing the field.
     ******************/
    public void addField(ClassField cf) {
        this._fieldDescriptions.add(cf);
    }

    /*******************
     * Get the class field descriptions.
     *
     * @return An array of field descriptions for the class.
     ******************/
    public ArrayList<ClassField> getFields() {
        return this._fieldDescriptions;
    }

    /*******************
     * Set the name of the last field to be added to the ClassDetails object.
     *
     * Precondition: at least one field has been added via addField(),
     * otherwise this throws IndexOutOfBoundsException.
     *
     * @param name The field name.
     ******************/
    public void setLastFieldName(String name) {
        this._fieldDescriptions.get(this._fieldDescriptions.size() - 1).setName(name);
    }

    /*******************
     * Set the className1 of the last field to be added to the ClassDetails
     * object.
     *
     * Precondition: at least one field has been added via addField(),
     * otherwise this throws IndexOutOfBoundsException.
     *
     * @param cn1 The className1 value.
     ******************/
    public void setLastFieldClassName1(String cn1) {
        this._fieldDescriptions.get(this._fieldDescriptions.size() - 1).setClassName1(cn1);
    }
}
| 1,262 |
1,350 | <reponame>Shashi-rk/azure-sdk-for-java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.managedapplications.fluent;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.management.polling.PollResult;
import com.azure.core.util.Context;
import com.azure.core.util.polling.SyncPoller;
import com.azure.resourcemanager.managedapplications.fluent.models.ApplicationDefinitionInner;
/** An instance of this class provides access to all the operations defined in ApplicationDefinitionsClient. */
public interface ApplicationDefinitionsClient {
// NOTE(review): AutoRest-generated client contract (see the generated-code
// header above). Regenerate from the service specification rather than
// hand-editing, so the interface stays in sync with the implementation.
/**
 * Gets the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner getByResourceGroup(String resourceGroupName, String applicationDefinitionName);
/**
 * Gets the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
Response<ApplicationDefinitionInner> getByResourceGroupWithResponse(
String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition to delete.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<Void>, Void> beginDelete(String resourceGroupName, String applicationDefinitionName);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition to delete.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<Void>, Void> beginDelete(
String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition to delete.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void delete(String resourceGroupName, String applicationDefinitionName);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition to delete.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void delete(String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update an managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<ApplicationDefinitionInner>, ApplicationDefinitionInner> beginCreateOrUpdate(
String resourceGroupName, String applicationDefinitionName, ApplicationDefinitionInner parameters);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update an managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<ApplicationDefinitionInner>, ApplicationDefinitionInner> beginCreateOrUpdate(
String resourceGroupName,
String applicationDefinitionName,
ApplicationDefinitionInner parameters,
Context context);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update an managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner createOrUpdate(
String resourceGroupName, String applicationDefinitionName, ApplicationDefinitionInner parameters);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update an managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner createOrUpdate(
String resourceGroupName,
String applicationDefinitionName,
ApplicationDefinitionInner parameters,
Context context);
/**
 * Lists the managed application definitions in a resource group.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return list of managed application definitions.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
PagedIterable<ApplicationDefinitionInner> listByResourceGroup(String resourceGroupName);
/**
 * Lists the managed application definitions in a resource group.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return list of managed application definitions.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
PagedIterable<ApplicationDefinitionInner> listByResourceGroup(String resourceGroupName, Context context);
/**
 * Gets the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner getById(String resourceGroupName, String applicationDefinitionName);
/**
 * Gets the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
Response<ApplicationDefinitionInner> getByIdWithResponse(
String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<Void>, Void> beginDeleteById(String resourceGroupName, String applicationDefinitionName);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<Void>, Void> beginDeleteById(
String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void deleteById(String resourceGroupName, String applicationDefinitionName);
/**
 * Deletes the managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void deleteById(String resourceGroupName, String applicationDefinitionName, Context context);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update a managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<ApplicationDefinitionInner>, ApplicationDefinitionInner> beginCreateOrUpdateById(
String resourceGroupName, String applicationDefinitionName, ApplicationDefinitionInner parameters);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update a managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
SyncPoller<PollResult<ApplicationDefinitionInner>, ApplicationDefinitionInner> beginCreateOrUpdateById(
String resourceGroupName,
String applicationDefinitionName,
ApplicationDefinitionInner parameters,
Context context);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update a managed application definition.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner createOrUpdateById(
String resourceGroupName, String applicationDefinitionName, ApplicationDefinitionInner parameters);
/**
 * Creates a new managed application definition.
 *
 * @param resourceGroupName The name of the resource group. The name is case insensitive.
 * @param applicationDefinitionName The name of the managed application definition.
 * @param parameters Parameters supplied to the create or update a managed application definition.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws com.azure.resourcemanager.managedapplications.models.ErrorResponseException thrown if the request is
 * rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return information about managed application definition.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
ApplicationDefinitionInner createOrUpdateById(
String resourceGroupName,
String applicationDefinitionName,
ApplicationDefinitionInner parameters,
Context context);
}
| 5,640 |
3,603 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.raptor.legacy.metadata;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import io.airlift.slice.Slice;
import io.trino.plugin.raptor.legacy.RaptorColumnHandle;
import io.trino.spi.TrinoException;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.Ranges;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.type.Type;
import java.sql.JDBCType;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.Map.Entry;
import java.util.StringJoiner;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static io.trino.plugin.raptor.legacy.metadata.DatabaseShardManager.maxColumn;
import static io.trino.plugin.raptor.legacy.metadata.DatabaseShardManager.minColumn;
import static io.trino.plugin.raptor.legacy.storage.ColumnIndexStatsUtils.jdbcType;
import static io.trino.plugin.raptor.legacy.storage.ShardStats.truncateIndexValue;
import static io.trino.plugin.raptor.legacy.util.UuidUtil.uuidStringToBytes;
import static io.trino.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
/**
 * SQL predicate over the shard index tables, derived from a {@link TupleDomain}
 * of Raptor column constraints. Holds the generated WHERE-clause text plus the
 * JDBC types and values to bind into its "?" placeholders.
 */
class ShardPredicate
{
    private final String predicate;
    private final List<JDBCType> types;
    private final List<Object> values;
    // Skip predicate pushdown for columns whose domain has more ranges than
    // this, to prevent generating complicated metadata queries.
    private static final int MAX_RANGE_COUNT = 100;
    private ShardPredicate(String predicate, List<JDBCType> types, List<Object> values)
    {
        this.predicate = requireNonNull(predicate, "predicate is null");
        this.types = ImmutableList.copyOf(requireNonNull(types, "types is null"));
        this.values = ImmutableList.copyOf(requireNonNull(values, "values is null"));
        checkArgument(types.size() == values.size(), "types and values sizes do not match");
    }
    public String getPredicate()
    {
        return predicate;
    }
    /**
     * Binds all captured values to the statement's placeholders, in the order
     * they appear in {@link #getPredicate()}.
     */
    public void bind(PreparedStatement statement)
            throws SQLException
    {
        for (int i = 0; i < types.size(); i++) {
            JDBCType type = types.get(i);
            Object value = values.get(i);
            bindValue(statement, type, value, i + 1);
        }
    }
    @Override
    public String toString()
    {
        return toStringHelper(this)
                .addValue(predicate)
                .toString();
    }
    public static ShardPredicate create(TupleDomain<RaptorColumnHandle> tupleDomain)
    {
        StringJoiner predicate = new StringJoiner(" AND ").setEmptyValue("true");
        ImmutableList.Builder<JDBCType> types = ImmutableList.builder();
        ImmutableList.Builder<Object> values = ImmutableList.builder();
        // NOTE(review): getDomains() is empty only for a "none" tuple domain;
        // callers are presumably expected not to pass one -- confirm.
        for (Entry<RaptorColumnHandle, Domain> entry : tupleDomain.getDomains().get().entrySet()) {
            Domain domain = entry.getValue();
            if (domain.isNullAllowed() || domain.isAll()) {
                continue;
            }
            RaptorColumnHandle handle = entry.getKey();
            Type type = handle.getColumnType();
            JDBCType jdbcType = jdbcType(type);
            if (jdbcType == null) {
                continue;
            }
            if (handle.isShardUuid()) {
                predicate.add(createShardPredicate(types, values, domain, jdbcType));
                continue;
            }
            if (!domain.getType().isOrderable()) {
                continue;
            }
            StringJoiner columnPredicate = new StringJoiner(" OR ", "(", ")").setEmptyValue("true");
            Ranges ranges = domain.getValues().getRanges();
            // prevent generating complicated metadata queries
            if (ranges.getRangeCount() > MAX_RANGE_COUNT) {
                continue;
            }
            for (Range range : ranges.getOrderedRanges()) {
                String min;
                String max;
                if (handle.isBucketNumber()) {
                    min = "bucket_number";
                    max = "bucket_number";
                }
                else {
                    min = minColumn(handle.getColumnId());
                    max = maxColumn(handle.getColumnId());
                }
                // Keep shards whose [min, max] statistics overlap the range;
                // a NULL min/max means stats are unknown, so keep the shard.
                StringJoiner rangePredicate = new StringJoiner(" AND ", "(", ")").setEmptyValue("true");
                if (!range.isLowUnbounded()) {
                    rangePredicate.add(format("(%s >= ? OR %s IS NULL)", max, max));
                    types.add(jdbcType);
                    values.add(range.getLowBoundedValue());
                }
                if (!range.isHighUnbounded()) {
                    rangePredicate.add(format("(%s <= ? OR %s IS NULL)", min, min));
                    types.add(jdbcType);
                    values.add(range.getHighBoundedValue());
                }
                columnPredicate.add(rangePredicate.toString());
            }
            predicate.add(columnPredicate.toString());
        }
        return new ShardPredicate(predicate.toString(), types.build(), values.build());
    }
    private static String createShardPredicate(ImmutableList.Builder<JDBCType> types, ImmutableList.Builder<Object> values, Domain domain, JDBCType jdbcType)
    {
        List<Range> ranges = domain.getValues().getRanges().getOrderedRanges();
        // only apply predicates if all ranges are single values
        if (ranges.isEmpty() || !ranges.stream().allMatch(Range::isSingleValue)) {
            return "true";
        }
        ImmutableList.Builder<Object> valuesBuilder = ImmutableList.builder();
        ImmutableList.Builder<JDBCType> typesBuilder = ImmutableList.builder();
        // Wrap the OR-joined equality tests in parentheses: create() joins this
        // fragment with other predicates using AND, and AND binds tighter than
        // OR in SQL, so an unparenthesized "shard_uuid = ? OR shard_uuid = ?"
        // would change the meaning of the overall WHERE clause.
        StringJoiner rangePredicate = new StringJoiner(" OR ", "(", ")");
        for (Range range : ranges) {
            Slice uuidText = (Slice) range.getSingleValue();
            try {
                Slice uuidBytes = uuidStringToBytes(uuidText);
                typesBuilder.add(jdbcType);
                valuesBuilder.add(uuidBytes);
            }
            catch (IllegalArgumentException e) {
                // Not a valid UUID literal: fall back to no pruning.
                return "true";
            }
            rangePredicate.add("shard_uuid = ?");
        }
        types.addAll(typesBuilder.build());
        values.addAll(valuesBuilder.build());
        return rangePredicate.toString();
    }
    @VisibleForTesting
    protected List<JDBCType> getTypes()
    {
        return types;
    }
    @VisibleForTesting
    protected List<Object> getValues()
    {
        return values;
    }
    /**
     * Binds a single value at the given 1-based parameter index, using the
     * JDBC setter appropriate for the column's type.
     */
    public static void bindValue(PreparedStatement statement, JDBCType type, Object value, int index)
            throws SQLException
    {
        if (value == null) {
            statement.setNull(index, type.getVendorTypeNumber());
            return;
        }
        switch (type) {
            case BOOLEAN:
                statement.setBoolean(index, (boolean) value);
                return;
            case INTEGER:
                statement.setInt(index, ((Number) value).intValue());
                return;
            case BIGINT:
                statement.setLong(index, ((Number) value).longValue());
                return;
            case DOUBLE:
                statement.setDouble(index, ((Number) value).doubleValue());
                return;
            case VARBINARY:
                statement.setBytes(index, truncateIndexValue((Slice) value).getBytes());
                return;
            default:
                throw new TrinoException(GENERIC_INTERNAL_ERROR, "Unhandled type: " + type);
        }
    }
}
| 3,569 |
834 | // Copyright 2004-present Facebook. All Rights Reserved.
#include "fboss/agent/hw/bcm/BcmWarmBootHelper.h"
#include "fboss/agent/SysError.h"
#include "fboss/agent/Utils.h"
#include "fboss/agent/hw/bcm/BcmAPI.h"
#include "fboss/agent/hw/bcm/BcmError.h"
#include "fboss/agent/hw/bcm/BcmFacebookAPI.h"
#include "fboss/agent/hw/bcm/BcmSdkVer.h"
#include "fboss/agent/hw/bcm/BcmUnit.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <folly/FileUtil.h>
#include <folly/json.h>
#include <folly/logging/xlog.h>
#include <glog/logging.h>
extern "C" {
#include <bcm/switch.h>
#if (defined(IS_OPENNSA))
#include <soc/opensoc.h>
#else
#include <soc/scache.h>
#endif
} // extern "C"
using std::string;
namespace facebook::fboss {
// Creates the warm boot helper for `unit`. SDK state files are stored under
// `warmBootDir` with the "bcm_sdk_state_" prefix; an empty directory skips
// SDK warm boot setup entirely.
BcmWarmBootHelper::BcmWarmBootHelper(int unit, const std::string& warmBootDir)
    : HwSwitchWarmBootHelper(unit, warmBootDir, "bcm_sdk_state_") {
  if (!warmBootDir.empty()) {
    setupSdkWarmBoot();
  }
}
// Read callback handed to the SDK via soc_switch_stable_register() in
// setupSdkWarmBoot(). Returns the number of bytes read, or BCM_E_FAIL on
// error. All exceptions are caught and converted to an error return so they
// never propagate into the SDK's C code.
int BcmWarmBootHelper::warmBootReadCallback(
    int unitNumber,
    uint8_t* buf,
    int offset,
    int nbytes) {
  try {
    auto wbHelper = BcmAPI::getUnit(unitNumber)->warmBootHelper();
    wbHelper->warmBootRead(buf, offset, nbytes);
    return nbytes;
  } catch (const std::exception& ex) {
    XLOG(ERR) << "error performing warm boot read of " << nbytes
              << " bytes for unit " << unitNumber << ": "
              << folly::exceptionStr(ex);
    return BCM_E_FAIL;
  }
}
// Write callback handed to the SDK via soc_switch_stable_register() in
// setupSdkWarmBoot(). Returns the number of bytes written, or BCM_E_FAIL on
// failure. All exceptions are caught so they never propagate into the SDK's
// C code.
int BcmWarmBootHelper::warmBootWriteCallback(
    int unitNumber,
    uint8_t* buf,
    int offset,
    int nbytes) {
  try {
    auto wbHelper = BcmAPI::getUnit(unitNumber)->warmBootHelper();
    wbHelper->warmBootWrite(buf, offset, nbytes);
    return nbytes;
  } catch (const std::exception& ex) {
    XLOG(ERR) << "error performing warm boot write of " << nbytes
              << " bytes for unit " << unitNumber << ": "
              << folly::exceptionStr(ex);
    // Ugh. Unfortunately the Broadcom SDK code doesn't appear to
    // check the return value from the warm boot write callback, so
    // it won't correctly handle errors at this point.
    return BCM_E_FAIL;
  }
}
// Configures the SDK for application-managed warm boot state: selects
// BCM_SWITCH_STABLE_APPLICATION storage, registers our read/write callbacks,
// and sets the stable storage size (clamped up to a minimum).
void BcmWarmBootHelper::setupSdkWarmBoot() {
  auto rv = soc_stable_set(getSwitchId(), BCM_SWITCH_STABLE_APPLICATION, 0);
  bcmCheckError(
      rv, "unable to configure for warm boot for unit ", getSwitchId());
  rv = soc_switch_stable_register(
      getSwitchId(),
      &warmBootReadCallback,
      &warmBootWriteCallback,
      nullptr,
      nullptr);
  bcmCheckError(
      rv,
      "unable to register read, write callbacks for warm boot "
      "on unit ",
      getSwitchId());
  auto stableSize = BcmAPI::getConfigStableSize();
  // Remove this code once the new values for stable are propagated
  // to all switches
  constexpr uint64_t kStableSizeMinimum = 0x6000000;
  if (stableSize < kStableSizeMinimum) {
    stableSize = kStableSizeMinimum;
  }
  XLOG(DBG0) << "Initializing sdk stable storage with max size of "
             << stableSize << " bytes.";
  rv = soc_stable_size_set(getSwitchId(), stableSize);
  bcmCheckError(
      rv, "unable to set size for warm boot storage for unit ", getSwitchId());
}
// Reads exactly `nbytes` from the warm boot file at `offset` into `buf`.
// Throws on failure; the SDK read callback relies on all-or-nothing reads.
void BcmWarmBootHelper::warmBootRead(uint8_t* buf, int offset, int nbytes) {
  if (warmBootFd() < 0) {
    // This shouldn't ever happen. We only register the warm boot
    // callbacks after opening the fd.
    throw FbossError(
        "attempted warm boot read on unit ",
        getSwitchId(),
        " but warm boot not configured");
  }
  // The Broadcom code assumes that the read callback always returns
  // exactly as much data as requested, so use folly::preadFull().
  auto bytesRead = folly::preadFull(warmBootFd(), buf, nbytes, offset);
  if (bytesRead < 0) {
    throw SysError(
        errno,
        "error reading ",
        nbytes,
        " bytes from warm boot file for unit ",
        getSwitchId());
  }
  // preadFull() stops short (without setting errno) when it hits EOF, so a
  // non-negative return can still be a partial read. The SDK cannot handle
  // a partially filled buffer, so treat a short read as an error instead of
  // silently returning stale/uninitialized trailing bytes.
  if (bytesRead != nbytes) {
    throw FbossError(
        "short warm boot read for unit ",
        getSwitchId(),
        ": requested ",
        nbytes,
        " bytes at offset ",
        offset,
        " but only read ",
        bytesRead);
  }
}
// Writes `nbytes` from `buf` to the warm boot file at `offset`. Throws on
// failure. folly::pwriteFull() retries partial writes internally, so a
// non-negative return means the full buffer was written.
void BcmWarmBootHelper::warmBootWrite(
    const uint8_t* buf,
    int offset,
    int nbytes) {
  if (warmBootFd() < 0) {
    // This shouldn't ever happen. We only register the warm boot
    // callbacks after opening the fd.
    throw FbossError(
        "attempted warm boot write on unit ",
        getSwitchId(),
        " but warm boot not configured");
  }
  auto bytesWritten = folly::pwriteFull(warmBootFd(), buf, nbytes, offset);
  if (bytesWritten < 0) {
    throw SysError(
        errno,
        "error writing ",
        nbytes,
        " bytes to warm boot file for unit ",
        getSwitchId());
  }
}
} // namespace facebook::fboss
| 1,812 |
1,939 | package com.tencent.bk.codecc.apiquery.defect.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.tencent.bk.codecc.apiquery.utils.EntityIdDeserializer;
import lombok.Data;
/**
 * Model holding per-checker statistics: identifier, name, number of
 * reported defects, and severity classification.
 */
@Data
public class CheckerStatisticModel {
    // Checker identifier.
    private String id;
    // Checker name.
    private String name;
    // Number of defects reported by this checker (serialized as "defect_count").
    @JsonProperty("defect_count")
    private int defectCount;
    // Severity level: 1 => serious, 2 => normal, 3 => hint
    private int severity;
}
| 190 |
2,881 | package com.example.zhpan.banner.net.interceptor;
import com.blankj.utilcode.util.LogUtils;
import com.blankj.utilcode.util.NetworkUtils;
import java.io.IOException;
import okhttp3.CacheControl;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
/**
* Created by zhpan on 2018/3/21.
*/
public class HttpCacheInterceptor implements Interceptor {
  // Interceptor that configures caching of HTTP responses.
  @Override
  public Response intercept(Chain chain) throws IOException {
    Request request = chain.request();
    if (!NetworkUtils.isConnected()) { // No network: force the response to come from the cache.
      request = request.newBuilder()
          .cacheControl(CacheControl.FORCE_CACHE)
          .build();
      LogUtils.d("Okhttp", "no network");
    }
    Response originalResponse = chain.proceed(request);
    if (NetworkUtils.isConnected()) {
      // Network available: reuse the Cache-Control configured via @Headers on
      // the API interface; unified cache settings can be applied here.
      String cacheControl = request.cacheControl().toString();
      return originalResponse.newBuilder()
          .header("Cache-Control", cacheControl)
          .removeHeader("Pragma")
          .build();
    } else {
      // Offline: allow cached responses up to 4 weeks (2419200 seconds) stale.
      return originalResponse.newBuilder()
          .header("Cache-Control", "public, only-if-cached, max-stale=2419200")
          .removeHeader("Pragma")
          .build();
    }
  }
}
| 569 |
8,969 | <reponame>omerlevran46/sdk<filename>runtime/vm/json_stream.h<gh_stars>1000+
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_JSON_STREAM_H_
#define RUNTIME_VM_JSON_STREAM_H_
#include "include/dart_api.h" // for Dart_Port
#include "platform/allocation.h"
#include "platform/text_buffer.h"
#include "vm/json_writer.h"
#include "vm/service.h"
#include "vm/token_position.h"
namespace dart {
class Array;
class Breakpoint;
class BreakpointLocation;
class Field;
class GrowableObjectArray;
class Instance;
class JSONArray;
class JSONObject;
class MessageQueue;
class Metric;
class Object;
class Script;
class ServiceEvent;
class String;
class TimelineEvent;
class TimelineEventBlock;
class Thread;
class ThreadRegistry;
class Zone;
// Keep this enum in sync with:
//
// - runtime/vm/service/vmservice.dart
// - runtime/observatory/lib/src/service/object.dart
// - pkg/dds/lib/src/rpc_error_codes.dart
//
enum JSONRpcErrorCode {
  // Standard JSON-RPC 2.0 protocol error codes.
  kParseError = -32700,
  kInvalidRequest = -32600,
  kMethodNotFound = -32601,
  kInvalidParams = -32602,
  kInternalError = -32603,
  // -32000..-32099 is the JSON-RPC reserved range for server-defined errors.
  kExtensionError = -32000,
  // VM-service-specific error codes.
  kFeatureDisabled = 100,
  kCannotAddBreakpoint = 102,
  kStreamAlreadySubscribed = 103,
  kStreamNotSubscribed = 104,
  kIsolateMustBeRunnable = 105,
  kIsolateMustBePaused = 106,
  kCannotResume = 107,
  kIsolateIsReloading = 108,
  kIsolateReloadBarred = 109,
  kIsolateMustHaveReloaded = 110,
  kServiceAlreadyRegistered = 111,
  kServiceDisappeared = 112,
  kExpressionCompilationError = 113,
  kInvalidTimelineRequest = 114,
  // Experimental (used in private rpcs).
  kFileSystemAlreadyExists = 1001,
  kFileSystemDoesNotExist = 1002,
  kFileDoesNotExist = 1003,
};
// Builds on JSONWriter to provide support for serializing various objects
// used in the VM service protocol.
class JSONStream : ValueObject {
public:
explicit JSONStream(intptr_t buf_size = 256);
void Setup(Zone* zone,
Dart_Port reply_port,
const Instance& seq,
const String& method,
const Array& param_keys,
const Array& param_values,
bool parameters_are_dart_objects = false);
void SetupError();
void PrintError(intptr_t code, const char* details_format, ...)
PRINTF_ATTRIBUTE(3, 4);
void PostReply();
void set_id_zone(ServiceIdZone* id_zone) { id_zone_ = id_zone; }
ServiceIdZone* id_zone() { return id_zone_; }
TextBuffer* buffer() { return writer_.buffer(); }
const char* ToCString() { return writer_.ToCString(); }
void Steal(char** buffer, intptr_t* buffer_length) {
writer_.Steal(buffer, buffer_length);
}
void set_reply_port(Dart_Port port);
void SetParams(const char** param_keys,
const char** param_values,
intptr_t num_params);
Dart_Port reply_port() const { return reply_port_; }
intptr_t NumObjectParameters() const;
ObjectPtr GetObjectParameterKey(intptr_t i) const;
ObjectPtr GetObjectParameterValue(intptr_t i) const;
ObjectPtr LookupObjectParam(const char* key) const;
intptr_t num_params() const { return num_params_; }
const char* GetParamKey(intptr_t i) const { return param_keys_[i]; }
const char* GetParamValue(intptr_t i) const { return param_values_[i]; }
const char* LookupParam(const char* key) const;
bool HasParam(const char* key) const;
// Returns true if there is an param with key and value, false
// otherwise.
bool ParamIs(const char* key, const char* value) const;
const char* method() const { return method_; }
const char** param_keys() const { return param_keys_; }
const char** param_values() const { return param_values_; }
void set_offset(intptr_t value) {
ASSERT(value > 0);
offset_ = value;
}
void set_count(intptr_t value) {
ASSERT(value > 0);
count_ = value;
}
void ComputeOffsetAndCount(intptr_t length,
intptr_t* offset,
intptr_t* count);
// Append |serialized_object| to the stream.
void AppendSerializedObject(const char* serialized_object) {
writer_.AppendSerializedObject(serialized_object);
}
// Append |buffer| to the stream.
void AppendSerializedObject(const uint8_t* buffer, intptr_t buffer_length) {
writer_.AppendSerializedObject(buffer, buffer_length);
}
// Append |serialized_object| to the stream with |property_name|.
void AppendSerializedObject(const char* property_name,
const char* serialized_object) {
writer_.AppendSerializedObject(property_name, serialized_object);
}
void PrintCommaIfNeeded() { writer_.PrintCommaIfNeeded(); }
private:
void Clear() { writer_.Clear(); }
void PostNullReply(Dart_Port port);
void OpenObject(const char* property_name = NULL) {
writer_.OpenObject(property_name);
}
void CloseObject() { writer_.CloseObject(); }
void UncloseObject() { writer_.UncloseObject(); }
void OpenArray(const char* property_name = NULL) {
writer_.OpenArray(property_name);
}
void CloseArray() { writer_.CloseArray(); }
void PrintValueNull() { writer_.PrintValueNull(); }
void PrintValueBool(bool b) { writer_.PrintValueBool(b); }
void PrintValue(intptr_t i) { writer_.PrintValue(i); }
void PrintValue64(int64_t i) { writer_.PrintValue64(i); }
void PrintValueTimeMillis(int64_t millis) { writer_.PrintValue64(millis); }
void PrintValueTimeMicros(int64_t micros) { writer_.PrintValue64(micros); }
void PrintValue(double d) { writer_.PrintValue(d); }
void PrintValueBase64(const uint8_t* bytes, intptr_t length) {
writer_.PrintValueBase64(bytes, length);
}
void PrintValue(const char* s) { writer_.PrintValue(s); }
void PrintValueNoEscape(const char* s) { writer_.PrintValueNoEscape(s); }
bool PrintValueStr(const String& s, intptr_t offset, intptr_t count) {
return writer_.PrintValueStr(s, offset, count);
}
void PrintfValue(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
void VPrintfValue(const char* format, va_list args) {
writer_.VPrintfValue(format, args);
}
void PrintValue(const Object& o, bool ref = true);
void PrintValue(Breakpoint* bpt);
void PrintValue(TokenPosition tp);
void PrintValue(const ServiceEvent* event);
void PrintValue(Metric* metric);
void PrintValue(MessageQueue* queue);
void PrintValue(Isolate* isolate, bool ref = true);
void PrintValue(IsolateGroup* isolate, bool ref = true);
void PrintValue(const TimelineEvent* timeline_event);
void PrintValue(const TimelineEventBlock* timeline_event_block);
void PrintValueVM(bool ref = true);
void PrintServiceId(const Object& o);
void PrintPropertyBool(const char* name, bool b) {
writer_.PrintPropertyBool(name, b);
}
void PrintProperty(const char* name, intptr_t i) {
writer_.PrintProperty(name, i);
}
void PrintProperty64(const char* name, int64_t i) {
writer_.PrintProperty64(name, i);
}
void PrintPropertyTimeMillis(const char* name, int64_t millis) {
writer_.PrintProperty64(name, millis);
}
void PrintPropertyTimeMicros(const char* name, int64_t micros) {
writer_.PrintProperty64(name, micros);
}
void PrintProperty(const char* name, double d) {
writer_.PrintProperty(name, d);
}
void PrintPropertyBase64(const char* name,
const uint8_t* bytes,
intptr_t length) {
writer_.PrintPropertyBase64(name, bytes, length);
}
void PrintProperty(const char* name, const char* s) {
writer_.PrintProperty(name, s);
}
bool PrintPropertyStr(const char* name,
const String& s,
intptr_t offset,
intptr_t count) {
return writer_.PrintPropertyStr(name, s, offset, count);
}
void PrintPropertyNoEscape(const char* name, const char* s) {
writer_.PrintPropertyNoEscape(name, s);
}
void PrintfProperty(const char* name, const char* format, ...)
PRINTF_ATTRIBUTE(3, 4);
void VPrintfProperty(const char* name, const char* format, va_list args) {
writer_.VPrintfProperty(name, format, args);
}
void PrintProperty(const char* name, const Object& o, bool ref = true);
void PrintProperty(const char* name, const ServiceEvent* event);
void PrintProperty(const char* name, Breakpoint* bpt);
void PrintProperty(const char* name, TokenPosition tp);
void PrintProperty(const char* name, Metric* metric);
void PrintProperty(const char* name, MessageQueue* queue);
void PrintProperty(const char* name, Isolate* isolate);
void PrintProperty(const char* name, Zone* zone);
void PrintProperty(const char* name, const TimelineEvent* timeline_event);
void PrintProperty(const char* name,
const TimelineEventBlock* timeline_event_block);
void PrintPropertyVM(const char* name, bool ref = true);
void PrintPropertyName(const char* name) { writer_.PrintPropertyName(name); }
void AddEscapedUTF8String(const char* s, intptr_t len) {
writer_.AddEscapedUTF8String(s, len);
}
JSONWriter writer_;
// Default service id zone.
RingServiceIdZone default_id_zone_;
ServiceIdZone* id_zone_;
Dart_Port reply_port_;
Instance* seq_;
Array* parameter_keys_;
Array* parameter_values_;
const char* method_;
const char** param_keys_;
const char** param_values_;
intptr_t num_params_;
intptr_t offset_;
intptr_t count_;
int64_t setup_time_micros_;
friend class JSONObject;
friend class JSONArray;
friend class TimelineEvent;
};
class JSONObject : public ValueObject {
public:
explicit JSONObject(JSONStream* stream) : stream_(stream) {
stream_->OpenObject();
}
JSONObject(const JSONObject* obj, const char* name) : stream_(obj->stream_) {
stream_->OpenObject(name);
}
explicit JSONObject(const JSONArray* arr);
~JSONObject() { stream_->CloseObject(); }
void AddServiceId(const Object& o) const { stream_->PrintServiceId(o); }
void AddFixedServiceId(const char* format, ...) const PRINTF_ATTRIBUTE(2, 3);
void AddServiceId(const char* format, ...) const PRINTF_ATTRIBUTE(2, 3);
void AddLocation(
const Script& script,
TokenPosition token_pos,
TokenPosition end_token_pos = TokenPosition::kNoSource) const;
void AddLocation(const BreakpointLocation* bpt_loc) const;
void AddUnresolvedLocation(const BreakpointLocation* bpt_loc) const;
void AddProperty(const char* name, bool b) const {
stream_->PrintPropertyBool(name, b);
}
void AddProperty(const char* name, intptr_t i) const {
stream_->PrintProperty(name, i);
}
void AddProperty64(const char* name, int64_t i) const {
stream_->PrintProperty64(name, i);
}
void AddPropertyTimeMillis(const char* name, int64_t millis) const {
stream_->PrintPropertyTimeMillis(name, millis);
}
void AddPropertyTimeMicros(const char* name, int64_t micros) const {
stream_->PrintPropertyTimeMicros(name, micros);
}
void AddProperty(const char* name, double d) const {
stream_->PrintProperty(name, d);
}
void AddPropertyBase64(const char* name,
const uint8_t* bytes,
intptr_t length) const {
stream_->PrintPropertyBase64(name, bytes, length);
}
void AddProperty(const char* name, const char* s) const {
stream_->PrintProperty(name, s);
}
bool AddPropertyStr(const char* name,
const String& s,
intptr_t offset = 0,
intptr_t count = -1) const {
return stream_->PrintPropertyStr(name, s, offset, count);
}
void AddPropertyNoEscape(const char* name, const char* s) const {
stream_->PrintPropertyNoEscape(name, s);
}
void AddProperty(const char* name, const Object& obj, bool ref = true) const {
stream_->PrintProperty(name, obj, ref);
}
void AddProperty(const char* name, const ServiceEvent* event) const {
stream_->PrintProperty(name, event);
}
void AddProperty(const char* name, Breakpoint* bpt) const {
stream_->PrintProperty(name, bpt);
}
void AddProperty(const char* name, TokenPosition tp) const {
stream_->PrintProperty(name, tp);
}
void AddProperty(const char* name, Metric* metric) const {
stream_->PrintProperty(name, metric);
}
void AddProperty(const char* name, MessageQueue* queue) const {
stream_->PrintProperty(name, queue);
}
void AddProperty(const char* name, Isolate* isolate) const {
stream_->PrintProperty(name, isolate);
}
void AddProperty(const char* name, Zone* zone) const {
stream_->PrintProperty(name, zone);
}
void AddProperty(const char* name,
const TimelineEvent* timeline_event) const {
stream_->PrintProperty(name, timeline_event);
}
void AddProperty(const char* name,
const TimelineEventBlock* timeline_event_block) const {
stream_->PrintProperty(name, timeline_event_block);
}
void AddPropertyVM(const char* name, bool ref = true) const {
stream_->PrintPropertyVM(name, ref);
}
void AddPropertyF(const char* name, const char* format, ...) const
PRINTF_ATTRIBUTE(3, 4);
private:
JSONStream* stream_;
friend class JSONArray;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(JSONObject);
};
class JSONArray : public ValueObject {
public:
explicit JSONArray(JSONStream* stream) : stream_(stream) {
stream_->OpenArray();
}
JSONArray(const JSONObject* obj, const char* name) : stream_(obj->stream_) {
stream_->OpenArray(name);
}
explicit JSONArray(const JSONArray* arr) : stream_(arr->stream_) {
stream_->OpenArray();
}
~JSONArray() { stream_->CloseArray(); }
void AddValueNull() const { stream_->PrintValueNull(); }
void AddValue(bool b) const { stream_->PrintValueBool(b); }
void AddValue(intptr_t i) const { stream_->PrintValue(i); }
void AddValue64(int64_t i) const { stream_->PrintValue64(i); }
void AddValueTimeMillis(int64_t millis) const {
stream_->PrintValueTimeMillis(millis);
}
void AddValueTimeMicros(int64_t micros) const {
stream_->PrintValueTimeMicros(micros);
}
void AddValue(double d) const { stream_->PrintValue(d); }
void AddValue(const char* s) const { stream_->PrintValue(s); }
void AddValue(const Object& obj, bool ref = true) const {
stream_->PrintValue(obj, ref);
}
void AddValue(Isolate* isolate, bool ref = true) const {
stream_->PrintValue(isolate, ref);
}
void AddValue(IsolateGroup* isolate_group, bool ref = true) const {
stream_->PrintValue(isolate_group, ref);
}
void AddValue(Breakpoint* bpt) const { stream_->PrintValue(bpt); }
void AddValue(TokenPosition tp) const { stream_->PrintValue(tp); }
void AddValue(const ServiceEvent* event) const { stream_->PrintValue(event); }
void AddValue(Metric* metric) const { stream_->PrintValue(metric); }
void AddValue(MessageQueue* queue) const { stream_->PrintValue(queue); }
void AddValue(const TimelineEvent* timeline_event) const {
stream_->PrintValue(timeline_event);
}
void AddValue(const TimelineEventBlock* timeline_event_block) const {
stream_->PrintValue(timeline_event_block);
}
void AddValueVM(bool ref = true) const { stream_->PrintValueVM(ref); }
void AddValueF(const char* format, ...) const PRINTF_ATTRIBUTE(2, 3);
private:
JSONStream* stream_;
friend class JSONObject;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(JSONArray);
};
} // namespace dart
#endif // RUNTIME_VM_JSON_STREAM_H_
| 5,525 |
1,225 | //
// YUCIReflectTile.h
// Pods
//
// Created by YuAo on 2/16/16.
//
//
#import <CoreImage/CoreImage.h>
// Tiling modes for the YUCIReflectedTile filter: reflect the input with or
// without a border between tiles.
typedef NS_ENUM(NSInteger, YUCIReflectedTileMode) {
    YUCIReflectedTileModeReflectWithoutBorder = 0,
    YUCIReflectedTileModeReflectWithBorder = 1,
};
// Core Image filter producing a reflected tiling of the input image
// (see YUCIReflectedTileMode for the available modes).
@interface YUCIReflectedTile : CIFilter
// The image to process.
@property (nonatomic, strong, nullable) CIImage *inputImage;
// Reflection mode; wraps a YUCIReflectedTileMode value.
@property (nonatomic, copy, null_resettable) NSNumber *inputMode; //default: YUCIReflectedTileModeReflectWithoutBorder
@end
| 186 |
695 | <gh_stars>100-1000
// Time: O(n)
// Space: O(n)
class Solution {
public:
    /**
     * @param expression: A string array holding an infix expression.
     * @return: The Polish (prefix) notation of this expression.
     *
     * Note: the input vector is reversed in place as part of the conversion.
     */
    vector<string> convertToPN(vector<string> &expression) {
        vector<string> output;
        infixToPrefix(expression, output);
        return output;
    }
    // Convert Infix to Prefix Expression: scan the reversed token stream with
    // an operator stack (")" acts as the open-group marker after reversal),
    // then reverse the collected output.
    void infixToPrefix(vector<string>& infix, vector<string>& prefix) {
        reverse(infix.begin(), infix.end());
        stack<string> s;
        for (auto& tok : infix) {
            if (isOperand(tok)) {
                prefix.emplace_back(tok);
            } else if (tok == ")") {
                s.emplace(tok);
            } else if (tok == "(") {
                // Pop operators back to the matching ")" marker.
                while (!s.empty()) {
                    tok = s.top();
                    s.pop();
                    if (tok == ")") {
                        break;
                    }
                    prefix.emplace_back(tok);
                }
            } else {
                // Operator: pop strictly higher-precedence operators first.
                while (!s.empty() && precedence(tok) < precedence(s.top())) {
                    prefix.emplace_back(s.top());
                    s.pop();
                }
                s.emplace(tok);
            }
        }
        while (!s.empty()) {
            prefix.emplace_back(s.top());
            s.pop();
        }
        reverse(prefix.begin(), prefix.end());
    }
    // A token is an operand if it is a (possibly signed) integer literal.
    // The previous check, `atoi(tok.c_str())`, misclassified "0" as an
    // operator and only produced correct output by coincidental pop ordering;
    // classify tokens explicitly instead.
    bool isOperand(const string& tok) {
        size_t start = (tok.size() > 1 && (tok[0] == '+' || tok[0] == '-')) ? 1 : 0;
        if (start == tok.size()) {
            return false;
        }
        for (size_t i = start; i < tok.size(); ++i) {
            if (!isdigit(static_cast<unsigned char>(tok[i]))) {
                return false;
            }
        }
        return true;
    }
    // Precedence used during the reversed scan: ")" is the group marker (0),
    // additive operators are 1, multiplicative are 2, anything else 3.
    int precedence(string x) {
        if (x == ")") {
            return 0;
        } else if (x == "+" || x == "-") {
            return 1;
        } else if (x == "*" || x == "/") {
            return 2;
        }
        return 3;
    }
};
| 952 |
669 | /*
* MinIO Java SDK for Amazon S3 Compatible Cloud Storage,
* (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.minio.CopyObjectArgs;
import io.minio.CopySource;
import io.minio.MinioClient;
import io.minio.ServerSideEncryption;
import io.minio.ServerSideEncryptionCustomerKey;
import io.minio.ServerSideEncryptionKms;
import io.minio.ServerSideEncryptionS3;
import io.minio.errors.MinioException;
import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.KeyGenerator;
public class CopyObject {
/** MinioClient.copyObject() example. */
public static void main(String[] args)
throws IOException, NoSuchAlgorithmException, InvalidKeyException {
try {
/* play.min.io for test and development. */
MinioClient minioClient =
MinioClient.builder()
.endpoint("https://play.min.io")
.credentials("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
.build();
/* Amazon S3: */
// MinioClient minioClient =
// MinioClient.builder()
// .endpoint("https://s3.amazonaws.com")
// .credentials("YOUR-ACCESSKEY", "YOUR-SECRETACCESSKEY")
// .build();
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryptionCustomerKey ssec =
new ServerSideEncryptionCustomerKey(keyGen.generateKey());
Map<String, String> myContext = new HashMap<>();
myContext.put("key1", "value1");
ServerSideEncryption sseKms = new ServerSideEncryptionKms("Key-Id", myContext);
ServerSideEncryption sseS3 = new ServerSideEncryptionS3();
String versionId = "ac38316c-fe14-4f96-9f76-8f675ae5a79e";
Map<String, String> headers = new HashMap<>();
headers.put("Content-Type", "application/json");
headers.put("x-amz-meta-my-project", "Project One");
String etag = "9855d05ab7a1cfd5ea304f0547c24496";
{
// Create object "my-objectname" in bucket "my-bucketname" by copying from object
// "my-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-objectname")
.build())
.build());
System.out.println(
"my-source-bucketname/my-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" by copying from object
// "my-source-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-source-objectname")
.build())
.build());
System.out.println(
"my-source-bucketname/my-source-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" with SSE-KMS server-side
// encryption by copying from object "my-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-objectname")
.build())
.sse(sseKms) // Replace with actual key.
.build());
System.out.println(
"my-source-bucketname/my-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" with SSE-S3 server-side
// encryption by copying from object "my-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-objectname")
.build())
.sse(sseS3) // Replace with actual key.
.build());
System.out.println(
"my-source-bucketname/my-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" with SSE-C server-side encryption
// by copying from object "my-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-objectname")
.build())
.sse(ssec) // Replace with actual key.
.build());
System.out.println(
"my-source-bucketname/my-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" by copying from SSE-C encrypted
// object "my-source-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-source-objectname")
.ssec(ssec) // Replace with actual key.
.build())
.build());
System.out.println(
"my-source-bucketname/my-source-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
{
// Create object "my-objectname" in bucket "my-bucketname" with custom headers conditionally
// by copying from object "my-objectname" in bucket "my-source-bucketname".
minioClient.copyObject(
CopyObjectArgs.builder()
.bucket("my-bucketname")
.object("my-objectname")
.source(
CopySource.builder()
.bucket("my-source-bucketname")
.object("my-objectname")
.matchETag(etag) // Replace with actual etag.
.build())
.headers(headers) // Replace with actual headers.
.build());
System.out.println(
"my-source-bucketname/my-objectname copied "
+ "to my-bucketname/my-objectname successfully");
}
} catch (MinioException e) {
System.out.println("Error occurred: " + e);
}
}
}
| 3,936 |
5,938 | <reponame>glasnt/python-docs-samples<filename>servicedirectory/snippets_test.py
#!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ
import uuid
from google.api_core import exceptions
from google.cloud import servicedirectory_v1
import snippets
# GCP project that hosts every Service Directory resource created by these tests.
PROJECT_ID = environ['GOOGLE_CLOUD_PROJECT']
# Region in which the test namespace is created.
LOCATION_ID = 'us-east1'
# Randomized resource IDs so repeated or concurrent test runs cannot collide.
NAMESPACE_ID = f'test-namespace-{uuid.uuid4().hex}'
SERVICE_ID = f'test-service-{uuid.uuid4().hex}'
ENDPOINT_ID = f'test-endpoint-{uuid.uuid4().hex}'
# Arbitrary address/port registered on the test endpoint.
ADDRESS = '1.2.3.4'
PORT = 443
def teardown_module():
    """Best-effort cleanup: delete the test namespace (and everything in it).

    A NotFound is expected when test_delete_namespace already removed it.
    """
    registration = servicedirectory_v1.RegistrationServiceClient()
    namespace_name = registration.namespace_path(
        PROJECT_ID, LOCATION_ID, NAMESPACE_ID)
    try:
        existing = registration.get_namespace(name=namespace_name)
        registration.delete_namespace(name=existing.name)
    except exceptions.NotFound:
        # Namespace is already gone -- nothing left to clean up.
        pass
def test_create_namespace():
    """The created namespace's resource name must embed NAMESPACE_ID."""
    created = snippets.create_namespace(PROJECT_ID, LOCATION_ID, NAMESPACE_ID)
    assert NAMESPACE_ID in created.name
def test_create_service():
    """The created service's resource name must embed SERVICE_ID."""
    created = snippets.create_service(
        PROJECT_ID, LOCATION_ID, NAMESPACE_ID, SERVICE_ID)
    assert SERVICE_ID in created.name
def test_create_endpoint():
    """The created endpoint's resource name must embed ENDPOINT_ID."""
    created = snippets.create_endpoint(
        PROJECT_ID, LOCATION_ID, NAMESPACE_ID, SERVICE_ID, ENDPOINT_ID,
        ADDRESS, PORT)
    assert ENDPOINT_ID in created.name
def test_resolve_service():
    """Resolving the service must yield exactly the one endpoint created above."""
    resolved = snippets.resolve_service(
        PROJECT_ID, LOCATION_ID, NAMESPACE_ID, SERVICE_ID)
    endpoints = resolved.service.endpoints
    assert len(endpoints) == 1
    assert ENDPOINT_ID in endpoints[0].name
def test_delete_endpoint(capsys):
    """Deleting the endpoint must print its ID to stdout."""
    snippets.delete_endpoint(
        PROJECT_ID, LOCATION_ID, NAMESPACE_ID, SERVICE_ID, ENDPOINT_ID)
    captured, _ = capsys.readouterr()
    assert ENDPOINT_ID in captured
def test_delete_service(capsys):
    """Deleting the service must print its ID to stdout."""
    snippets.delete_service(PROJECT_ID, LOCATION_ID, NAMESPACE_ID, SERVICE_ID)
    captured, _ = capsys.readouterr()
    assert SERVICE_ID in captured
def test_delete_namespace(capsys):
    """Deleting the namespace must print its ID to stdout."""
    snippets.delete_namespace(PROJECT_ID, LOCATION_ID, NAMESPACE_ID)
    captured, _ = capsys.readouterr()
    assert NAMESPACE_ID in captured
| 1,117 |
1,429 | /******************************************************************************
* Copyright (C) 2014-2020 <NAME> <<EMAIL>>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
******************************************************************************/
#include "libgevent.h"
#include <libdarray.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/select.h>
#define SELECT_MAX_FD 1024
/* Private per-loop state for the select(2) event backend. */
struct select_ctx {
    int nfds; /* Highest fd in fd set */
    fd_set rfds; /* master interest set: readability */
    fd_set wfds; /* master interest set: writability */
    fd_set efds; /* master interest set: exceptional conditions */
    DARRAY(struct gevent) ev_list; /* events registered via select_add(), scanned on dispatch */
};
/*
 * Allocate and initialize the select backend context.
 * Returns the opaque context pointer, or NULL on allocation failure.
 */
static void *select_init(void)
{
    struct select_ctx *ctx;

    ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        printf("malloc select_ctx failed!\n");
        return NULL;
    }
    FD_ZERO(&ctx->rfds);
    FD_ZERO(&ctx->wfds);
    FD_ZERO(&ctx->efds);
    da_init(ctx->ev_list);
    return ctx;
}
/* Release all resources owned by the select backend context (NULL is a no-op). */
static void select_deinit(void *ctx)
{
    struct select_ctx *sc = (struct select_ctx *)ctx;

    if (sc == NULL)
        return;
    da_free(sc->ev_list);
    free(sc);
}
static int select_add(struct gevent_base *eb, struct gevent *e)
{
struct select_ctx *c = (struct select_ctx *)eb->ctx;
if (c->nfds < e->evfd)
c->nfds = e->evfd;
if (e->flags & EVENT_READ)
FD_SET(e->evfd, &c->rfds);
if (e->flags & EVENT_WRITE)
FD_SET(e->evfd, &c->wfds);
if (e->flags & EVENT_EXCEPT)
FD_SET(e->evfd, &c->efds);
da_push_back(c->ev_list, e);
return 0;
}
/*
 * Unregister event e: clear its fd from every interest set and remove it
 * from the event list. Always returns 0.
 */
static int select_del(struct gevent_base *eb, struct gevent *e)
{
    struct select_ctx *sc = (struct select_ctx *)eb->ctx;
    const int fd = e->evfd;

    FD_CLR(fd, &sc->rfds);
    FD_CLR(fd, &sc->wfds);
    FD_CLR(fd, &sc->efds);
    da_erase_item(sc->ev_list, e);
    return 0;
}
/*
 * Wait (up to *tv) for registered events and invoke the matching callbacks.
 * Returns 0 on success or timeout, -1 on select(2) failure.
 *
 * Bug fix: select(2) overwrites the fd_sets it is given with the ready
 * subset.  The previous code passed the master interest sets directly, so
 * after the first dispatch every fd that had not been ready was silently
 * dropped from all later dispatches.  We now select() on per-call copies,
 * leaving the master sets in select_ctx intact.
 */
static int select_dispatch(struct gevent_base *eb, struct timeval *tv)
{
    int i, n;
    struct select_ctx *c = (struct select_ctx *)eb->ctx;
    int nfds = c->nfds + 1;
    fd_set rfds = c->rfds; /* working copies: select() mutates these, */
    fd_set wfds = c->wfds; /* not the master interest sets            */
    fd_set efds = c->efds;

    n = select(nfds, &rfds, &wfds, &efds, tv);
    if (-1 == n) {
        printf("errno=%d %s\n", errno, strerror(errno));
        return -1;
    }
    if (0 == n) {
        printf("select timeout\n");
        return 0;
    }
    for (i = 0; i < c->ev_list.num; i++) {
        struct gevent *e = &c->ev_list.array[i];
        if (FD_ISSET(e->evfd, &rfds) && e->evcb.ev_in) {
            e->evcb.ev_in(e->evfd, e->evcb.args);
        }
        if (FD_ISSET(e->evfd, &wfds) && e->evcb.ev_out) {
            e->evcb.ev_out(e->evfd, e->evcb.args);
        }
        if (FD_ISSET(e->evfd, &efds) && e->evcb.ev_err) {
            e->evcb.ev_err(e->evfd, e->evcb.args);
        }
    }
    return 0;
}
/* select(2) backend operations table consumed by the generic gevent core. */
struct gevent_ops selectops = {
    .init = select_init,
    .deinit = select_deinit,
    .add = select_add,
    .del = select_del,
    .dispatch = select_dispatch,
};
| 1,642 |
1,099 | package com.example.video.dagger.news.othernews;
import com.example.commonlibrary.mvp.model.DefaultModel;
import com.example.video.mvp.news.othernew.OtherNewsListAdapter;
import com.example.video.mvp.news.othernew.OtherNewsListFragment;
import com.example.video.mvp.news.othernew.OtherNewsListPresenter;
import dagger.Module;
import dagger.Provides;
/**
* 项目名称: NewFastFrame
* 创建人: 陈锦军
* 创建时间: 2017/9/24 18:33
* QQ: 1981367757
*/
@Module
public class OtherNewsModule {

    /**
     * Fragment acting as the MVP view; handed to the presenter on construction.
     * Made {@code final}: it is assigned exactly once in the constructor.
     */
    private final OtherNewsListFragment otherNewsListFragment;

    public OtherNewsModule(OtherNewsListFragment otherNewsListFragment) {
        this.otherNewsListFragment = otherNewsListFragment;
    }

    /** Supplies the adapter backing the other-news list. */
    @Provides
    public OtherNewsListAdapter provideOtherNewsListAdapter() {
        return new OtherNewsListAdapter();
    }

    /** Builds the presenter, wiring the fragment (view) to the given model. */
    @Provides
    public OtherNewsListPresenter provideOtherNewsListPresenter(DefaultModel otherNewsListModel) {
        return new OtherNewsListPresenter(otherNewsListFragment, otherNewsListModel);
    }
}
| 399 |
337 | /*
* Copyright 2010-2019 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.idea.caches.resolve;
/**
 * An {@link AssertionError} that carries an underlying cause in addition to
 * the detail message.  The cause is attached after construction via
 * {@link #initCause(Throwable)} rather than a super-constructor call.
 */
public class AssertionErrorWithCause extends AssertionError {
    public AssertionErrorWithCause(String detailMessage, Throwable cause) {
        super(detailMessage);
        // Attach the cause separately; the message-only super constructor is used above.
        initCause(cause);
    }
}
| 149 |
739 | <reponame>tchudyk/RichTextFX
package org.fxmisc.richtext;
import javafx.beans.property.BooleanProperty;
import javafx.scene.control.IndexRange;
import org.fxmisc.richtext.model.StyleSpans;
/**
* Specifies actions related to getting and setting styles throughout a {@link TextEditingArea}.
*
* @param <PS> the paragraph style
* @param <S> the segment's style
*/
public interface StyleActions<PS, S> {

    /**
     * Indicates whether the initial style should also be used for plain text
     * inserted into this text area. When {@code false}, the style immediately
     * preceding the insertion position is used. Default value is {@code false}.
     */
    default boolean getUseInitialStyleForInsertion() { return useInitialStyleForInsertionProperty().get(); }
    default void setUseInitialStyleForInsertion(boolean value) { useInitialStyleForInsertionProperty().set(value); }
    BooleanProperty useInitialStyleForInsertionProperty();

    /**
     * Style used by default when no other style is provided.
     */
    S getInitialTextStyle();

    /**
     * Style used by default when no other style is provided.
     */
    PS getInitialParagraphStyle();

    /**
     * Returns {@link #getInitialTextStyle()} if {@link #getUseInitialStyleForInsertion()} is true;
     * otherwise, returns the style at the given position.
     */
    S getTextStyleForInsertionAt(int pos);

    /**
     * Returns {@link #getInitialParagraphStyle()} if {@link #getUseInitialStyleForInsertion()} is true;
     * otherwise, returns the paragraph style at the given position.
     */
    PS getParagraphStyleForInsertionAt(int pos);

    /**
     * Indicates whether style should be preserved on undo/redo (and in the future copy/paste and text move).
     */
    boolean isPreserveStyle();

    /**
     * Returns the style of the character with the given index.
     * If {@code index} points to a line terminator character,
     * the last style used in the paragraph terminated by that
     * line terminator is returned.
     */
    S getStyleOfChar(int index);

    /**
     * Returns the style at the given position. That is the style of the
     * character immediately preceding {@code position}, except when
     * {@code position} points to a paragraph boundary, in which case it
     * is the style at the beginning of the latter paragraph.
     *
     * <p>In other words, most of the time {@code getStyleAtPosition(p)}
     * is equivalent to {@code getStyleOfChar(p-1)}, except when {@code p}
     * points to a paragraph boundary, in which case it is equivalent to
     * {@code getStyleOfChar(p)}.
     */
    S getStyleAtPosition(int position);

    /**
     * Returns the range of homogeneous style that includes the given position.
     * If {@code position} points to a boundary between two styled ranges, then
     * the range preceding {@code position} is returned. If {@code position}
     * points to a boundary between two paragraphs, then the first styled range
     * of the latter paragraph is returned.
     */
    IndexRange getStyleRangeAtPosition(int position);

    /**
     * Returns the styles in the given character range.
     */
    StyleSpans<S> getStyleSpans(int from, int to);

    /**
     * Returns the styles in the given character range.
     */
    default StyleSpans<S> getStyleSpans(IndexRange range) {
        return getStyleSpans(range.getStart(), range.getEnd());
    }

    /**
     * Returns the style of the character with the given index in the given
     * paragraph. If {@code index} is beyond the end of the paragraph, the
     * style at the end of line is returned. If {@code index} is negative, it
     * is the same as if it was 0.
     */
    S getStyleOfChar(int paragraph, int index);

    /**
     * Returns the style at the given position in the given paragraph.
     * This is equivalent to {@code getStyleOfChar(paragraph, position-1)}.
     */
    S getStyleAtPosition(int paragraph, int position);

    /**
     * Returns the range of homogeneous style that includes the given position
     * in the given paragraph. If {@code position} points to a boundary between
     * two styled ranges, then the range preceding {@code position} is returned.
     */
    IndexRange getStyleRangeAtPosition(int paragraph, int position);

    /**
     * Returns styles of the whole paragraph.
     */
    StyleSpans<S> getStyleSpans(int paragraph);

    /**
     * Returns the styles in the given character range of the given paragraph.
     */
    StyleSpans<S> getStyleSpans(int paragraph, int from, int to);

    /**
     * Returns the styles in the given character range of the given paragraph.
     */
    default StyleSpans<S> getStyleSpans(int paragraph, IndexRange range) {
        // Fixed: the upper bound was previously range.getStart(), which made
        // this overload always query an empty range instead of [start, end).
        return getStyleSpans(paragraph, range.getStart(), range.getEnd());
    }

    /**
     * Sets style for the given character range.
     */
    void setStyle(int from, int to, S style);

    /**
     * Sets style for the whole paragraph.
     */
    void setStyle(int paragraph, S style);

    /**
     * Sets style for the given range relative in the given paragraph.
     */
    void setStyle(int paragraph, int from, int to, S style);

    /**
     * Set multiple style ranges at once. This is equivalent to
     * <pre>
     * for(StyleSpan{@code <S>} span: styleSpans) {
     *     setStyle(from, from + span.getLength(), span.getStyle());
     *     from += span.getLength();
     * }
     * </pre>
     * but the actual implementation in {@link org.fxmisc.richtext.model.SimpleEditableStyledDocument} is
     * more efficient.
     */
    void setStyleSpans(int from, StyleSpans<? extends S> styleSpans);

    /**
     * Set multiple style ranges of a paragraph at once. This is equivalent to
     * <pre>
     * for(StyleSpan{@code <S>} span: styleSpans) {
     *     setStyle(paragraph, from, from + span.getLength(), span.getStyle());
     *     from += span.getLength();
     * }
     * </pre>
     * but the actual implementation in {@link org.fxmisc.richtext.model.SimpleEditableStyledDocument} is
     * more efficient.
     */
    void setStyleSpans(int paragraph, int from, StyleSpans<? extends S> styleSpans);

    /**
     * Sets style for the whole paragraph.
     */
    void setParagraphStyle(int paragraph, PS paragraphStyle);

    /**
     * Resets the style of the given range to the initial style.
     */
    default void clearStyle(int from, int to) {
        setStyle(from, to, getInitialTextStyle());
    }

    /**
     * Resets the style of the given range in the given paragraph
     * to the initial style.
     */
    default void clearStyle(int paragraph, int from, int to) {
        setStyle(paragraph, from, to, getInitialTextStyle());
    }

    /**
     * Resets the style of the given paragraph to the initial style.
     */
    default void clearStyle(int paragraph) {
        setStyle(paragraph, getInitialTextStyle());
    }

    /**
     * Resets the style of the given paragraph to the initial style.
     */
    default void clearParagraphStyle(int paragraph) {
        setParagraphStyle(paragraph, getInitialParagraphStyle());
    }
}
| 2,388 |
312 | /*******************************************************************************
Copyright (c) 2005-2009 <NAME>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*******************************************************************************/
#ifndef __PolyVox_LargeVolume_H__
#define __PolyVox_LargeVolume_H__
#include "PolyVoxCore/BaseVolume.h"
#include "Impl/Block.h"
#include "PolyVoxCore/Log.h"
#include "PolyVoxCore/Region.h"
#include "PolyVoxCore/Vector.h"
#include <limits>
#include <cassert>
#include <cstdlib> //For abort()
#include <cstring> //For memcpy
#include <list>
#include <map>
#include <memory>
#include <stdexcept> //For invalid_argument
#include <vector>
namespace PolyVox
{
template <typename VoxelType> class ConstVolumeProxy;
/// The LargeVolume class provides a memory efficient method of storing voxel data while also allowing fast access and modification.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// A LargeVolume is essentially a 3D array in which each element (or <i>voxel</i>) is identified by a three dimensional (x,y,z) coordinate.
/// We use the LargeVolume class to store our data in an efficient way, and it is the input to many of the algorithms (such as the surface
/// extractors) which form the heart of PolyVox. The LargeVolume class is templatised so that different types of data can be stored within each voxel.
///
/// Basic usage
/// -----------
///
/// The following code snippet shows how to construct a volume and demonstrates basic usage:
///
/// \code
/// LargeVolume<Material8> volume(Region(Vector3DInt32(0,0,0), Vector3DInt32(63,127,255)));
/// volume.setVoxelAt(15, 90, 42, Material8(5));
/// std::cout << "Voxel at (15, 90, 42) has value: " << volume.getVoxelAt(15, 90, 42).getMaterial() << std::endl;
/// std::cout << "Width = " << volume.getWidth() << ", Height = " << volume.getHeight() << ", Depth = " << volume.getDepth() << std::endl;
/// \endcode
///
/// In this particular example each voxel in the LargeVolume is of type 'Material8', as specified by the template parameter. This is one of several
/// predefined voxel types, and it is also possible to define your own. The Material8 type simply holds an integer value where zero represents
/// empty space and any other value represents a solid material.
///
/// The LargeVolume constructor takes a Region as a parameter. This specifies the valid range of voxels which can be held in the volume, so in this
/// particular case the valid voxel positions are (0,0,0) to (63, 127, 255). Attempts to access voxels outside this range will result is accessing the
/// border value (see getBorderValue() and setBorderValue()). PolyVox also has support for near infinite volumes which will be discussed later.
///
/// Access to individual voxels is provided via the setVoxelAt() and getVoxelAt() member functions. Advanced users may also be interested in
/// the Sampler class for faster read-only access to a large number of voxels.
///
	/// Lastly the example prints out some properties of the LargeVolume. Note that the dimensions getWidth(), getHeight(), and getDepth() are inclusive, such
/// that the width is 64 when the range of valid x coordinates goes from 0 to 63.
///
	/// Data Representation
/// ------------------
/// If stored carelessly, volume data can take up a huge amount of memory. For example, a volume of dimensions 1024x1024x1024 with
	/// 1 byte per voxel will require 1GB of memory if stored in an uncompressed form. Naturally our LargeVolume class is much more efficient
/// than this and it is worth understanding (at least at a high level) the approach which is used.
///
/// Essentially, the LargeVolume class stores its data as a collection of blocks. Each of these block is much smaller than the whole volume,
/// for example a typical size might be 32x32x32 voxels (though is is configurable by the user). In this case, a 256x512x1024 volume
	/// would contain 8x16x32 = 4096 blocks. The data for each block is stored in a compressed form, which uses only a small amount of
/// memory but it is hard to modify the data. Therefore, before any given voxel can be modified, its corresponding block must be uncompressed.
///
/// The compression and decompression of block is a relatively slow process and so we aim to do this as rarely as possible. In order
/// to achive this, the volume class stores a cache of recently used blocks and their associated uncompressed data. Each time a voxel
/// is touched a timestamp is updated on the corresponding block. When the cache becomes full the block with the oldest timestamp is
/// recompressed and moved out of the cache.
///
/// Achieving high compression rates
/// --------------------------------
/// The compression rates which can be achieved can vary significantly depending the nature of the data you are storing, but you can
/// encourage high compression rates by making your data as homogenous as possible. If you are simply storing a material with each
/// voxel then this will probably happen naturally. Games such as Minecraft which use this approach will typically involve large areas
/// of the same material which will compress down well.
///
/// However, if you are storing density values then you may want to take some care. The advantage of storing smoothly changing values
/// is that you can get smooth surfaces extracted, but storing smoothly changing values inside or outside objects (rather than just
/// on the boundary) does not benefit the surface and is very hard to compress effectively. You may wish to apply some thresholding to
	/// your density values to reduce this problem (this thresholding should only be applied to voxels which don't contribute to the surface).
///
/// Paging large volumes
/// --------------------
/// The compression scheme described previously will typically allow you to load several billion voxels into a few hundred megabytes of memory,
/// though as explained the exact compression rate is highly dependant on your data. If you have more data than this then PolyVox provides a
/// mechanism by which parts of the volume can be paged out of memory by calling user supplied callback functions. This mechanism allows a
/// potentially unlimited amount of data to be loaded, provided the user is able to take responsibility for storing any data which PolyVox
/// cannot fit in memory, and then returning it back to PolyVox on demand. For example, the user might choose to temporarily store this data
/// on disk or stream it to a remote database.
///
/// You can construct such a LargeVolume as follows:
///
/// \code
/// void myDataRequiredHandler(const ConstVolumeProxy<MaterialDensityPair44>& volume, const PolyVox::Region& reg)
/// {
/// //This function is being called because part of the data is missing from memory and needs to be supplied. The parameter
/// //'volume' provides access to the volume data, and the parameter 'reg' indicates which region of the volume you need fill.
/// }
///
/// void myDataOverflowHandler(const ConstVolumeProxy<MaterialDensityPair44>& vol, const PolyVox::Region& reg)
/// {
/// //This function is being called because part of the data is about to be removed from memory. The parameter 'volume'
/// //provides access to the volume data, and the parameter 'reg' indicates which region of the volume you need to store.
/// }
///
/// LargeVolume<Density>volData(&myDataRequiredHandler, &myDataOverflowHandler);
/// \endcode
///
/// Essentially you are providing an extension to the LargeVolume class - a way for data to be stored once PolyVox has run out of memory for it. Note
/// that you don't actually have to do anything with the data - you could simply decide that once it gets removed from memory it doesn't matter
/// anymore. But you still need to be ready to then provide something to PolyVox (even if it's just default data) in the event that it is requested.
///
/// Cache-aware traversal
/// ---------------------
	/// You might be surprised at just how many cache misses can occur when you traverse the volume in a naive manner. Consider a 1024x1024x1024 volume
/// with blocks of size 32x32x32. And imagine you iterate over this volume with a simple three-level for loop which iterates over x, the y, then z.
	/// If you start at position (0,0,0) then by the time you reach position (1023,0,0) you have touched 1024 voxels along one edge of the volume and
/// have pulled 32 blocks into the cache. By the time you reach (1023,1023,0) you have hit 1024x1024 voxels and pulled 32x32 blocks into the cache.
	/// You are now ready to touch voxel (0,0,1) which is right next to where you started, but unless your cache is at least 32x32 blocks large then this
/// initial block has already been cleared from the cache.
///
/// Ensuring you have a large enough cache size can obviously help the above situation, but you might also consider iterating over the voxels in a
/// different order. For example, if you replace your three-level loop with a six-level loop then you can first process all the voxels between (0,0,0)
/// and (31,31,31), then process all the voxels between (32,0,0) and (63,0,0), and so forth. Using this approach you will have no cache misses even
	/// if your cache size is only one. Of course the logic is more complex, but writing code in such a cache-aware manner may be beneficial in some situations.
///
/// Threading
/// ---------
	/// The LargeVolume class does not make any guarantees about thread safety. You should ensure that all accesses are performed from the same thread.
/// This is true even if you are only reading data from the volume, as concurrently reading from different threads can invalidate the contents
	/// of the block cache (among other problems).
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename VoxelType>
class LargeVolume : public BaseVolume<VoxelType>
{
public:
		//There seems to be some discrepancy between Visual Studio and GCC about how the following class should be declared.
//There is a work around (see also See http://goo.gl/qu1wn) given below which appears to work on VS2010 and GCC, but
//which seems to cause internal compiler errors on VS2008 when building with the /Gm 'Enable Minimal Rebuild' compiler
		//option. For now it seems best to 'fix' it with the preprocessor instead, but maybe the workaround can be reinstated
//in the future
//typedef Volume<VoxelType> VolumeOfVoxelType; //Workaround for GCC/VS2010 differences.
//class Sampler : public VolumeOfVoxelType::template Sampler< LargeVolume<VoxelType> >
#if defined(_MSC_VER)
class Sampler : public BaseVolume<VoxelType>::Sampler< LargeVolume<VoxelType> > //This line works on VS2010
#else
class Sampler : public BaseVolume<VoxelType>::template Sampler< LargeVolume<VoxelType> > //This line works on GCC
#endif
{
public:
Sampler(LargeVolume<VoxelType>* volume);
~Sampler();
Sampler& operator=(const Sampler& rhs);
VoxelType getSubSampledVoxel(uint8_t uLevel) const;
inline VoxelType getVoxel(void) const;
void setPosition(const Vector3DInt32& v3dNewPos);
void setPosition(int32_t xPos, int32_t yPos, int32_t zPos);
inline bool setVoxel(VoxelType tValue);
void movePositiveX(void);
void movePositiveY(void);
void movePositiveZ(void);
void moveNegativeX(void);
void moveNegativeY(void);
void moveNegativeZ(void);
inline VoxelType peekVoxel1nx1ny1nz(void) const;
inline VoxelType peekVoxel1nx1ny0pz(void) const;
inline VoxelType peekVoxel1nx1ny1pz(void) const;
inline VoxelType peekVoxel1nx0py1nz(void) const;
inline VoxelType peekVoxel1nx0py0pz(void) const;
inline VoxelType peekVoxel1nx0py1pz(void) const;
inline VoxelType peekVoxel1nx1py1nz(void) const;
inline VoxelType peekVoxel1nx1py0pz(void) const;
inline VoxelType peekVoxel1nx1py1pz(void) const;
inline VoxelType peekVoxel0px1ny1nz(void) const;
inline VoxelType peekVoxel0px1ny0pz(void) const;
inline VoxelType peekVoxel0px1ny1pz(void) const;
inline VoxelType peekVoxel0px0py1nz(void) const;
inline VoxelType peekVoxel0px0py0pz(void) const;
inline VoxelType peekVoxel0px0py1pz(void) const;
inline VoxelType peekVoxel0px1py1nz(void) const;
inline VoxelType peekVoxel0px1py0pz(void) const;
inline VoxelType peekVoxel0px1py1pz(void) const;
inline VoxelType peekVoxel1px1ny1nz(void) const;
inline VoxelType peekVoxel1px1ny0pz(void) const;
inline VoxelType peekVoxel1px1ny1pz(void) const;
inline VoxelType peekVoxel1px0py1nz(void) const;
inline VoxelType peekVoxel1px0py0pz(void) const;
inline VoxelType peekVoxel1px0py1pz(void) const;
inline VoxelType peekVoxel1px1py1nz(void) const;
inline VoxelType peekVoxel1px1py0pz(void) const;
inline VoxelType peekVoxel1px1py1pz(void) const;
private:
//Other current position information
VoxelType* mCurrentVoxel;
};
// Make the ConstVolumeProxy a friend
friend class ConstVolumeProxy<VoxelType>;
struct LoadedBlock
{
public:
LoadedBlock(uint16_t uSideLength = 0)
:block(uSideLength)
,timestamp(0)
{
}
Block<VoxelType> block;
uint32_t timestamp;
};
public:
/// Constructor for creating a very large paging volume.
LargeVolume
(
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler,
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler,
uint16_t uBlockSideLength = 32
);
/// Constructor for creating a fixed size volume.
LargeVolume
(
const Region& regValid,
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler = 0,
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler = 0,
bool bPagingEnabled = false,
uint16_t uBlockSideLength = 32
);
/// Destructor
~LargeVolume();
/// Gets the value used for voxels which are outside the volume
VoxelType getBorderValue(void) const;
/// Gets a voxel at the position given by <tt>x,y,z</tt> coordinates
VoxelType getVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos) const;
/// Gets a voxel at the position given by a 3D vector
VoxelType getVoxelAt(const Vector3DInt32& v3dPos) const;
/// Sets whether or not blocks are compressed in memory
void setCompressionEnabled(bool bCompressionEnabled);
/// Sets the number of blocks for which uncompressed data is stored
void setMaxNumberOfUncompressedBlocks(uint32_t uMaxNumberOfUncompressedBlocks);
/// Sets the number of blocks which can be in memory before the paging system starts unloading them
void setMaxNumberOfBlocksInMemory(uint32_t uMaxNumberOfBlocksInMemory);
/// Sets the value used for voxels which are outside the volume
void setBorderValue(const VoxelType& tBorder);
/// Sets the voxel at the position given by <tt>x,y,z</tt> coordinates
bool setVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue);
/// Sets the voxel at the position given by a 3D vector
bool setVoxelAt(const Vector3DInt32& v3dPos, VoxelType tValue);
/// Tries to ensure that the voxels within the specified Region are loaded into memory.
void prefetch(Region regPrefetch);
/// Ensures that any voxels within the specified Region are removed from memory.
void flush(Region regFlush);
/// Removes all voxels from memory
void flushAll();
/// Empties the cache of uncompressed blocks
void clearBlockCache(void);
/// Calculates the approximate compression ratio of the stored volume data
float calculateCompressionRatio(void);
/// Calculates approximately how many bytes of memory the volume is currently using.
uint32_t calculateSizeInBytes(void);
protected:
/// Copy constructor (protected: copying a paged volume is intentionally restricted)
LargeVolume(const LargeVolume& rhs);
/// Assignment operator (protected: copying a paged volume is intentionally restricted)
LargeVolume& operator=(const LargeVolume& rhs);
private:
void initialise(const Region& regValidRegion, uint16_t uBlockSideLength);
/// gets called when a new region is allocated and needs to be filled
/// NOTE: accessing ANY voxels outside this region during the process of this function
/// is absolutely unsafe
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> m_funcDataRequiredHandler;
/// gets called when a Region needs to be stored by the user, because LargeVolume will erase it right after
/// this function returns
/// NOTE: accessing ANY voxels outside this region during the process of this function
/// is absolutely unsafe
polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> m_funcDataOverflowHandler;
Block<VoxelType>* getUncompressedBlock(int32_t uBlockX, int32_t uBlockY, int32_t uBlockZ) const;
void eraseBlock(typename std::map<Vector3DInt32, LoadedBlock >::iterator itBlock) const;
/// this function can be called by m_funcDataRequiredHandler without causing any weird effects
bool setVoxelAtConst(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue) const;
//The block data
mutable std::map<Vector3DInt32, LoadedBlock > m_pBlocks;
//The cache of uncompressed blocks. The uncompressed block data and the timestamps are stored here rather
//than in the Block class. This is so that in the future each VolumeIterator might maintain its own cache
//of blocks. However, this could mean the same block data is uncompressed and modified in more than one
//location in memory... could be messy with threading.
mutable std::vector< LoadedBlock* > m_vecUncompressedBlockCache;
mutable uint32_t m_uTimestamper;
mutable Vector3DInt32 m_v3dLastAccessedBlockPos;
mutable Block<VoxelType>* m_pLastAccessedBlock;
uint32_t m_uMaxNumberOfUncompressedBlocks;
uint32_t m_uMaxNumberOfBlocksInMemory;
//We don't store an actual Block for the border, just the uncompressed data. This is partly because the border
//block does not have a position (so can't be passed to getUncompressedBlock()) and partly because there's a
//good chance we'll often hit it anyway. It's a chunk of homogenous data (rather than a single value) so that
//the VolumeIterator can do its usual pointer arithmetic without needing to know it has gone outside the volume.
VoxelType* m_pUncompressedBorderData;
//The valid region of the volume, expressed in blocks
Region m_regValidRegionInBlocks;
//The size of the blocks
uint16_t m_uBlockSideLength;
uint8_t m_uBlockSideLengthPower;
bool m_bCompressionEnabled;
bool m_bPagingEnabled;
};
}
#include "PolyVoxCore/LargeVolume.inl"
#include "PolyVoxCore/LargeVolumeSampler.inl"
#endif //__PolyVox_LargeVolume_H__
| 5,773 |
5,169 | {
"name": "BatFoundation",
"version": "0.0.3",
"summary": "Augmented Foundation and UIKit",
"homepage": "http://bitbucket.org/bsansierra/batfoundation",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": null
},
"source": {
"git": "http://bitbucket.org/bsansierra/batfoundation.git",
"tag": "0.0.3"
},
"source_files": [
"BatFoundation/*.{h,m}",
"BatFoundation/**/*.{h,m}"
]
}
| 199 |
798 | <gh_stars>100-1000
/*
* The MIT License
*
* Copyright (c) 2014-2016 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package picard.sam.markduplicates.util;
import htsjdk.samtools.util.Log;
import htsjdk.samtools.util.ProgressLogger;
import picard.sam.util.PhysicalLocation;
import picard.sam.util.ReadNameParser;
import picard.util.GraphUtils;
import java.io.Serializable;
import java.util.*;
/**
* Contains methods for finding optical/co-localized/sequencing duplicates.
*
* @author <NAME>
* @author <NAME>
*/
public class OpticalDuplicateFinder extends ReadNameParser implements Serializable {

    public int opticalDuplicatePixelDistance;

    public static final int DEFAULT_OPTICAL_DUPLICATE_DISTANCE = 100;
    public static final int DEFAULT_BIG_DUPLICATE_SET_SIZE = 1000;
    public static final int DEFAULT_MAX_DUPLICATE_SET_SIZE = 300000; // larger than this number will generate over 100 billion comparisons in the n^2 algorithm below

    private int bigDuplicateSetSize = DEFAULT_BIG_DUPLICATE_SET_SIZE;
    private long maxDuplicateSetSize = DEFAULT_MAX_DUPLICATE_SET_SIZE;

    /**
     * Sets the size of a set that is big enough to log progress about.
     * Defaults to {@value picard.sam.markduplicates.util.OpticalDuplicateFinder#DEFAULT_BIG_DUPLICATE_SET_SIZE}
     *
     * @param bigDuplicateSetSize the size of a set that is big enough to log progress about
     */
    public void setBigDuplicateSetSize(final int bigDuplicateSetSize) {
        this.bigDuplicateSetSize = bigDuplicateSetSize;
    }

    /**
     * Sets the size of a set that is too big to process.
     * Defaults to {@value picard.sam.markduplicates.util.OpticalDuplicateFinder#DEFAULT_MAX_DUPLICATE_SET_SIZE}.
     * A non-positive value means "no limit".
     *
     * @param maxDuplicateSetSize the size of a set that is too big to process
     */
    public void setMaxDuplicateSetSize(final long maxDuplicateSetSize) {
        if (maxDuplicateSetSize < 1) {
            // BUG FIX: previously the invalid value unconditionally overwrote
            // Long.MAX_VALUE right after this assignment, defeating the guard.
            this.maxDuplicateSetSize = Long.MAX_VALUE;
        } else {
            this.maxDuplicateSetSize = maxDuplicateSetSize;
        }
    }

    /**
     * Uses the default duplicate distance {@link OpticalDuplicateFinder#DEFAULT_OPTICAL_DUPLICATE_DISTANCE}
     * ({@value picard.sam.markduplicates.util.OpticalDuplicateFinder#DEFAULT_OPTICAL_DUPLICATE_DISTANCE}) and the default read name regex
     * {@link ReadNameParser#DEFAULT_READ_NAME_REGEX}.
     */
    public OpticalDuplicateFinder() {
        super();
        this.opticalDuplicatePixelDistance = DEFAULT_OPTICAL_DUPLICATE_DISTANCE;
    }

    /**
     * @param readNameRegex see {@link ReadNameParser#DEFAULT_READ_NAME_REGEX}.
     * @param opticalDuplicatePixelDistance the optical duplicate pixel distance
     * @param log the log to which to write messages.
     */
    public OpticalDuplicateFinder(final String readNameRegex, final int opticalDuplicatePixelDistance, final Log log) {
        super(readNameRegex, log);
        this.opticalDuplicatePixelDistance = opticalDuplicatePixelDistance;
    }

    /**
     * @param readNameRegex see {@link ReadNameParser#DEFAULT_READ_NAME_REGEX}.
     * @param opticalDuplicatePixelDistance the optical duplicate pixel distance
     * @param maxDuplicateSetSize the size of a set that is too big to process
     * @param log the log to which to write messages.
     */
    public OpticalDuplicateFinder(final String readNameRegex, final int opticalDuplicatePixelDistance, final long maxDuplicateSetSize, final Log log) {
        super(readNameRegex, log);
        this.opticalDuplicatePixelDistance = opticalDuplicatePixelDistance;
        this.maxDuplicateSetSize = maxDuplicateSetSize;
    }

    /**
     * Finds which reads within the list of duplicates that are likely to be optical/co-localized duplicates of
     * one another. Within each cluster of optical duplicates that is found, one read remains un-flagged for
     * optical duplication and the rest are flagged as optical duplicates. The set of reads that are considered
     * optical duplicates are indicated by returning "true" at the same index in the resulting boolean[] as the
     * read appeared in the input list of physical locations.
     *
     * @param list a list of reads that are determined to be duplicates of one another
     * @param keeper a single PhysicalLocation that is the one being kept as non-duplicate, and thus should never be
     *               annotated as an optical duplicate. May in some cases be null, or a PhysicalLocation not
     *               contained within the list!
     * @return a boolean[] of the same length as the incoming list marking which reads are optical duplicates
     */
    public boolean[] findOpticalDuplicates(final List<? extends PhysicalLocation> list, final PhysicalLocation keeper) {
        final int length = list.size();
        final boolean[] opticalDuplicateFlags = new boolean[length];

        // If there is only one or zero reads passed in (so there are obviously no optical duplicates),
        // or if there are too many reads (so we don't want to try to run this expensive n^2 algorithm),
        // then just return an array of all false
        if (this.readNameRegex == null || length < 2 || length > maxDuplicateSetSize) {
            return opticalDuplicateFlags;
        }

        final PhysicalLocation actualKeeper = keeperOrNull(list, keeper);

        final Log log;
        final ProgressLogger progressLoggerForKeeper, progressLoggerForRest;
        final boolean logProgress = length > bigDuplicateSetSize;

        if (logProgress) {
            log = Log.getInstance(OpticalDuplicateFinder.class);
            progressLoggerForKeeper = new ProgressLogger(log, 10000, "compared", "ReadEnds to keeper");
            progressLoggerForRest = new ProgressLogger(log, 1000, "compared", "ReadEnds to others");
            log.info("Large duplicate set. size = " + length);
            log.debug("About to compare to keeper:" + actualKeeper);
        } else {
            log = null;
            progressLoggerForKeeper = null;
            progressLoggerForRest = null;
        }
        // The graph-based algorithm handles transitive closeness; the fast n^2 pass is
        // provably sufficient only for very small sets (see getOpticalDuplicatesFlagFast).
        if (length >= (keeper == null ? 3 : 4)) {
            return getOpticalDuplicatesFlagWithGraph(list, actualKeeper, opticalDuplicateFlags, log, progressLoggerForKeeper, progressLoggerForRest, logProgress);
        } else {
            return getOpticalDuplicatesFlagFast(list, actualKeeper, opticalDuplicateFlags, log, progressLoggerForKeeper, progressLoggerForRest, logProgress);
        }
    }

    /**
     * Compute optical duplicates quickly in the standard case where we know that there won't be any transitive distances to worry about.
     *
     * Note, this is guaranteed to be correct when there are at most 2x reads from a readgroup or 3x with the keeper present
     */
    private boolean[] getOpticalDuplicatesFlagFast(List<? extends PhysicalLocation> list, PhysicalLocation actualKeeper, boolean[] opticalDuplicateFlags, Log log, ProgressLogger progressLoggerForKeeper, ProgressLogger progressLoggerForRest, boolean logProgress) {
        final int length = list.size();

        // First go through and compare all the reads to the keeper
        if (actualKeeper != null) {
            for (int i = 0; i < length; ++i) {
                final PhysicalLocation other = list.get(i);
                opticalDuplicateFlags[i] = closeEnough(actualKeeper, other, this.opticalDuplicatePixelDistance);
                // The main point of adding this log and if statement (also below) is a workaround a bug in the JVM
                // which causes a deep exception (https://github.com/broadinstitute/picard/issues/472).
                // It seems that this is related to https://bugs.openjdk.java.net/browse/JDK-8033717 which
                // was closed due to non-reproducibility. We came across a bam file that evoked this error
                // every time we tried to duplicate-mark it. The problem seemed to be a duplicate-set of size 500,000,
                // and this loop seemed to kill the JVM for some reason. This logging statement (and the one in the
                // loop below) solved the problem.
            }
        }
        if (logProgress) log.debug("Done with comparing to keeper, now the rest.");

        // Now go through and do each pairwise comparison not involving the actualKeeper
        for (int i = 0; i < length; ++i) {
            final PhysicalLocation lhs = list.get(i);
            if (lhs == actualKeeper) continue; // no comparisons to actualKeeper since those are all handled above
            // logging here for same reason as above
            if (logProgress) progressLoggerForRest.record(String.format("%d", lhs.getReadGroup()), lhs.getX());

            for (int j = i + 1; j < length; ++j) {
                final PhysicalLocation rhs = list.get(j);
                if (rhs == actualKeeper) continue; // no comparisons to actualKeeper since those are all handled above
                if (opticalDuplicateFlags[i] && opticalDuplicateFlags[j])
                    continue; // both already marked, no need to check

                if (closeEnough(lhs, rhs, this.opticalDuplicatePixelDistance)) {
                    // At this point we want to mark either lhs or rhs as duplicate. Either could have been marked
                    // as a duplicate of the keeper (but not both - that's checked above), so be careful about which
                    // one to now mark as a duplicate.
                    final int index = opticalDuplicateFlags[j] ? i : j;
                    opticalDuplicateFlags[index] = true;
                }
            }
        }
        return opticalDuplicateFlags;
    }

    /**
     * Compute the optical duplicates correctly in the case where the duplicate group could end up with transitive optical duplicates
     */
    private boolean[] getOpticalDuplicatesFlagWithGraph(List<? extends PhysicalLocation> list, PhysicalLocation keeper, boolean[] opticalDuplicateFlags, Log log, ProgressLogger progressLoggerForKeeper, ProgressLogger progressLoggerForRest, boolean logProgress) {
        // Make a graph where the edges are reads that lie within the optical duplicate pixel distance from each other,
        // we will then use the union-find algorithm to cluster the graph and find optical duplicate groups
        final GraphUtils.Graph<Integer> opticalDistanceRelationGraph = new GraphUtils.Graph<>();

        if (logProgress) {
            log.debug("Building adjacency graph for duplicate group");
        }

        final Map<Integer, List<Integer>> tileRGmap = new HashMap<>();

        int keeperIndex = -1;
        for (int i = 0; i < list.size(); i++) {
            PhysicalLocation currentLoc = list.get(i);
            if (currentLoc == keeper) {
                keeperIndex = i;
            }
            if (currentLoc.hasLocation()) {
                // Composite key: read group in the high bits, tile in the low 16 bits.
                final int key = ((int) currentLoc.getReadGroup() << 16) + currentLoc.getTile();

                if (tileRGmap.containsKey(key)) {
                    tileRGmap.get(key).add(i);
                } else {
                    final List<Integer> pLocation = new ArrayList<>();
                    pLocation.add(i);
                    tileRGmap.put(key, pLocation);
                }
            }
            opticalDistanceRelationGraph.addNode(i);
        }

        // Since finding adjacent optical duplicates is an O(n^2) operation, we subdivide the input by
        // readgroup/tile in order to reduce the amount of redundant checks across readgroups between reads.
        for (List<Integer> tileGroup : tileRGmap.values()) {
            if (tileGroup.size() > 1) {
                fillGraphFromAGroup(list, tileGroup, logProgress, progressLoggerForKeeper, this.opticalDuplicatePixelDistance, opticalDistanceRelationGraph);
            }
        }

        if (logProgress) {
            log.debug("Finished building adjacency graph for duplicate group, moving onto clustering");
        }

        // Keep a map of the reads and their cluster assignments
        final Map<Integer, Integer> opticalDuplicateClusterMap = opticalDistanceRelationGraph.cluster();
        final Map<Integer, Integer> clusterToRepresentativeRead = new HashMap<>();
        Integer keeperCluster = null;

        // Specially mark the keeper as specifically not a duplicate if it exists
        if (keeperIndex >= 0) {
            clusterToRepresentativeRead.put(opticalDuplicateClusterMap.get(keeperIndex), keeperIndex);
            keeperCluster = opticalDuplicateClusterMap.get(keeperIndex);
        }

        for (final Map.Entry<Integer, Integer> entry : opticalDuplicateClusterMap.entrySet()) {
            // logging here for same reason as above
            final int recordIndex = entry.getKey();
            final int recordAssignedCluster = entry.getValue();
            if (logProgress) {
                progressLoggerForRest.record(String.format("%d", list.get(recordIndex).getReadGroup()), list.get(recordIndex).getX());
            }

            // If its not the first read we've seen for this cluster, mark it as an optical duplicate
            if (clusterToRepresentativeRead.containsKey(recordAssignedCluster) && recordIndex != keeperIndex) {
                final PhysicalLocation representativeLoc = list.get(clusterToRepresentativeRead.get(recordAssignedCluster));
                final PhysicalLocation currentRecordLoc = list.get(recordIndex);

                // If not in the keeper cluster, then keep the minX -> minY valued duplicate (note the tile must be equal for reads to cluster together)
                if (!(keeperIndex >= 0 && recordAssignedCluster == keeperCluster) && // checking we don't accidentally set the keeper as an optical duplicate
                        (currentRecordLoc.getX() < representativeLoc.getX() || (currentRecordLoc.getX() == representativeLoc.getX() && currentRecordLoc.getY() < representativeLoc.getY()))) {
                    // Mark the old min as an optical duplicate, and save the new min
                    opticalDuplicateFlags[clusterToRepresentativeRead.get(recordAssignedCluster)] = true;
                    clusterToRepresentativeRead.put(recordAssignedCluster, recordIndex);
                } else {
                    // If a smaller read has already been visited, mark the test read as an optical duplicate
                    opticalDuplicateFlags[recordIndex] = true;
                }
            } else {
                clusterToRepresentativeRead.put(recordAssignedCluster, recordIndex);
            }
        }

        return opticalDuplicateFlags;
    }

    private void fillGraphFromAGroup(final List<? extends PhysicalLocation> wholeList, final List<Integer> groupList, final boolean logProgress, final ProgressLogger progressLoggerForKeeper, final int distance, final GraphUtils.Graph<Integer> opticalDistanceRelationGraph) {

        for (int i = 0; i < groupList.size(); i++) {
            final int iIndex = groupList.get(i);
            final PhysicalLocation currentLoc = wholeList.get(iIndex);
            // The main point of adding this log and if statement (also below) is a workaround a bug in the JVM
            // which causes a deep exception (https://github.com/broadinstitute/picard/issues/472).
            // It seems that this is related to https://bugs.openjdk.java.net/browse/JDK-8033717 which
            // was closed due to non-reproducibility. We came across a bam file that evoked this error
            // every time we tried to duplicate-mark it. The problem seemed to be a duplicate-set of size 500,000,
            // and this loop seemed to kill the JVM for some reason. This logging statement (and the one in the
            // loop below) solved the problem.
            if (logProgress) {
                progressLoggerForKeeper.record(String.format("%d", currentLoc.getReadGroup()), currentLoc.getX());
            }

            for (int j = i + 1; j < groupList.size(); j++) {
                final int jIndex = groupList.get(j);
                final PhysicalLocation other = wholeList.get(jIndex);

                // Tile/readgroup equality is already guaranteed by the grouping, so the short check suffices.
                if (closeEnoughShort(currentLoc, other, distance)) {
                    opticalDistanceRelationGraph.addEdge(iIndex, jIndex);
                }
            }
        }
    }

    /** Returns the keeper if it is contained within the list and has location information, otherwise null. */
    private PhysicalLocation keeperOrNull(final List<? extends PhysicalLocation> list, final PhysicalLocation keeper) {
        if (keeper != null && keeper.hasLocation()) {
            for (final PhysicalLocation loc : list) {
                if (loc == keeper) return keeper;
            }
        }
        return null;
    }

    /** Simple method to test whether two physical locations are close enough to each other to be deemed optical dupes. */
    private boolean closeEnough(final PhysicalLocation lhs, final PhysicalLocation rhs, final int distance) {
        return lhs != rhs && // no comparing an object to itself (checked using object identity)!
                lhs.hasLocation() && rhs.hasLocation() && // no comparing objects without locations
                lhs.getReadGroup() == rhs.getReadGroup() && // must be in the same RG to be optical duplicates
                lhs.getTile() == rhs.getTile() && // and the same tile
                Math.abs(lhs.getX() - rhs.getX()) <= distance &&
                Math.abs(lhs.getY() - rhs.getY()) <= distance;
    }

    /** Distance-only variant of {@link #closeEnough}; used when group membership already implies same RG and tile. */
    private boolean closeEnoughShort(final PhysicalLocation lhs, final PhysicalLocation rhs, final int distance) {
        return lhs != rhs &&
                Math.abs(lhs.getX() - rhs.getX()) <= distance &&
                Math.abs(lhs.getY() - rhs.getY()) <= distance;
    }
}
| 7,060 |
369 | // Copyright (c) 2017-2021, Mudita <NAME>.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include <module-audio/tags_fetcher/TagsFetcher.hpp>
#include <utf8/UTF8.hpp>
#include <filesystem>
#include <optional>
#include <vector>
/// Interface for a repository that maps between sound titles and their file paths.
class AbstractSoundsRepository
{
  public:
    virtual ~AbstractSoundsRepository() = default;
    /// Resolves a song title to its file path; empty optional when the title is unknown.
    virtual std::optional<std::filesystem::path> titleToPath(const UTF8 &title) const = 0;
    /// Resolves a file path back to its song title; empty optional when the path is unknown.
    virtual std::optional<UTF8> pathToTitle(std::filesystem::path) const = 0;
    /// Returns the titles of all known songs.
    virtual std::vector<UTF8> getSongTitles() = 0;
};
/// Repository built by scanning a directory tree for audio files and reading their tags.
class SoundsRepository : public AbstractSoundsRepository
{
  public:
    /// Scans dirToScan (recursively — see processEntry) and caches tag data for each entry found.
    explicit SoundsRepository(std::filesystem::path dirToScan);

    std::optional<std::filesystem::path> titleToPath(const UTF8 &title) const override;
    std::optional<UTF8> pathToTitle(std::filesystem::path path) const override;
    std::vector<UTF8> getSongTitles() override;

  private:
    /// Extracts tag data from a single directory entry into `samples`.
    void processEntry(const std::filesystem::recursive_directory_iterator::value_type &entry);

    // Cached tag data for every scanned file.
    std::vector<tags::fetcher::Tags> samples;
};
| 473 |
3,282 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.compositor.layouts;
import org.chromium.chrome.browser.compositor.layouts.eventfilter.EdgeSwipeEventFilter.ScrollDirection;
import org.chromium.chrome.browser.compositor.layouts.eventfilter.EdgeSwipeHandler;
import org.chromium.chrome.browser.compositor.layouts.eventfilter.EmptyEdgeSwipeHandler;
/**
 * A {@link EdgeSwipeHandler} that takes a {@link LayoutProvider} and delegates all swipe events
 * to {@link LayoutProvider#getActiveLayout()}.
 * <p>
 * Every callback is silently dropped while the provider reports no active layout.
 * Each event is timestamped with {@link LayoutManager#time()} before being forwarded.
 */
public class EdgeSwipeHandlerLayoutDelegate extends EmptyEdgeSwipeHandler {
    private final LayoutProvider mLayoutProvider;

    /**
     * Creates an instance of the {@link EdgeSwipeHandlerLayoutDelegate}.
     * @param provider A {@link LayoutProvider} instance.
     */
    public EdgeSwipeHandlerLayoutDelegate(LayoutProvider provider) {
        mLayoutProvider = provider;
    }

    /** Forwards the swipe-start event to the active layout, if any. */
    @Override
    public void swipeStarted(ScrollDirection direction, float x, float y) {
        if (mLayoutProvider.getActiveLayout() == null) return;
        mLayoutProvider.getActiveLayout().swipeStarted(LayoutManager.time(), direction, x, y);
    }

    /** Forwards the swipe-update event (position, delta and total offsets) to the active layout, if any. */
    @Override
    public void swipeUpdated(float x, float y, float dx, float dy, float tx, float ty) {
        if (mLayoutProvider.getActiveLayout() == null) return;
        mLayoutProvider.getActiveLayout().swipeUpdated(LayoutManager.time(), x, y, dx, dy, tx, ty);
    }

    /** Forwards the swipe-finished event to the active layout, if any. */
    @Override
    public void swipeFinished() {
        if (mLayoutProvider.getActiveLayout() == null) return;
        mLayoutProvider.getActiveLayout().swipeFinished(LayoutManager.time());
    }

    /** Forwards the fling event (position, total offsets, velocity) to the active layout, if any. */
    @Override
    public void swipeFlingOccurred(float x, float y, float tx, float ty, float vx, float vy) {
        if (mLayoutProvider.getActiveLayout() == null) return;
        mLayoutProvider.getActiveLayout().swipeFlingOccurred(
                LayoutManager.time(), x, y, tx, ty, vx, vy);
    }
}
} | 691 |
3,274 | <filename>src/test/java/com/ql/util/express/test/rating/SubjectMananger.java
package com.ql.util.express.test.rating;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@SuppressWarnings({ "serial", "rawtypes" })
public class SubjectMananger extends HashMap {
    /** Returns the per-user subject map, lazily creating and registering one on first access. */
    @SuppressWarnings("unchecked")
    public Object get(Object userName) {
        UserSubject subjects = (UserSubject) super.get(userName);
        if (subjects != null) {
            return subjects;
        }
        subjects = new UserSubject(userName);
        super.put(userName, subjects);
        return subjects;
    }

    /** Direct insertion is not supported; entries are created through {@link #get(Object)}. */
    public Object put(String userName, Object userSubject) {
        throw new RuntimeException("不运行的方法");
    }

    /** Flattens every user's subject entries into a list of {@code SubjectValue} records. */
    public List<SubjectValue> getSubjectValues() {
        final List<SubjectValue> flattened = new ArrayList<SubjectValue>();
        for (Object userEntry : this.values()) {
            final UserSubject userSubject = (UserSubject) userEntry;
            for (Object subjectEntry : userSubject.entrySet()) {
                final Map.Entry pair = (Map.Entry) subjectEntry;
                final SubjectValue record = new SubjectValue();
                record.userId = userSubject.getUserId();
                record.subjectId = pair.getKey();
                record.value = ((Number) pair.getValue()).doubleValue();
                flattened.add(record);
            }
        }
        return flattened;
    }
}
@SuppressWarnings({ "serial", "rawtypes" })
class UserSubject extends HashMap {
    // Owner of this subject map.
    Object userId;

    public UserSubject(Object aUserId) {
        super();
        this.userId = aUserId;
    }

    /** Returns the stored value for the subject, or 0 when the subject is absent. */
    public Double get(String subjectId) {
        final Object stored = super.get(subjectId);
        return stored == null ? Double.valueOf(0d) : (Double) stored;
    }

    /** Stores a value for the subject, returning the previous value (or null). */
    @SuppressWarnings("unchecked")
    public Object put(String subjectId, Object value) {
        return super.put(subjectId, value);
    }

    public Object getUserId() {
        return userId;
    }
}
| 604 |
421 | import sys
sys.path.append("..") # 先跳出当前目录
from bean.word_unit import WordUnit
from bean.sentence_unit import SentenceUnit
from bean.entity_pair import EntityPair
from core.extract_by_dsnf import ExtractByDSNF
class Extractor:
    """Extracts knowledge triples from a dependency-parsed sentence.

    Attributes:
        entities: list of WordUnit, candidate entity words of the current sentence
        entity_pairs: list of EntityPair, candidate entity pairs of the current sentence
    """

    def __init__(self):
        # BUG FIX: these used to be class-level lists, silently shared by every
        # Extractor instance; per-instance state avoids cross-instance aliasing.
        self.entities = []
        self.entity_pairs = []

    def extract(self, origin_sentence, sentence, file_path, num):
        """Run every DSNF extraction rule over each candidate entity pair.

        Args:
            origin_sentence: str, the original raw sentence
            sentence: SentenceUnit, the parsed sentence
            file_path: output path handed to ExtractByDSNF
            num: int, running index of extracted triples
        Returns:
            num: int, updated triple index after extraction
        """
        self.get_entities(sentence)
        self.get_entity_pairs(sentence)
        for entity_pair in self.entity_pairs:
            entity1 = entity_pair.entity1
            entity2 = entity_pair.entity2
            extract_dsnf = ExtractByDSNF(origin_sentence, sentence, entity1, entity2, file_path, num)
            # [DSNF2|DSNF7], partially covers [DSNF5|DSNF6]
            if extract_dsnf.SBV_VOB(entity1, entity2):
                pass
            # [DSNF4]
            if extract_dsnf.SBV_CMP_POB(entity1, entity2):
                pass
            if extract_dsnf.SBVorFOB_POB_VOB(entity1, entity2):
                pass
            # [DSNF1]
            # if not extract_dsnf.E_NN_E(entity1, entity2):
            #     pass
            # [DSNF3|DSNF5|DSNF6]; subject-verb-object inside coordinate entities may include DSNF3
            if extract_dsnf.coordinate(entity1, entity2):
                pass
            # [possessive "的" phrase]
            if extract_dsnf.entity_de_entity_NNT(entity1, entity2):
                pass
            num = extract_dsnf.num
        return num

    def get_entities(self, sentence):
        """Collect all candidate entity words of the sentence into self.entities.

        Args:
            sentence: SentenceUnit, the parsed sentence
        """
        self.entities.clear()
        for word in sentence.words:
            if self.is_entity(word):
                self.entities.append(word)

    def get_entity_pairs(self, sentence):
        """Build entity pairs, keeping only pairs with distinct lemmas and at
        most 4 other entities between them.

        Args:
            sentence: SentenceUnit, the parsed sentence
        """
        self.entity_pairs.clear()
        for i, first in enumerate(self.entities):
            for second in self.entities[i + 1:]:
                if (first.lemma != second.lemma and
                        self.get_entity_num_between(first, second, sentence) <= 4):
                    self.entity_pairs.append(EntityPair(first, second))

    def is_entity(self, entry):
        """Return True when the word's POS tag marks it as a candidate entity.

        Args:
            entry: WordUnit, the word to test
        """
        # Candidate-entity POS tags (LTP tag set: person, org, place, other proper noun, abbreviation)
        entity_postags = {'nh', 'ni', 'ns', 'nz', 'j'}
        return entry.postag in entity_postags

    def get_entity_num_between(self, entity1, entity2, sentence):
        """Count entities strictly between entity1 and entity2.

        Args:
            entity1: WordUnit, left entity
            entity2: WordUnit, right entity
        Returns:
            int, number of entities between the two
        """
        # NOTE(review): indexes sentence.words by WordUnit.ID, assuming ID equals
        # the list index — same assumption as the original while-loop.
        return sum(1 for word in sentence.words[entity1.ID + 1:entity2.ID]
                   if self.is_entity(word))
| 2,174 |
5,169 | <filename>Specs/f/1/7/CBNativeSwift/0.1.4/CBNativeSwift.podspec.json
{
"name": "CBNativeSwift",
"version": "0.1.4",
"summary": "menu試做",
"description": "CB原生盤勢",
"homepage": "https://gitlab.incrte.com/incrte-pd1/CCAPP/CBNativeSwift",
"license": "MIT",
"authors": {
"NelsonShen": "<EMAIL>"
},
"platforms": {
"ios": "10.0"
},
"swift_versions": "4.2",
"source": {
"git": "https://gitlab.incrte.com/incrte-pd1/CCAPP/CBNativeSwift.git",
"tag": "0.1.4"
},
"source_files": [
"CBNativeSwift",
"CBNativeSwift/**/*.{h,m,swift}"
],
"exclude_files": "Classes/Exclude",
"resource_bundles": {
"com.incrte.CBNativeSwift.betui": [
"CBNativeSwift/Resources/betui/**/*.json"
],
"com.incrte.CBNativeSwift.assets": [
"CBNativeSwift/Resources/Assets.xcassets"
]
},
"dependencies": {
"SnapKit": [
],
"SwiftHEXColors": [
],
"ObjectMapper": [
],
"SwiftyRSA": [
],
"Alamofire": [
"~> 4.9.1"
],
"Device.swift": [
]
},
"swift_version": "4.2"
}
| 550 |
407 | package com.alibaba.tesla.appmanager.domain.req.unit;
import com.alibaba.tesla.appmanager.common.BaseRequest;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import java.io.Serializable;
/**
 * Unit query request.
 *
 * @author <EMAIL>
 */
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
public class UnitQueryReq extends BaseRequest {

    /**
     * Unique identifier of the unit
     */
    private String unitId;

    /**
     * Display name of the unit
     */
    private String unitName;

    /**
     * Category of the unit
     */
    private String category;
}
| 268 |
1,720 | #ifndef _MIPSREFLECTION_H_
#define _MIPSREFLECTION_H_
#include "Types.h"
#include "MIPSArchitecture.h"
class CMIPS;
namespace MIPSReflection
{
	// Forward declaration: INSTRUCTION and SUBTABLE reference each other.
	struct INSTRUCTION;

	// A nested decode table; nShift/nMask presumably select the sub-opcode bits
	// used to index pTable (see DereferenceInstruction) — TODO confirm in the .cpp.
	struct SUBTABLE
	{
		uint32 nShift;
		uint32 nMask;
		INSTRUCTION* pTable;
	};

	// Describes one instruction (or a dispatch point into a SUBTABLE) for
	// disassembly: mnemonic/operand formatting, branch classification and
	// effective-address computation callbacks.
	struct INSTRUCTION
	{
		const char* sMnemonic;
		SUBTABLE* pSubTable;
		void (*pGetMnemonic)(INSTRUCTION*, CMIPS*, uint32, char*, unsigned int);
		void (*pGetOperands)(INSTRUCTION*, CMIPS*, uint32, uint32, char*, unsigned int);
		MIPS_BRANCH_TYPE(*pIsBranch)
		(INSTRUCTION*, CMIPS*, uint32);
		uint32 (*pGetEffectiveAddress)(INSTRUCTION*, CMIPS*, uint32, uint32);
	};

	// Resolves an opcode through a sub-table to its INSTRUCTION entry.
	INSTRUCTION* DereferenceInstruction(SUBTABLE*, uint32);

	// Reusable callbacks for populating INSTRUCTION tables.
	void CopyMnemonic(INSTRUCTION*, CMIPS*, uint32, char*, unsigned int);
	void SubTableMnemonic(INSTRUCTION*, CMIPS*, uint32, char*, unsigned int);
	void SubTableOperands(INSTRUCTION*, CMIPS*, uint32, uint32, char*, unsigned int);
	MIPS_BRANCH_TYPE IsBranch(INSTRUCTION*, CMIPS*, uint32);
	MIPS_BRANCH_TYPE IsNoDelayBranch(INSTRUCTION*, CMIPS*, uint32);
	MIPS_BRANCH_TYPE SubTableIsBranch(INSTRUCTION*, CMIPS*, uint32);
	uint32 SubTableEffAddr(INSTRUCTION*, CMIPS*, uint32, uint32);
};
#endif
| 513 |
393 | <filename>rxjava2-pool/src/main/java/org/davidmoten/rx/pool/DecoratingMember.java
package org.davidmoten.rx.pool;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.reactivex.disposables.Disposable;
import io.reactivex.plugins.RxJavaPlugins;
/**
 * Pool member that wraps the raw pooled value, decorating it on checkout and
 * tracking checking/releasing state plus a scheduled idle-timeout release.
 * Mutable state is synchronized externally by MemberSingle.drain() (wip), as
 * noted on the individual fields.
 */
final class DecoratingMember<T> implements Member<T> {

    private static final Logger log = LoggerFactory.getLogger(DecoratingMember.class);

    private volatile T value;
    private final MemberSingle<T> memberSingle;
    private final BiFunction<? super T, ? super Checkin, ? extends T> checkinDecorator;

    // synchronized by MemberSingle.drain() wip
    private Disposable scheduled;

    // synchronized by MemberSingle.drain() wip
    private boolean releasing;

    // synchronized by MemberSingle.drain() wip
    private boolean checking;

    // synchronized by MemberSingle.drain() wip
    // not subject to word tearing, because of ordering in drain loop (will only
    // read this value if check has finished)
    private long lastCheckTime;

    DecoratingMember(T value, BiFunction<? super T, ? super Checkin, ? extends T> checkinDecorator,
            MemberSingle<T> memberSingle) {
        this.checkinDecorator = checkinDecorator;
        this.memberSingle = memberSingle;
        this.value = value;
    }

    /** Returns the pooled value decorated so the caller can check it back in. */
    @Override
    public T value() {
        return checkinDecorator.apply(value, this);
    }

    /** Returns this member to the owning pool. */
    @Override
    public void checkin() {
        memberSingle.pool.checkin(this);
    }

    public void markAsReleasing() {
        this.releasing = true;
    }

    public boolean isReleasing() {
        return releasing;
    }

    public void markAsChecking() {
        this.checking = true;
    }

    public boolean isChecking() {
        return checking;
    }

    /**
     * Cancels any scheduled idle release, then disposes the underlying value via
     * the pool's disposer; the value reference is cleared even if disposal throws.
     */
    @Override
    public void disposeValue() {
        try {
            if (scheduled != null) {
                scheduled.dispose();
                scheduled = null;
            }
            log.debug("disposing value {}", value);
            memberSingle.pool.disposer.accept(value);
        } catch (Throwable e) {
            // make action configurable
            RxJavaPlugins.onError(e);
        } finally {
            value = null;
            checking = false;
        }
    }

    /** Installs a fresh value, clears the releasing flag and stamps the check time. */
    public void setValueAndClearReleasingFlag(T value) {
        this.value = value;
        this.releasing = false;
        this.lastCheckTime = now();
    }

    /**
     * (Re)schedules this member to be queued for release after the pool's idle
     * timeout; a previously scheduled release is cancelled first. No-op when
     * maxIdleTimeMs is not positive.
     */
    void scheduleRelease() {
        if (scheduled != null) {
            scheduled.dispose();
            log.debug("cancelled scheduled release of {}", this);
        }
        long maxIdleTimeMs = memberSingle.pool.maxIdleTimeMs;
        if (maxIdleTimeMs > 0) {
            // TODO make `this` runnable to save lambda allocation
            scheduled = memberSingle.pool.scheduler //
                    .scheduleDirect( //
                            () -> memberSingle.addToBeReleased(this), //
                            maxIdleTimeMs, //
                            TimeUnit.MILLISECONDS);
            log.debug("scheduled release in {}ms of {}", maxIdleTimeMs, this);
        }
    }

    @Override
    public String toString() {
        return "DecoratingMember [value=" + value + "]";
    }

    /** Marks the health check as finished and stamps the check time. */
    public void markAsChecked() {
        checking = false;
        lastCheckTime = now();
    }

    // Current time in milliseconds according to the pool's scheduler.
    private long now() {
        return memberSingle.pool.scheduler.now(TimeUnit.MILLISECONDS);
    }

    public long lastCheckTime() {
        return lastCheckTime;
    }

}
| 1,507 |
6,443 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.catalina.realm;
import java.security.NoSuchAlgorithmException;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for {@code SecretKeyCredentialHandler}: a credential generated with
 * one handler must (or, for degenerate parameters, must not) be verifiable
 * by a second handler configured with only the algorithm name.
 */
public class TestSecretKeyCredentialHandler {

    private static final String[] ALGORITHMS = { "PBKDF2WithHmacSHA1", "PBEWithMD5AndDES" };
    private static final String[] PASSWORDS = { "password", "$!&#%!%@$#@*^$%&%%#!!*%$%&#@!^" };
    private static final int[] KEYLENGTHS = { 8, 111, 256 };
    private static final int[] SALTLENGTHS = { 1, 7, 12, 20 };
    private static final int[] ITERATIONS = { 1, 2111, 10000 };

    /** Exercises the full cross-product of valid parameter combinations. */
    @Test
    public void testGeneral() throws Exception {
        for (String digest : ALGORITHMS) {
            for (String password : PASSWORDS) {
                for (int saltLength : SALTLENGTHS) {
                    for (int iterations : ITERATIONS) {
                        for (int keyLength : KEYLENGTHS) {
                            doTest(password, digest, saltLength, iterations, keyLength, true);
                        }
                    }
                }
            }
        }
    }

    // Degenerate parameters: verification is expected to fail.
    @Test
    public void testZeroSalt() throws NoSuchAlgorithmException {
        doTest(PASSWORDS[0], ALGORITHMS[0], 0, ITERATIONS[0], KEYLENGTHS[0], false);
    }

    @Test
    public void testZeroIterations() throws NoSuchAlgorithmException {
        doTest(PASSWORDS[0], ALGORITHMS[0], SALTLENGTHS[0], 0, KEYLENGTHS[0], false);
    }

    @Test
    public void testZeroKeyLength() throws NoSuchAlgorithmException {
        doTest(PASSWORDS[0], ALGORITHMS[0], SALTLENGTHS[0], ITERATIONS[0], 0, false);
    }

    /**
     * Generates a credential with one handler, then verifies it with a second
     * handler that knows only the algorithm, asserting the expected outcome.
     * The match/no-match branches previously duplicated the assertion message;
     * a single assertEquals now covers both cases.
     */
    private void doTest(String password, String digest, int saltLength, int iterations,
            int keyLength, boolean expectMatch) throws NoSuchAlgorithmException {
        SecretKeyCredentialHandler pbech = new SecretKeyCredentialHandler();
        SecretKeyCredentialHandler verifier = new SecretKeyCredentialHandler();
        pbech.setAlgorithm(digest);
        pbech.setIterations(iterations);
        pbech.setSaltLength(saltLength);
        pbech.setKeyLength(keyLength);
        verifier.setAlgorithm(digest);
        String storedCredential = pbech.mutate(password);
        String message = "[" + digest + "] [" + saltLength + "] [" + iterations + "] [" + keyLength
                + "] [" + password + "] [" + storedCredential + "]";
        Assert.assertEquals(message, Boolean.valueOf(expectMatch),
                Boolean.valueOf(verifier.matches(password, storedCredential)));
    }
}
| 1,454 |
package org.aion.p2p.impl.comm;
import java.util.HashSet;
import java.util.Set;
/** @author chris */
/**
 * P2P wire-protocol action codes plus a filter that whitelists the actions
 * this layer is willing to dispatch.
 *
 * @author chris
 */
public final class Act {

    public static final byte DISCONNECT = 0;
    public static final byte REQ_HANDSHAKE = 1;
    public static final byte RES_HANDSHAKE = 2;
    public static final byte PING = 3;
    public static final byte PONG = 4;
    public static final byte REQ_ACTIVE_NODES = 5;
    public static final byte RES_ACTIVE_NODES = 6;
    public static final byte UNKNOWN = Byte.MAX_VALUE;

    // Immutable whitelist of dispatchable actions. Set.of replaces the former
    // non-final HashSet built with double-brace initialization, which pinned
    // an anonymous subclass and permitted accidental mutation. DISCONNECT,
    // PING and PONG are intentionally absent and therefore filter to UNKNOWN.
    private static final Set<Byte> ACTIVE =
            Set.of(REQ_HANDSHAKE, RES_HANDSHAKE, REQ_ACTIVE_NODES, RES_ACTIVE_NODES);

    /**
     * @param _act byte
     * @return byte method provided to filter any decoded p2p action (byte)
     */
    public static byte filter(byte _act) {
        return ACTIVE.contains(_act) ? _act : UNKNOWN;
    }
}
| 471 |
484 | <reponame>Shubodh/Kimera-Semantics<filename>kimera_semantics/include/kimera_semantics/semantic_tsdf_integrator_fast.h
// NOTE: Most code is derived from voxblox: github.com/ethz-asl/voxblox
// Copyright (c) 2016, ETHZ ASL
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of voxblox nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/**
* @file semantic_tsdf_integrator.h
* @brief Integrator of semantic and geometric information
* @author <NAME>
*/
#pragma once
#include <Eigen/Core>
#include <voxblox/integrator/tsdf_integrator.h>
#include "kimera_semantics/common.h"
#include "kimera_semantics/semantic_integrator_base.h"
#include "kimera_semantics/semantic_voxel.h"
namespace kimera {
/**
* @class FastSemanticTsdfIntegrator
* An integrator that prioritizes speed over everything else. Rays are cast from
* the pointcloud to the sensor origin. If a ray intersects
* max_consecutive_ray_collisions voxels in a row that have already been updated
* by other rays from the same cloud, it is terminated early. This results in a
* large reduction in the number of freespace updates and greatly improves
* runtime while ensuring all voxels receive at least a minimum number of
* updates. Speed is further enhanced through limiting the number of rays cast
* from each voxel as set by start_voxel_subsampling_factor and use of the
* ApproxHashSet. Up to an order of magnitude faster then the other integrators
* for small voxels.
*/
class FastSemanticTsdfIntegrator : public vxb::TsdfIntegratorBase,
public SemanticIntegratorBase {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// Both layers are borrowed, not owned; they must outlive this integrator.
FastSemanticTsdfIntegrator(const Config& config,
const SemanticConfig& semantic_config,
vxb::Layer<vxb::TsdfVoxel>* tsdf_layer,
vxb::Layer<SemanticVoxel>* semantic_layer);
virtual ~FastSemanticTsdfIntegrator() = default;
// Per-thread worker: pulls point indices from index_getter and integrates
// TSDF plus semantic-label information along each ray.
virtual void integrateSemanticFunction(const vxb::Transformation& T_G_C,
const vxb::Pointcloud& points_C,
const vxb::Colors& colors,
const SemanticLabels& semantic_labels,
const bool freespace_points,
vxb::ThreadSafeIndex* index_getter);
// TsdfIntegratorBase entry point; NOTE(review): presumably dispatches to
// integrateSemanticFunction across threads — confirm in the .cpp.
virtual void integratePointCloud(
const vxb::Transformation& T_G_C,
const vxb::Pointcloud& points_C,
const vxb::Colors& colors,
const bool freespace_points = false) override;
private:
// Everything below is basically taken directly from Voxblox fast approach.
/**
* Two approximate sets are used below. The limitations of these sets are
* outlined in approx_hash_array.h, but in brief they are thread safe and very
* fast, but have a small chance of returning false positives and false
* negatives. As rejecting a ray or integrating an uninformative ray are not
* very harmful operations this trade-off works well in this integrator.
*/
/**
* uses 2^20 bytes (8 megabytes) of ram per tester
* A testers false negative rate is inversely proportional to its size
*/
static constexpr size_t masked_bits_ = 20;
/**
* only needs to zero the above 8mb of memory once every 10,000 scans
* (uses an additional 80,000 bytes)
*/
static constexpr size_t full_reset_threshold_ = 10000;
/**
* Voxel start locations are added to this set before ray casting. The ray
* casting only occurs if no ray has been cast from this location for this
* scan.
*/
vxb::ApproxHashSet<masked_bits_,
full_reset_threshold_,
vxb::GlobalIndex,
vxb::LongIndexHash>
start_voxel_approx_set_;
/**
* This set records which voxels a scans rays have passed through. If a ray
* moves through max_consecutive_ray_collisions voxels in a row that have
* already been seen this scan, it is deemed to be adding no new information
* and the casting stops.
*/
vxb::ApproxHashSet<masked_bits_,
full_reset_threshold_,
vxb::GlobalIndex,
vxb::LongIndexHash>
voxel_observed_approx_set_;
/// Used in terminating the integration early if it exceeds a time limit.
std::chrono::time_point<std::chrono::steady_clock> integration_start_time_;
};
} // Namespace kimera
| 2,094 |
2,542 | <filename>src/prod/src/Hosting2/NetworkInventoryAgent.cpp
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include "../Management/NetworkInventoryManager/common/NIMCommon.h"
using namespace std;
using namespace Common;
using namespace Transport;
using namespace ServiceModel;
using namespace Federation;
using namespace Hosting2;
using namespace Reliability;
using namespace Management::NetworkInventoryManager;
StringLiteral const TraceType("NetworkInventoryAgent");
// ********************************************************************************************************************
// NetworkInventoryAgent::OpenAsyncOperation Implementation
//
// Open is effectively synchronous: it registers the agent's federation
// message handlers and completes immediately with Success.
class NetworkInventoryAgent::OpenAsyncOperation :
public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(OpenAsyncOperation)
public:
OpenAsyncOperation(
NetworkInventoryAgent & owner,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
timeoutHelper_(timeout)
{
}
virtual ~OpenAsyncOperation()
{
}
// Completes the operation and surfaces its final error code.
static ErrorCode End(AsyncOperationSPtr const & operation)
{
auto thisPtr = AsyncOperation::End<OpenAsyncOperation>(operation);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
WriteNoise(TraceType, owner_.Root.TraceId, "Opening NetworkInventoryAgent: Timeout={0}",
timeoutHelper_.GetRemainingTime());
// Registration itself is synchronous, so complete right away.
owner_.RegisterRequestHandler();
TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::Success));
}
private:
NetworkInventoryAgent & owner_;
TimeoutHelper timeoutHelper_;
};
// ********************************************************************************************************************
// NetworkInventoryAgent::CloseAsyncOperation Implementation
//
// Close is effectively synchronous: it unregisters the agent's federation
// message handlers and completes immediately with Success.
class NetworkInventoryAgent::CloseAsyncOperation :
    public AsyncOperation,
    TextTraceComponent<TraceTaskCodes::Hosting>
{
    DENY_COPY(CloseAsyncOperation)

public:
    CloseAsyncOperation(
        NetworkInventoryAgent & owner,
        TimeSpan const timeout,
        AsyncCallback const & callback,
        AsyncOperationSPtr const & parent)
        : AsyncOperation(callback, parent),
        owner_(owner),
        timeoutHelper_(timeout)
    {
    }

    virtual ~CloseAsyncOperation()
    {
    }

    // Completes the operation and surfaces its final error code.
    static ErrorCode End(AsyncOperationSPtr const & operation)
    {
        auto thisPtr = AsyncOperation::End<CloseAsyncOperation>(operation);
        return thisPtr->Error;
    }

protected:
    void OnStart(AsyncOperationSPtr const & thisSPtr)
    {
        // Removed a stale UNREFERENCED_PARAMETER(thisSPtr): the parameter is
        // in fact used below to complete the operation. Also dropped two
        // atomic counters that were initialized but never read or written.
        WriteNoise(TraceType, owner_.Root.TraceId, "Closing NetworkInventoryAgent: Timeout={0}",
            timeoutHelper_.GetRemainingTime());
        owner_.UnregisterRequestHandler();
        TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::Success));
    }

private:
    NetworkInventoryAgent & owner_;
    TimeoutHelper timeoutHelper_;
};
// Send a TRequest message to NIM service and get the TReply.
template <class TRequest, class TReply>
class NetworkInventoryAgent::NISRequestAsyncOperation : public AsyncOperation
{
public:
NISRequestAsyncOperation(
__in NetworkInventoryAgent & owner,
TRequest const & requestMessageBody,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
timeoutHelper_(timeout),
requestMessageBody_(requestMessageBody)
{
}
static ErrorCode End(AsyncOperationSPtr const & operation, __out TReply & reply)
{
auto thisPtr = AsyncOperation::End<NISRequestAsyncOperation>(operation);
reply = move(thisPtr->reply_);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
WriteNoise(TraceType, owner_.Root.TraceId, "NetworkInventoryAgent: Sending message to NIM service. {0}",
timeoutHelper_.GetRemainingTime());
MessageUPtr request = CreateMessageRequest();
auto operation = owner_.reliability_.FederationWrapperBase.BeginRequestToFM(move(request),
timeoutHelper_.GetRemainingTime(),
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { OnRequestCompleted(operation, false); },
thisSPtr);
OnRequestCompleted(operation, true);
}
void OnRequestCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
MessageUPtr message;
ErrorCode error(ErrorCodeValue::Success);
error = owner_.reliability_.FederationWrapperBase.EndRequestToFM(operation, message);
if (!error.IsSuccess())
{
WriteWarning(TraceType, owner_.Root.TraceId, "End(NISRequestAsyncOperation): ErrorCode={0}", error);
TryComplete(operation->Parent, error);
return;
}
if (!message->GetBody(reply_))
{
error = ErrorCodeValue::InvalidMessage;
WriteWarning(TraceType, owner_.Root.TraceId, "GetBody<TReply> failed: Message={0}, ErrorCode={1}",
*message, error);
TryComplete(operation->Parent, error);
return;
}
TryComplete(operation->Parent, error);
}
private:
MessageUPtr CreateMessageRequest()
{
MessageUPtr message = make_unique<Transport::Message>(requestMessageBody_);
message->Headers.Add(ActorHeader(Actor::NetworkInventoryService));
message->Headers.Add(ActionHeader(TRequest::ActionName));
WriteInfo(TraceType, owner_.Root.TraceId, "NetworkInventoryAgent:CreateMessageRequest: Message={0}, Body={1}",
*message, requestMessageBody_);
return move(message);
}
TimeoutHelper timeoutHelper_;
NetworkInventoryAgent & owner_;
TRequest const & requestMessageBody_;
TReply reply_;
};
// Variant of NISRequestAsyncOperation where message construction is delegated
// to a TMessageBody factory (e.g. NIMMessage) instead of stamping headers here.
template <class TRequest, class TReply, class TMessageBody>
class NetworkInventoryAgent::NISRequestAsyncOperationApi : public AsyncOperation
{
public:
    NISRequestAsyncOperationApi(
        __in NetworkInventoryAgent & owner,
        TMessageBody const & nimMessage,
        TRequest const & requestMessageBody,
        TimeSpan const timeout,
        AsyncCallback const & callback,
        AsyncOperationSPtr const & parent)
        : AsyncOperation(callback, parent),
        // Initializer order now matches the member declaration order below,
        // silencing -Wreorder and documenting actual construction order.
        timeoutHelper_(timeout),
        owner_(owner),
        requestMessageBody_(requestMessageBody),
        nimMessage_(nimMessage)
    {
    }

    // Completes the operation, moving out the deserialized reply.
    static ErrorCode End(AsyncOperationSPtr const & operation,
        __out TReply & reply)
    {
        auto thisPtr = AsyncOperation::End<NISRequestAsyncOperationApi>(operation);
        reply = move(thisPtr->reply_);
        return thisPtr->Error;
    }

protected:
    void OnStart(AsyncOperationSPtr const & thisSPtr)
    {
        WriteNoise(TraceType, owner_.Root.TraceId, "NetworkInventoryAgent: Sending message to NIM service. {0}",
            timeoutHelper_.GetRemainingTime());
        MessageUPtr request = CreateMessageRequest();
        auto operation = owner_.reliability_.FederationWrapperBase.BeginRequestToFM(move(request),
            timeoutHelper_.GetRemainingTime(),
            timeoutHelper_.GetRemainingTime(),
            [this](AsyncOperationSPtr const & operation) { OnRequestCompleted(operation, false); },
            thisSPtr);
        OnRequestCompleted(operation, true);
    }

    // Handles both the synchronous and asynchronous completion paths; only
    // the path matching expectedCompletedSynchronously proceeds.
    void OnRequestCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
    {
        if (operation->CompletedSynchronously != expectedCompletedSynchronously)
        {
            return;
        }
        MessageUPtr message;
        ErrorCode error(ErrorCodeValue::Success);
        error = owner_.reliability_.FederationWrapperBase.EndRequestToFM(operation, message);
        if (!error.IsSuccess())
        {
            WriteWarning(TraceType, owner_.Root.TraceId, "End(NISRequestAsyncOperationApi): ErrorCode={0}", error);
            TryComplete(operation->Parent, error);
            return;
        }
        if (!message->GetBody(reply_))
        {
            error = ErrorCodeValue::InvalidMessage;
            WriteWarning(TraceType, owner_.Root.TraceId, "GetBody<TReply> failed: Message={0}, ErrorCode={1}",
                *message, error);
            TryComplete(operation->Parent, error);
            return;
        }
        TryComplete(operation->Parent, error);
    }

private:
    // Delegates message construction (including headers) to the factory body.
    MessageUPtr CreateMessageRequest()
    {
        return nimMessage_.CreateMessage<TRequest>(requestMessageBody_);
    }

    TimeoutHelper timeoutHelper_;
    NetworkInventoryAgent & owner_;
    TRequest const & requestMessageBody_;
    TReply reply_;
    TMessageBody nimMessage_;
};
// ********************************************************************************************************************
// NetworkInventoryAgent Implementation
//
// Constructs the agent. The federation and reliability subsystems are held
// by reference and must outlive this object; hosting is held via shared ptr.
NetworkInventoryAgent::NetworkInventoryAgent(
Common::ComponentRoot const & root,
__in Federation::FederationSubsystem & federation,
__in IReliabilitySubsystem & reliability,
__in IHostingSubsystemSPtr hosting) :
RootedObject(root),
federation_(federation),
reliability_(reliability),
hosting_(hosting)
{
}
// Destructor only traces; handler unregistration happens in Close/Abort.
NetworkInventoryAgent::~NetworkInventoryAgent()
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent.destructor");
}
// Begins opening the agent; see OpenAsyncOperation (registers handlers and
// completes synchronously).
AsyncOperationSPtr NetworkInventoryAgent::OnBeginOpen(
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<OpenAsyncOperation>(
*this,
timeout,
callback,
parent);
}
// Completes the open operation started by OnBeginOpen.
ErrorCode NetworkInventoryAgent::OnEndOpen(AsyncOperationSPtr const & operation)
{
return OpenAsyncOperation::End(operation);
}
// Begins closing the agent; see CloseAsyncOperation (unregisters handlers
// and completes synchronously).
AsyncOperationSPtr NetworkInventoryAgent::OnBeginClose(
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<CloseAsyncOperation>(
*this,
timeout,
callback,
parent);
}
// Completes the close operation started by OnBeginClose.
ErrorCode NetworkInventoryAgent::OnEndClose(AsyncOperationSPtr const & operation)
{
return CloseAsyncOperation::End(operation);
}
// Abort path: synchronously tear down the message handler registration.
void NetworkInventoryAgent::OnAbort()
{
WriteInfo(TraceType, Root.TraceId, "Aborting NetworkInventoryAgent");
UnregisterRequestHandler();
}
// Registers this agent as the federation handler for the
// NetworkInventoryAgent actor. One-way messages are not supported and are
// rejected; request-reply messages are routed to ProcessMessage.
void NetworkInventoryAgent::RegisterRequestHandler()
{
// Register the one way and request-reply message handlers
federation_.RegisterMessageHandler(
Actor::NetworkInventoryAgent,
[this] (MessageUPtr & message, OneWayReceiverContextUPtr & oneWayReceiverContext)
{
WriteError(TraceType, "{0} received a oneway message for NetworkInventoryAgent: {1}",
Root.TraceId,
*message);
oneWayReceiverContext->Reject(ErrorCodeValue::InvalidMessage);
},
[this] (Transport::MessageUPtr & message, RequestReceiverContextUPtr & requestReceiverContext)
{
this->ProcessMessage(message, requestReceiverContext);
},
false /*dispatchOnTransportThread*/);
WriteInfo(TraceType, Root.TraceId, "NetworkInventoryAgent::RegisterRequestHandler: registered handler for Actor::NetworkInventoryAgent: {0}",
Actor::NetworkInventoryAgent);
}
// Removes the federation handler installed by RegisterRequestHandler.
void NetworkInventoryAgent::UnregisterRequestHandler()
{
federation_.UnRegisterMessageHandler(Actor::NetworkInventoryAgent);
WriteInfo(TraceType, Root.TraceId, "NetworkInventoryAgent::UnregisterRequestHandler: unregistered handler for Actor::NetworkInventoryAgent");
}
// Dispatches an incoming request-reply message by its action string.
// Unknown actions are logged and dropped without a reply.
void NetworkInventoryAgent::ProcessMessage(
    __in Transport::MessageUPtr & message,
    __in Federation::RequestReceiverContextUPtr & context)
{
    wstring const & action = message->Action;
    if (action == PublishNetworkTablesRequestMessage::ActionName)
    {
        this->ProcessNetworkMappingTables(message, context);
    }
    else
    {
        // Dereference the MessageUPtr so the message itself (not the smart
        // pointer) is traced, consistent with the other trace call sites.
        WriteWarning(TraceType, Root.TraceId, "Dropping unsupported message: {0}", *message);
    }
}
#pragma region Message Processing
// ********************************************************************************************************************
// NetworkInventoryAgent Message Processing
//
// Handles a PublishNetworkTables request: deserializes the table payload,
// forwards it to the activator client to update host routes, and replies to
// the sender with the resulting error code.
void NetworkInventoryAgent::ProcessNetworkMappingTables(
__in Transport::MessageUPtr & message,
__in Federation::RequestReceiverContextUPtr & context)
{
PublishNetworkTablesRequestMessage requestBody;
if (!message->GetBody<PublishNetworkTablesRequestMessage>(requestBody))
{
auto error = ErrorCode::FromNtStatus(message->Status);
// NOTE(review): other call sites trace *message rather than the
// MessageUPtr itself — confirm whether this should be *message.
// No reply is sent on deserialization failure; the request times out.
WriteWarning(TraceType, Root.TraceId, "GetBody<PublishNetworkTablesRequestMessage> failed: Message={0}, ErrorCode={1}",
message,
error);
return;
}
WriteNoise(TraceType, Root.TraceId, "Processing ProcessNetworkMappingTables: network={0} {1}, id={2}",
requestBody.NetworkName,
requestBody.InstanceID,
requestBody.SequenceNumber);
// The receiver context is released into the completion lambda as a raw
// pointer so the reply can be sent after the async update finishes.
// NOTE(review): ctx is never explicitly deleted here — presumably Reply
// (or the context implementation) takes ownership; verify, otherwise this
// leaks when BeginUpdateRoutes fails to invoke the callback.
auto operation = hosting_->FabricActivatorClientObj->BeginUpdateRoutes(
requestBody,
HostingConfig::GetConfig().RequestTimeout,
[this, ctx = context.release()](AsyncOperationSPtr const & operation)
{
auto error = hosting_->FabricActivatorClientObj->EndUpdateRoutes(operation);
WriteTrace(
error.ToLogLevel(),
TraceType,
Root.TraceId,
"End(ProcessNetworkMappingTablesCompleted): ErrorCode={0}",
error);
NetworkErrorCodeResponseMessage inReply(0, error);
Transport::MessageUPtr replyMsg = Common::make_unique<Transport::Message>(inReply);
ctx->Reply(move(replyMsg));
},
Root.CreateAsyncOperationRoot());
}
// Starts an async request to the NIM service asking for a network
// allocation; the reply carries the allocation response.
AsyncOperationSPtr NetworkInventoryAgent::BeginSendAllocationRequestMessage(
NetworkAllocationRequestMessage const & params,
TimeSpan const timeout,
AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendAllocationRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperation<NetworkAllocationRequestMessage, NetworkAllocationResponseMessage> >(
*this, params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the allocation request and extracts the response message.
ErrorCode NetworkInventoryAgent::EndSendAllocationRequestMessage(AsyncOperationSPtr const & operation,
__out NetworkAllocationResponseMessage & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperation<NetworkAllocationRequestMessage, NetworkAllocationResponseMessage>::End(
operation,
reply);
}
// Starts an async request to the NIM service to deallocate (remove) a
// network; the reply carries only an error code.
AsyncOperationSPtr NetworkInventoryAgent::BeginSendDeallocationRequestMessage(
NetworkRemoveRequestMessage const & params,
TimeSpan const timeout,
AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendDeallocationRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperation<NetworkRemoveRequestMessage, NetworkErrorCodeResponseMessage> >(
*this, params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the deallocation request and extracts the error-code response.
Common::ErrorCode NetworkInventoryAgent::EndSendDeallocationRequestMessage(
Common::AsyncOperationSPtr const & operation,
__out NetworkErrorCodeResponseMessage & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperation<NetworkRemoveRequestMessage, NetworkErrorCodeResponseMessage>::End(
operation,
reply);
}
// Starts an async request asking the NIM service to publish network mapping
// tables; the reply carries only an error code.
Common::AsyncOperationSPtr NetworkInventoryAgent::BeginSendPublishNetworkTablesRequestMessage(
PublishNetworkTablesMessageRequest const & params,
Common::TimeSpan const timeout,
Common::AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendPublishNetworkTablesRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperation<PublishNetworkTablesMessageRequest, NetworkErrorCodeResponseMessage> >(
*this, params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the publish-tables request and extracts the error-code response.
Common::ErrorCode NetworkInventoryAgent::EndSendPublishNetworkTablesRequestMessage(
Common::AsyncOperationSPtr const & operation,
__out NetworkErrorCodeResponseMessage & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperation<PublishNetworkTablesMessageRequest, NetworkErrorCodeResponseMessage>::End(
operation,
reply);
}
// Starts an async create-network request. Unlike the other senders, this
// uses the Api variant so NIMMessage builds the message (and its headers).
AsyncOperationSPtr NetworkInventoryAgent::BeginSendCreateRequestMessage(
CreateNetworkMessageBody const & params,
TimeSpan const timeout,
AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendCreateRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperationApi<CreateNetworkMessageBody, BasicFailoverReplyMessageBody, NIMMessage> >(
*this, NIMMessage::GetCreateNetwork(),
params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the create-network request and extracts the failover reply.
ErrorCode NetworkInventoryAgent::EndSendCreateRequestMessage(AsyncOperationSPtr const & operation,
__out BasicFailoverReplyMessageBody & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperationApi<CreateNetworkMessageBody, BasicFailoverReplyMessageBody, NIMMessage>::End(
operation,
reply);
}
// Starts an async remove-network request; the reply carries only an error
// code.
AsyncOperationSPtr NetworkInventoryAgent::BeginSendRemoveRequestMessage(
NetworkRemoveRequestMessage const & params,
TimeSpan const timeout,
AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendRemoveRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperation<NetworkRemoveRequestMessage, NetworkErrorCodeResponseMessage> >(
*this, params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the remove-network request and extracts the error-code response.
ErrorCode NetworkInventoryAgent::EndSendRemoveRequestMessage(AsyncOperationSPtr const & operation,
__out NetworkErrorCodeResponseMessage & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperation<NetworkRemoveRequestMessage, NetworkErrorCodeResponseMessage>::End(
operation,
reply);
}
// Starts an async request enumerating networks known to the NIM service.
AsyncOperationSPtr NetworkInventoryAgent::BeginSendEnumerateRequestMessage(
NetworkEnumerateRequestMessage const & params,
TimeSpan const timeout,
AsyncCallback const & callback)
{
WriteNoise(TraceType, Root.TraceId, "NetworkInventoryAgent::BeginSendEnumerateRequestMessage: {0}", params);
return AsyncOperation::CreateAndStart<NetworkInventoryAgent::NISRequestAsyncOperation<NetworkEnumerateRequestMessage, NetworkEnumerateResponseMessage> >(
*this, params, timeout, callback,
Root.CreateAsyncOperationRoot());
}
// Completes the enumerate request and extracts the enumeration response.
ErrorCode NetworkInventoryAgent::EndSendEnumerateRequestMessage(AsyncOperationSPtr const & operation,
__out NetworkEnumerateResponseMessage & reply)
{
return NetworkInventoryAgent::NISRequestAsyncOperation<NetworkEnumerateRequestMessage, NetworkEnumerateResponseMessage>::End(
operation,
reply);
}
#pragma endregion
| 6,809 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.