max_stars_count (int64, 301 – 224k) | text (string, lengths 6 – 1.05M) | token_count (int64, 3 – 727k)
---|---|---
703 |
/*
* This source file is part of RmlUi, the HTML/CSS Interface Middleware
*
* For the latest information, see http://github.com/mikke89/RmlUi
*
* Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
* Copyright (c) 2019 The RmlUi Team, and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "LayoutInlineBoxText.h"
#include "LayoutEngine.h"
#include "LayoutLineBox.h"
#include "../../Include/RmlUi/Core/Core.h"
#include "../../Include/RmlUi/Core/ElementText.h"
#include "../../Include/RmlUi/Core/ElementUtilities.h"
#include "../../Include/RmlUi/Core/FontEngineInterface.h"
#include "../../Include/RmlUi/Core/Log.h"
#include "../../Include/RmlUi/Core/Property.h"
#include "../../Include/RmlUi/Core/Profiling.h"
namespace Rml {
LayoutInlineBoxText::LayoutInlineBoxText(ElementText* element, int _line_begin) : LayoutInlineBox(static_cast<Element*>(element), Box())
{
line_begin = _line_begin;
// Build the box to represent the dimensions of the first word.
BuildWordBox();
}
LayoutInlineBoxText::~LayoutInlineBoxText()
{
}
// Returns true if this box is capable of overflowing, or false if it must be rendered on a single line.
bool LayoutInlineBoxText::CanOverflow() const
{
return line_segmented;
}
// Flows the inline box's content into its parent line.
UniquePtr<LayoutInlineBox> LayoutInlineBoxText::FlowContent(bool first_box, float available_width, float right_spacing_width)
{
ElementText* text_element = GetTextElement();
RMLUI_ASSERT(text_element != nullptr);
int line_length;
float line_width;
bool overflow = !text_element->GenerateLine(line_contents, line_length, line_width, line_begin, available_width, right_spacing_width, first_box, true);
Vector2f content_area;
content_area.x = line_width;
content_area.y = box.GetSize().y;
box.SetContent(content_area);
// Call the base-class's FlowContent() to increment the width of our parent's box.
LayoutInlineBox::FlowContent(first_box, available_width, right_spacing_width);
if (overflow)
return MakeUnique<LayoutInlineBoxText>(GetTextElement(), line_begin + line_length);
return nullptr;
}
// Calculates this box's ascender and descender from its height and baseline.
void LayoutInlineBoxText::CalculateBaseline(float& ascender, float& descender)
{
ascender = height - baseline;
descender = height - ascender;
}
// Offsets the baseline of this box, and all of its children, by the ascender of the parent line box.
void LayoutInlineBoxText::OffsetBaseline(float ascender)
{
// Offset by the ascender.
position.y += (ascender - (height - baseline));
// Calculate the leading (the difference between font height and line height).
float leading = 0;
FontFaceHandle font_face_handle = element->GetFontFaceHandle();
if (font_face_handle != 0)
leading = height - GetFontEngineInterface()->GetLineHeight(font_face_handle);
// Offset by the half-leading.
position.y += leading * 0.5f;
}
// Positions the inline box's element.
void LayoutInlineBoxText::PositionElement()
{
if (line_begin == 0)
{
LayoutInlineBox::PositionElement();
GetTextElement()->ClearLines();
GetTextElement()->AddLine(Vector2f(0, 0), line_contents);
}
else
{
GetTextElement()->AddLine(line->GetRelativePosition() + position - element->GetRelativeOffset(Box::BORDER), line_contents);
}
}
// Sizes the inline box's element.
void LayoutInlineBoxText::SizeElement(bool RMLUI_UNUSED_PARAMETER(split))
{
RMLUI_UNUSED(split);
}
void* LayoutInlineBoxText::operator new(size_t size)
{
return LayoutEngine::AllocateLayoutChunk(size);
}
void LayoutInlineBoxText::operator delete(void* chunk, size_t size)
{
LayoutEngine::DeallocateLayoutChunk(chunk, size);
}
// Returns the box's element as a text element.
ElementText* LayoutInlineBoxText::GetTextElement()
{
RMLUI_ASSERT(rmlui_dynamic_cast<ElementText*>(element));
	return static_cast<ElementText*>(element);
}
// Builds a box for the first word of the element.
void LayoutInlineBoxText::BuildWordBox()
{
RMLUI_ZoneScoped;
ElementText* text_element = GetTextElement();
RMLUI_ASSERT(text_element != nullptr);
FontFaceHandle font_face_handle = text_element->GetFontFaceHandle();
if (font_face_handle == 0)
{
height = 0;
baseline = 0;
Log::Message(Log::LT_WARNING, "No font face defined on element %s. Please specify a font-family in your RCSS, otherwise make sure Context::Update is run after new elements are constructed, before Context::Render.", text_element->GetAddress().c_str());
return;
}
Vector2f content_area;
line_segmented = !text_element->GenerateToken(content_area.x, line_begin);
content_area.y = text_element->GetLineHeight();
box.SetContent(content_area);
}
} // namespace Rml
| 1,822 |
529 |
<gh_stars>100-1000
from enum import Enum
from typing import Any, Dict, List, Tuple, Union
SQL = str
SQLWithParams = Tuple[str, Union[Tuple[Any, ...], Dict[str, Any]]]
class StrEnum(str, Enum):
@classmethod
def all(cls) -> List["PostgresPartitioningMethod"]:
return [choice for choice in cls]
@classmethod
def values(cls) -> List[str]:
return [choice.value for choice in cls]
def __str__(self) -> str:
return str(self.value)
class ConflictAction(Enum):
"""Possible actions to take on a conflict."""
NOTHING = "NOTHING"
UPDATE = "UPDATE"
@classmethod
def all(cls) -> List["ConflictAction"]:
return [choice for choice in cls]
class PostgresPartitioningMethod(StrEnum):
"""Methods of partitioning supported by PostgreSQL 11.x native support for
table partitioning."""
RANGE = "range"
LIST = "list"
| 339 |
7,451 |
from functools import partial
from importlib import import_module
def import_string(dotted_path, dotted_attributes=None):
"""
Import a dotted module path and return the attribute/class designated by the
    last name in the path. When a dotted attribute path is also provided, it is
    applied to the attribute/class retrieved in the first step, and the value
    designated by that attribute path is returned. Raise ImportError if the
    import failed.
"""
try:
module_path, class_name = dotted_path.rsplit(".", 1)
except ValueError:
raise ImportError("%s doesn't look like a module path" % dotted_path)
module = import_module(module_path)
try:
result = getattr(module, class_name)
except AttributeError:
raise ImportError(
'Module "%s" does not define a "%s" attribute/class'
% (module_path, class_name)
)
if not dotted_attributes:
return result
else:
attributes = dotted_attributes.split(".")
traveled_attributes = []
try:
for attribute in attributes:
traveled_attributes.append(attribute)
result = getattr(result, attribute)
return result
except AttributeError:
raise ImportError(
'Module "%s" does not define a "%s" attribute inside attribute/class "%s"'
% (module_path, ".".join(traveled_attributes), class_name)
)
def lazy_import(dotted_path, dotted_attributes=None):
return partial(import_string, dotted_path, dotted_attributes)
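# A short usage sketch (illustrative; the dotted paths are ordinary stdlib names):
if __name__ == "__main__":
    json_dumps = import_string("json.dumps")
    assert import_string("os.path", "sep") == import_string("os.path").sep
    lazy = lazy_import("json.dumps")  # nothing new is imported until the partial is called
    assert lazy() is json_dumps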
| 621 |
317 |
#include <iostream>
#include <fstream>
#include <iterator>
#include <opencv2/opencv.hpp>
#include <json/json.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "NavGraph.hpp"
namespace mattersim {
NavGraph::Location::Location(const Json::Value& viewpoint, const std::string& skyboxDir,
bool preload, bool depth): skyboxDir(skyboxDir), im_loaded(false),
includeDepth(depth), cubemap_texture(0), depth_texture(0) {
viewpointId = viewpoint["image_id"].asString();
included = viewpoint["included"].asBool();
float posearr[16];
int i = 0;
for (auto f : viewpoint["pose"]) {
posearr[i++] = f.asFloat();
}
// glm uses column-major order. Inputs are in row-major order.
rot = glm::transpose(glm::make_mat4(posearr));
// glm access is col,row
pos = glm::vec3{rot[3][0], rot[3][1], rot[3][2]};
rot[3] = {0,0,0,1}; // remove translation component
for (auto u : viewpoint["unobstructed"]) {
unobstructed.push_back(u.asBool());
}
if (preload) {
// Preload skybox images
loadCubemapImages();
}
};
void NavGraph::Location::loadCubemapImages() {
cv::Mat rgb = cv::imread(skyboxDir + viewpointId + "_skybox_small.jpg");
int w = rgb.cols/6;
int h = rgb.rows;
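    // The small skybox image is a horizontal strip of six square faces; the
    // crops below map the strip's face order onto the GL cubemap faces.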
xpos = rgb(cv::Rect(2*w, 0, w, h));
xneg = rgb(cv::Rect(4*w, 0, w, h));
ypos = rgb(cv::Rect(0*w, 0, w, h));
yneg = rgb(cv::Rect(5*w, 0, w, h));
zpos = rgb(cv::Rect(1*w, 0, w, h));
zneg = rgb(cv::Rect(3*w, 0, w, h));
if (xpos.empty() || xneg.empty() || ypos.empty() || yneg.empty() || zpos.empty() || zneg.empty()) {
throw std::invalid_argument( "MatterSim: Could not open skybox RGB files at: " + skyboxDir + viewpointId + "_skybox_small.jpg");
}
if (includeDepth) {
// 16 bit grayscale images
cv::Mat depth = cv::imread(skyboxDir + viewpointId + "_skybox_depth_small.png", CV_LOAD_IMAGE_ANYDEPTH);
w = depth.cols/6;
h = depth.rows;
xposD = depth(cv::Rect(2*w, 0, w, h));
xnegD = depth(cv::Rect(4*w, 0, w, h));
yposD = depth(cv::Rect(0*w, 0, w, h));
ynegD = depth(cv::Rect(5*w, 0, w, h));
zposD = depth(cv::Rect(1*w, 0, w, h));
znegD = depth(cv::Rect(3*w, 0, w, h));
if (xposD.empty() || xnegD.empty() || yposD.empty() || ynegD.empty() || zposD.empty() || znegD.empty()) {
throw std::invalid_argument( "MatterSim: Could not open skybox depth files at: " + skyboxDir + viewpointId + "_skybox_depth_small.png");
}
}
im_loaded = true;
}
void NavGraph::Location::loadCubemapTextures() {
// RGB texture
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_CUBE_MAP);
glGenTextures(1, &cubemap_texture);
glBindTexture(GL_TEXTURE_CUBE_MAP, cubemap_texture);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
//use fast 4-byte alignment (default anyway) if possible
glPixelStorei(GL_UNPACK_ALIGNMENT, (xneg.step & 3) ? 1 : 4);
//set length of one complete row in data (doesn't need to equal image.cols)
glPixelStorei(GL_UNPACK_ROW_LENGTH, xneg.step/xneg.elemSize());
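    // Faces are square (w == h), so passing (rows, cols) below matches glTexImage2D's (width, height).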
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGB, xpos.rows, xpos.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, xpos.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGB, xneg.rows, xneg.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, xneg.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGB, ypos.rows, ypos.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, ypos.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGB, yneg.rows, yneg.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, yneg.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGB, zpos.rows, zpos.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, zpos.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGB, zneg.rows, zneg.cols, 0, GL_BGR, GL_UNSIGNED_BYTE, zneg.ptr());
assertOpenGLError("RGB texture");
if (includeDepth) {
// Depth Texture
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_CUBE_MAP);
glGenTextures(1, &depth_texture);
glBindTexture(GL_TEXTURE_CUBE_MAP, depth_texture);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
//use fast 4-byte alignment (default anyway) if possible
glPixelStorei(GL_UNPACK_ALIGNMENT, (xnegD.step & 3) ? 1 : 4);
//set length of one complete row in data (doesn't need to equal image.cols)
glPixelStorei(GL_UNPACK_ROW_LENGTH, xnegD.step/xnegD.elemSize());
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RED, xposD.rows, xposD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, xposD.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RED, xnegD.rows, xnegD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, xnegD.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RED, yposD.rows, yposD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, yposD.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RED, ynegD.rows, ynegD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, ynegD.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RED, zposD.rows, zposD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, zposD.ptr());
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RED, znegD.rows, znegD.cols, 0, GL_RED, GL_UNSIGNED_SHORT, znegD.ptr());
assertOpenGLError("Depth texture");
}
}
void NavGraph::Location::deleteCubemapTextures() {
// no need to check existence, silently ignores errors
glDeleteTextures(1, &cubemap_texture);
glDeleteTextures(1, &depth_texture);
cubemap_texture = 0;
depth_texture = 0;
}
std::pair<GLuint, GLuint> NavGraph::Location::cubemapTextures() {
if (glIsTexture(cubemap_texture)){
return {cubemap_texture, depth_texture};
}
if (!im_loaded) {
loadCubemapImages();
}
loadCubemapTextures();
return {cubemap_texture, depth_texture};
}
NavGraph::NavGraph(const std::string& navGraphPath, const std::string& datasetPath,
bool preloadImages, bool renderDepth, int randomSeed, unsigned int cacheSize) : cache(cacheSize) {
generator.seed(randomSeed);
auto textFile = navGraphPath + "/scans.txt";
std::ifstream scansFile(textFile);
if (scansFile.fail()){
throw std::invalid_argument( "MatterSim: Could not open list of scans at: " +
textFile + ", is path valid?" );
}
std::vector<std::string> scanIds;
std::copy(std::istream_iterator<std::string>(scansFile),
std::istream_iterator<std::string>(),
std::back_inserter(scanIds));
#pragma omp parallel for
for (unsigned int i=0; i<scanIds.size(); i++) {
std::string scanId = scanIds.at(i);
Json::Value root;
auto navGraphFile = navGraphPath + "/" + scanId + "_connectivity.json";
std::ifstream ifs(navGraphFile, std::ifstream::in);
if (ifs.fail()){
throw std::invalid_argument( "MatterSim: Could not open navigation graph file: " +
navGraphFile + ", is path valid?" );
}
ifs >> root;
auto skyboxDir = datasetPath + "/" + scanId + "/matterport_skybox_images/";
#pragma omp critical
{
scanLocations.insert(std::pair<std::string,
std::vector<LocationPtr> > (scanId, std::vector<LocationPtr>()));
}
for (auto viewpoint : root) {
Location l(viewpoint, skyboxDir, preloadImages, renderDepth);
#pragma omp critical
{
scanLocations[scanId].push_back(std::make_shared<Location>(l));
}
}
}
}
NavGraph::~NavGraph() {
// free all remaining textures
for (auto scan : scanLocations) {
for (auto loc : scan.second) {
loc->deleteCubemapTextures();
}
}
}
NavGraph& NavGraph::getInstance(const std::string& navGraphPath, const std::string& datasetPath,
bool preloadImages, bool renderDepth, int randomSeed, unsigned int cacheSize){
// magic static
static NavGraph instance(navGraphPath, datasetPath, preloadImages, renderDepth, randomSeed, cacheSize);
return instance;
}
const std::string& NavGraph::randomViewpoint(const std::string& scanId) {
std::uniform_int_distribution<int> distribution(0,scanLocations.at(scanId).size()-1);
int start_ix = distribution(generator); // generates random starting index
int ix = start_ix;
while (!scanLocations.at(scanId).at(ix)->included) { // Don't start at an excluded viewpoint
ix++;
if (ix >= scanLocations.at(scanId).size()) ix = 0;
if (ix == start_ix) {
throw std::logic_error( "MatterSim: ScanId: " + scanId + " has no included viewpoints!");
}
}
return scanLocations.at(scanId).at(ix)->viewpointId;
}
unsigned int NavGraph::index(const std::string& scanId, const std::string& viewpointId) const {
int ix = -1;
for (int i = 0; i < scanLocations.at(scanId).size(); ++i) {
if (scanLocations.at(scanId).at(i)->viewpointId == viewpointId) {
if (!scanLocations.at(scanId).at(i)->included) {
throw std::invalid_argument( "MatterSim: ViewpointId: " +
viewpointId + ", is excluded from the connectivity graph." );
}
ix = i;
break;
}
}
if (ix < 0) {
throw std::invalid_argument( "MatterSim: Could not find viewpointId: " +
viewpointId + ", is viewpoint id valid?" );
} else {
return ix;
}
}
const std::string& NavGraph::viewpoint(const std::string& scanId, unsigned int ix) const {
return scanLocations.at(scanId).at(ix)->viewpointId;
}
const glm::mat4& NavGraph::cameraRotation(const std::string& scanId, unsigned int ix) const {
return scanLocations.at(scanId).at(ix)->rot;
}
const glm::vec3& NavGraph::cameraPosition(const std::string& scanId, unsigned int ix) const {
return scanLocations.at(scanId).at(ix)->pos;
}
std::vector<unsigned int> NavGraph::adjacentViewpointIndices(const std::string& scanId, unsigned int ix) const {
std::vector<unsigned int> reachable;
for (unsigned int i = 0; i < scanLocations.at(scanId).size(); ++i) {
if (i == ix) {
// Skip option to stay at the same viewpoint
continue;
}
if (scanLocations.at(scanId).at(ix)->unobstructed[i] && scanLocations.at(scanId).at(i)->included) {
reachable.push_back(i);
}
}
return reachable;
}
std::pair<GLuint, GLuint> NavGraph::cubemapTextures(const std::string& scanId, unsigned int ix) {
LocationPtr loc = scanLocations.at(scanId).at(ix);
std::pair<GLuint, GLuint> textures = loc->cubemapTextures();
cache.add(loc);
return textures;
}
void NavGraph::deleteCubemapTextures(const std::string& scanId, unsigned int ix) {
scanLocations.at(scanId).at(ix)->deleteCubemapTextures();
}
}
| 5,184 |
4,205 |
<reponame>EMellau/reactor-core<filename>reactor-core/src/test/java/reactor/test/MockUtils.java
/*
* Copyright (c) 2016-2021 VMware Inc. or its affiliates, All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactor.test;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import reactor.core.Fuseable;
import reactor.core.Scannable;
import reactor.core.publisher.ConnectableFlux;
/**
* Test utilities that helps with mocking.
*
* @author <NAME>
*/
public class MockUtils {
/**
* An abstract class that can be used to mock a {@link Scannable} {@link ConnectableFlux}.
*/
public static abstract class TestScannableConnectableFlux<T>
extends ConnectableFlux<T>
implements Scannable { }
/**
* An interface that can be used to mock a {@link Scannable}
* {@link reactor.core.Fuseable.ConditionalSubscriber}.
*/
public interface TestScannableConditionalSubscriber<T>
extends Fuseable.ConditionalSubscriber<T>,
Scannable { }
/**
* A {@link Clock} that can be manipulated, to be used in tests.
*/
public static final class VirtualClock extends Clock {
private Instant instant;
private final ZoneId zone;
public VirtualClock(Instant initialInstant, ZoneId zone) {
this.instant = initialInstant;
this.zone = zone;
}
public VirtualClock() {
this(Instant.EPOCH, ZoneId.systemDefault());
}
public void setInstant(Instant newFixedInstant) {
this.instant = newFixedInstant;
}
public void advanceTimeBy(Duration duration) {
this.instant = this.instant.plus(duration);
}
@Override
public ZoneId getZone() {
return zone;
}
@Override
public Clock withZone(ZoneId zone) {
if (zone.equals(this.zone)) { // intentional NPE
return this;
}
return new VirtualClock(instant, zone);
}
@Override
public long millis() {
return instant.toEpochMilli();
}
@Override
public Instant instant() {
return instant;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof VirtualClock) {
VirtualClock other = (VirtualClock) obj;
return instant.equals(other.instant) && zone.equals(other.zone);
}
return false;
}
@Override
public int hashCode() {
return instant.hashCode() ^ zone.hashCode();
}
@Override
public String toString() {
return "VirtualClock[" + instant + "," + zone + "]";
}
}
}
| 993 |
694 |
<filename>app/src/main/java/fuzion24/device/vulnerability/vulnerabilities/system/SamsungCREDzip.java
package fuzion24.device.vulnerability.vulnerabilities.system;
import android.content.Context;
import android.content.pm.PackageManager;
import android.content.res.AssetManager;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.Thread;
import java.util.ArrayList;
import java.util.List;
import android.os.Build;
import fuzion24.device.vulnerability.util.CPUArch;
import fuzion24.device.vulnerability.vulnerabilities.VulnerabilityTest;
public class SamsungCREDzip implements VulnerabilityTest {
private final static int BUFFER_SIZE = 1024;
private final static String DESTINATION = "/sdcard/Download/";
private final static String FILENAME = "cred.zip";
private final static String ASSETNAME = "Samsung_cred.zip";
@Override
public List<CPUArch> getSupportedArchitectures() {
ArrayList<CPUArch> archs = new ArrayList<>();
archs.add(CPUArch.ALL);
return archs;
}
@Override
public String getCVEorID() {
return "CVE-2015-7888";
}
private boolean thisHasSDCardPermission(Context ctx)
{
String readPermission = "android.permission.READ_EXTERNAL_STORAGE";
String writePermission = "android.permission.WRITE_EXTERNAL_STORAGE";
return (ctx.checkCallingOrSelfPermission(readPermission) == PackageManager.PERMISSION_GRANTED &&
ctx.checkCallingOrSelfPermission(writePermission) == PackageManager.PERMISSION_GRANTED);
}
private boolean isSamsungPhone(){
return Build.MANUFACTURER.equals("samsung");
}
@Override
public boolean isVulnerable(Context context) throws Exception {
boolean isVuln = false;
if(!isSamsungPhone()) return false;
if(!thisHasSDCardPermission(context))
throw new Exception("No SDCard permission assigned to app to perform Samsung cred.zip remote code execution test");
InputStream in = null;
OutputStream out = null;
try{
AssetManager assetFiles = context.getAssets();
File outFile = new File(DESTINATION, FILENAME);
in = assetFiles.open(ASSETNAME);
out = new FileOutputStream(outFile);
byte[] buffer = new byte[BUFFER_SIZE];
int read;
while((read = in.read(buffer)) != -1){
out.write(buffer, 0, read);
}
Thread.sleep(3000);
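            // CVE-2015-7888: a vulnerable Samsung WifiHs20UtilityService picks up
            // /sdcard/Download/cred.zip and unzips (and removes) it, so the file
            // disappearing after the wait indicates the device is vulnerable.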
            outFile = new File(DESTINATION, FILENAME); // re-check the path after the wait
if(outFile.exists()){
isVuln = false;
outFile.delete();
}else{
isVuln = true;
}
}catch(IOException e){
throw new Exception("Error when extracting the asset file: " + e);
}finally{
if (in != null)
in.close();
if (out != null)
out.close();
}
return isVuln;
}
}
| 1,361 |
4,303 |
import bisect
import warnings
from torch._utils import _accumulate
from torch import randperm
class Dataset(object):
"""An abstract class representing a Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __add__(self, other):
return ConcatDataset([self, other])
class TensorDataset(Dataset):
"""Dataset wrapping data and target tensors.
Each sample will be retrieved by indexing both tensors along the first
dimension.
Arguments:
data_tensor (Tensor): contains sample data.
target_tensor (Tensor): contains sample targets (labels).
"""
def __init__(self, data_tensor, target_tensor):
assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
def __getitem__(self, index):
return self.data_tensor[index], self.target_tensor[index]
def __len__(self):
return self.data_tensor.size(0)
class ConcatDataset(Dataset):
"""
Dataset to concatenate multiple datasets.
Purpose: useful to assemble different existing datasets, possibly
large-scale datasets as the concatenation operation is done in an
on-the-fly manner.
Arguments:
datasets (iterable): List of datasets to be concatenated
"""
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
l = len(e)
r.append(l + s)
s += l
return r
def __init__(self, datasets):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, 'datasets should not be an empty iterable'
self.datasets = list(datasets)
self.cumulative_sizes = self.cumsum(self.datasets)
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
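        # e.g. cumulative_sizes = [3, 8]: idx = 5 -> dataset_idx = 1, sample_idx = 2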
return self.datasets[dataset_idx][sample_idx]
@property
def cummulative_sizes(self):
warnings.warn("cummulative_sizes attribute is renamed to "
"cumulative_sizes", DeprecationWarning, stacklevel=2)
return self.cumulative_sizes
class Subset(Dataset):
def __init__(self, dataset, indices):
self.dataset = dataset
self.indices = indices
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
def random_split(dataset, lengths):
"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.
Arguments:
dataset (Dataset): Dataset to be split
lengths (iterable): lengths of splits to be produced
"""
if sum(lengths) != len(dataset):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
indices = randperm(sum(lengths))
return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]
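# A short usage sketch (illustrative) exercising random_split and Dataset.__add__:
if __name__ == "__main__":
    import torch
    ds = TensorDataset(torch.arange(10).unsqueeze(1), torch.zeros(10))
    train, val = random_split(ds, [8, 2])
    assert len(train) == 8 and len(val) == 2
    assert len(train + val) == 10  # Dataset.__add__ builds a ConcatDataset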
| 1,388 |
839 |
<reponame>AnEmortalKid/cxf
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.systest.outofband.header;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.net.URL;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.JAXBException;
import javax.xml.namespace.QName;
import javax.xml.ws.BindingProvider;
import javax.xml.ws.Holder;
import javax.xml.ws.soap.SOAPFaultException;
import org.w3c.dom.Node;
import org.apache.cxf.binding.soap.SoapHeader;
import org.apache.cxf.headers.Header;
import org.apache.cxf.jaxb.JAXBDataBinding;
import org.apache.cxf.outofband.header.ObjectFactory;
import org.apache.cxf.outofband.header.OutofBandHeader;
import org.apache.cxf.testutil.common.AbstractBusClientServerTestBase;
import org.apache.hello_world_doc_lit_bare.PutLastTradedPricePortType;
import org.apache.hello_world_doc_lit_bare.SOAPService;
import org.apache.hello_world_doc_lit_bare.types.TradePriceData;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class OOBHeaderTest extends AbstractBusClientServerTestBase {
public static final String PORT = Server.PORT;
public static final String CONFIG_FILE = "org/apache/cxf/systest/outofband/header/cxf.xml";
public static final String TEST_HDR_NS = "http://cxf.apache.org/outofband/Header";
public static final String TEST_HDR_REQUEST_ELEM = "outofbandHeader";
public static final String TEST_HDR_RESPONSE_ELEM = "outofbandHeader";
private final QName serviceName = new QName("http://apache.org/hello_world_doc_lit_bare",
"SOAPService");
private final QName portName = new QName("http://apache.org/hello_world_doc_lit_bare", "SoapPort");
@BeforeClass
public static void startServers() throws Exception {
System.setProperty("org.apache.cxf.bus.factory", "org.apache.cxf.bus.CXFBusFactory");
System.setProperty("cxf.config.file", "org/apache/cxf/systest/outofband/header/cxf.xml");
createStaticBus(CONFIG_FILE);
assertTrue("server did not launch correctly", launchServer(Server.class, true));
}
private void addOutOfBoundHeader(PutLastTradedPricePortType portType, boolean invalid, boolean mu) {
InvocationHandler handler = Proxy.getInvocationHandler(portType);
BindingProvider bp = null;
try {
if (handler instanceof BindingProvider) {
bp = (BindingProvider)handler;
Map<String, Object> requestContext = bp.getRequestContext();
OutofBandHeader ob = new OutofBandHeader();
ob.setName("testOobHeader");
ob.setValue("testOobHeaderValue");
ob.setHdrAttribute(invalid ? "dontProcess" : "testHdrAttribute");
SoapHeader hdr = new SoapHeader(
new QName(TEST_HDR_NS, TEST_HDR_REQUEST_ELEM),
ob,
new JAXBDataBinding(ob.getClass()));
hdr.setMustUnderstand(mu);
List<Header> holder = new ArrayList<>();
holder.add(hdr);
//Add List of headerHolders to requestContext.
requestContext.put(Header.HEADER_LIST, holder);
}
} catch (JAXBException ex) {
//System.out.println("failed to insert header into request context :" + ex);
}
}
private void checkReturnedOOBHeader(PutLastTradedPricePortType portType) {
InvocationHandler handler = Proxy.getInvocationHandler(portType);
BindingProvider bp = null;
if (handler instanceof BindingProvider) {
bp = (BindingProvider)handler;
Map<String, Object> responseContext = bp.getResponseContext();
OutofBandHeader hdrToTest = null;
List<?> oobHdr = (List<?>) responseContext.get(Header.HEADER_LIST);
if (oobHdr == null) {
fail("Should have got List of out-of-band headers ..");
}
assertTrue("HeaderHolder list expected to conain 1 object received " + oobHdr.size(),
oobHdr.size() == 1);
if (oobHdr != null) {
Iterator<?> iter = oobHdr.iterator();
while (iter.hasNext()) {
Object hdr = iter.next();
if (hdr instanceof Header) {
Header hdr1 = (Header) hdr;
if (hdr1.getObject() instanceof Node) {
//System.out.println("Node conains : " + hdr1.getObject().toString());
try {
JAXBElement<?> job
= (JAXBElement<?>)JAXBContext.newInstance(ObjectFactory.class)
.createUnmarshaller()
.unmarshal((Node) hdr1.getObject());
hdrToTest = (OutofBandHeader) job.getValue();
// System.out.println("oob-hdr contains : \nname = "
// + hdrToTest.getName()
// + " \nvalue = " + hdrToTest.getValue()
// + " \natribute = " + hdrToTest.getHdrAttribute());
} catch (JAXBException ex) {
//
ex.printStackTrace();
}
}
}
}
}
assertNotNull("out-of-band header should not be null", hdrToTest);
assertTrue("Expected out-of-band Header name testOobReturnHeaderName recevied :"
+ hdrToTest.getName(),
"testOobReturnHeaderName".equals(hdrToTest.getName()));
assertTrue("Expected out-of-band Header value testOobReturnHeaderValue recevied :"
+ hdrToTest.getValue(),
"testOobReturnHeaderValue".equals(hdrToTest.getValue()));
assertTrue("Expected out-of-band Header attribute testReturnHdrAttribute recevied :"
+ hdrToTest.getHdrAttribute(),
"testReturnHdrAttribute".equals(hdrToTest.getHdrAttribute()));
}
}
@Test
public void testBasicConnection() throws Exception {
URL wsdl = getClass().getResource("/wsdl/doc_lit_bare.wsdl");
assertNotNull("WSDL is null", wsdl);
SOAPService service = new SOAPService(wsdl, serviceName);
assertNotNull("Service is null", service);
PutLastTradedPricePortType putLastTradedPrice = service.getPort(portName,
PutLastTradedPricePortType.class);
updateAddressPort(putLastTradedPrice, PORT);
TradePriceData priceData = new TradePriceData();
priceData.setTickerPrice(1.0f);
priceData.setTickerSymbol("CELTIX");
assertFalse(check(0, putLastTradedPrice, false, true, priceData));
assertFalse(check(1, putLastTradedPrice, false, true, priceData));
assertTrue(check(2, putLastTradedPrice, false, true, priceData));
assertTrue(check(3, putLastTradedPrice, false, true, priceData));
assertFalse(check(0, putLastTradedPrice, true, true, priceData));
assertFalse(check(1, putLastTradedPrice, true, true, priceData));
assertFalse(check(2, putLastTradedPrice, true, true, priceData));
assertFalse(check(3, putLastTradedPrice, true, true, priceData));
assertTrue(check(0, putLastTradedPrice, false, false, priceData));
assertTrue(check(1, putLastTradedPrice, false, false, priceData));
assertTrue(check(2, putLastTradedPrice, false, false, priceData));
assertTrue(check(4, putLastTradedPrice, false, false, priceData));
assertTrue(check(0, putLastTradedPrice, true, false, priceData));
assertTrue(check(1, putLastTradedPrice, true, false, priceData));
assertTrue(check(2, putLastTradedPrice, true, false, priceData));
assertTrue(check(4, putLastTradedPrice, true, false, priceData));
}
private boolean check(int i, PutLastTradedPricePortType putLastTradedPrice,
boolean invalid, boolean mu,
TradePriceData priceData) {
String address = "";
switch (i) {
case 0:
address = "http://localhost:" + PORT + "/SOAPDocLitBareService/SoapPort";
break;
case 1:
address = "http://localhost:" + PORT + "/SOAPDocLitBareService/SoapPortNoHeader";
break;
case 2:
address = "http://localhost:" + PORT + "/SOAPDocLitBareService/SoapPortHeader";
break;
default:
address = "http://localhost:" + PORT
+ "/SOAPDocLitBareService/SoapPortHeaderProperty";
}
((BindingProvider)putLastTradedPrice).getRequestContext()
.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, address);
Holder<TradePriceData> holder = new Holder<>(priceData);
try {
addOutOfBoundHeader(putLastTradedPrice, invalid, mu);
putLastTradedPrice.sayHi(holder);
checkReturnedOOBHeader(putLastTradedPrice);
return true;
} catch (SOAPFaultException ex) {
if (ex.getMessage().contains("MustUnderstand")) {
return false;
}
throw ex;
}
}
}
| 4,734 |
721 |
@ParametersAreNonnullByDefault // Not the right annotation, but Eclipse knows only 3 null annotations anyway, so it's OK
package crazypants.enderio.zoo.config;
import javax.annotation.ParametersAreNonnullByDefault;
| 58 |
5,567 |
#ifndef NEU_EVENT_H
#define NEU_EVENT_H
#include <string>
#include "lib/json/json.hpp"
using namespace std;
using json = nlohmann::json;
namespace events {
void dispatch(string event, json data);
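// Illustrative usage (hypothetical event name): events::dispatch("windowFocus", json::object());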
} // namespace events
#endif // NEU_EVENT_H
| 97 |
5,169 |
{
"name": "Bullitt",
"version": "0.0.1",
"summary": "An iOS framework for interacting with the vBulletin API.",
"description": "Bullitt is a simple framework that provides access to the vBulletin mobile API.\n\nSo far its feature set is limited:\n* It can load the list of forums.\n* It can load threads for a forum.\n* It can load posts for a thread.\n\nOver time, it will be able to do more.",
"homepage": "https://github.com/haugli/Bullitt",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": "<NAME>",
"social_media_url": "http://twitter.com/haugli",
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/haugli/Bullitt.git",
"tag": "0.0.1"
},
"source_files": "Bullitt/*.swift",
"dependencies": {
"Alamofire": [
"~> 1.3"
],
"CryptoSwift": [
"~> 0.0.13"
],
"SwiftyJSON": [
"~> 2.2.1"
]
}
}
| 390 |
348 |
{"nom":"Foncegrive","circ":"4ème circonscription","dpt":"Côte-d'Or","inscrits":124,"abs":69,"votants":55,"blancs":3,"nuls":7,"exp":45,"res":[{"nuance":"LR","nom":"<NAME>","voix":23},{"nuance":"REM","nom":"Mme <NAME>","voix":22}]}
| 96 |
577 |
<reponame>ender8282/jython
'''
Checks that files are closed in three situations:
1. Garbage collection/finalization close
2. Regular close
3. Shutdown time, close out open PyFiles
'''
import os
import support
from java.io import File
from java.lang import System, Thread
def check(fn='test.txt'):
f = File(fn)
if not f.exists():
raise support.TestError('"%s" should exist' % fn)
if not f.length():
raise support.TestError('"%s" should have contents' % fn)
os.remove(fn)
open("garbagecollected", "w").write("test")
#Wait up to 2 seconds for the garbage-collected file to disappear
System.gc()
for i in range(10):
if not os.path.exists('garbagecollected'):
break
Thread.sleep(200)
check("garbagecollected")
f = open("normalclose", "w")
f.write("test")
f.close()
check("normalclose")
#test397m writes to "shutdown" and exits
support.runJython('test397m.py')
check('shutdown')
| 333 |
3,246 |
package org.datatransferproject.types.common;
public class PortabilityCommon {
/** The protocol used for authorization. */
public enum AuthProtocol {
UNKNOWN,
OAUTH_1,
OAUTH_2,
CUSTOM
}
}
| 79 |
326 |
// <COMPONENT>: os-apis
// <FILE-TYPE>: component public header
/*!
* @defgroup OS_APIS_SIGNALS Signals
* @brief Contains signals related os apis
*/
#ifndef OS_APIS_SIGNALS_H
#define OS_APIS_SIGNALS_H
#include "os-apis.h"
#include "signals-core.h"
/*!
 * Return the max number of signals possible
*/
int OS_MaxSignalNumber();
/*!
 * This enum is used only for the OS_PinSigactionToKernelSigaction function;
 * you cannot use these values directly in other OS-APIs functions or system calls
*/
typedef enum
{
SIGACTION_SIGINFO,
SIGACTION_NODEFER,
SIGACTION_RESETHAND,
SIGACTION_ONSTACK,
SIGACTION_NOCLDSTOP,
SIGACTION_RESTART,
SIGACTION_RESTORER
} SigactionDefines;
/*
* This function is used to convert the defines into the right
* value according to the OS (bit map value)
*/
UINT64 OS_PinSigactionToKernelSigaction(SigactionDefines input);
#ifdef TARGET_MAC
typedef void(*OS_SIGTRAP_PTR)(void *, unsigned int, int, void *, void *);
void OS_SigReturn(void *uctx, int infostyle);
#endif
/*! @ingroup OS_APIS_SIGNALS
* Specifies an action to OS_SigAction()
*/
struct SIGACTION {
union {
void (*_sa_handler)(int); //! Signal handle function (old way)
void (*_sa_sigaction)(int, void *, void *); //! Signal handle function (new way)
void *_sa_handler_ptr; //! Convenience void* pointer to the signal handler.
} _u;
SIGSET_T sa_mask; //! Mask of signals to block during the handling of the signal
unsigned long sa_flags; //! Additional flags (OS specific).
void (*sa_restorer)(void); //! Signal restorer.
};
#ifdef TARGET_MAC
struct SIGACTION_WITH_TRAMP {
struct SIGACTION act;
void(*sa_tramp)(void *, unsigned int, int, void *, void *);
};
#endif
#ifdef TARGET_MAC
/**
* Same as OS_SigAction but the specified act contains a trampoline (meaning don't use the default Pin trampoline
 * but the one that exists in act)
*/
OS_RETURN_CODE OS_SigActionWithTrampoline(INT signum, const struct SIGACTION_WITH_TRAMP *actWithTramp, struct SIGACTION *oldact);
#endif
/*! @ingroup OS_APIS_SIGNALS
* Change the action taken by a process on receipt of a specific signal.
* This function is compatible with POSIX sigaction().
*
* @param[in] signum The signal to alter its behavior.
* @param[in] act The action to be taken upon signal reception.
* @param[in] oldact The previous action that was taken upon signal reception.
*
* @return Operation status code.
*
* @par Availability:
* @b O/S: Linux & OS X*\n
* @b CPU: All\n
*/
OS_RETURN_CODE OS_SigAction(INT signum, const struct SIGACTION *act, struct SIGACTION *oldact);
/*! @ingroup OS_APIS_SIGNALS
* Temporarily replaces the signal mask of the calling process with the mask given
* by mask and then suspends the process until delivery of a signal whose action is to
* invoke a signal handler or to terminate a process.
*
* @param[in] mask The mask to use for the signals
*
* @return Operation status code.
*
* @par Availability:
* @b O/S: Linux & OS X*\n
* @b CPU: All\n
*/
OS_RETURN_CODE OS_SigSuspend(const SIGSET_T *mask);
/*! @ingroup OS_APIS_SIGNALS
* Returns the set of signals that are pending for delivery to the calling thread
* (i.e., the signals which have been raised while blocked). The mask of
* pending signals is returned in set.
*
* @param[out] set Where to store the signal set.
*
* @return Operation status code.
*
* @par Availability:
* @b O/S: Linux & OS X*\n
* @b CPU: All\n
*/
OS_RETURN_CODE OS_SigPending(const SIGSET_T *set);
/*! @ingroup OS_APIS_SIGNALS
* Send signal to a particular thread inside a process.
*
 * @param[in] pid The process ID where the thread is running. If it is
 *                INVALID_NATIVE_PID then the pid is ignored.
* @param[in] tid The thread ID to send the signal to.
* @param[in] signal The signal to send.
*
* @return Operation status code.
*
* @par Availability:
* @b O/S: Linux & OS X*\n
* @b CPU: All\n
*/
OS_RETURN_CODE OS_SendSignalToThread(NATIVE_PID pid, NATIVE_TID tid, UINT32 signal);
#endif // OS_APIS_SIGNALS_H
| 1,612 |
1,780 |
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.samples.nacos.starter;
import io.seata.samples.nacos.service.BusinessService;
import org.springframework.context.support.ClassPathXmlApplicationContext;
/**
* The type Dubbo business tester.
*/
public class DubboBusinessTester {
/**
* The entry point of application.
*
* @param args the input arguments
*/
public static void main(String[] args) {
/**
         * 4. The whole e-commerce platform is ready. The buyer (U100001) creates an
         * order for the SKU (C00321); the quantity is 2.
*/
ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(
new String[] {"spring/dubbo-business.xml"});
final BusinessService business = (BusinessService)context.getBean("business");
business.purchase("U100001", "C00321", 2);
}
}
| 485 |
347 |
<filename>backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/GetAllVmsFilteredAndSortedQuery.java<gh_stars>100-1000
package org.ovirt.engine.core.bll;
import java.util.List;
import org.ovirt.engine.core.bll.context.EngineContext;
import org.ovirt.engine.core.common.businessentities.VM;
import org.ovirt.engine.core.common.queries.GetFilteredAndSortedParameters;
public class GetAllVmsFilteredAndSortedQuery<P extends GetFilteredAndSortedParameters> extends GetAllVmsQueryBase<P> {
public GetAllVmsFilteredAndSortedQuery(P parameters, EngineContext engineContext) {
super(parameters, engineContext);
}
@Override
protected List<VM> getVMs() {
//get max results and page # from parameters
int maxResults = getParameters().getMaxResults();
int pageNum = getParameters().getPageNum();
//translate them to offset and limit (for the database)
int offset = (pageNum - 1) * maxResults;
int limit = offset + maxResults;
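        // e.g. pageNum = 3, maxResults = 20 -> offset = 40, limit = 60 (an end index, not a page size)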
return vmDao.getAllSortedAndFiltered(getUserID(), offset, limit);
}
}
| 390 |
1,861 |
<filename>cpp/spectrum/core/proc/ScalingScanlineProcessingBlock.cpp
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "ScalingScanlineProcessingBlock.h"
#include <spectrum/core/SpectrumEnforce.h>
#include <spectrum/core/proc/ScanlineProcessingBlock.h>
#include <spectrum/core/proc/legacy/SeparableFiltersResampler.h>
#include <spectrum/core/proc/legacy/Sharpener.h>
#include <spectrum/image/Scanline.h>
#include <cmath>
#include <memory>
#include <vector>
namespace facebook {
namespace spectrum {
namespace core {
namespace proc {
//
// ScalingBlockImpl
//
class ScalingBlockImpl : public ScanlineProcessingBlock {
protected:
const image::pixel::Specification _pixelSpecification;
const image::Size inputSize;
const image::Size outputSize;
const float scalingX;
const float scalingY;
const float invScalingX;
const float invScalingY;
std::vector<std::unique_ptr<image::Scanline>> input = {};
std::size_t nextLineToRelease = 0;
std::size_t outputScanline = 0;
public:
ScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize);
~ScalingBlockImpl() override = default;
void consume(std::unique_ptr<image::Scanline> scanline) override;
};
ScalingBlockImpl::ScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize)
: _pixelSpecification(pixelSpecification),
inputSize(inputSize),
outputSize(outputSize),
scalingX((float)outputSize.width / (float)inputSize.width),
scalingY((float)outputSize.height / (float)inputSize.height),
invScalingX(1.0f / scalingX),
invScalingY(1.0f / scalingY) {
SPECTRUM_ENFORCE_IF_NOT(scalingX > 0);
SPECTRUM_ENFORCE_IF_NOT(scalingY > 0);
}
void ScalingBlockImpl::consume(std::unique_ptr<image::Scanline> scanline) {
SPECTRUM_ENFORCE_IF_NOT(scanline->specification() == _pixelSpecification);
input.push_back(std::move(scanline));
}
//
// Magic kernel
//
class MagicKernelScalingBlockImpl : public ScalingBlockImpl {
private:
// for magic kernel only
std::vector<std::unique_ptr<image::Scanline>> magicOutput = {};
void runMagicKernel();
std::unique_ptr<image::Scanline> produceMagicKernel();
public:
MagicKernelScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize);
~MagicKernelScalingBlockImpl() override = default;
std::unique_ptr<image::Scanline> produce() override;
};
MagicKernelScalingBlockImpl::MagicKernelScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize)
: ScalingBlockImpl(pixelSpecification, inputSize, outputSize) {}
void MagicKernelScalingBlockImpl::runMagicKernel() {
const auto numComponents = _pixelSpecification.bytesPerPixel;
const auto stride = outputSize.width * numComponents;
std::unique_ptr<std::uint8_t[]> lineBuffer(new std::uint8_t[stride]);
legacy::SeparableFiltersResampler magicResampler(
inputSize.width,
inputSize.height,
outputSize.width,
outputSize.height,
numComponents);
legacy::Sharpener magicSharpener(
outputSize.width, outputSize.height, numComponents, lineBuffer.get());
// run
  const std::size_t buffered_lines = input.size(); // avoid shadowing the image::Size member 'inputSize'
  while (nextLineToRelease < buffered_lines) {
// input -> resampler
SPECTRUM_ENFORCE_IF_NOT(input[nextLineToRelease]);
std::uint8_t* buffer =
reinterpret_cast<std::uint8_t*>(input[nextLineToRelease]->data());
SPECTRUM_ENFORCE_IF_NOT(buffer);
magicResampler.putLine(buffer);
// resampler -> sharpener
// elements of `pResampledRow` are Q21.11 fixed-point numbers
while (const int32_t* pResampledRow = magicResampler.getLine()) {
magicSharpener.putLine(pResampledRow);
// sharpener -> output
while (magicSharpener.getLine(lineBuffer.get())) {
auto scanline = std::make_unique<image::Scanline>(
_pixelSpecification, outputSize.width);
SPECTRUM_ENFORCE_IF_NOT(stride == scanline->sizeBytes());
SPECTRUM_ENFORCE_IF_NOT(scanline && scanline->data());
SPECTRUM_ENFORCE_IF_NOT(lineBuffer.get());
memcpy(scanline->data(), lineBuffer.get(), stride);
magicOutput.push_back(std::move(scanline));
}
}
// free processed input
input[nextLineToRelease].reset();
nextLineToRelease++;
}
}
std::unique_ptr<image::Scanline> MagicKernelScalingBlockImpl::produce() {
if (input.size() == inputSize.height) {
// TODO T21712884: stream scanlines instead of processing of all at once
runMagicKernel();
input.clear();
}
if (outputScanline < magicOutput.size()) {
return std::move(magicOutput[outputScanline++]);
} else {
return nullptr;
}
}
//
// Bicubic kernel
//
namespace {
static inline float clamp(const float f, const float min, const float max) {
return f < min ? min : (f > max ? max : f);
}
static inline float bicubicCompute(
const float deltaX,
const float p0,
const float p1,
const float p2,
const float p3) {
// compute cubic hermite interpolation using the horner method
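  // (Equivalent Catmull-Rom form, with t = deltaX:
  //  0.5 * (2*p1 + (p2 - p0)*t + (2*p0 - 5*p1 + 4*p2 - p3)*t^2 + (3*(p1 - p2) + p3 - p0)*t^3).)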
const float a = p1;
const float b = p2 - p0;
const float c = 2.0f * p0 - 5.0f * p1 + 4.0f * p2 - p3;
const float d = 3.0f * (p1 - p2) + p3 - p0;
return a + 0.5f * deltaX * (b + deltaX * (c + deltaX * d));
}
static std::vector<std::uint8_t> bicubicComputeAndClamp(
const std::uint8_t numberOfComponents,
const float deltaX,
const std::uint8_t* pix0,
const std::uint8_t* pix1,
const std::uint8_t* pix2,
const std::uint8_t* pix3) {
auto result = std::vector<std::uint8_t>(numberOfComponents);
for (std::uint8_t componentOffset = 0; componentOffset < numberOfComponents;
componentOffset++) {
const auto resultValue = bicubicCompute(
deltaX,
static_cast<float>(pix0[componentOffset]),
static_cast<float>(pix1[componentOffset]),
static_cast<float>(pix2[componentOffset]),
static_cast<float>(pix3[componentOffset]));
result[componentOffset] = clamp(resultValue, 0, 255);
}
return result;
}
} // namespace
class BicubicScalingBlockImpl : public ScalingBlockImpl {
public:
BicubicScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize);
~BicubicScalingBlockImpl() override = default;
std::unique_ptr<image::Scanline> produce() override;
};
BicubicScalingBlockImpl::BicubicScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize)
: ScalingBlockImpl(pixelSpecification, inputSize, outputSize) {}
std::unique_ptr<image::Scanline> BicubicScalingBlockImpl::produce() {
if (outputScanline == outputSize.height) {
return nullptr;
}
auto result =
std::make_unique<image::Scanline>(_pixelSpecification, outputSize.width);
const float middleY = 0.5f * invScalingY *
static_cast<float>(outputScanline + outputScanline + 1);
// shift from pixel center to logical index
const float logicalMiddleY = clamp(middleY - 0.5f, 0.0f, inputSize.height);
const int y1 = static_cast<int>(floor(logicalMiddleY));
const int y2 = y1 < inputSize.height - 1 ? y1 + 1 : y1;
SPECTRUM_ENFORCE_IF_NOT(y1 >= 0 && y1 <= y2 && y2 < inputSize.height);
const float deltaY = (y1 == y2) ? 0 : (logicalMiddleY - y1) / (y2 - y1);
SPECTRUM_ENFORCE_IF_NOT(deltaY >= 0.0f && deltaY <= 1.0f);
const int y0 = y1 == 0 ? 0 : y1 - 1;
const int y3 = y2 < inputSize.height - 1 ? y2 + 1 : y2;
SPECTRUM_ENFORCE_IF_NOT(y0 >= 0 && y0 <= y3 && y3 < inputSize.height);
if (static_cast<size_t>(y3) >= input.size()) {
return nullptr;
}
for (int xOffset = 0; xOffset < outputSize.width; xOffset++) {
const float middleX =
0.5f * invScalingX * static_cast<float>(xOffset + xOffset + 1);
// shift from pixel center to logical index
const float logicalMiddleX = clamp(middleX - 0.5f, 0.0f, inputSize.width);
const int x1 = static_cast<int>(floor(logicalMiddleX));
const int x2 = x1 < inputSize.width - 1 ? x1 + 1 : x1;
SPECTRUM_ENFORCE_IF_NOT(x1 >= 0 && x1 <= x2 && x2 < inputSize.width);
const float deltaX = (x1 == x2) ? 0 : (logicalMiddleX - x1) / (x2 - x1);
SPECTRUM_ENFORCE_IF_NOT(deltaX >= 0.0f && deltaX <= 1.0f);
const int x0 = x1 == 0 ? 0 : x1 - 1;
const int x3 = x2 < inputSize.width - 1 ? x2 + 1 : x2;
SPECTRUM_ENFORCE_IF_NOT(x0 >= 0 && x0 <= x3 && x3 < inputSize.width);
const auto numberOfComponents = _pixelSpecification.numberOfComponents();
const auto bicubicSum0 = bicubicComputeAndClamp(
numberOfComponents,
deltaX,
(*input[y0]).dataAtPixel(x0),
(*input[y0]).dataAtPixel(x1),
(*input[y0]).dataAtPixel(x2),
(*input[y0]).dataAtPixel(x3));
const auto bicubicSum1 = bicubicComputeAndClamp(
numberOfComponents,
deltaX,
(*input[y1]).dataAtPixel(x0),
(*input[y1]).dataAtPixel(x1),
(*input[y1]).dataAtPixel(x2),
(*input[y1]).dataAtPixel(x3));
const auto bicubicSum2 = bicubicComputeAndClamp(
numberOfComponents,
deltaX,
(*input[y2]).dataAtPixel(x0),
(*input[y2]).dataAtPixel(x1),
(*input[y2]).dataAtPixel(x2),
(*input[y2]).dataAtPixel(x3));
const auto bicubicSum3 = bicubicComputeAndClamp(
numberOfComponents,
deltaX,
(*input[y3]).dataAtPixel(x0),
(*input[y3]).dataAtPixel(x1),
(*input[y3]).dataAtPixel(x2),
(*input[y3]).dataAtPixel(x3));
const auto pxl = bicubicComputeAndClamp(
numberOfComponents,
deltaY,
bicubicSum0.data(),
bicubicSum1.data(),
bicubicSum2.data(),
bicubicSum3.data());
auto dst = result->dataAtPixel(xOffset);
for (std::uint8_t componentOffset = 0; componentOffset < numberOfComponents;
componentOffset++) {
*(dst + componentOffset) = pxl[componentOffset];
}
}
// free scanlines that will not be touched again
for (int i = nextLineToRelease; i < y0; i++) {
SPECTRUM_ENFORCE_IF(input[i] == nullptr);
input[i].reset();
}
nextLineToRelease = y0;
++outputScanline;
return result;
}
//
// No-operation
//
class NoOpScalingBlockImpl : public ScalingBlockImpl {
private:
public:
NoOpScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize);
~NoOpScalingBlockImpl() override = default;
std::unique_ptr<image::Scanline> produce() override;
};
NoOpScalingBlockImpl::NoOpScalingBlockImpl(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize)
: ScalingBlockImpl(pixelSpecification, inputSize, outputSize) {
SPECTRUM_ENFORCE_IF_NOT(inputSize == outputSize);
}
std::unique_ptr<image::Scanline> NoOpScalingBlockImpl::produce() {
if (outputScanline == outputSize.height) {
return nullptr;
}
return std::move(input[outputScanline++]);
}
//
// ScalingScanlineProcessingBlock
//
ScalingScanlineProcessingBlock::ScalingScanlineProcessingBlock(
const image::pixel::Specification& pixelSpecification,
const image::Size& inputSize,
const image::Size& outputSize,
const Configuration::General::SamplingMethod samplingMethod)
: _pixelSpecification(pixelSpecification) {
if (inputSize == outputSize) {
delegate = std::make_unique<NoOpScalingBlockImpl>(
pixelSpecification, inputSize, outputSize);
} else {
switch (samplingMethod) {
case Configuration::General::SamplingMethod::MagicKernel:
delegate = std::make_unique<MagicKernelScalingBlockImpl>(
pixelSpecification, inputSize, outputSize);
break;
case Configuration::General::SamplingMethod::Bicubic:
delegate = std::make_unique<BicubicScalingBlockImpl>(
pixelSpecification, inputSize, outputSize);
break;
default:
SPECTRUM_UNREACHABLE_CONFIGURATION_SAMPLING_METHOD(samplingMethod);
}
}
};
// required as `ScalingBlockImpl` is forward declared
ScalingScanlineProcessingBlock::~ScalingScanlineProcessingBlock(){};
void ScalingScanlineProcessingBlock::consume(
std::unique_ptr<image::Scanline> scanline) {
SPECTRUM_ENFORCE_IF_NOT(scanline->specification() == _pixelSpecification);
delegate->consume(std::move(scanline));
}
std::unique_ptr<image::Scanline> ScalingScanlineProcessingBlock::produce() {
return delegate->produce();
}
} // namespace proc
} // namespace core
} // namespace spectrum
} // namespace facebook
| 4,928 |
584 |
/*
Copyright 2015 Adobe
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
/**************************************************************************************************/
#ifndef STLAB_MODEL_HPP
#define STLAB_MODEL_HPP
/**************************************************************************************************/
#include <atomic>
#include <condition_variable>
#include <iostream>
#include <mutex>
/**************************************************************************************************/
namespace stlab {
/**************************************************************************************************/
inline namespace v1 {
/**************************************************************************************************/
struct annotate_counters {
std::atomic_size_t _dtor{0};
std::atomic_size_t _copy_ctor{0};
std::atomic_size_t _move_ctor{0};
std::atomic_size_t _copy_assign_lhs{0};
std::atomic_size_t _copy_assign_rhs{0};
std::atomic_size_t _move_assign_lhs{0};
std::atomic_size_t _move_assign_rhs{0};
std::atomic_size_t _swap{0};
std::atomic_size_t _equality{0};
std::mutex _mutex;
std::condition_variable _condition;
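    // Live instances = copies + moves constructed, minus destructions, plus one
    // for the originally constructed object (the +1 below).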
std::size_t remaining() const { return _copy_ctor + _move_ctor - _dtor + 1; }
void wait(std::size_t count) {
std::unique_lock<std::mutex> lock(_mutex);
while (count != remaining())
_condition.wait(lock);
}
friend inline std::ostream& operator<<(std::ostream& out, const annotate_counters& x) {
out << " dtor: " << x._dtor << "\n";
out << " copy_ctor: " << x._copy_ctor << "\n";
out << " move_ctor: " << x._move_ctor << "\n";
out << "copy_assign_lhs: " << x._copy_assign_lhs << "\n";
out << "copy_assign_rhs: " << x._copy_assign_rhs << "\n";
out << "move_assign_lhs: " << x._move_assign_lhs << "\n";
out << "move_assign_rhs: " << x._move_assign_rhs << "\n";
out << " swap: " << x._swap << "\n";
out << " equality: " << x._equality << "\n";
return out;
}
};
struct annotate {
annotate_counters* _counters;
explicit annotate(annotate_counters& counters) : _counters(&counters) {}
    ~annotate() {
        ++_counters->_dtor;
        _counters->_condition.notify_one();
    }
annotate(const annotate& x) : _counters(x._counters) { ++_counters->_copy_ctor; }
annotate(annotate&& x) noexcept : _counters(x._counters) { ++_counters->_move_ctor; }
annotate& operator=(const annotate& x) {
++x._counters->_copy_assign_rhs;
++_counters->_copy_assign_lhs;
return *this;
}
annotate& operator=(annotate&& x) noexcept {
++x._counters->_move_assign_rhs;
++_counters->_move_assign_lhs;
return *this;
}
friend inline void swap(annotate& x, annotate& y) {
++x._counters->_swap;
++y._counters->_swap;
}
friend inline bool operator==(const annotate& x, const annotate& y) {
++x._counters->_equality;
++y._counters->_equality;
return true;
}
friend inline bool operator!=(const annotate& x, const annotate& y) { return !(x == y); }
};
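/*
    A minimal usage sketch (illustrative, not part of the original header):

        annotate_counters counters;
        {
            annotate a(counters);
            annotate b = a;            // increments _copy_ctor
            annotate c = std::move(a); // increments _move_ctor
        }                              // three destructions increment _dtor
        // remaining() == copies + moves - dtors + 1, so another thread may
        // block until every instance (including the original) is gone:
        counters.wait(0);
*/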
/**************************************************************************************************/
struct regular {
int _x;
explicit regular(int x) : _x(x) { std::cout << _x << " ctor" << std::endl; }
~regular() { std::cout << _x << " dtor" << std::endl; }
regular(const regular& rhs) : _x(rhs._x) { std::cout << _x << " copy-ctor" << std::endl; }
regular(regular&& rhs) noexcept : _x(std::move(rhs._x)) {
std::cout << _x << " move-ctor" << std::endl;
rhs._x = 0;
}
regular& operator=(const regular& rhs) {
std::cout << _x << " assign" << std::endl;
_x = rhs._x;
return *this;
}
regular& operator=(regular&& rhs) noexcept {
std::cout << _x << " move-assign" << std::endl;
_x = std::move(rhs._x);
rhs._x = 0;
return *this;
}
friend inline void swap(regular& lhs, regular& rhs) {
std::cout << lhs._x << "/" << rhs._x << " swap " << std::endl;
std::swap(lhs._x, rhs._x);
}
friend inline bool operator==(const regular& lhs, const regular& rhs) {
return lhs._x == rhs._x;
}
friend inline bool operator!=(const regular& lhs, const regular& rhs) { return !(lhs == rhs); }
friend inline bool operator<(const regular& lhs, const regular& rhs) {
bool result(lhs._x < rhs._x);
std::cout << lhs._x << " < " << rhs._x << ": " << std::boolalpha << result << std::endl;
return result;
}
};
/**************************************************************************************************/
class move_only {
private:
int _member{0};
public:
move_only() {}
move_only(int member) : _member(member) {}
move_only(const move_only&) = delete;
move_only& operator=(const move_only&) = delete;
move_only(move_only&&) = default;
move_only& operator=(move_only&&) = default;
virtual ~move_only() = default;
int member() { return _member; }
int member() const { return _member; }
};
/**************************************************************************************************/
} // namespace v1
/**************************************************************************************************/
} // namespace stlab
/**************************************************************************************************/
#endif
/**************************************************************************************************/
| 2,205 |
740 |
#include "approx_path_distance.hpp"
namespace vg {
namespace algorithms {
using namespace std;
size_t min_approx_path_distance(const PathPositionHandleGraph* graph, const pos_t& pos1, const pos_t& pos2, uint64_t max_search) {
auto nearest1 = nearest_offsets_in_paths(graph, pos1, max_search);
auto nearest2 = nearest_offsets_in_paths(graph, pos2, max_search);
uint64_t min_distance = numeric_limits<uint64_t>::max();
for (auto& p : nearest1) {
auto q = nearest2.find(p.first);
if (q != nearest2.end()) {
// note, doesn't respect orientation
for (auto& o1 : p.second) {
for (auto& o2 : q->second) {
uint64_t x = (o1.first > o2.first ? o1.first - o2.first : o2.first - o1.first);
min_distance = std::min(min_distance, x);
}
}
}
}
return (size_t)min_distance;
}
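// A minimal usage sketch (illustrative; the graph comes from the caller, and
// the make_pos_t arguments (node id, is-reverse flag, offset) are an
// assumption about the usual vg position helpers):
//
//   size_t d = min_approx_path_distance(&graph,
//                                       make_pos_t(n1, false, 0),
//                                       make_pos_t(n2, false, 0),
//                                       /*max_search=*/64);
//   // d stays at numeric_limits<uint64_t>::max() when no path is shared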
} // namespace algorithms
} // namespace vg
| 440 |
575 |
<filename>chrome/android/java/src/org/chromium/chrome/browser/contextmenu/RevampedContextMenuItemWithIconButtonViewBinder.java<gh_stars>100-1000
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.contextmenu;
import static org.chromium.chrome.browser.contextmenu.RevampedContextMenuItemWithIconButtonProperties.BUTTON_CLICK_LISTENER;
import static org.chromium.chrome.browser.contextmenu.RevampedContextMenuItemWithIconButtonProperties.BUTTON_CONTENT_DESC;
import static org.chromium.chrome.browser.contextmenu.RevampedContextMenuItemWithIconButtonProperties.BUTTON_IMAGE;
import android.graphics.drawable.Drawable;
import android.view.View;
import android.widget.ImageView;
import org.chromium.chrome.R;
import org.chromium.ui.modelutil.PropertyKey;
import org.chromium.ui.modelutil.PropertyModel;
class RevampedContextMenuItemWithIconButtonViewBinder extends RevampedContextMenuItemViewBinder {
public static void bind(PropertyModel model, View view, PropertyKey propertyKey) {
RevampedContextMenuItemViewBinder.bind(
model, view.findViewById(R.id.menu_row_text), propertyKey);
if (propertyKey == BUTTON_IMAGE) {
Drawable drawable = model.get(BUTTON_IMAGE);
final ImageView imageView = view.findViewById(R.id.menu_row_share_icon);
imageView.setImageDrawable(drawable);
imageView.setVisibility(drawable != null ? View.VISIBLE : View.GONE);
final int padding = view.getResources().getDimensionPixelSize(
R.dimen.revamped_context_menu_list_lateral_padding);
// We don't need extra end padding for the text if the share icon is visible as the icon
// already has padding.
view.findViewById(R.id.menu_row_text)
.setPaddingRelative(padding, 0, drawable != null ? 0 : padding, 0);
} else if (propertyKey == BUTTON_CONTENT_DESC) {
((ImageView) view.findViewById(R.id.menu_row_share_icon))
.setContentDescription(view.getContext().getString(
R.string.accessibility_menu_share_via, model.get(BUTTON_CONTENT_DESC)));
} else if (propertyKey == BUTTON_CLICK_LISTENER) {
view.findViewById(R.id.menu_row_share_icon)
.setOnClickListener(model.get(BUTTON_CLICK_LISTENER));
}
}
}
| 964 |
14,668 |
<reponame>zealoussnow/chromium
{
"name": "ActivityLog test for SW with messaging API",
"key": "<KEY>",
"version": "0.1",
"manifest_version": 3,
"description": "Tests activity log for long lived connection between tab and SW extension",
"permissions": ["activityLogPrivate"],
"background": {"service_worker": "service_worker_background.js"}
}
| 111 |
8,633 |
<reponame>nicolasiltis/prefect
from prefect.tasks.mysql.mysql import MySQLFetch, MySQLExecute
from prefect import Flow, task
EXAMPLE_TABLE = "user"
HOST = "localhost"
PORT = 3306
DB_NAME = "ext"
USER = "admin"
PASSWORD = "<PASSWORD>"
mysql_fetch = MySQLFetch(
host=HOST, port=PORT, db_name=DB_NAME, user=USER, password=PASSWORD
)
mysql_exec = MySQLExecute(
host=HOST, port=PORT, db_name=DB_NAME, user=USER, password=PASSWORD
)
@task
def print_results(x):
print(x)
with Flow("MySQL Example") as flow:
# fetch 3 results
fetch_results = mysql_fetch(
query=f"SELECT * FROM {EXAMPLE_TABLE}", fetch="many", fetch_count=3
)
print_results(fetch_results)
# execute a query that returns 3 results
exec_results = mysql_exec(query=f"SELECT * FROM {EXAMPLE_TABLE} LIMIT 3")
print_results(exec_results)
flow.run()
| 347 |
309 |
<filename>src/Cxx/PolyData/ColorCellsWithRGB.cxx
#include <vtkSmartPointer.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkMath.h>
#include <vtkUnsignedCharArray.h>
#include <vtkCellData.h>
#include <vtkPolyData.h>
#include <vtkPlaneSource.h>
#include <vtkNamedColors.h>
int main(int , char *[])
{
// Provide some geometry
int resolutionX = 5;
int resolutionY = 3;
vtkSmartPointer<vtkPlaneSource> aPlane =
vtkSmartPointer<vtkPlaneSource>::New();
aPlane->SetXResolution(resolutionX);
aPlane->SetYResolution(resolutionY);
aPlane->Update();
// Create cell data
vtkMath::RandomSeed(8775070); // for reproducibility
vtkSmartPointer<vtkUnsignedCharArray> cellData =
vtkSmartPointer<vtkUnsignedCharArray>::New();
cellData->SetNumberOfComponents(3);
cellData->SetNumberOfTuples(aPlane->GetOutput()->GetNumberOfCells());
for (int i = 0; i < aPlane->GetOutput()->GetNumberOfCells(); i++)
{
float rgb[3];
rgb[0] = vtkMath::Random(64, 255);
rgb[1] = vtkMath::Random(64, 255);
rgb[2] = vtkMath::Random(64, 255);
cellData->InsertTuple(i, rgb);
}
aPlane->GetOutput()->GetCellData()->SetScalars(cellData);
// Setup actor and mapper
vtkSmartPointer<vtkNamedColors> colors =
vtkSmartPointer<vtkNamedColors>::New();
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(aPlane->GetOutputPort());
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
// Setup render window, renderer, and interactor
vtkSmartPointer<vtkRenderer> renderer =
vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow =
vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
renderer->AddActor(actor);
renderer->SetBackground(colors->GetColor3d("SlateGray").GetData());
renderWindow->Render();
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
| 864 |
3,442 |
/*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.java.sip.communicator.service.protocol.event;
/**
* A listener that dispatches events notifying that an invitation which was
* sent earlier has been rejected by the invitee.
*
* @author <NAME>
*/
public interface ChatRoomInvitationRejectionListener
{
/**
* Called when an invitee rejects an invitation previously sent by us.
*
* @param evt the instance of the <tt>ChatRoomInvitationRejectedEvent</tt>
* containing the rejected chat room invitation as well as the source
* provider where this happened.
*/
public void invitationRejected(ChatRoomInvitationRejectedEvent evt);
}
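// A minimal usage sketch (illustrative, not part of the original source;
// assumes a provider exposing Jitsi's OperationSetMultiUserChat):
//
//   OperationSetMultiUserChat muc =
//       provider.getOperationSet(OperationSetMultiUserChat.class);
//   muc.addInvitationRejectionListener(
//       evt -> System.out.println("Invitation rejected: " + evt));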
| 361 |
2,816 |
<filename>src/planner/binder/statement/bind_copy.cpp
#include "duckdb/catalog/catalog.hpp"
#include "duckdb/parser/statement/copy_statement.hpp"
#include "duckdb/planner/binder.hpp"
#include "duckdb/parser/statement/insert_statement.hpp"
#include "duckdb/planner/operator/logical_copy_to_file.hpp"
#include "duckdb/planner/operator/logical_get.hpp"
#include "duckdb/planner/operator/logical_insert.hpp"
#include "duckdb/catalog/catalog_entry/copy_function_catalog_entry.hpp"
#include "duckdb/main/client_context.hpp"
#include "duckdb/main/database.hpp"
#include "duckdb/parser/expression/columnref_expression.hpp"
#include "duckdb/parser/expression/star_expression.hpp"
#include "duckdb/parser/tableref/basetableref.hpp"
#include "duckdb/parser/query_node/select_node.hpp"
#include <algorithm>
namespace duckdb {
BoundStatement Binder::BindCopyTo(CopyStatement &stmt) {
// COPY TO a file
auto &config = DBConfig::GetConfig(context);
if (!config.enable_external_access) {
throw PermissionException("COPY TO is disabled by configuration");
}
BoundStatement result;
result.types = {LogicalType::BIGINT};
result.names = {"Count"};
// bind the select statement
auto select_node = Bind(*stmt.select_statement);
// lookup the format in the catalog
auto &catalog = Catalog::GetCatalog(context);
auto copy_function = catalog.GetEntry<CopyFunctionCatalogEntry>(context, DEFAULT_SCHEMA, stmt.info->format);
if (!copy_function->function.copy_to_bind) {
throw NotImplementedException("COPY TO is not supported for FORMAT \"%s\"", stmt.info->format);
}
auto function_data =
copy_function->function.copy_to_bind(context, *stmt.info, select_node.names, select_node.types);
// now create the copy information
auto copy = make_unique<LogicalCopyToFile>(copy_function->function, move(function_data));
copy->AddChild(move(select_node.plan));
result.plan = move(copy);
return result;
}
BoundStatement Binder::BindCopyFrom(CopyStatement &stmt) {
auto &config = DBConfig::GetConfig(context);
if (!config.enable_external_access) {
throw PermissionException("COPY FROM is disabled by configuration");
}
BoundStatement result;
result.types = {LogicalType::BIGINT};
result.names = {"Count"};
D_ASSERT(!stmt.info->table.empty());
// COPY FROM a file
	// generate an insert statement for the to-be-inserted table
InsertStatement insert;
insert.table = stmt.info->table;
insert.schema = stmt.info->schema;
insert.columns = stmt.info->select_list;
// bind the insert statement to the base table
auto insert_statement = Bind(insert);
D_ASSERT(insert_statement.plan->type == LogicalOperatorType::LOGICAL_INSERT);
auto &bound_insert = (LogicalInsert &)*insert_statement.plan;
// lookup the format in the catalog
auto &catalog = Catalog::GetCatalog(context);
auto copy_function = catalog.GetEntry<CopyFunctionCatalogEntry>(context, DEFAULT_SCHEMA, stmt.info->format);
if (!copy_function->function.copy_from_bind) {
throw NotImplementedException("COPY FROM is not supported for FORMAT \"%s\"", stmt.info->format);
}
// lookup the table to copy into
auto table = Catalog::GetCatalog(context).GetEntry<TableCatalogEntry>(context, stmt.info->schema, stmt.info->table);
vector<string> expected_names;
if (!bound_insert.column_index_map.empty()) {
expected_names.resize(bound_insert.expected_types.size());
for (idx_t i = 0; i < table->columns.size(); i++) {
if (bound_insert.column_index_map[i] != DConstants::INVALID_INDEX) {
expected_names[bound_insert.column_index_map[i]] = table->columns[i].name;
}
}
} else {
expected_names.reserve(bound_insert.expected_types.size());
for (idx_t i = 0; i < table->columns.size(); i++) {
expected_names.push_back(table->columns[i].name);
}
}
auto function_data =
copy_function->function.copy_from_bind(context, *stmt.info, expected_names, bound_insert.expected_types);
auto get = make_unique<LogicalGet>(0, copy_function->function.copy_from_function, move(function_data),
bound_insert.expected_types, expected_names);
for (idx_t i = 0; i < bound_insert.expected_types.size(); i++) {
get->column_ids.push_back(i);
}
insert_statement.plan->children.push_back(move(get));
result.plan = move(insert_statement.plan);
return result;
}
BoundStatement Binder::Bind(CopyStatement &stmt) {
if (!stmt.info->is_from && !stmt.select_statement) {
// copy table into file without a query
// generate SELECT * FROM table;
auto ref = make_unique<BaseTableRef>();
ref->schema_name = stmt.info->schema;
ref->table_name = stmt.info->table;
auto statement = make_unique<SelectNode>();
statement->from_table = move(ref);
if (!stmt.info->select_list.empty()) {
for (auto &name : stmt.info->select_list) {
statement->select_list.push_back(make_unique<ColumnRefExpression>(name));
}
} else {
statement->select_list.push_back(make_unique<StarExpression>());
}
stmt.select_statement = move(statement);
}
this->allow_stream_result = false;
if (stmt.info->is_from) {
return BindCopyFrom(stmt);
} else {
return BindCopyTo(stmt);
}
}
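// Illustrative SQL shapes handled by the binders above (an informal sketch,
// not part of the original source):
//   COPY tbl TO 'out.csv' (FORMAT 'csv');         -- BindCopyTo, via a generated SELECT
//   COPY (SELECT a, b FROM tbl) TO 'out.pq' (FORMAT 'parquet'); -- BindCopyTo, explicit query
//   COPY tbl FROM 'in.csv';                       -- BindCopyFrom, via a generated INSERT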
} // namespace duckdb
| 1,823 |
430 |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for pre-processing the data into individual, standardized formats."""
import collections
import datetime
import itertools
import os
import pathlib
import re
from typing import Callable, Dict, Optional, Set, Tuple
from absl import logging
from dm_c19_modelling.england_data import constants
import pandas as pd
import yaml
_PATH_FILENAME_REGEXES = "filename_regexes.yaml"
_COLUMNS = constants.Columns
_DATE_FORMAT = "%Y-%m-%d"
def _order_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Orders the columns of the dataframe as: date, region, observations."""
df.insert(0, _COLUMNS.DATE.value, df.pop(_COLUMNS.DATE.value))
reg_columns = []
obs_columns = []
for col in df.columns[1:]:
if col.startswith(constants.REGION_PREFIX):
reg_columns.append(col)
elif col.startswith(constants.OBSERVATION_PREFIX):
obs_columns.append(col)
else:
raise ValueError(f"Unknown column: '{col}'")
columns = [_COLUMNS.DATE.value] + reg_columns + obs_columns
return df[columns]
def _raw_data_formatter_daily_deaths(filepath: str) -> pd.DataFrame:
"""Loads and formats daily deaths data."""
sheet_name = "Tab4 Deaths by trust"
header = 15
df = pd.read_excel(filepath, sheet_name=sheet_name, header=header)
# Drop rows and columns which are all nans.
df.dropna(axis=0, how="all", inplace=True)
df.dropna(axis=1, how="all", inplace=True)
# Drop unneeded columns and rows.
drop_columns = ["Total", "Awaiting verification"]
up_to_mar_1_index = "Up to 01-Mar-20"
if sum(i for i in df[up_to_mar_1_index] if isinstance(i, int)) == 0.0:
drop_columns.append(up_to_mar_1_index)
df.drop(columns=drop_columns, inplace=True)
df = df[df["Code"] != "-"]
# Melt the death counts by date into "Date" and "Death Count" columns.
df = df.melt(
id_vars=["NHS England Region", "Code", "Name"],
var_name="Date",
value_name="Death Count")
# Rename the columns to their standard names.
df.rename(
columns={
"Date": _COLUMNS.DATE.value,
"Death Count": _COLUMNS.OBS_DEATHS.value,
"Code": _COLUMNS.REG_TRUST_CODE.value,
"Name": _COLUMNS.REG_TRUST_NAME.value,
"NHS England Region": _COLUMNS.REG_NHSER_NAME.value,
},
inplace=True)
_order_columns(df)
df[_COLUMNS.DATE.value] = df[_COLUMNS.DATE.value].map(
lambda x: x.strftime(_DATE_FORMAT))
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_TRUST_NAME.value,
_COLUMNS.REG_TRUST_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_deaths' contains nans")
return df
def _raw_data_formatter_daily_cases(filepath: str) -> pd.DataFrame:
"""Loads and formats daily cases data."""
df = pd.read_csv(filepath)
df.rename(columns={"Area type": "Area_type"}, inplace=True)
df.query("Area_type == 'ltla'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"Area_type", "Cumulative lab-confirmed cases",
"Cumulative lab-confirmed cases rate"
]
df.drop(columns=drop_columns, inplace=True)
# Rename the columns to their standard names.
df.rename(
columns={
"Area name": _COLUMNS.REG_LTLA_NAME.value,
"Area code": _COLUMNS.REG_LTLA_CODE.value,
"Specimen date": _COLUMNS.DATE.value,
"Daily lab-confirmed cases": _COLUMNS.OBS_CASES.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_LTLA_NAME.value,
_COLUMNS.REG_LTLA_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_cases' contains nans")
return df
def _raw_data_formatter_google_mobility(filepath: str) -> pd.DataFrame:
"""Loads and formats Google mobility data."""
df = pd.read_csv(filepath)
# Filter to UK.
df.query("country_region_code == 'GB'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"country_region_code", "country_region", "metro_area", "census_fips_code"
]
df.drop(columns=drop_columns, inplace=True)
# Fill missing region info with "na".
df[["sub_region_1", "sub_region_2", "iso_3166_2_code"]].fillna(
"na", inplace=True)
# Rename the columns to their standard names.
df.rename(
columns={
"sub_region_1":
_COLUMNS.REG_SUB_REGION_1.value,
"sub_region_2":
_COLUMNS.REG_SUB_REGION_2.value,
"iso_3166_2_code":
_COLUMNS.REG_ISO_3166_2_CODE.value,
"date":
_COLUMNS.DATE.value,
"retail_and_recreation_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RETAIL_AND_RECREATION.value,
"grocery_and_pharmacy_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_GROCERY_AND_PHARMACY.value,
"parks_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_PARKS.value,
"transit_stations_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_TRANSIT_STATIONS.value,
"workplaces_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_WORKPLACES.value,
"residential_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RESIDENTIAL.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_SUB_REGION_1.value,
_COLUMNS.REG_SUB_REGION_2.value,
_COLUMNS.REG_ISO_3166_2_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
return df
def _raw_data_formatter_online_111(filepath: str) -> pd.DataFrame:
"""Loads and formats online 111 data."""
df = pd.read_csv(filepath)
# Drop nans.
df.dropna(subset=["ccgcode"], inplace=True)
# Reformat dates.
remap_dict = {
"journeydate":
lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime( # pylint: disable=g-long-lambda
_DATE_FORMAT),
"ccgname":
lambda x: x.replace("&", "and"),
"sex": {
"Female": "f",
"Male": "m",
"Indeterminate": "u",
},
"ageband": {
"0-18 years": "0",
"19-69 years": "19",
"70+ years": "70"
}
}
for col, remap in remap_dict.items():
df[col] = df[col].map(remap)
journeydate_values = pd.date_range(
df.journeydate.min(), df.journeydate.max()).strftime(_DATE_FORMAT)
ccgcode_values = df.ccgcode.unique()
df.sex.fillna("u", inplace=True)
sex_values = ["f", "m", "u"]
assert set(sex_values) >= set(df.sex.unique()), "unsupported sex value"
df.ageband.fillna("u", inplace=True)
ageband_values = ["0", "19", "70", "u"]
assert set(ageband_values) >= set(
df.ageband.unique()), "unsupported ageband value"
ccg_code_name_map = df[["ccgcode", "ccgname"
]].set_index("ccgcode")["ccgname"].drop_duplicates()
# Some CCG codes have duplicate names, which differ by their commas. Keep the
# longer ones.
fn = lambda x: sorted(x["ccgname"].map(lambda y: (len(y), y)))[-1][1]
ccg_code_name_map = ccg_code_name_map.reset_index().groupby("ccgcode").apply(
fn)
df_full = pd.DataFrame(
list(
itertools.product(journeydate_values, ccgcode_values, sex_values,
ageband_values)),
columns=["journeydate", "ccgcode", "sex", "ageband"])
df = pd.merge(df_full, df, how="outer")
  # Dates with 0 calls have no rows, so they appear as NaNs after the merge.
df["Total"].fillna(0, inplace=True)
df["ccgname"] = df["ccgcode"].map(ccg_code_name_map)
# Combine sex and ageband columns into a joint column.
df["sex_ageband"] = df["sex"] + "_" + df["ageband"]
df = df.pivot_table(
index=["journeydate", "ccgcode", "ccgname"],
columns="sex_ageband",
values="Total").reset_index()
df.columns.name = None
# Rename the columns to their standard names.
df.rename(
columns={
"ccgcode": _COLUMNS.REG_CCG_CODE.value,
"ccgname": _COLUMNS.REG_CCG_NAME.value,
"journeydate": _COLUMNS.DATE.value,
"f_0": _COLUMNS.OBS_ONLINE_111_F_0.value,
"f_19": _COLUMNS.OBS_ONLINE_111_F_19.value,
"f_70": _COLUMNS.OBS_ONLINE_111_F_70.value,
"f_u": _COLUMNS.OBS_ONLINE_111_F_U.value,
"m_0": _COLUMNS.OBS_ONLINE_111_M_0.value,
"m_19": _COLUMNS.OBS_ONLINE_111_M_19.value,
"m_70": _COLUMNS.OBS_ONLINE_111_M_70.value,
"m_u": _COLUMNS.OBS_ONLINE_111_M_U.value,
"u_0": _COLUMNS.OBS_ONLINE_111_U_0.value,
"u_19": _COLUMNS.OBS_ONLINE_111_U_19.value,
"u_70": _COLUMNS.OBS_ONLINE_111_U_70.value,
"u_u": _COLUMNS.OBS_ONLINE_111_U_U.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_NAME.value,
_COLUMNS.REG_CCG_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'online_111' contains nans")
return df
def _raw_data_formatter_calls_111_999(filepath: str) -> pd.DataFrame:
"""Loads and formats 111 & 999 calls data."""
df = pd.read_csv(filepath)
# Drop unneeded columns and rows.
drop_columns = []
df.drop(columns=drop_columns, inplace=True)
# Drop nans.
df.dropna(subset=["CCGCode", "CCGName"], inplace=True)
# Reformat values.
df["AgeBand"].fillna("u", inplace=True)
remap_dict = {
"Call Date":
lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime( # pylint: disable=g-long-lambda
"%Y-%m-%d"),
"CCGName":
lambda x: x.replace("&", "and"),
"SiteType":
lambda x: str(int(x)),
"Sex": {
"Female": "f",
"Male": "m",
"Unknown": "u",
},
"AgeBand": {
"0-18 years": "0",
"19-69 years": "19",
"70-120 years": "70",
"u": "u",
}
}
for col, remap in remap_dict.items():
df[col] = df[col].map(remap)
call_date_values = pd.date_range(df["Call Date"].min(),
df["Call Date"].max()).strftime(_DATE_FORMAT)
ccgcode_values = df["CCGCode"].unique()
sitetype_values = ["111", "999"]
assert set(sitetype_values) >= set(
df.SiteType.unique()), "unsupported sitetype value"
sex_values = ["f", "m", "u"]
assert set(sex_values) >= set(df.Sex.unique()), "unsupported sex value"
ageband_values = ["0", "19", "70", "u"]
assert set(ageband_values) >= set(
df.AgeBand.unique()), "unsupported ageband value"
ccg_code_name_map = df[["CCGCode", "CCGName"
]].set_index("CCGCode")["CCGName"].drop_duplicates()
df_full = pd.DataFrame(
list(itertools.product(call_date_values, ccgcode_values, sitetype_values,
sex_values, ageband_values)),
columns=["Call Date", "CCGCode", "SiteType", "Sex", "AgeBand"])
df = pd.merge(df_full, df, how="outer")
  # Dates with 0 calls have no rows, so they appear as NaNs after the merge.
df["TriageCount"].fillna(0, inplace=True)
df["CCGName"] = df["CCGCode"].map(ccg_code_name_map)
# Combine SiteType, Sex, and AgeBand columns into a joint column.
df["SiteType_Sex_AgeBand"] = (
df["SiteType"] + "_" + df["Sex"] + "_" + df["AgeBand"])
df = df.pivot_table(
index=["Call Date", "CCGCode", "CCGName"],
columns="SiteType_Sex_AgeBand",
values="TriageCount").reset_index()
df.columns.name = None
# Rename the columns to their standard names.
df.rename(
columns={
"CCGCode": _COLUMNS.REG_CCG_CODE.value,
"CCGName": _COLUMNS.REG_CCG_NAME.value,
"Call Date": _COLUMNS.DATE.value,
"111_f_0": _COLUMNS.OBS_CALL_111_F_0.value,
"111_f_19": _COLUMNS.OBS_CALL_111_F_19.value,
"111_f_70": _COLUMNS.OBS_CALL_111_F_70.value,
"111_f_u": _COLUMNS.OBS_CALL_111_F_U.value,
"111_m_0": _COLUMNS.OBS_CALL_111_M_0.value,
"111_m_19": _COLUMNS.OBS_CALL_111_M_19.value,
"111_m_70": _COLUMNS.OBS_CALL_111_M_70.value,
"111_m_u": _COLUMNS.OBS_CALL_111_M_U.value,
"111_u_0": _COLUMNS.OBS_CALL_111_U_0.value,
"111_u_19": _COLUMNS.OBS_CALL_111_U_19.value,
"111_u_70": _COLUMNS.OBS_CALL_111_U_70.value,
"111_u_u": _COLUMNS.OBS_CALL_111_U_U.value,
"999_f_0": _COLUMNS.OBS_CALL_999_F_0.value,
"999_f_19": _COLUMNS.OBS_CALL_999_F_19.value,
"999_f_70": _COLUMNS.OBS_CALL_999_F_70.value,
"999_f_u": _COLUMNS.OBS_CALL_999_F_U.value,
"999_m_0": _COLUMNS.OBS_CALL_999_M_0.value,
"999_m_19": _COLUMNS.OBS_CALL_999_M_19.value,
"999_m_70": _COLUMNS.OBS_CALL_999_M_70.value,
"999_m_u": _COLUMNS.OBS_CALL_999_M_U.value,
"999_u_0": _COLUMNS.OBS_CALL_999_U_0.value,
"999_u_19": _COLUMNS.OBS_CALL_999_U_19.value,
"999_u_70": _COLUMNS.OBS_CALL_999_U_70.value,
"999_u_u": _COLUMNS.OBS_CALL_999_U_U.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_NAME.value,
_COLUMNS.REG_CCG_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'calls_111_999' contains nans")
return df
_FORMATTER_FUNCTIONS = {
"daily_deaths": _raw_data_formatter_daily_deaths,
"daily_cases": _raw_data_formatter_daily_cases,
"google_mobility": _raw_data_formatter_google_mobility,
"online_111": _raw_data_formatter_online_111,
"calls_111_999": _raw_data_formatter_calls_111_999,
}
def _get_raw_data_formatter_by_name(name: str) -> Callable[[str], pd.DataFrame]:
return _FORMATTER_FUNCTIONS[name]
def _merge_online_111_and_calls_111_999(
df_online_111: pd.DataFrame,
df_calls_111_999: pd.DataFrame) -> pd.DataFrame:
"""Merges the 111 online and 111/999 calls into a single dataframe."""
df = pd.merge(
df_online_111,
df_calls_111_999,
how="outer",
on=[
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_CODE.value,
_COLUMNS.REG_CCG_NAME.value,
])
return df
def format_raw_data_files(
paths_dict: Dict[str, str]) -> Dict[str, pd.DataFrame]:
"""Loads and formats the individual raw data files.
Args:
paths_dict: mapping from data names to filepaths.
Returns:
mapping from data names to formatted dataframes.
"""
formatted_dfs = {}
for name, path in paths_dict.items():
logging.info("Formatting raw data: %s", name)
formatter = _get_raw_data_formatter_by_name(name)
formatted_dfs[name] = formatter(path)
logging.info("Merging online 111 and 111/999 calls")
if "online_111" and "calls_111_999" in formatted_dfs:
formatted_dfs[
"online_111_and_calls_111_999"] = _merge_online_111_and_calls_111_999(
formatted_dfs.pop("online_111"), formatted_dfs.pop("calls_111_999"))
elif "online_111" in formatted_dfs:
formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
"online_111")
elif "calls_111_999" in formatted_dfs:
formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
"calls_111_999")
return formatted_dfs
def merge_formatted_data(
formatted_data: Dict[str, pd.DataFrame]) -> pd.DataFrame:
"""Concatenates all formatted data into a single dataframe.
Args:
formatted_data: mapping from the data name to its dataframe.
Returns:
a dataframe containing all of the input dataframes.
"""
logging.info("Merging all dataframes")
dfs = []
for name, df in formatted_data.items():
df = df.copy()
df.insert(1, _COLUMNS.OBSERVATION_TYPE.value, name)
dfs.append(df)
df_merged = pd.concat(dfs)
reg_columns = [
c for c in df_merged.columns if c.startswith(constants.REGION_PREFIX)
]
df_merged.sort_values(
[_COLUMNS.DATE.value, _COLUMNS.OBSERVATION_TYPE.value] + reg_columns,
inplace=True)
df_merged.reset_index(drop=True, inplace=True)
return df_merged
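# A minimal end-to-end sketch combining the helpers in this module
# (illustrative; the directory paths are assumptions):
#
#   paths, scrape_date, missing = get_paths_for_given_date(
#       "/data/raw", scrape_date="latest")
#   formatted = format_raw_data_files(paths)
#   df_all = merge_formatted_data(formatted)
#   df_all.to_csv(f"/data/processed/{scrape_date}.csv", index=False)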
def _load_filename_regexes() -> Dict[str, str]:
"""Gets a mapping from the data name to the regex for that data's filepath."""
path = pathlib.Path(os.path.dirname(
os.path.realpath(__file__))) / _PATH_FILENAME_REGEXES
with open(path) as fid:
return yaml.load(fid, Loader=yaml.SafeLoader)
def get_paths_for_given_date(
raw_data_directory: str,
scrape_date: str) -> Tuple[Dict[str, str], str, Set[str]]:
"""Get the raw data paths for a scrape date and filename regex.
Args:
raw_data_directory: the directory where the raw data is saved.
scrape_date: the scrape date to use, in the form YYYYMMDD, or 'latest'.
Returns:
mapping of data names to filepaths
the scrape date used
names whose data was not found on disk
"""
filename_regexes = _load_filename_regexes()
if scrape_date == "latest":
rx = re.compile("^[0-9]{8}$")
directories = []
for filename in os.listdir(raw_data_directory):
if rx.match(filename) is None:
continue
path = pathlib.Path(raw_data_directory) / filename
if not os.path.isdir(path):
continue
directories.append(path)
if not directories:
raise ValueError("Could not find latest scrape date directory")
directory = max(directories)
scrape_date_dirname = directory.parts[-1]
else:
try:
datetime.datetime.strptime(scrape_date, "%Y%m%d")
except ValueError:
raise ValueError("Date must be formatted: YYYYMMDD")
scrape_date_dirname = scrape_date
directory = pathlib.Path(raw_data_directory) / scrape_date_dirname
paths_dict = collections.defaultdict(lambda: None)
for name, filename_regex in filename_regexes.items():
rx = re.compile(f"^{filename_regex}$")
for filename in os.listdir(directory):
path = directory / filename
if os.path.isdir(path):
continue
match = rx.match(filename)
if match is None:
continue
if paths_dict[name] is not None:
raise ValueError("There should only be 1 file per name")
paths_dict[name] = str(path)
missing_names = set(filename_regexes.keys()) - set(paths_dict.keys())
return dict(paths_dict), scrape_date_dirname, missing_names
def load_population_dataframe(
    raw_data_directory: str) -> Optional[pd.DataFrame]:
"""Load population data from disk, and create a dataframe from it.
Args:
raw_data_directory: the directory where the raw data is saved.
Returns:
    a dataframe containing population data, or None if the population file is
    missing.
"""
filename = _load_filename_regexes()["population"]
filepath = pathlib.Path(raw_data_directory) / filename
kwargs = dict(header=0, skiprows=(0, 1, 2, 3, 4, 5, 7))
try:
pop_m = pd.read_excel(filepath, sheet_name="Mid-2019 Males", **kwargs)
pop_f = pd.read_excel(filepath, sheet_name="Mid-2019 Females", **kwargs)
except FileNotFoundError:
return None
# Remove lower resolution columns.
columns_to_remove = ("STP20 Code", "STP20 Name", "NHSER20 Code",
"NHSER20 Name", "All Ages")
for col in columns_to_remove:
del pop_m[col]
del pop_f[col]
mapping = {"CCG Code": _COLUMNS.REG_CCG_CODE.value,
"CCG Name": _COLUMNS.REG_CCG_NAME.value,
"90+": 90}
pop_m.rename(columns=mapping, inplace=True)
pop_f.rename(columns=mapping, inplace=True)
# This labels the male and female data uniquely so they can be merged.
pop_m.rename(
columns=lambda x: f"m_{str(x).lower()}" if isinstance(x, int) else x,
inplace=True)
pop_f.rename(
columns=lambda x: f"f_{str(x).lower()}" if isinstance(x, int) else x,
inplace=True)
region_columns = [_COLUMNS.REG_CCG_NAME.value, _COLUMNS.REG_CCG_CODE.value]
df = pd.merge(pop_m, pop_f, how="outer", on=tuple(region_columns))
mapping = {
f"{gender}_{age}":
_COLUMNS.OBS_POPULATION_GENDER_AGE.value.format(gender=gender, age=age)
for gender, age in itertools.product(("m", "f"), range(91))
}
df.rename(columns=mapping, inplace=True)
return df
| 9,542 |
553 |
<filename>examples/features/src/main/java/com/github/kagkarlsson/examples/TrackingProgressRecurringTaskMain.java
package com.github.kagkarlsson.examples;
import com.github.kagkarlsson.examples.helpers.Example;
import com.github.kagkarlsson.scheduler.Scheduler;
import com.github.kagkarlsson.scheduler.task.helper.CustomTask;
import com.github.kagkarlsson.scheduler.task.helper.RecurringTask;
import com.github.kagkarlsson.scheduler.task.helper.Tasks;
import com.github.kagkarlsson.scheduler.task.schedule.FixedDelay;
import javax.sql.DataSource;
import java.io.Serializable;
import java.time.Duration;
import static java.util.function.Function.identity;
public class TrackingProgressRecurringTaskMain extends Example {
public static void main(String[] args) {
new TrackingProgressRecurringTaskMain().runWithDatasource();
}
@Override
public void run(DataSource dataSource) {
final FixedDelay schedule = FixedDelay.ofSeconds(2);
final RecurringTask<Counter> statefulTask = Tasks.recurring("counting-task", schedule, Counter.class)
.initialData(new Counter(0))
.executeStateful((taskInstance, executionContext) -> {
final Counter startingCounter = taskInstance.getData();
for (int i = 0; i < 10; i++) {
System.out.println("Counting " + (startingCounter.value + i));
}
return new Counter(startingCounter.value + 10); // new value to be persisted as task_data for the next run
});
final Scheduler scheduler = Scheduler
.create(dataSource)
.pollingInterval(Duration.ofSeconds(5))
.startTasks(statefulTask)
.registerShutdownHook()
.build();
scheduler.start();
}
private static final class Counter implements Serializable {
private final int value;
public Counter(int value) {
this.value = value;
}
}
}
| 792 |
945 |
/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkLBFGSBOptimizerv4_h
#define itkLBFGSBOptimizerv4_h
#include "itkLBFGSOptimizerBasev4.h"
#include "vnl/algo/vnl_lbfgsb.h"
#include "ITKOptimizersv4Export.h"
namespace itk
{
/* Necessary forward declaration */
/**
*\class LBFGSBOptimizerHelperv4
* \brief Wrapper helper around vnl_lbfgsb.
*
* This class is used to translate iteration events, etc, from
* vnl_lbfgsb into iteration events in ITK.
*
* \ingroup ITKOptimizersv4
*/
// Forward reference because of private implementation
class ITK_FORWARD_EXPORT LBFGSBOptimizerHelperv4;
/**
*\class LBFGSBOptimizerv4
 * \brief Limited memory Broyden Fletcher Goldfarb Shanno minimization with simple bounds.
 *
 * This class is a wrapper for converted Fortran code for performing limited
 * memory Broyden Fletcher Goldfarb Shanno minimization with simple bounds.
 * The algorithm minimizes a nonlinear function f(x) of n variables subject to
* simple bound constraints of l <= x <= u.
*
* See also the documentation in Numerics/lbfgsb.c
*
* References:
*
* [1] <NAME>, <NAME> and <NAME>al.
* A Limited Memory Algorithm for Bound Constrained Optimization, (1995),
* SIAM Journal on Scientific and Statistical Computing ,
* 16, 5, pp. 1190-1208.
*
* [2] <NAME>, <NAME> and <NAME>.
* L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale
* bound constrained optimization (1997),
* ACM Transactions on Mathematical Software,
* Vol 23, Num. 4, pp. 550 - 560.
*
* \ingroup Numerics Optimizersv4
* \ingroup ITKOptimizersv4
*/
class ITKOptimizersv4_EXPORT LBFGSBOptimizerv4 : public LBFGSOptimizerBasev4<vnl_lbfgsb>
{
public:
ITK_DISALLOW_COPY_AND_MOVE(LBFGSBOptimizerv4);
/** Standard "Self" type alias. */
using Self = LBFGSBOptimizerv4;
using Superclass = LBFGSOptimizerBasev4<vnl_lbfgsb>;
using Pointer = SmartPointer<Self>;
using ConstPointer = SmartPointer<const Self>;
using MetricType = Superclass::MetricType;
using ParametersType = Superclass::ParametersType;
using ScalesType = Superclass::ScalesType;
/** Method for creation through the object factory. */
itkNewMacro(Self);
/** Run-time type information (and related methods). */
itkTypeMacro(LBFGSBOptimizerv4, Superclass);
enum BoundSelectionValues
{
UNBOUNDED = 0,
LOWERBOUNDED = 1,
BOTHBOUNDED = 2,
UPPERBOUNDED = 3
};
/** BoundValue type.
* Use for defining the lower and upper bounds on the variables.
*/
using BoundValueType = Array<double>;
/** BoundSelection type
* Use for defining the boundary condition for each variables.
*/
using BoundSelectionType = Array<long>;
/** Set the position to initialize the optimization. */
void
SetInitialPosition(const ParametersType & param);
/** Get the position to initialize the optimization. */
ParametersType &
GetInitialPosition()
{
return m_InitialPosition;
}
/** Start optimization with an initial value. */
void
StartOptimization(bool doOnlyInitialization = false) override;
/** Plug in a Cost Function into the optimizer */
void
SetMetric(MetricType * metric) override;
/** Set the lower bound value for each variable. */
void
SetLowerBound(const BoundValueType & value);
itkGetConstReferenceMacro(LowerBound, BoundValueType);
/** Set the upper bound value for each variable. */
void
SetUpperBound(const BoundValueType & value);
itkGetConstReferenceMacro(UpperBound, BoundValueType);
/** Set the boundary condition for each variable, where
* select[i] = 0 if x[i] is unbounded,
* = 1 if x[i] has only a lower bound,
* = 2 if x[i] has both lower and upper bounds, and
   *             = 3 if x[i] has only an upper bound
*/
void
SetBoundSelection(const BoundSelectionType & value);
itkGetConstReferenceMacro(BoundSelection, BoundSelectionType);
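  /* A minimal setup sketch (illustrative, not part of the original header):
   *
   *   auto optimizer = itk::LBFGSBOptimizerv4::New();
   *   LBFGSBOptimizerv4::BoundValueType     lower(n), upper(n);
   *   LBFGSBOptimizerv4::BoundSelectionType select(n);
   *   lower.Fill(0.0);
   *   upper.Fill(1.0);
   *   select.Fill(LBFGSBOptimizerv4::BOTHBOUNDED); // l <= x <= u everywhere
   *   optimizer->SetLowerBound(lower);
   *   optimizer->SetUpperBound(upper);
   *   optimizer->SetBoundSelection(select);
   */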
/** Set/Get the CostFunctionConvergenceFactor. Algorithm terminates
   * when the reduction in cost function is less than factor * epsmch,
* where epsmch is the machine precision.
* Typical values for factor: 1e+12 for low accuracy;
* 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy.
*/
virtual void
SetCostFunctionConvergenceFactor(double);
itkGetConstMacro(CostFunctionConvergenceFactor, double);
/** Set/Get the MaximumNumberOfCorrections. Default is 5 */
virtual void
SetMaximumNumberOfCorrections(unsigned int);
itkGetConstMacro(MaximumNumberOfCorrections, unsigned int);
/** This optimizer does not support scaling of the derivatives. */
void
SetScales(const ScalesType &) override;
/** Get the current infinity norm of the project gradient of the cost
* function. */
itkGetConstReferenceMacro(InfinityNormOfProjectedGradient, double);
protected:
LBFGSBOptimizerv4();
~LBFGSBOptimizerv4() override;
void
PrintSelf(std::ostream & os, Indent indent) const override;
using CostFunctionAdaptorType = Superclass::CostFunctionAdaptorType;
/** Internal optimizer type. */
using InternalOptimizerType = LBFGSBOptimizerHelperv4;
friend class LBFGSBOptimizerHelperv4;
private:
unsigned int m_MaximumNumberOfCorrections{ 5 };
ParametersType m_InitialPosition;
BoundValueType m_LowerBound;
BoundValueType m_UpperBound;
BoundSelectionType m_BoundSelection;
};
} // end namespace itk
#endif
| 1,905 |
14,668 |
<reponame>zealoussnow/chromium
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromeos/network/network_activation_handler_impl.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "chromeos/dbus/shill/shill_service_client.h"
#include "chromeos/network/network_event_log.h"
#include "chromeos/network/network_handler.h"
#include "dbus/object_proxy.h"
namespace chromeos {
namespace {
const char kErrorShillError[] = "shill-error";
} // namespace
NetworkActivationHandlerImpl::NetworkActivationHandlerImpl() = default;
NetworkActivationHandlerImpl::~NetworkActivationHandlerImpl() = default;
void NetworkActivationHandlerImpl::CompleteActivation(
const std::string& service_path,
base::OnceClosure success_callback,
network_handler::ErrorCallback error_callback) {
NET_LOG(USER) << "CompleteActivation: " << NetworkPathId(service_path);
ShillServiceClient::Get()->CompleteCellularActivation(
dbus::ObjectPath(service_path),
base::BindOnce(&NetworkActivationHandlerImpl::HandleShillSuccess,
AsWeakPtr(), std::move(success_callback)),
base::BindOnce(&network_handler::ShillErrorCallbackFunction,
kErrorShillError, service_path,
std::move(error_callback)));
}
void NetworkActivationHandlerImpl::HandleShillSuccess(
base::OnceClosure success_callback) {
if (!success_callback.is_null())
std::move(success_callback).Run();
}
} // namespace chromeos
| 546 |
634 |
/*
* Copyright 2013-2017 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package consulo.web.gwt.client.ui;
import com.google.gwt.user.client.ui.HasHorizontalAlignment;
import com.google.gwt.user.client.ui.Label;
import com.vaadin.client.StyleConstants;
import com.vaadin.client.communication.StateChangeEvent;
import com.vaadin.client.ui.AbstractComponentConnector;
import com.vaadin.shared.ui.Connect;
import consulo.web.gwt.client.ui.image.ImageConverter;
import consulo.web.gwt.shared.ui.state.LabelState;
import consulo.web.gwt.shared.ui.state.image.MultiImageState;
/**
* @author VISTALL
* @since 11-Sep-17
*/
@Connect(canonicalName = "consulo.ui.web.internal.WebLabelImpl.Vaadin")
public class GwtLabelImplConnector extends AbstractComponentConnector {
@Override
protected void updateComponentSize() {
GwtComponentSizeUpdater.updateForComponent(this);
}
@Override
protected void updateWidgetStyleNames() {
super.updateWidgetStyleNames();
setWidgetStyleName(StyleConstants.UI_WIDGET, false);
}
@Override
public GwtLabelImpl getWidget() {
return (GwtLabelImpl)super.getWidget();
}
@Override
public LabelState getState() {
return (LabelState)super.getState();
}
@Override
public void onStateChanged(StateChangeEvent stateChangeEvent) {
super.onStateChanged(stateChangeEvent);
Label label = getWidget().getLabel();
label.setText(getState().caption);
switch (getState().myHorizontalAlignment) {
case LEFT:
label.setHorizontalAlignment(HasHorizontalAlignment.ALIGN_LEFT);
break;
case CENTER:
label.setHorizontalAlignment(HasHorizontalAlignment.ALIGN_CENTER);
break;
case RIGHT:
label.setHorizontalAlignment(HasHorizontalAlignment.ALIGN_RIGHT);
break;
}
MultiImageState state = getState().myImageState;
getWidget().setIcon(state == null ? null : ImageConverter.create(state));
}
}
| 801 |
589 |
package rocks.inspectit.server.jetty;
import javax.annotation.PostConstruct;
import javax.servlet.ServletContext;
import org.mortbay.jetty.servlet.Context;
import org.slf4j.Logger;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.web.context.support.GenericWebApplicationContext;
import rocks.inspectit.shared.all.spring.logger.Log;
/**
* This class binds an empty Spring {@link GenericWebApplicationContext} to the
* <code>ServletContext</code> of a given {@link Context}.
*
* The newly created web application context is required, so that servlets managed via Jetty can
* attach to it. While the context itself is usually empty, it is bound to the parent context
* provided through {@link ApplicationContextAware#setApplicationContext(ApplicationContext)}. Thus,
* all Jetty managed servlets may obtain the web application context through the configured
* {@link #setContextAttribute(String) context attribute}.
*
* The configured {@link #setJettyContext(Context) Jetty context} is automatically started upon
* {@link #initialize() initialization}.
*
* @author NovaProvisioning
*/
public class JettyWebApplicationContextInitializer implements ApplicationContextAware {
/** The logger of this class. */
@Log
Logger log;
/**
* The context attribute.
*/
private String contextAttribute;
/**
	 * The Jetty context being injected.
*/
private Context jettyContext;
/**
* The real application context.
*/
private ApplicationContext ctx;
/**
* Initializes the context attribute used to bind the web application context to the
* <code>ServletContext</code>.
*
* @param contextAttribute
* identifier to be used for binding
*/
public void setContextAttribute(String contextAttribute) {
this.contextAttribute = contextAttribute;
}
/**
* Injects the Jetty context object to be initialized.
*
* @param jettyContext
* the context
*/
public void setJettyContext(Context jettyContext) {
this.jettyContext = jettyContext;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) {
this.ctx = applicationContext;
}
/**
* Performs initialization of the web context binding it to Jetty.
*
* @throws Exception
* in case of an error while starting the Jetty context
*/
@PostConstruct
public void postConstruct() throws Exception {
ServletContext servletContext = jettyContext.getServletContext();
GenericWebApplicationContext webCtx = new GenericWebApplicationContext();
webCtx.setServletContext(servletContext);
webCtx.setParent(ctx);
webCtx.refresh();
servletContext.setAttribute(contextAttribute, webCtx);
jettyContext.start();
if (log.isInfoEnabled()) {
log.info("| Jetty Web Application Context started!");
}
}
}
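// A minimal wiring sketch (illustrative; the bean values are assumptions,
// while the attribute constant is Spring's usual root-context key):
//
//   JettyWebApplicationContextInitializer initializer =
//       new JettyWebApplicationContextInitializer();
//   initializer.setJettyContext(restContext); // org.mortbay.jetty.servlet.Context
//   initializer.setContextAttribute(
//       WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE);
//   // setApplicationContext(...) and postConstruct() are then invoked by Spring.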
| 831 |
12,278 |
<filename>ReactNativeFrontend/ios/Pods/boost/boost/callable_traits/args.hpp
/*
@Copyright <NAME> 2015-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_CLBL_TRTS_ARGS_HPP
#define BOOST_CLBL_TRTS_ARGS_HPP
#include <boost/callable_traits/detail/core.hpp>
namespace boost { namespace callable_traits {
//[ args_hpp
/*`[section:ref_args args]
[heading Header]
``#include <boost/callable_traits/args.hpp>``
[heading Definition]
*/
template<typename T, template<class...> class Container = std::tuple>
using args_t = //see below
//<-
detail::try_but_fail_if_invalid<
typename detail::traits<
detail::shallow_decay<T>>::template expand_args<Container>,
cannot_expand_the_parameter_list_of_first_template_argument>;
namespace detail {
template<typename T, template<class...> class Container,
typename = std::false_type>
struct args_impl {};
template<typename T, template<class...> class Container>
struct args_impl <T, Container, typename std::is_same<
args_t<T, Container>, detail::dummy>::type>
{
using type = args_t<T, Container>;
};
}
//->
template<typename T,
template<class...> class Container = std::tuple>
struct args : detail::args_impl<T, Container> {};
//<-
}} // namespace boost::callable_traits
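// A minimal compile-time sketch (illustrative, mirroring the table below):
//
//   static_assert(std::is_same<
//       boost::callable_traits::args_t<void(float, char, int)>,
//       std::tuple<float, char, int>>::value, "");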
//->
/*`
[heading Constraints]
* `T` must be one of the following:
* function
* function pointer
* function reference
* member function pointer
* member data pointer
* user-defined type with a non-overloaded `operator()`
* type of a non-generic lambda
[heading Behavior]
* When the constraints are violated, a substitution failure occurs.
* When `T` is a function, function pointer, or function reference, the aliased type is `Container` instantiated with the function's parameter types.
* When `T` is a function object, the aliased type is `Container` instantiated with the `T::operator()` parameter types.
* When `T` is a member function pointer, the aliased type is a `Container` instantiation, where the first type argument is a reference to the parent class of `T`, qualified according to the member qualifiers on `T`, such that the first type is equivalent to `boost::callable_traits::qualified_class_of_t<T>`. The subsequent type arguments, if any, are the parameter types of the member function.
* When `T` is a member data pointer, the aliased type is `Container` with a single element, which is a `const` reference to the parent class of `T`.
[heading Input/Output Examples]
[table
[[`T`] [`args_t<T>`]]
[[`void(float, char, int)`] [`std::tuple<float, char, int>`]]
[[`void(*)(float, char, int)`] [`std::tuple<float, char, int>`]]
[[`void(&)(float, char, int)`] [`std::tuple<float, char, int>`]]
[[`void(float, char, int) const &&`][`std::tuple<float, char, int>`]]
[[`void(*)()`] [`std::tuple<>`]]
[[`void(foo::* const &)(float, char, int)`] [`std::tuple<foo&, float, char, int>`]]
[[`int(foo::*)(int) const`] [`std::tuple<const foo&, int>`]]
[[`void(foo::*)() volatile &&`] [`std::tuple<volatile foo &&>`]]
[[`int foo::*`] [`std::tuple<const foo&>`]]
[[`const int foo::*`] [`std::tuple<const foo&>`]]
[[`int`] [(substitution failure)]]
[[`int (*const)()`] [(substitution failure)]]
]
[heading Example Program]
[import ../example/args.cpp]
[args]
[endsect]
*/
//]
#endif // #ifndef BOOST_CLBL_TRTS_ARGS_HPP
| 1,425 |
384 |
/*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2021, by the GROMACS development team, led by
* <NAME>, <NAME>, <NAME>, and <NAME>,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#include "gmxpre.h"
#include "freeenergydispatch.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/nblist.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/threaded_force_buffer.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "pairlistset.h"
#include "pairlistsets.h"
FreeEnergyDispatch::FreeEnergyDispatch(const int numEnergyGroups) :
foreignGroupPairEnergies_(numEnergyGroups),
threadedForceBuffer_(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded), false, numEnergyGroups),
threadedForeignEnergyBuffer_(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded), false, numEnergyGroups)
{
}
namespace
{
//! Flags all atoms present in pairlist \p nlist in the mask in \p threadForceBuffer
void setReductionMaskFromFepPairlist(const t_nblist& gmx_restrict nlist,
gmx::ThreadForceBuffer<gmx::RVec>* threadForceBuffer)
{
// Extract pair list data
gmx::ArrayRef<const int> iinr = nlist.iinr;
gmx::ArrayRef<const int> jjnr = nlist.jjnr;
for (int i : iinr)
{
threadForceBuffer->addAtomToMask(i);
}
for (int j : jjnr)
{
threadForceBuffer->addAtomToMask(j);
}
}
} // namespace
void FreeEnergyDispatch::setupFepThreadedForceBuffer(const int numAtomsForce, const PairlistSets& pairlistSets)
{
const int numThreads = threadedForceBuffer_.numThreadBuffers();
GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == numThreads,
"The number of buffers should be same as number of NB threads");
#pragma omp parallel for num_threads(numThreads) schedule(static)
for (int th = 0; th < numThreads; th++)
{
auto& threadForceBuffer = threadedForceBuffer_.threadForceBuffer(th);
threadForceBuffer.resizeBufferAndClearMask(numAtomsForce);
setReductionMaskFromFepPairlist(
*pairlistSets.pairlistSet(gmx::InteractionLocality::Local).fepLists()[th],
&threadForceBuffer);
if (pairlistSets.params().haveMultipleDomains)
{
setReductionMaskFromFepPairlist(
*pairlistSets.pairlistSet(gmx::InteractionLocality::NonLocal).fepLists()[th],
&threadForceBuffer);
}
threadForceBuffer.processMask();
}
threadedForceBuffer_.setupReduction();
}
void nonbonded_verlet_t::setupFepThreadedForceBuffer(const int numAtomsForce)
{
if (!pairlistSets_->params().haveFep)
{
return;
}
GMX_RELEASE_ASSERT(freeEnergyDispatch_, "Need a valid dispatch object");
freeEnergyDispatch_->setupFepThreadedForceBuffer(numAtomsForce, *pairlistSets_);
}
namespace
{
void dispatchFreeEnergyKernel(gmx::ArrayRef<const std::unique_ptr<t_nblist>> nbl_fep,
const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
bool useSimd,
int ntype,
real rlist,
const interaction_const_t& ic,
gmx::ArrayRef<const gmx::RVec> shiftvec,
gmx::ArrayRef<const real> nbfp,
gmx::ArrayRef<const real> nbfp_grid,
gmx::ArrayRef<const real> chargeA,
gmx::ArrayRef<const real> chargeB,
gmx::ArrayRef<const int> typeA,
gmx::ArrayRef<const int> typeB,
t_lambda* fepvals,
gmx::ArrayRef<const real> lambda,
const bool clearForcesAndEnergies,
gmx::ThreadedForceBuffer<gmx::RVec>* threadedForceBuffer,
gmx::ThreadedForceBuffer<gmx::RVec>* threadedForeignEnergyBuffer,
gmx_grppairener_t* foreignGroupPairEnergies,
gmx_enerdata_t* enerd,
const gmx::StepWorkload& stepWork,
t_nrnb* nrnb)
{
int donb_flags = 0;
/* Add short-range interactions */
donb_flags |= GMX_NONBONDED_DO_SR;
if (stepWork.computeForces)
{
donb_flags |= GMX_NONBONDED_DO_FORCE;
}
if (stepWork.computeVirial)
{
donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
}
if (stepWork.computeEnergy)
{
donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
}
GMX_ASSERT(gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded) == nbl_fep.ssize(),
"Number of lists should be same as number of NB threads");
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
{
try
{
auto& threadForceBuffer = threadedForceBuffer->threadForceBuffer(th);
if (clearForcesAndEnergies)
{
threadForceBuffer.clearForcesAndEnergies();
}
auto threadForces = threadForceBuffer.forceBufferWithPadding();
rvec* threadForceShiftBuffer = as_rvec_array(threadForceBuffer.shiftForces().data());
gmx::ArrayRef<real> threadVc =
threadForceBuffer.groupPairEnergies().energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
gmx::ArrayRef<real> threadVv =
threadForceBuffer.groupPairEnergies().energyGroupPairTerms[NonBondedEnergyTerms::LJSR];
gmx::ArrayRef<real> threadDvdl = threadForceBuffer.dvdl();
gmx_nb_free_energy_kernel(*nbl_fep[th],
coords,
useSimd,
ntype,
rlist,
ic,
shiftvec,
nbfp,
nbfp_grid,
chargeA,
chargeB,
typeA,
typeB,
donb_flags,
lambda,
nrnb,
threadForces,
threadForceShiftBuffer,
threadVc,
threadVv,
threadDvdl);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
    /* If we are computing foreign lambdas and have soft-core interactions,
     * we have to recalculate the (non-linear) energy contributions.
     */
if (fepvals->n_lambda > 0 && stepWork.computeDhdl && fepvals->sc_alpha != 0)
{
gmx::StepWorkload stepWorkForeignEnergies = stepWork;
stepWorkForeignEnergies.computeForces = false;
stepWorkForeignEnergies.computeVirial = false;
gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> lam_i;
gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl_nb = { 0 };
const int kernelFlags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE))
| GMX_NONBONDED_DO_FOREIGNLAMBDA;
for (gmx::index i = 0; i < 1 + enerd->foreignLambdaTerms.numLambdas(); i++)
{
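            // i == 0 recomputes at the current lambda state; i >= 1 walks the
            // foreign lambda values stored in fepvals->all_lambda (see lam_i below).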
std::fill(std::begin(dvdl_nb), std::end(dvdl_nb), 0);
for (int j = 0; j < static_cast<int>(FreeEnergyPerturbationCouplingType::Count); j++)
{
lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i - 1]);
}
#pragma omp parallel for schedule(static) num_threads(nbl_fep.ssize())
for (gmx::index th = 0; th < nbl_fep.ssize(); th++)
{
try
{
                    // Note that here we only compute energies and dV/dlambda, but we need
                    // to pass a force buffer. No forces are computed or stored.
auto& threadForeignEnergyBuffer = threadedForeignEnergyBuffer->threadForceBuffer(th);
threadForeignEnergyBuffer.clearForcesAndEnergies();
gmx::ArrayRef<real> threadVc =
threadForeignEnergyBuffer.groupPairEnergies()
.energyGroupPairTerms[NonBondedEnergyTerms::CoulombSR];
gmx::ArrayRef<real> threadVv =
threadForeignEnergyBuffer.groupPairEnergies()
.energyGroupPairTerms[NonBondedEnergyTerms::LJSR];
gmx::ArrayRef<real> threadDvdl = threadForeignEnergyBuffer.dvdl();
gmx_nb_free_energy_kernel(*nbl_fep[th],
coords,
useSimd,
ntype,
rlist,
ic,
shiftvec,
nbfp,
nbfp_grid,
chargeA,
chargeB,
typeA,
typeB,
kernelFlags,
lam_i,
nrnb,
gmx::ArrayRefWithPadding<gmx::RVec>(),
nullptr,
threadVc,
threadVv,
threadDvdl);
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
foreignGroupPairEnergies->clear();
threadedForeignEnergyBuffer->reduce(
nullptr, nullptr, foreignGroupPairEnergies, dvdl_nb, stepWorkForeignEnergies, 0);
std::array<real, F_NRE> foreign_term = { 0 };
sum_epot(*foreignGroupPairEnergies, foreign_term.data());
// Accumulate the foreign energy difference and dV/dlambda into the passed enerd
enerd->foreignLambdaTerms.accumulate(
i,
foreign_term[F_EPOT],
dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw]
+ dvdl_nb[FreeEnergyPerturbationCouplingType::Coul]);
}
}
}
} // namespace
void FreeEnergyDispatch::dispatchFreeEnergyKernels(const PairlistSets& pairlistSets,
const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
gmx::ForceWithShiftForces* forceWithShiftForces,
const bool useSimd,
const int ntype,
const real rlist,
const interaction_const_t& ic,
gmx::ArrayRef<const gmx::RVec> shiftvec,
gmx::ArrayRef<const real> nbfp,
gmx::ArrayRef<const real> nbfp_grid,
gmx::ArrayRef<const real> chargeA,
gmx::ArrayRef<const real> chargeB,
gmx::ArrayRef<const int> typeA,
gmx::ArrayRef<const int> typeB,
t_lambda* fepvals,
gmx::ArrayRef<const real> lambda,
gmx_enerdata_t* enerd,
const gmx::StepWorkload& stepWork,
t_nrnb* nrnb,
gmx_wallcycle* wcycle)
{
GMX_ASSERT(pairlistSets.params().haveFep, "We should have a free-energy pairlist");
wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedFep);
const int numLocalities = (pairlistSets.params().haveMultipleDomains ? 2 : 1);
// The first call to dispatchFreeEnergyKernel() should clear the buffers. Clearing happens
// inside that function to avoid an extra OpenMP parallel region here. We need a boolean
// to track the need for clearing.
// A better solution would be to move the OpenMP parallel region here, but that first
// requires modifying ThreadedForceBuffer.reduce() to be called thread parallel.
bool clearForcesAndEnergies = true;
for (int i = 0; i < numLocalities; i++)
{
const gmx::InteractionLocality iLocality = static_cast<gmx::InteractionLocality>(i);
const auto fepPairlists = pairlistSets.pairlistSet(iLocality).fepLists();
/* When the first list is empty, all are empty and there is nothing to do */
if (fepPairlists[0]->nrj > 0)
{
dispatchFreeEnergyKernel(fepPairlists,
coords,
useSimd,
ntype,
rlist,
ic,
shiftvec,
nbfp,
nbfp_grid,
chargeA,
chargeB,
typeA,
typeB,
fepvals,
lambda,
clearForcesAndEnergies,
&threadedForceBuffer_,
&threadedForeignEnergyBuffer_,
&foreignGroupPairEnergies_,
enerd,
stepWork,
nrnb);
}
else if (clearForcesAndEnergies)
{
// We need to clear the thread force buffer.
// With a non-empty pairlist we do this in dispatchFreeEnergyKernel()
            // to avoid the overhead of an extra OpenMP parallel loop.
#pragma omp parallel for schedule(static) num_threads(fepPairlists.ssize())
for (gmx::index th = 0; th < fepPairlists.ssize(); th++)
{
try
{
threadedForceBuffer_.threadForceBuffer(th).clearForcesAndEnergies();
}
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
}
}
clearForcesAndEnergies = false;
}
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedFep);
wallcycle_sub_start(wcycle, WallCycleSubCounter::NonbondedFepReduction);
gmx::EnumerationArray<FreeEnergyPerturbationCouplingType, real> dvdl_nb = { 0 };
threadedForceBuffer_.reduce(forceWithShiftForces, nullptr, &enerd->grpp, dvdl_nb, stepWork, 0);
if (fepvals->sc_alpha != 0)
{
enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Vdw] +=
dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
enerd->dvdl_nonlin[FreeEnergyPerturbationCouplingType::Coul] +=
dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
}
else
{
enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Vdw] +=
dvdl_nb[FreeEnergyPerturbationCouplingType::Vdw];
enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] +=
dvdl_nb[FreeEnergyPerturbationCouplingType::Coul];
}
wallcycle_sub_stop(wcycle, WallCycleSubCounter::NonbondedFepReduction);
}
void nonbonded_verlet_t::dispatchFreeEnergyKernels(const gmx::ArrayRefWithPadding<const gmx::RVec>& coords,
gmx::ForceWithShiftForces* forceWithShiftForces,
const bool useSimd,
const int ntype,
const real rlist,
const interaction_const_t& ic,
gmx::ArrayRef<const gmx::RVec> shiftvec,
gmx::ArrayRef<const real> nbfp,
gmx::ArrayRef<const real> nbfp_grid,
gmx::ArrayRef<const real> chargeA,
gmx::ArrayRef<const real> chargeB,
gmx::ArrayRef<const int> typeA,
gmx::ArrayRef<const int> typeB,
t_lambda* fepvals,
gmx::ArrayRef<const real> lambda,
gmx_enerdata_t* enerd,
const gmx::StepWorkload& stepWork,
t_nrnb* nrnb)
{
if (!pairlistSets_->params().haveFep)
{
return;
}
GMX_RELEASE_ASSERT(freeEnergyDispatch_, "Need a valid dispatch object");
freeEnergyDispatch_->dispatchFreeEnergyKernels(*pairlistSets_,
coords,
forceWithShiftForces,
useSimd,
ntype,
rlist,
ic,
shiftvec,
nbfp,
nbfp_grid,
chargeA,
chargeB,
typeA,
typeB,
fepvals,
lambda,
enerd,
stepWork,
nrnb,
wcycle_);
}
| 13,277 |
774 |
<gh_stars>100-1000
"""
Tests for testuser command.
"""
from django.conf import settings
from django.core import management
from django.contrib.auth import get_user_model
def test_testuser():
user_model = get_user_model()
# the test user is already there, created by conftest.py
# change its email address, so we can notice whether the command worked
u = user_model.objects.get(username='test')
u.email = "<EMAIL>"
u.save()
    # now re-initialize the test user via management command:
management.call_command('testuser')
# check if the test user is there, with default email
u = user_model.objects.get(username='test')
assert u.email == settings.DEFAULT_FROM_EMAIL
| 234 |
535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <os/mynewt.h>
#include <bsp/bsp.h>
#include <i2s/i2s.h>
#include <i2s/i2s_driver.h>
#include <i2s_nrfx/i2s_nrfx.h>
#include <nrfx/drivers/include/nrfx_i2s.h>
struct i2s_nrfx {
nrfx_i2s_config_t nrfx_i2s_cfg;
bool running;
int8_t nrfx_queued_count;
struct i2s *i2s;
struct i2s_sample_buffer *nrfx_buffers[2];
};
static struct i2s_nrfx i2s_nrfx;
static void
nrfx_add_buffer(struct i2s *i2s, struct i2s_sample_buffer *buffer)
{
nrfx_i2s_buffers_t nrfx_buffers = {0};
nrfx_err_t err;
assert(i2s != NULL);
if (buffer == NULL) {
return;
}
if (i2s->direction == I2S_OUT || i2s->direction == I2S_OUT_IN) {
nrfx_buffers.p_tx_buffer = buffer->sample_data;
}
if (i2s->direction == I2S_IN || i2s->direction == I2S_OUT_IN) {
nrfx_buffers.p_rx_buffer = buffer->sample_data;
}
assert(i2s_nrfx.nrfx_queued_count < 2);
assert(i2s_nrfx.nrfx_buffers[i2s_nrfx.nrfx_queued_count] == NULL);
i2s_nrfx.nrfx_buffers[i2s_nrfx.nrfx_queued_count] = buffer;
i2s_nrfx.nrfx_queued_count++;
if (i2s_nrfx.nrfx_queued_count == 1) {
        i2s_driver_state_changed(i2s, I2S_STATE_RUNNING);
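        /* nrfx_i2s_start() takes the buffer size in 32-bit words, hence the
         * conversion from bytes via the division by 4 below. */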
err = nrfx_i2s_start(&nrfx_buffers, buffer->sample_count * i2s->sample_size_in_bytes / 4, 0);
} else {
err = nrfx_i2s_next_buffers_set(&nrfx_buffers);
}
assert(err == NRFX_SUCCESS);
}
static void
feed_nrfx(void)
{
struct i2s_sample_buffer *buffer;
buffer = i2s_driver_buffer_get(i2s_nrfx.i2s);
nrfx_add_buffer(i2s_nrfx.i2s, buffer);
}
static void
i2s_nrfx_data_handler(const nrfx_i2s_buffers_t *p_released, uint32_t status)
{
struct i2s_sample_buffer *buffer;
if (p_released != NULL &&
(p_released->p_rx_buffer != NULL || p_released->p_tx_buffer != NULL)) {
i2s_nrfx.nrfx_queued_count--;
assert(i2s_nrfx.nrfx_queued_count >= 0);
buffer = i2s_nrfx.nrfx_buffers[0];
assert(buffer->sample_data == p_released->p_tx_buffer || buffer->sample_data == p_released->p_rx_buffer);
i2s_nrfx.nrfx_buffers[0] = i2s_nrfx.nrfx_buffers[1];
i2s_nrfx.nrfx_buffers[1] = NULL;
i2s_driver_buffer_put(i2s_nrfx.i2s, buffer);
}
if (i2s_nrfx.running && i2s_nrfx.nrfx_queued_count < 2) {
assert(i2s_nrfx.nrfx_buffers[1] == NULL);
feed_nrfx();
}
if (status == NRFX_I2S_STATUS_TRANSFER_STOPPED) {
i2s_driver_state_changed(i2s_nrfx.i2s, I2S_STATE_STOPPED);
}
}
static int
i2s_nrfx_init(struct i2s *i2s, const struct i2s_cfg *cfg)
{
int rc;
i2s_nrfx.i2s = i2s;
NVIC_SetVector(nrfx_get_irq_number(NRF_I2S), (uint32_t)nrfx_i2s_irq_handler);
i2s_nrfx.nrfx_i2s_cfg = cfg->nrfx_i2s_cfg;
switch (cfg->nrfx_i2s_cfg.sample_width) {
case NRF_I2S_SWIDTH_8BIT:
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_8BitIn16)
case NRF_I2S_SWIDTH_8BIT_IN16BIT:
#endif
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_8BitIn32)
case NRF_I2S_SWIDTH_8BIT_IN32BIT:
#endif
i2s->sample_size_in_bytes = 1;
break;
case NRF_I2S_SWIDTH_16BIT:
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_16BitIn32)
case NRF_I2S_SWIDTH_16BIT_IN32BIT:
#endif
i2s->sample_size_in_bytes = 2;
break;
case NRF_I2S_SWIDTH_24BIT:
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_24BitIn32)
case NRF_I2S_SWIDTH_24BIT_IN32BIT:
#endif
#if defined(I2S_CONFIG_SWIDTH_SWIDTH_32Bit)
case NRF_I2S_SWIDTH_32BIT:
#endif
i2s->sample_size_in_bytes = 4;
break;
}
i2s->direction = I2S_INVALID;
if (cfg->nrfx_i2s_cfg.sdin_pin != NRFX_I2S_PIN_NOT_USED) {
i2s->direction = I2S_IN;
}
if (cfg->nrfx_i2s_cfg.sdout_pin != NRFX_I2S_PIN_NOT_USED) {
i2s->direction |= I2S_OUT;
}
rc = i2s_init(i2s, cfg->pool);
if (rc != OS_OK) {
nrfx_i2s_uninit();
goto end;
}
i2s->sample_rate = cfg->sample_rate;
i2s->driver_data = &i2s_nrfx;
end:
return rc;
}
int
i2s_create(struct i2s *i2s, const char *name, const struct i2s_cfg *cfg)
{
return os_dev_create(&i2s->dev, name, OS_DEV_INIT_PRIMARY,
100, (os_dev_init_func_t)i2s_nrfx_init, (void *)cfg);
}
int
i2s_driver_stop(struct i2s *i2s)
{
struct i2s_sample_buffer *buffer;
if (i2s_nrfx.running) {
i2s_nrfx.running = false;
nrfx_i2s_stop();
}
while (NULL != (buffer = i2s_driver_buffer_get(i2s))) {
i2s_driver_buffer_put(i2s, buffer);
}
return 0;
}
/* Settings are stored for the following sampling frequencies:
 * 8000, 16000, 22050, 32000, 44100, 48000 */
struct i2s_clock_cfg {
nrf_i2s_mck_t mck_setup;
nrf_i2s_ratio_t ratio;
};
static const uint16_t sample_rates[] = { 8000, 16000, 22050, 32000, 44100, 48000 };
static const struct i2s_clock_cfg mck_for_8_16_bit_samples[] = {
{ NRF_I2S_MCK_32MDIV125, NRF_I2S_RATIO_32X}, /* 8000: 8000 LRCK error 0.0% */
{ NRF_I2S_MCK_32MDIV63, NRF_I2S_RATIO_32X}, /* 16000: 15873.016 LRCK error -0.8% */
{ NRF_I2S_MCK_32MDIV15, NRF_I2S_RATIO_96X}, /* 22050: 22222.222 LRCK error 0.8% */
{ NRF_I2S_MCK_32MDIV31, NRF_I2S_RATIO_32X}, /* 32000: 32258.065 LRCK error 0.8% */
{ NRF_I2S_MCK_32MDIV23, NRF_I2S_RATIO_32X}, /* 44100: 43478.261 LRCK error -1.4% */
{ NRF_I2S_MCK_32MDIV21, NRF_I2S_RATIO_32X} /* 48000: 47619.048 LRCK error -0.8% */
};
static const struct i2s_clock_cfg mck_for_24_bit_samples[] = {
{ NRF_I2S_MCK_32MDIV21, NRF_I2S_RATIO_192X}, /* 8000: 7936.508 LRCK error -0.8% */
{ NRF_I2S_MCK_32MDIV42, NRF_I2S_RATIO_48X}, /* 16000: 15873.016 LRCK error -0.8% */
{ NRF_I2S_MCK_32MDIV30, NRF_I2S_RATIO_48X}, /* 22050: 22222.222 LRCK error 0.8% */
{ NRF_I2S_MCK_32MDIV21, NRF_I2S_RATIO_48X}, /* 32000: 31746.032 LRCK error -0.8% */
{ NRF_I2S_MCK_32MDIV15, NRF_I2S_RATIO_48X}, /* 44100: 44444.444 LRCK error 0.8% */
{ NRF_I2S_MCK_32MDIV15, NRF_I2S_RATIO_48X} /* 48000: 44444.444 LRCK error -7.4% */
};
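/*
 * Illustrative check (not in the original source): LRCK = MCK / RATIO with
 * MCK = 32 MHz / divider. For the first 8/16-bit entry above:
 * 32 MHz / 125 / 32 = 8000 Hz, matching the quoted 0.0% error.
 */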
static void
i2s_nrfx_select_clock_cfg(nrfx_i2s_config_t *cfg, uint32_t sample_rate)
{
int i;
if (cfg->ratio != 0 || cfg->mck_setup != 0) {
/* User provided custom clock setup, no need to use stock values */
return;
}
for (i = 0; i < ARRAY_SIZE(sample_rates); ++i) {
if (sample_rates[i] == sample_rate) {
if (cfg->sample_width == NRF_I2S_SWIDTH_24BIT) {
cfg->ratio = mck_for_24_bit_samples[i].ratio;
cfg->mck_setup = mck_for_24_bit_samples[i].mck_setup;
} else {
cfg->ratio = mck_for_8_16_bit_samples[i].ratio;
cfg->mck_setup = mck_for_8_16_bit_samples[i].mck_setup;
}
break;
}
}
assert(cfg->mck_setup);
}
int
i2s_driver_start(struct i2s *i2s)
{
int rc = 0;
if (!i2s_nrfx.running) {
i2s_nrfx.running = true;
i2s_nrfx_select_clock_cfg(&i2s_nrfx.nrfx_i2s_cfg, i2s->sample_rate);
nrfx_i2s_init(&i2s_nrfx.nrfx_i2s_cfg, i2s_nrfx_data_handler);
assert(i2s_nrfx.nrfx_buffers[0] == NULL);
assert(i2s_nrfx.nrfx_buffers[1] == NULL);
assert(!STAILQ_EMPTY(&i2s->driver_queue));
i2s_nrfx.nrfx_queued_count = 0;
feed_nrfx();
}
return rc;
}
void
i2s_driver_buffer_queued(struct i2s *i2s)
{
if (i2s_nrfx.nrfx_queued_count < 2 && i2s_nrfx.running) {
feed_nrfx();
}
}
int
i2s_driver_suspend(struct i2s *i2s, os_time_t timeout, int arg)
{
return OS_OK;
}
int
i2s_driver_resume(struct i2s *i2s)
{
return OS_OK;
}
| 4,280 |
868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.rest.queue;
import javax.ws.rs.Consumes;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.QueueConfiguration;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.api.core.client.ClientSession;
import org.apache.activemq.artemis.jms.client.ActiveMQDestination;
import org.apache.activemq.artemis.jms.client.ActiveMQQueue;
import org.apache.activemq.artemis.jms.server.config.JMSQueueConfiguration;
import org.apache.activemq.artemis.jms.server.config.impl.FileJMSConfiguration;
import org.apache.activemq.artemis.rest.ActiveMQRestLogger;
import org.apache.activemq.artemis.rest.queue.push.PushConsumerResource;
import org.apache.activemq.artemis.rest.queue.push.xml.PushRegistration;
import org.apache.activemq.artemis.rest.util.Constants;
import org.w3c.dom.Document;
@Path(Constants.PATH_FOR_QUEUES)
public class QueueDestinationsResource {
private final Map<String, QueueResource> queues = new ConcurrentHashMap<>();
private final QueueServiceManager manager;
public QueueDestinationsResource(QueueServiceManager manager) {
this.manager = manager;
}
@POST
@Consumes("application/activemq.jms.queue+xml")
public Response createJmsQueue(@Context UriInfo uriInfo, Document document) {
ActiveMQRestLogger.LOGGER.debug("Handling POST request for \"" + uriInfo.getPath() + "\"");
try {
JMSQueueConfiguration queue = FileJMSConfiguration.parseQueueConfiguration(document.getDocumentElement());
ActiveMQQueue activeMQQueue = ActiveMQDestination.createQueue(queue.getName());
String queueName = activeMQQueue.getAddress();
ClientSession session = manager.getSessionFactory().createSession(false, false, false);
try {
ClientSession.QueueQuery query = session.queueQuery(new SimpleString(queueName));
if (!query.isExists()) {
if (queue.getSelector() != null) {
session.createQueue(new QueueConfiguration(queueName).setFilterString(queue.getSelector()).setDurable(queue.isDurable()));
} else {
session.createQueue(new QueueConfiguration(queueName).setDurable(queue.isDurable()));
}
} else {
throw new WebApplicationException(Response.status(412).type("text/plain").entity("Queue already exists.").build());
}
} finally {
try {
session.close();
} catch (Exception ignored) {
}
}
URI uri = uriInfo.getRequestUriBuilder().path(queueName).build();
return Response.created(uri).build();
} catch (Exception e) {
if (e instanceof WebApplicationException)
throw (WebApplicationException) e;
throw new WebApplicationException(e, Response.serverError().type("text/plain").entity("Failed to create queue.").build());
}
}
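   // Illustrative request (payload shape assumed from the parsed queue
   // configuration, not normative):
   //   POST /queues
   //   Content-Type: application/activemq.jms.queue+xml
   //   <queue name="orders"><durable>true</durable></queue>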
public Map<String, QueueResource> getQueues() {
return queues;
}
@Path("/{queue-name}")
public synchronized QueueResource findQueue(@PathParam("queue-name") String name) throws Exception {
QueueResource queue = queues.get(name);
if (queue == null) {
String queueName = name;
ClientSession session = manager.getSessionFactory().createSession(false, false, false);
try {
ClientSession.QueueQuery query = session.queueQuery(new SimpleString(queueName));
if (!query.isExists()) {
throw new WebApplicationException(Response.status(404).type("text/plain").entity("Queue '" + name + "' does not exist").build());
}
DestinationSettings queueSettings = manager.getDefaultSettings();
boolean defaultDurable = queueSettings.isDurableSend() || query.isDurable();
queue = createQueueResource(queueName, defaultDurable, queueSettings.getConsumerSessionTimeoutSeconds(), queueSettings.isDuplicatesAllowed());
} finally {
try {
session.close();
} catch (ActiveMQException e) {
}
}
}
return queue;
}
public QueueResource createQueueResource(String queueName,
boolean defaultDurable,
int timeoutSeconds,
boolean duplicates) throws Exception {
QueueResource queueResource = new QueueResource();
queueResource.setQueueDestinationsResource(this);
queueResource.setDestination(queueName);
queueResource.setServiceManager(manager);
ConsumersResource consumers = new ConsumersResource();
consumers.setConsumerTimeoutSeconds(timeoutSeconds);
consumers.setDestination(queueName);
consumers.setSessionFactory(manager.getConsumerSessionFactory());
consumers.setServiceManager(manager);
queueResource.setConsumers(consumers);
PushConsumerResource push = new PushConsumerResource();
push.setDestination(queueName);
push.setSessionFactory(manager.getConsumerSessionFactory());
push.setJmsOptions(manager.getJmsOptions());
queueResource.setPushConsumers(push);
PostMessage sender = null;
if (duplicates) {
sender = new PostMessageDupsOk();
} else {
sender = new PostMessageNoDups();
}
sender.setServiceManager(manager);
sender.setDefaultDurable(defaultDurable);
sender.setDestination(queueName);
sender.setSessionFactory(manager.getSessionFactory());
sender.setPoolSize(manager.getProducerPoolSize());
sender.setProducerTimeToLive(manager.getProducerTimeToLive());
sender.init();
queueResource.setSender(sender);
if (manager.getPushStore() != null) {
push.setPushStore(manager.getPushStore());
List<PushRegistration> regs = manager.getPushStore().getByDestination(queueName);
for (PushRegistration reg : regs) {
push.addRegistration(reg);
}
}
queueResource.start();
getQueues().put(queueName, queueResource);
return queueResource;
}
}
| 2,715 |
1,083 |
//===--- PILWitnessTable.h - Defines the PILWitnessTable class --*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines the PILWitnessTable class, which is used to map a protocol
// conformance for a type to its implementing PILFunctions. This information is
// (FIXME will be) used by IRGen to create witness tables for protocol dispatch.
// It can also be used by generic specialization and existential
// devirtualization passes to promote witness_method and protocol_method
// instructions to static function_refs.
//
//===----------------------------------------------------------------------===//
#ifndef POLARPHP_PIL_PILWITNESSTABLE_H
#define POLARPHP_PIL_PILWITNESSTABLE_H
#include "polarphp/pil/lang/PILAllocated.h"
#include "polarphp/pil/lang/PILDeclRef.h"
#include "polarphp/pil/lang/PILFunction.h"
#include "polarphp/ast/InterfaceConformanceRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/ilist.h"
#include <string>
namespace polar {
class PILFunction;
class PILModule;
enum IsSerialized_t : unsigned char;
class InterfaceConformance;
class RootInterfaceConformance;
/// A mapping from each requirement of a protocol to the PIL-level entity
/// satisfying the requirement for a concrete type.
class PILWitnessTable : public llvm::ilist_node<PILWitnessTable>,
public PILAllocated<PILWitnessTable>
{
public:
/// A witness table entry describing the witness for a method.
struct MethodWitness {
/// The method required.
PILDeclRef Requirement;
/// The witness for the method.
/// This can be null in case dead function elimination has removed the method.
PILFunction *Witness;
};
/// A witness table entry describing the witness for an associated type.
struct AssociatedTypeWitness {
/// The associated type required.
AssociatedTypeDecl *Requirement;
/// The concrete semantic type of the witness.
CanType Witness;
};
/// A witness table entry describing the witness for an associated type's
/// protocol requirement.
struct AssociatedTypeInterfaceWitness {
/// The associated type required. A dependent type in the protocol's
/// context.
CanType Requirement;
/// The protocol requirement on the type.
InterfaceDecl *Interface;
/// The InterfaceConformance satisfying the requirement. Null if the
/// conformance is dependent.
InterfaceConformanceRef Witness;
};
/// A witness table entry referencing the protocol conformance for a refined
/// base protocol.
struct BaseInterfaceWitness {
/// The base protocol.
InterfaceDecl *Requirement;
/// The InterfaceConformance for the base protocol.
InterfaceConformance *Witness;
};
/// A witness table entry kind.
enum WitnessKind {
Invalid,
Method,
AssociatedType,
AssociatedTypeInterface,
BaseInterface
};
/// A witness table entry.
class Entry {
WitnessKind Kind;
union {
MethodWitness Method;
AssociatedTypeWitness AssociatedType;
AssociatedTypeInterfaceWitness AssociatedTypeInterface;
BaseInterfaceWitness BaseInterface;
};
public:
Entry() : Kind(WitnessKind::Invalid) {}
Entry(const MethodWitness &Method)
: Kind(WitnessKind::Method), Method(Method)
{}
Entry(const AssociatedTypeWitness &AssociatedType)
: Kind(WitnessKind::AssociatedType), AssociatedType(AssociatedType)
{}
Entry(const AssociatedTypeInterfaceWitness &AssociatedTypeInterface)
: Kind(WitnessKind::AssociatedTypeInterface),
AssociatedTypeInterface(AssociatedTypeInterface)
{}
Entry(const BaseInterfaceWitness &BaseInterface)
: Kind(WitnessKind::BaseInterface),
BaseInterface(BaseInterface)
{}
WitnessKind getKind() const { return Kind; }
bool isValid() const { return Kind != WitnessKind::Invalid; }
const MethodWitness &getMethodWitness() const {
assert(Kind == WitnessKind::Method);
return Method;
}
const AssociatedTypeWitness &getAssociatedTypeWitness() const {
assert(Kind == WitnessKind::AssociatedType);
return AssociatedType;
}
const AssociatedTypeInterfaceWitness &
getAssociatedTypeInterfaceWitness() const {
assert(Kind == WitnessKind::AssociatedTypeInterface);
return AssociatedTypeInterface;
}
const BaseInterfaceWitness &getBaseInterfaceWitness() const {
assert(Kind == WitnessKind::BaseInterface);
return BaseInterface;
}
void removeWitnessMethod() {
assert(Kind == WitnessKind::Method);
if (Method.Witness) {
Method.Witness->decrementRefCount();
}
Method.Witness = nullptr;
}
void print(llvm::raw_ostream &out, bool verbose,
const PrintOptions &options) const;
};
/// An entry for a conformance requirement that makes the requirement
/// conditional. These aren't public, but any witness thunks need to feed them
/// into the true witness functions.
struct ConditionalConformance {
CanType Requirement;
InterfaceConformanceRef Conformance;
};
private:
/// The module which contains the PILWitnessTable.
PILModule &Mod;
/// The symbol name of the witness table that will be propagated to the object
/// file level.
StringRef Name;
/// The linkage of the witness table.
PILLinkage Linkage;
/// The conformance mapped to this witness table.
RootInterfaceConformance *Conformance;
/// The various witnesses containing in this witness table. Is empty if the
/// table has no witness entries or if it is a declaration.
MutableArrayRef<Entry> Entries;
/// Any conditional conformances required for this witness table. These are
/// private to this conformance.
///
/// (If other private entities are introduced this could/should be switched
/// into a private version of Entries.)
MutableArrayRef<ConditionalConformance> ConditionalConformances;
/// Whether or not this witness table is a declaration. This is separate from
/// whether or not entries is empty since you can have an empty witness table
/// that is not a declaration.
bool IsDeclaration;
/// Whether or not this witness table is serialized, which allows
/// devirtualization from another module.
bool Serialized;
/// Private constructor for making PILWitnessTable definitions.
PILWitnessTable(PILModule &M, PILLinkage Linkage, IsSerialized_t Serialized,
StringRef name, RootInterfaceConformance *conformance,
ArrayRef<Entry> entries,
ArrayRef<ConditionalConformance> conditionalConformances);
/// Private constructor for making PILWitnessTable declarations.
PILWitnessTable(PILModule &M, PILLinkage Linkage, StringRef Name,
RootInterfaceConformance *conformance);
void addWitnessTable();
public:
/// Create a new PILWitnessTable definition with the given entries.
static PILWitnessTable *
create(PILModule &M, PILLinkage Linkage, IsSerialized_t Serialized,
RootInterfaceConformance *conformance, ArrayRef<Entry> entries,
ArrayRef<ConditionalConformance> conditionalConformances);
/// Create a new PILWitnessTable declaration.
static PILWitnessTable *create(PILModule &M, PILLinkage Linkage,
RootInterfaceConformance *conformance);
~PILWitnessTable();
/// Return the AST InterfaceConformance this witness table represents.
RootInterfaceConformance *getConformance() const {
return Conformance;
}
/// Return the context in which the conformance giving rise to this
/// witness table was defined.
DeclContext *getDeclContext() const;
/// Return the protocol for which this witness table is a conformance.
InterfaceDecl *getInterface() const;
/// Return the formal type which conforms to the protocol.
///
/// Note that this will not be a substituted type: it may only be meaningful
/// in the abstract context of the conformance rather than the context of any
/// particular use of it.
CanType getConformingType() const;
/// Return the symbol name of the witness table that will be propagated to the
/// object file level.
StringRef getName() const { return Name; }
/// Returns true if this witness table is a declaration.
bool isDeclaration() const { return IsDeclaration; }
/// Returns true if this witness table is a definition.
bool isDefinition() const { return !isDeclaration(); }
/// Returns true if this witness table is going to be (or was) serialized.
IsSerialized_t isSerialized() const {
return Serialized ? IsSerialized : IsNotSerialized;
}
/// Sets the serialized flag.
void setSerialized(IsSerialized_t serialized) {
assert(serialized != IsSerializable);
Serialized = (serialized ? 1 : 0);
}
/// Return all of the witness table entries.
ArrayRef<Entry> getEntries() const { return Entries; }
/// Return all of the conditional conformances.
ArrayRef<ConditionalConformance> getConditionalConformances() const {
return ConditionalConformances;
}
/// Clears methods in MethodWitness entries.
/// \p predicate Returns true if the passed entry should be set to null.
template <typename Predicate> void clearMethods_if(Predicate predicate) {
for (Entry &entry : Entries) {
if (entry.getKind() == WitnessKind::Method) {
const MethodWitness &MW = entry.getMethodWitness();
if (MW.Witness && predicate(MW)) {
entry.removeWitnessMethod();
}
}
}
}
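  // Illustrative use (hypothetical predicate, not from the original header):
  //   WT->clearMethods_if([](const MethodWitness &MW) {
  //     return !isStillReferenced(MW.Witness);
  //   });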
/// Verify that the witness table is well-formed.
void verify(const PILModule &M) const;
/// Get the linkage of the witness table.
PILLinkage getLinkage() const { return Linkage; }
/// Set the linkage of the witness table.
void setLinkage(PILLinkage l) { Linkage = l; }
/// Change a PILWitnessTable declaration into a PILWitnessTable definition.
void
convertToDefinition(ArrayRef<Entry> newEntries,
ArrayRef<ConditionalConformance> conditionalConformances,
IsSerialized_t isSerialized);
// Whether a conformance should be serialized.
static bool
conformanceIsSerialized(const RootInterfaceConformance *conformance);
/// Call \c fn on each (split apart) conditional requirement of \c conformance
/// that should appear in a witness table, i.e., conformance requirements that
/// need witness tables themselves.
///
/// The \c unsigned argument to \c fn is a counter for the conditional
/// conformances, and should be used for indexing arrays of them.
///
/// This acts like \c any_of: \c fn returning \c true will stop the
/// enumeration and \c enumerateWitnessTableConditionalConformances will
/// return \c true, while \c fn returning \c false will let it continue.
static bool enumerateWitnessTableConditionalConformances(
const InterfaceConformance *conformance,
llvm::function_ref<bool(unsigned, CanType, InterfaceDecl *)> fn);
/// Print the witness table.
void print(llvm::raw_ostream &OS, bool Verbose = false) const;
/// Dump the witness table to stderr.
void dump() const;
};
} // end polar namespace
//===----------------------------------------------------------------------===//
// ilist_traits for PILWitnessTable
//===----------------------------------------------------------------------===//
namespace llvm {
template <>
struct ilist_traits<::polar::PILWitnessTable> :
public ilist_node_traits<::polar::PILWitnessTable> {
using PILWitnessTable = ::polar::PILWitnessTable;
public:
static void deleteNode(PILWitnessTable *WT) { WT->~PILWitnessTable(); }
private:
void createNode(const PILWitnessTable &);
};
} // end llvm namespace
#endif
| 3,902 |
892 |
{
"schema_version": "1.2.0",
"id": "GHSA-8pr4-q5mj-2m5q",
"modified": "2022-05-05T00:29:42Z",
"published": "2022-05-05T00:29:42Z",
"aliases": [
"CVE-2013-4717"
],
"details": "Multiple SQL injection vulnerabilities in Open Ticket Request System (OTRS) Help Desk 3.0.x before 3.0.22, 3.1.x before 3.1.18, and 3.2.x before 3.2.9 allow remote authenticated users to execute arbitrary SQL commands via unspecified vectors related to Kernel/Output/HTML/PreferencesCustomQueue.pm, Kernel/System/CustomerCompany.pm, Kernel/System/Ticket/IndexAccelerator/RuntimeDB.pm, Kernel/System/Ticket/IndexAccelerator/StaticDB.pm, and Kernel/System/TicketSearch.pm.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2013-4717"
},
{
"type": "WEB",
"url": "https://web.archive.org/web/20130817120539/http://www.otrs.com/de/open-source/community-news/security-advisories/security-advisory-2013-05/"
}
],
"database_specific": {
"cwe_ids": [
"CWE-89"
],
"severity": "HIGH",
"github_reviewed": false
}
}
| 472 |
1,144 |
# -*- coding: utf-8 -*-
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Fix all version info in the input Info.plist file.
% python tweak_info_plist.py --output=out.txt --input=in.txt
--build_dir=/path/to/xcodebuild --gen_out_dir=/path/to/gen_out_dir
--keystone_dir=/path/to/KeyStone --version_file=version.txt
See mozc_version.py for the detailed information for version.txt.
"""
__author__ = "mukai"
import datetime
import logging
import optparse
import os
import re
from build_tools import mozc_version
_COPYRIGHT_YEAR = datetime.date.today().year
def _ReplaceVariables(data, environment):
"""Replace all occurrence of the variable in data by the value.
Args:
data: the original data string
environment: a dict which maps from variable names to their values
Returns:
the data string which replaces the variables by the value.
"""
def Replacer(matchobj):
"""The replace function to expand variables.
Args:
matchobj: the match object
Returns:
The value for the variable if the variable is defined in the
      environment. Otherwise the original string is returned.
"""
if matchobj.group(1) in environment:
return environment[matchobj.group(1)]
return matchobj.group(0)
return re.sub(r'\$\{(\w+)\}', Replacer, data)
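# Example (illustrative): _ReplaceVariables('v${FOO}.${BAR}', {'FOO': '1'})
# returns 'v1.${BAR}' -- known variables are expanded, unknown ones are kept.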
def _RemoveDevOnlyLines(data, build_type):
"""Remove dev-only lines.
Args:
data: the original data string
build_type: build type ("dev" or "stable")
Returns:
if build_type == "dev"
the data string including dev-only lines.
else:
the data string excluding dev-only lines.
"""
pat = re.compile('<!--DEV_ONLY_START-->\n(.*)<!--DEV_ONLY_END-->\n',
re.DOTALL)
if build_type == 'dev':
return re.sub(pat, r'\1', data)
else:
return re.sub(pat, '', data)
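# Example (illustrative): given '<!--DEV_ONLY_START-->\nX\n<!--DEV_ONLY_END-->\n',
# build_type 'dev' keeps only 'X\n' (markers stripped), while any other
# build_type removes the whole block.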
def ParseOptions():
"""Parse command line options.
Returns:
An options data.
"""
parser = optparse.OptionParser()
parser.add_option('--version_file', dest='version_file')
parser.add_option('--output', dest='output')
parser.add_option('--input', dest='input')
parser.add_option('--build_dir', dest='build_dir')
parser.add_option('--gen_out_dir', dest='gen_out_dir')
parser.add_option('--auto_updater_dir', dest='auto_updater_dir')
parser.add_option('--mozc_dir', dest='mozc_dir')
parser.add_option('--launch_agent_dir', dest='launch_agent_dir')
parser.add_option('--build_type', dest='build_type')
(options, unused_args) = parser.parse_args()
return options
def main():
"""The main function."""
options = ParseOptions()
required_flags = [
'version_file', 'output', 'input', 'build_dir', 'gen_out_dir',
'auto_updater_dir', 'build_type', 'launch_agent_dir',
]
for flag in required_flags:
if getattr(options, flag) is None:
logging.error('--%s is not specified.', flag)
exit(-1)
version = mozc_version.MozcVersion(options.version_file)
copyright_message = '© %d Google Inc.' % _COPYRIGHT_YEAR
long_version = version.GetVersionString()
short_version = version.GetVersionInFormat('@MAJOR@.@MINOR@.@BUILD@')
if options.mozc_dir:
mozc_dir = options.mozc_dir
else:
mozc_dir = os.path.abspath(os.path.join(os.getcwd(), '..'))
variables = {
'MOZC_VERSIONINFO_MAJOR':
version.GetVersionInFormat('@MAJOR@'),
'MOZC_VERSIONINFO_MINOR':
version.GetVersionInFormat('@MINOR@'),
'MOZC_VERSIONINFO_LONG':
long_version,
'MOZC_VERSIONINFO_SHORT':
short_version,
'MOZC_VERSIONINFO_FINDER':
'Google Japanese Input %s, %s' % (long_version, copyright_message),
'GEN_OUT_DIR':
os.path.abspath(options.gen_out_dir),
'BUILD_DIR':
os.path.abspath(options.build_dir),
'AUTO_UPDATER_DIR':
os.path.abspath(options.auto_updater_dir),
'MOZC_DIR':
mozc_dir,
'LAUNCH_AGENT_DIR':
os.path.abspath(options.launch_agent_dir),
}
with open(options.input, encoding='utf-8') as input_file:
with open(options.output, 'w', encoding='utf-8') as output_file:
output_file.write(
_RemoveDevOnlyLines(_ReplaceVariables(input_file.read(), variables),
options.build_type))
if __name__ == '__main__':
main()
| 2,137 |
521 |
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright <NAME> 2014-2014. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/intrusive for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTRUSIVE_POINTER_REBIND_HPP
#define BOOST_INTRUSIVE_POINTER_REBIND_HPP
#ifndef BOOST_INTRUSIVE_DETAIL_WORKAROUND_HPP
#include <boost/intrusive/detail/workaround.hpp>
#endif //BOOST_INTRUSIVE_DETAIL_WORKAROUND_HPP
#ifndef BOOST_CONFIG_HPP
# include <boost/config.hpp>
#endif
#if defined(BOOST_HAS_PRAGMA_ONCE)
# pragma once
#endif
namespace network_boost {
namespace intrusive {
///////////////////////////
//struct pointer_rebind_mode
///////////////////////////
template <typename Ptr, typename U>
struct pointer_has_rebind
{
template <typename V> struct any
{ any(const V&) { } };
template <typename X>
static char test(int, typename X::template rebind<U>*);
template <typename X>
static int test(any<int>, void*);
static const bool value = (1 == sizeof(test<Ptr>(0, 0)));
};
template <typename Ptr, typename U>
struct pointer_has_rebind_other
{
template <typename V> struct any
{ any(const V&) { } };
template <typename X>
static char test(int, typename X::template rebind<U>::other*);
template <typename X>
static int test(any<int>, void*);
static const bool value = (1 == sizeof(test<Ptr>(0, 0)));
};
template <typename Ptr, typename U>
struct pointer_rebind_mode
{
static const unsigned int rebind = (unsigned int)pointer_has_rebind<Ptr, U>::value;
static const unsigned int rebind_other = (unsigned int)pointer_has_rebind_other<Ptr, U>::value;
static const unsigned int mode = rebind + rebind*rebind_other;
};
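//The computed mode selects a pointer_rebinder specialization below:
// mode == 2: Ptr provides rebind<U>::other (C++03 allocator style)
// mode == 1: Ptr provides a plain rebind<U> template
// mode == 0: no nested rebind; deduce from the Ptr<A, An...> template form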
////////////////////////
//struct pointer_rebinder
////////////////////////
template <typename Ptr, typename U, unsigned int RebindMode>
struct pointer_rebinder;
// Implementation of pointer_rebinder<U>::type if Ptr has
// its own rebind<U>::other type (C++03)
template <typename Ptr, typename U>
struct pointer_rebinder< Ptr, U, 2u >
{
typedef typename Ptr::template rebind<U>::other type;
};
// Implementation of pointer_rebinder<U>::type if Ptr has
// its own rebind template.
template <typename Ptr, typename U>
struct pointer_rebinder< Ptr, U, 1u >
{
typedef typename Ptr::template rebind<U> type;
};
// Specialization of pointer_rebinder if Ptr does not
// have its own rebind template but has the form Ptr<A, An...>,
// where An... comprises zero or more type parameters.
// Many types fit this form, hence many pointers will get a
// reasonable default for rebind.
#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES)
template <template <class, class...> class Ptr, typename A, class... An, class U>
struct pointer_rebinder<Ptr<A, An...>, U, 0u >
{
typedef Ptr<U, An...> type;
};
//Needed for non-conforming compilers like GCC 4.3
template <template <class> class Ptr, typename A, class U>
struct pointer_rebinder<Ptr<A>, U, 0u >
{
typedef Ptr<U> type;
};
#else //C++03 compilers
template <template <class> class Ptr //0arg
, typename A
, class U>
struct pointer_rebinder<Ptr<A>, U, 0u>
{ typedef Ptr<U> type; };
template <template <class, class> class Ptr //1arg
, typename A, class P0
, class U>
struct pointer_rebinder<Ptr<A, P0>, U, 0u>
{ typedef Ptr<U, P0> type; };
template <template <class, class, class> class Ptr //2arg
, typename A, class P0, class P1
, class U>
struct pointer_rebinder<Ptr<A, P0, P1>, U, 0u>
{ typedef Ptr<U, P0, P1> type; };
template <template <class, class, class, class> class Ptr //3arg
, typename A, class P0, class P1, class P2
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2>, U, 0u>
{ typedef Ptr<U, P0, P1, P2> type; };
template <template <class, class, class, class, class> class Ptr //4arg
, typename A, class P0, class P1, class P2, class P3
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3> type; };
template <template <class, class, class, class, class, class> class Ptr //5arg
, typename A, class P0, class P1, class P2, class P3, class P4
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3, P4>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3, P4> type; };
template <template <class, class, class, class, class, class, class> class Ptr //6arg
, typename A, class P0, class P1, class P2, class P3, class P4, class P5
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3, P4, P5>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3, P4, P5> type; };
template <template <class, class, class, class, class, class, class, class> class Ptr //7arg
, typename A, class P0, class P1, class P2, class P3, class P4, class P5, class P6
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3, P4, P5, P6>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3, P4, P5, P6> type; };
template <template <class, class, class, class, class, class, class, class, class> class Ptr //8arg
, typename A, class P0, class P1, class P2, class P3, class P4, class P5, class P6, class P7
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3, P4, P5, P6, P7>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3, P4, P5, P6, P7> type; };
template <template <class, class, class, class, class, class, class, class, class, class> class Ptr //9arg
, typename A, class P0, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8
, class U>
struct pointer_rebinder<Ptr<A, P0, P1, P2, P3, P4, P5, P6, P7, P8>, U, 0u>
{ typedef Ptr<U, P0, P1, P2, P3, P4, P5, P6, P7, P8> type; };
#endif //!defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES)
template <typename Ptr, typename U>
struct pointer_rebind
: public pointer_rebinder<Ptr, U, pointer_rebind_mode<Ptr, U>::mode>
{};
template <typename T, typename U>
struct pointer_rebind<T*, U>
{ typedef U* type; };
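//Illustrative examples (not part of the original header):
// pointer_rebind<int*, char>::type is char*
// pointer_rebind<SmartPtr<int>, char>::type is SmartPtr<char> when SmartPtr
// has no nested rebind (deduced from the template form, mode == 0)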
} //namespace intrusive {
} //namespace network_boost {
#endif // defined(BOOST_INTRUSIVE_POINTER_REBIND_HPP)
| 2,545 |
389 |
package com.mrcoder.sbredisannotations.controller;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class CacheController {
@RequestMapping(value = "/setcache/{id}", method = RequestMethod.GET)
    //Dynamically uses the path parameter id as the key of the cached result
    //The @Cacheable annotation first checks whether the key exists; if not, it executes the method, caches the result, and returns it
@Cacheable(cacheNames = "cache", key = "#id")
public String setCache(@PathVariable int id){
return "set cache";
}
@RequestMapping(value = "/updatecache/{id}", method = RequestMethod.GET)
    //The @CachePut annotation updates the cache whether or not the key already exists
@CachePut(cacheNames = "cache", key = "#id")
public String updateCache(@PathVariable int id){
return "update cache";
}
@RequestMapping(value = "/deletecache/{id}", method = RequestMethod.GET)
//CacheEvict注解会删除缓存
@CacheEvict(cacheNames = "cache", key = "#id")
public String deleteCache(@PathVariable int id){
return "delete cache";
}
}
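// Illustrative flow (assuming a Redis-backed cache named "cache" is configured):
// GET /setcache/1    -> executes once and caches "set cache" under key 1
// GET /setcache/1    -> served from the cache, the method body is skipped
// GET /updatecache/1 -> always executes and refreshes the cached value
// GET /deletecache/1 -> evicts the cache entry for key 1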
| 551 |
428 |
package loon.live2d.framework;
import loon.BaseIO;
import loon.live2d.ALive2DModel;
import loon.live2d.physics.PhysicsHair;
import loon.live2d.util.Json;
import loon.live2d.util.UtSystem;
import loon.live2d.util.Json.Value;
import loon.utils.TArray;
public class L2DPhysics {
private TArray<PhysicsHair> physicsList;
private long startTimeMSec;
public L2DPhysics() {
physicsList = new TArray<PhysicsHair>();
startTimeMSec = UtSystem.getUserTimeMSec();
}
public void updateParam(ALive2DModel model) {
long timeMSec = UtSystem.getUserTimeMSec() - startTimeMSec;
for (int i = 0; i < physicsList.size; i++) {
physicsList.get(i).update(model, timeMSec);
}
}
public static L2DPhysics load(String path) throws Exception {
byte[] buf = BaseIO.loadBytes(path);
return load(buf);
}
public static L2DPhysics load(byte[] buf) throws Exception {
L2DPhysics ret = new L2DPhysics();
Value json = Json.parseFromBytes(buf);
Value params = json.get("physics_hair");
int paramNum = params.getVector(null).size;
for (int i = 0; i < paramNum; i++) {
Value param = params.get(i);
PhysicsHair physics = new PhysicsHair();
Value setup = param.get("setup");
float length = setup.get("length").toFloat();
float resist = setup.get("regist").toFloat();
float mass = setup.get("mass").toFloat();
physics.setup(length, resist, mass);
Value srcList = param.get("src");
int srcNum = srcList.getVector(null).size;
for (int j = 0; j < srcNum; j++) {
Value src = srcList.get(j);
String id = src.get("id").toString();// param ID
PhysicsHair.Src type = PhysicsHair.Src.SRC_TO_X;
String typeStr = src.get("ptype").toString();
if (typeStr.equals("x")) {
type = PhysicsHair.Src.SRC_TO_X;
} else if (typeStr.equals("y")) {
type = PhysicsHair.Src.SRC_TO_Y;
} else if (typeStr.equals("angle")) {
type = PhysicsHair.Src.SRC_TO_G_ANGLE;
}
float scale = src.get("scale").toFloat();
float weight = src.get("weight").toFloat();
physics.addSrcParam(type, id, scale, weight);
}
Value targetList = param.get("targets");
int targetNum = targetList.getVector(null).size;
for (int j = 0; j < targetNum; j++) {
Value target = targetList.get(j);
String id = target.get("id").toString();
PhysicsHair.Target type = PhysicsHair.Target.TARGET_FROM_ANGLE;
String typeStr = target.get("ptype").toString();
if (typeStr.equals("angle")) {
type = PhysicsHair.Target.TARGET_FROM_ANGLE;
} else if (typeStr.equals("angle_v")) {
type = PhysicsHair.Target.TARGET_FROM_ANGLE_V;
}
float scale = target.get("scale").toFloat();
float weight = target.get("weight").toFloat();
physics.addTargetParam(type, id, scale, weight);
}
ret.physicsList.add(physics);
}
return ret;
}
}
| 1,149 |
440 |
import os
import subprocess
# hardcoded paths
HUNTER_DIR='..'
PACKAGES_DIR=os.path.join(HUNTER_DIR, 'cmake/projects')
DOCS_PKG_DIR=os.path.join(HUNTER_DIR, 'docs', 'packages', 'pkg')
# get all wiki entries
docs_filenames = [x for x in os.listdir(DOCS_PKG_DIR) if x.endswith('.rst')]
docs_entries = [x[:-4] for x in docs_filenames]
# get all hunter package entries
pkg_entries = [x for x in os.listdir(PACKAGES_DIR) if os.path.isdir(os.path.join(PACKAGES_DIR, x))]
pkg_entries_lower = [x.lower() for x in pkg_entries]
# packages both in hunter and wiki
pkg_match = [x for x in pkg_entries if x in docs_entries]
# packages only in hunter
pkg_only_hunter = [x for x in pkg_entries if x not in pkg_match]
# output directories
packages_dir = 'packages'
only_hunter_dir = 'packages/only_hunter'
# create if not exist
for d in [packages_dir, only_hunter_dir]:
if not os.path.exists(d):
os.mkdir(d)
# header for rst files
header_format_string = """.. spelling::
{}
.. index:: unsorted ; {}
.. _pkg.{}:
{}
{}
"""
template_string = """
.. warning::
This page is a template and contains no real information.
    Please send a pull request with a real description.
- `__FIXME__ Official <https://__FIXME__>`__
- `__FIXME__ Hunterized <https://github.com/hunter-packages/__FIXME__>`__
- `__FIXME__ Example <https://github.com/cpp-pm/hunter/blob/master/examples/__FIXME__/CMakeLists.txt>`__
- Available since `__FIXME__ vX.Y.Z <https://github.com/cpp-pm/hunter/releases/tag/vX.Y.Z>`__
- Added by `__FIXME__ <https://github.com/__FIXME__>`__ (`__FIXME__ pr-N <https://github.com/ruslo/hunter/pull/N>`__)
.. code-block:: cmake
hunter_add_package(__FIXME__)
find_package(__FIXME__ CONFIG REQUIRED)
target_link_libraries(foo __FIXME__::__FIXME__)
"""
# create dummy entries for packages only in hunter
for entry in pkg_only_hunter:
target_rst = os.path.join(only_hunter_dir, entry + '.rst')
underscores = "=" * len(entry)
header = header_format_string.format(entry, entry, entry, entry, underscores)
#print(header)
with open(target_rst, 'w') as f:
f.write(header)
f.write(template_string)
print("pkg_match entries: ", len(pkg_match))
print("pkg_only_hunter entries: ", len(pkg_only_hunter))
| 888 |
1,056 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* CPUTestCase.java
*
* Created on July 19, 2005, 5:20 PM
*
* To change this template, choose Tools | Options and locate the template under
* the Source Creation and Management node. Right-click the template and choose
* Open. You can then make changes to the template in the Source Editor.
*/
package org.netbeans.lib.profiler.tests.jfluid.cpu;
import org.netbeans.lib.profiler.ProfilerEngineSettings;
import org.netbeans.lib.profiler.TargetAppRunner;
import org.netbeans.lib.profiler.global.CommonConstants;
import org.netbeans.lib.profiler.results.EventBufferResultsProvider;
import org.netbeans.lib.profiler.results.ProfilingResultsDispatcher;
import org.netbeans.lib.profiler.results.RuntimeCCTNode;
import org.netbeans.lib.profiler.results.cpu.CPUCCTProvider;
import org.netbeans.lib.profiler.results.cpu.CPUCallGraphBuilder;
import org.netbeans.lib.profiler.results.cpu.CPUResultsSnapshot;
import org.netbeans.lib.profiler.results.cpu.FlatProfileBuilder;
import org.netbeans.lib.profiler.results.cpu.FlatProfileContainer;
import org.netbeans.lib.profiler.results.cpu.FlatProfileContainerFree;
import org.netbeans.lib.profiler.tests.jfluid.*;
import org.netbeans.lib.profiler.tests.jfluid.utils.*;
import org.netbeans.lib.profiler.utils.StringUtils;
import java.text.NumberFormat;
import java.util.*;
/**
*
* @author ehucka
*/
public abstract class CPUTestCase extends CommonProfilerTestCase {
//~ Inner Classes ------------------------------------------------------------------------------------------------------------
class Measured {
//~ Instance fields ------------------------------------------------------------------------------------------------------
public int invocations = 0;
public long time = 0;
//~ Methods --------------------------------------------------------------------------------------------------------------
public void setInvocations(int invc) {
this.invocations = invc;
}
public void setTime(long time) {
this.time = time;
}
}
private class CPUResultListener implements CPUCCTProvider.Listener {
//~ Instance fields ------------------------------------------------------------------------------------------------------
private final Object resultsLock = new Object();
private boolean hasResults = false;
//~ Methods --------------------------------------------------------------------------------------------------------------
public void cctEstablished(RuntimeCCTNode appRootNode) {
synchronized (resultsLock) {
hasResults = true;
resultsLock.notify();
}
}
public void cctReset() {
synchronized (resultsLock) {
hasResults = false;
log("cctReset "+System.currentTimeMillis());
resultsLock.notify();
}
}
public boolean wait4results(long timeout) {
synchronized (resultsLock) {
if (!hasResults) {
try {
log("wait4results "+System.currentTimeMillis());
resultsLock.wait(timeout);
} catch (InterruptedException e) {
}
}
return hasResults;
}
}
public void cctEstablished(RuntimeCCTNode appRootNode, boolean empty) {
log("cctEstablished "+empty+" "+System.currentTimeMillis());
if (!empty) {
cctEstablished(appRootNode);
}
//throw new UnsupportedOperationException("Not supported yet.");
}
}
//~ Static fields/initializers -----------------------------------------------------------------------------------------------
static int ALL_INV_ERROR_METHOD = 0;
static int LAST_INV_ERROR_METHOD = 1;
//~ Instance fields ----------------------------------------------------------------------------------------------------------
NumberFormat percentFormat;
//~ Constructors -------------------------------------------------------------------------------------------------------------
/**
* Creates a new instance of CPUTestCase
*/
public CPUTestCase(String name) {
super(name);
percentFormat = NumberFormat.getPercentInstance();
percentFormat.setMaximumFractionDigits(1);
percentFormat.setMinimumFractionDigits(0);
}
//~ Methods ------------------------------------------------------------------------------------------------------------------
public int getALL_INV_ERROR_METHOD() {
return ALL_INV_ERROR_METHOD;
}
protected double getError(int invocations, long mctime, long idealtime) {
double ideal = idealtime * invocations * 1000.0;
return Math.abs(ideal - mctime) / 1000;
}
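    // Worked example (hypothetical numbers): with idealtime = 10 ms, invocations = 5 and a
    // measured mctime of 52000 us, ideal = 10 * 5 * 1000 = 50000 us, so the method returns
    // |50000 - 52000| / 1000 = 2 ms of error.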
protected void checkCPUResults(FlatProfileContainer fpc, HashMap methods, String[] measuredMethodsFilter) {
double percent = 0.0;
for (int row = 0; row < fpc.getNRows(); row++) {
percent += fpc.getPercentAtRow(row);
for (int mets = 0; mets < measuredMethodsFilter.length; mets++) {
if (fpc.getMethodNameAtRow(row).startsWith(measuredMethodsFilter[mets])) {
Measured m = (Measured) (methods.get(fpc.getMethodNameAtRow(row)));
if (m == null) {
m = new Measured();
m.time = fpc.getTimeInMcs0AtRow(row);
m.invocations = fpc.getNInvocationsAtRow(row);
methods.put(fpc.getMethodNameAtRow(row), m);
} else {
long tm = m.time;
int inv = m.invocations;
m.setTime(fpc.getTimeInMcs0AtRow(row));
m.setInvocations(fpc.getNInvocationsAtRow(row));
if ((tm > m.time) || (inv > m.invocations)) {
log("\n!!!Decreasing values: method " + fpc.getMethodNameAtRow(row) + " current time " + m.time
+ " invocations " + m.invocations + " but was time=" + tm + " invocations=" + inv + "\n");
assertFalse("Unacceptable results - decresing values (issue 65187)", true);
}
}
}
}
}
if (Math.abs(percent - 100.0) > 0.1) {
log("\n!!!Sum of percents is not 100% - " + percent + "\n");
for (int row = 0; row < fpc.getNRows(); row++) {
log(fpc.getMethodIdAtRow(row) + " " + percentFormat.format(fpc.getPercentAtRow(row) / 100) + " %");
}
assertFalse("Unacceptable results - sum of percents != 100", true);
}
}
protected void checkCPUResults(FlatProfileContainer fpc, String[] methodsNames, long[] idealTimes, double diffMillis,
String[] refMethods, ArrayList refMethodsList, int errorMethod) {
double[] errors = new double[methodsNames.length];
int[] nInv = new int[methodsNames.length];
long[] times = new long[methodsNames.length];
for (int row = 0; row < fpc.getNRows(); row++) {
for (int mets = 0; mets < methodsNames.length; mets++) {
if (fpc.getMethodNameAtRow(row).equals(methodsNames[mets])) {
nInv[mets] = fpc.getNInvocationsAtRow(row);
times[mets] = fpc.getTimeInMcs0AtRow(row);
errors[mets] = getError(nInv[mets], times[mets], idealTimes[mets]);
}
}
if (refMethods != null) {
for (int mets = 0; mets < refMethods.length; mets++) {
String mname = fpc.getMethodNameAtRow(row);
if (mname.startsWith(refMethods[mets]) && !refMethodsList.contains(mname)) {
refMethodsList.add(mname);
}
}
}
}
double best = diffMillis / 4.0;
int bestcount = 0;
boolean bigdifference = false;
for (int cntr = 0; cntr < errors.length; cntr++) {
if (errors[cntr] <= best) {
bestcount++;
}
bigdifference |= (errors[cntr] > diffMillis);
}
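        // Acceptance rule: results pass when no per-method error exceeds diffMillis, or when
        // at least half of the methods fall within the "best" threshold of diffMillis / 4.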
boolean accepted = !bigdifference || ((bestcount * 1.0) >= (errors.length * 0.5));
logFractions(errors, nInv, times, idealTimes, methodsNames);
log("");
if (!accepted) {
log("\nRESULTS WITH BIG DIFFERENCES - differences are greater than given tolerance: " + diffMillis + " ms");
log("Best count " + bestcount + " errors.length " + errors.length);
}
//assertTrue("Not acceptable results - big differences", accepted);
}
protected ProfilerEngineSettings initCpuTest(String projectName, String mainClass) {
return initCpuTest(projectName, mainClass, null);
}
protected ProfilerEngineSettings initCpuTest(String projectName, String mainClass, String[][] rootMethods) {
//System.setProperty("org.netbeans.lib.profiler.wireprotocol.WireIO", "true");
ProfilerEngineSettings settings = initTest(projectName, mainClass, rootMethods);
//defaults
settings.setCPUProfilingType(CommonConstants.CPU_INSTR_FULL);
settings.setInstrScheme(CommonConstants.INSTRSCHEME_LAZY);
settings.setInstrumentEmptyMethods(false);
settings.setInstrumentGetterSetterMethods(false);
settings.setInstrumentMethodInvoke(true);
settings.setInstrumentSpawnedThreads(rootMethods == null);
settings.setExcludeWaitTime(true);
// addJVMArgs(settings, "-Dorg.netbeans.lib.profiler.wireprotocol.WireIO=true");
//addJVMArgs(settings, "-Dorg.netbeans.lib.profiler.server.ProfilerServer=true");
// if (rootMethods == null) {
// addJVMArgs(settings, "-Dorg.netbeans.lib.profiler.server.ProfilerServer=true");
// }
settings.setThreadCPUTimerOn(false);
return settings;
}
protected void logFractions(double[] errors, int[] inv, long[] times, long[] ideals, String[] methods) {
log(complete("Error[ms]", 10) + complete("Invocs", 10) + complete("Time[ms]", 10) + complete("Ideal[ms]", 10) + "Method");
for (int i = 0; i < errors.length; i++) {
log(complete(String.valueOf(errors[i]), 9) + " " + complete(String.valueOf(inv[i]), 9) + " "
+ complete(StringUtils.mcsTimeToString(times[i]), 9) + " " + complete(String.valueOf(ideals[i] * inv[i]), 9)
+ " " + methods[i]);
}
}
protected void logInstrumented(TargetAppRunner runner)
throws Exception {
CPUResultsSnapshot snapshot = runner.getProfilerClient().getCPUProfilingResultsSnapshot();
String[] mets = snapshot.getInstrMethodNames();
log("Instrumented methods:");
for (int i = 0; i < mets.length; i++) {
log(mets[i]);
}
}
/**
* checks results after the profiled app is finished
*/
protected void startCPUTest(ProfilerEngineSettings settings, String[] measuredMethods, long[] idealTimes, double diffMillis,
String[] displayMethodsFilter, int errorMethod) {
CPUCallGraphBuilder builder = new CPUCallGraphBuilder();
//create runner
TargetAppRunner runner = new TargetAppRunner(settings, new TestProfilerAppHandler(this),
new TestProfilingPointsProcessor());
runner.addProfilingEventListener(Utils.createProfilingListener(this));
ProfilingResultsDispatcher.getDefault().addListener(builder);
CPUResultListener resultListener = new CPUResultListener();
builder.addListener(resultListener);
FlatProfileBuilder flattener = new FlatProfileBuilder();
builder.addListener(flattener);
flattener.setContext(runner.getProfilerClient(),null,null);
builder.startup(runner.getProfilerClient());
try {
runner.readSavedCalibrationData();
runner.getProfilerClient().initiateRecursiveCPUProfInstrumentation(settings.getInstrumentationRootMethods());
Process p = startTargetVM(runner);
assertNotNull("Target JVM is not started", p);
bindStreams(p);
runner.attachToTargetVMOnStartup();
waitForStatus(STATUS_RUNNING);
assertTrue("runner is not running", runner.targetAppIsRunning());
ArrayList methods = new ArrayList();
waitForStatus(STATUS_APP_FINISHED);
Thread.sleep(1000);
if (runner.targetJVMIsAlive()) {
log("Get results: " + System.currentTimeMillis());
assertTrue("Results do not exist - issue 65185.", runner.getProfilerClient().cpuResultsExist());
boolean gotResults = false;
int retryCounter = 8; // was - 4
do {
// just wait for the results to appear - forceObtainedResultsDump() has already been called by ProfilerClient on shutdown
// runner.getProfilerClient().forceObtainedResultsDump();
gotResults = resultListener.wait4results(2500);
} while (!gotResults && (--retryCounter > 0));
assertTrue("Results are not available after 20 seconds.", gotResults); // was - 10 seconds
log("obtaining results " + String.valueOf(System.currentTimeMillis()));
//logInstrumented(runner);
FlatProfileContainerFree fpc = null;
int retry = 5;
while ((fpc == null) && (--retry > 0)) {
fpc = (FlatProfileContainerFree) flattener.createFlatProfile();
Thread.sleep(500);
}
fpc.filterOriginalData(new String[] { "" }, CommonConstants.FILTER_CONTAINS, 0.0D);
checkCPUResults(fpc, measuredMethods, idealTimes, diffMillis, displayMethodsFilter, methods, errorMethod);
}
setStatus(STATUS_MEASURED);
if (methods.size() > 0) {
Collections.sort(methods);
for (int mets = 0; mets < methods.size(); mets++) {
ref(methods.get(mets));
}
}
} catch (Exception ex) {
log(ex);
assertTrue("Exception thrown: " + ex.getMessage(), false);
} finally {
ProfilingResultsDispatcher.getDefault().pause(true);
builder.shutdown();
flattener.setContext(null,null,null);
builder.removeListener(flattener);
builder.removeListener(resultListener);
ProfilingResultsDispatcher.getDefault().removeListener(builder);
finalizeTest(runner);
}
}
/**
 * check results periodically - live results
*/
protected void startCPUTest(ProfilerEngineSettings settings, String[] measuredMethodsFilter, long checkDelay, long maxDelay) {
CPUCallGraphBuilder builder = new CPUCallGraphBuilder();
//create runner
TargetAppRunner runner = new TargetAppRunner(settings, new TestProfilerAppHandler(this),
new TestProfilingPointsProcessor());
runner.addProfilingEventListener(Utils.createProfilingListener(this));
ProfilingResultsDispatcher.getDefault().addListener(builder);
CPUResultListener resultListener = new CPUResultListener();
builder.addListener(resultListener);
FlatProfileBuilder flattener = new FlatProfileBuilder();
builder.addListener(flattener);
flattener.setContext(runner.getProfilerClient(),null,null);
builder.startup(runner.getProfilerClient());
try {
runner.readSavedCalibrationData();
runner.getProfilerClient().initiateRecursiveCPUProfInstrumentation(settings.getInstrumentationRootMethods());
Process p = startTargetVM(runner);
assertNotNull("Target JVM is not started", p);
bindStreams(p);
runner.attachToTargetVMOnStartup();
waitForStatus(STATUS_RUNNING);
assertTrue("runner is not running", runner.targetAppIsRunning());
waitForStatus(STATUS_RESULTS_AVAILABLE | STATUS_APP_FINISHED);
assertTrue("ResultsAvailable was not called - issue 69084", (isStatus(STATUS_RESULTS_AVAILABLE) || isStatus(STATUS_LIVERESULTS_AVAILABLE)));
HashMap methods = new HashMap(128);
long alltime = 0;
long time = System.currentTimeMillis();
long oldtime = time - checkDelay;
while (!isStatus(STATUS_APP_FINISHED) && !isStatus(STATUS_ERROR) && (alltime < maxDelay)) {
if ((time - oldtime) < (2 * checkDelay)) {
Thread.sleep((2 * checkDelay) - (time - oldtime));
}
if (!isStatus(STATUS_LIVERESULTS_AVAILABLE)) {
waitForStatus(STATUS_LIVERESULTS_AVAILABLE, checkDelay / 2);
}
if (runner.targetJVMIsAlive() && isStatus(STATUS_LIVERESULTS_AVAILABLE)) {
assertTrue("Results do not exist - issue 65185.", runner.getProfilerClient().cpuResultsExist());
log("Get Results: " + System.currentTimeMillis());
// runner.getProfilerClient().forceObtainedResultsDump();
// assertTrue("Results do not exist on the server - issue 65185.", runner.getProfilerClient().cpuResultsExist());
boolean gotResults = false;
int retryCounter = 4;
do {
runner.getProfilerClient().forceObtainedResultsDump();
gotResults = resultListener.wait4results(2500);
} while (!gotResults && (--retryCounter > 0));
assertTrue("CallGraphBuilder: Results do not exist.", gotResults);
log("Results obtained " + String.valueOf(System.currentTimeMillis()));
//logInstrumented(runner);
FlatProfileContainerFree fpc = null;
int retry = 5;
while ((fpc == null) && (--retry > 0)) {
fpc = (FlatProfileContainerFree) flattener.createFlatProfile();
Thread.sleep(500);
}
fpc.filterOriginalData(new String[] { "" }, CommonConstants.FILTER_CONTAINS, 0.0D);
fpc.sortBy(FlatProfileContainer.SORT_BY_TIME, true);
checkCPUResults(fpc, methods, measuredMethodsFilter);
}
alltime += (System.currentTimeMillis() - time);
oldtime = time;
time = System.currentTimeMillis();
}
if (methods.size() == 0) {
assertTrue("Results were not on the server - issue 65185", false);
}
} catch (Exception ex) {
log(ex);
assertTrue("Exception thrown: " + ex.getMessage(), false);
} finally {
ProfilingResultsDispatcher.getDefault().pause(true);
builder.shutdown();
flattener.setContext(null,null,null);
builder.removeListener(flattener);
builder.removeListener(resultListener);
ProfilingResultsDispatcher.getDefault().removeListener(builder);
finalizeTest(runner);
}
}
}
| 8,816 |
1,444 |
<filename>Mage.Sets/src/mage/cards/c/CryptRats.java<gh_stars>1000+
package mage.cards.c;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.SimpleActivatedAbility;
import mage.abilities.costs.VariableCost;
import mage.abilities.costs.mana.ManaCostsImpl;
import mage.abilities.costs.mana.VariableManaCost;
import mage.abilities.dynamicvalue.common.ManacostVariableValue;
import mage.abilities.effects.Effect;
import mage.abilities.effects.common.DamageEverythingEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.constants.Zone;
import mage.filter.FilterMana;
/**
*
* @author LevelX2
*/
public final class CryptRats extends CardImpl {
static final FilterMana filterBlack = new FilterMana();
static {
filterBlack.setBlack(true);
}
public CryptRats(UUID ownerId, CardSetInfo setInfo) {
super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{2}{B}");
this.subtype.add(SubType.RAT);
this.power = new MageInt(1);
this.toughness = new MageInt(1);
// {X}: Crypt Rats deals X damage to each creature and each player. Spend only black mana on X.
Effect effect = new DamageEverythingEffect(ManacostVariableValue.REGULAR);
effect.setText("{this} deals X damage to each creature and each player. Spend only black mana on X");
Ability ability = new SimpleActivatedAbility(Zone.BATTLEFIELD, effect,new ManaCostsImpl("{X}"));
VariableCost variableCost = ability.getManaCostsToPay().getVariableCosts().get(0);
if (variableCost instanceof VariableManaCost) {
((VariableManaCost) variableCost).setFilter(filterBlack);
}
this.addAbility(ability);
}
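    // Note: the FilterMana installed on the VariableManaCost above is what enforces the
    // "Spend only black mana on X" restriction when the X cost is announced; e.g. paying
    // X=3 requires {B}{B}{B}, not three mana of arbitrary colors.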
private CryptRats(final CryptRats card) {
super(card);
}
@Override
public CryptRats copy() {
return new CryptRats(this);
}
}
| 713 |
5,703 |
<gh_stars>1000+
/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.facebook.keyframes.model;
import com.facebook.keyframes.util.ArgCheckUtil;
/**
* A simple class which wraps a float[] needed for one key frame.
*/
public class KFAnimationFrame implements HasKeyFrame {
/**
* The key frame # in the animation sequence.
*/
public static final String START_FRAME_JSON_FIELD = "start_frame";
private final int mStartFrame;
/**
* The values for this key frame.
*/
public static final String DATA_JSON_FIELD = "data";
private final float[] mData;
public static class Builder {
public int startFrame;
public float[] data;
public KFAnimationFrame build() {
return new KFAnimationFrame(startFrame, data);
}
}
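  /* Illustrative usage of the Builder (hypothetical values), e.g. from a JSON deserializer:
   *   KFAnimationFrame.Builder builder = new KFAnimationFrame.Builder();
   *   builder.startFrame = 12;                 // "start_frame" field
   *   builder.data = new float[] {0.5f, 1f};   // "data" field, must be non-empty
   *   KFAnimationFrame frame = builder.build();
   */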
private KFAnimationFrame(int startFrame, float[] data) {
mStartFrame = startFrame;
mData = ArgCheckUtil.checkArg(
data,
data.length > 0,
DATA_JSON_FIELD);
}
@Override
public int getKeyFrame() {
return mStartFrame;
}
public float[] getData() {
return mData;
}
}
| 433 |
16,461 |
package abi42_0_0.org.unimodules.adapters.react.views;
import android.view.View;
import abi42_0_0.com.facebook.react.bridge.ReadableMap;
import abi42_0_0.com.facebook.react.uimanager.ThemedReactContext;
import abi42_0_0.com.facebook.react.uimanager.SimpleViewManager;
import abi42_0_0.com.facebook.react.uimanager.annotations.ReactProp;
import java.util.Map;
import javax.annotation.Nullable;
import abi42_0_0.org.unimodules.core.ModuleRegistry;
import abi42_0_0.org.unimodules.core.ViewManager;
import abi42_0_0.org.unimodules.core.interfaces.RegistryLifecycleListener;
public class SimpleViewManagerAdapter<M extends ViewManager<V>, V extends View> extends SimpleViewManager<V> implements RegistryLifecycleListener {
private M mViewManager;
public SimpleViewManagerAdapter(M viewManager) {
mViewManager = viewManager;
}
@Override
protected V createViewInstance(ThemedReactContext reactContext) {
return mViewManager.createViewInstance(reactContext);
}
@Override
public void onDropViewInstance(V view) {
mViewManager.onDropViewInstance(view);
super.onDropViewInstance(view);
}
@Nullable
@Override
public Map<String, Object> getConstants() {
return ViewManagerAdapterUtils.getConstants(mViewManager);
}
@Override
public String getName() {
return ViewManagerAdapterUtils.getViewManagerAdapterName(mViewManager);
}
@ReactProp(name = "proxiedProperties")
public void setProxiedProperties(V view, ReadableMap proxiedProperties) {
ViewManagerAdapterUtils.setProxiedProperties(getName(), mViewManager, view, proxiedProperties);
}
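  // All props coming from JS are funneled through this single "proxiedProperties" map and
  // dispatched to the wrapped unimodules ViewManager, so the adapter does not need one
  // @ReactProp setter per property.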
@Nullable
@Override
public Map<String, Object> getExportedCustomDirectEventTypeConstants() {
return ViewManagerAdapterUtils.getExportedCustomDirectEventTypeConstants(mViewManager);
}
@Override
public void onCreate(ModuleRegistry moduleRegistry) {
mViewManager.onCreate(moduleRegistry);
}
}
| 637 |
448 |
//
// IAnalyticsRender.h
// Analytics
//
// Created by huang_jiafa on 2018/11/07.
// Copyright (c) 2018 Aliyun. All rights reserved.
//
#ifndef IANALYTICS_RENDER_H
#define IANALYTICS_RENDER_H
namespace Cicada {
class IAnalyticRender {
public:
virtual ~IAnalyticRender() = default;
virtual void ReportFirstRender() = 0;
virtual void ReportFirstVideoRender() = 0;
virtual void ReportFirstAudioRender() = 0;
virtual void ReportVideoSizeChanged(int width, int height) = 0;
virtual void ReportSnapshot() = 0;
};
}// namespace Cicada
#endif // IANALYTICS_RENDER_H
| 250 |
971 |
#include "execution/sql/value_util.h"
#include "common/allocator.h"
#include "execution/sql/value.h"
namespace noisepage::execution::sql {
std::pair<StringVal, std::unique_ptr<byte[]>> ValueUtil::CreateStringVal(
const common::ManagedPointer<const char> string, const uint32_t length) {
if (length <= StringVal::InlineThreshold()) {
return {StringVal(string.Get(), length), nullptr};
}
// TODO(Matt): smarter allocation?
auto buffer = std::unique_ptr<byte[]>(common::AllocationUtil::AllocateAligned(length));
std::memcpy(buffer.get(), string.Get(), length);
return {StringVal(reinterpret_cast<const char *>(buffer.get()), length), std::move(buffer)};
}
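// Ownership note: for strings longer than the inline threshold, the returned unique_ptr
// owns the copied bytes the StringVal points into, so callers must keep that buffer alive
// for as long as the StringVal is used; for inlined strings the buffer pointer is null.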
std::pair<StringVal, std::unique_ptr<byte[]>> ValueUtil::CreateStringVal(const std::string &string) {
return CreateStringVal(common::ManagedPointer(string.data()), string.length());
}
std::pair<StringVal, std::unique_ptr<byte[]>> ValueUtil::CreateStringVal(const std::string_view string) {
return CreateStringVal(common::ManagedPointer(string.data()), string.length());
}
std::pair<StringVal, std::unique_ptr<byte[]>> ValueUtil::CreateStringVal(const StringVal string) {
return CreateStringVal(common::ManagedPointer(string.GetContent()), string.GetLength());
}
} // namespace noisepage::execution::sql
| 423 |
848 |
<filename>tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = rt.to_tensor()
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = rt.to_tensor(default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': [0],
'error': (ValueError, r'Shape \(1,\) must have rank at most 0'),
},
{
'rt_input': [[[1, 2], [3, 4]], [[5, 6]]],
'ragged_rank': 1,
'default': [7, 8, 9],
'error': (ValueError, r'Shapes \(3,\) and \(2,\) are incompatible'),
},
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
},
)
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
rt.to_tensor(default)
# This covers the tests above, but with the new implementation.
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpNewTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = ragged_conversion_ops.ragged_to_dense(rt)
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
}, {
'rt_input': [[1, 2, 3]],
'default': 'b',
'error': (TypeError, '.*'),
})
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
@test_util.run_all_in_graph_and_eager_modes
class RaggedToTensorOpAdditionalTests(test_util.TensorFlowTestCase):
def _compare_to_reference(self,
ragged_tensor,
expected=None,
default_value=None):
treatment = ragged_conversion_ops.ragged_to_dense(
ragged_tensor, default_value=default_value)
control = ragged_tensor.to_tensor(default_value=default_value)
self.assertAllEqual(control, treatment)
if expected is not None:
self.assertAllEqual(expected, treatment)
def test_already_dense_simple(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([6, 7, 8, 9, 10, 11], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[6, 7, 8], [9, 10, 11]])
def test_already_dense_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]],
default_value=constant_op.constant([31, 32], dtype=dtypes.int64))
def test_already_dense_with_dense_values(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]])
def test_ragged_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 3]]],
default_value=[2, 3])
def test_ragged_with_dense_values_and_small_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 2]]],
default_value=2)
def test_already_dense_with_dense_values_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[b'a', b'b'], [b'c', b'd'], [b'e', b'f'], [b'g', b'jalapeno'],
[b'kangaroo', b'llama'], [b'manzana', b'nectar']],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data,
[[[b'a', b'b'], [b'c', b'd'], [b'e', b'f']],
[[b'g', b'jalapeno'], [b'kangaroo', b'llama'],
[b'manzana', b'nectar']]])
def test_already_dense_with_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
['a', 'b', 'c', 'd', 'e', 'antidisestablishmentarianism'],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[b'a', b'b', b'c'], [b'd', b'e', b'antidisestablishmentarianism']])
def test_already_dense(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [3, 4, 5]])
self._compare_to_reference(input_data, [[0, 1, 2], [3, 4, 5]])
def test_true_ragged(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(input_data, [[0, 1, 2], [0, 0, 0], [3, 0, 0]])
def test_true_ragged_default_3(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(
input_data, [[0, 1, 2], [3, 3, 3], [3, 3, 3]], default_value=3)
def test_three_dimensional_ragged(self):
input_data = ragged_factory_ops.constant([[[0, 1, 2], []], [], [[3]]])
self._compare_to_reference(
input_data, [[[0, 1, 2], [3, 3, 3]], [[3, 3, 3], [3, 3, 3]],
[[3, 3, 3], [3, 3, 3]]],
default_value=3)
def test_empty_tensor(self):
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([], dtype=dtypes.int64),
value_rowids=constant_op.constant([], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[], []], default_value=3)
def test_empty_last(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
self._compare_to_reference(input_data,
[[0, 1, 2], [0, 0, 0], [3, 0, 0], [0, 0, 0]])
def test_shape_limit(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[2, 3])
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tuple(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=(2, 3))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, 3]))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_half_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
self.assertAllEqual(actual, [[0, 1, 2, 3], [0, 0, 0, 0]])
def test_skip_eager_shape_half_limit_tensor_shape(self):
# Eager would produce a shape of [2, 4]
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
result = actual.shape.as_list()
# This is equal to [2, 4] in eager, or [2, None] in non-eager.
self.assertEqual(result[0], 2)
def test_shape_limit_shape_is_tensor_int64(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int64))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_shape_is_tensor_int32(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int32))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_expand_first_dim(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[4, 4])
self.assertAllEqual(
actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0], [0, 0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [4, 4])
def test_value_transposed(self):
# This test tries to get a tensor in columnar format, where I am uncertain
# as to whether the underlying op, which copies data in the raw format,
# could fail.
my_value = array_ops.transpose(
constant_op.constant([[0, 1, 2, 3], [4, 5, 6, 7]]))
input_data = RaggedTensor.from_value_rowids(
values=my_value,
value_rowids=constant_op.constant([0, 1, 2, 3], dtype=dtypes.int64),
nrows=constant_op.constant(4, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data,
[[[0, 4]], [[1, 5]], [[2, 6]], [[3, 7]]])
# This fails on the older version of to_tensor.
def test_broadcast_default(self):
    # Note: the functionality exercised here is not officially supported.
# The dense dimension here is 2 x 2
input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
ragged_rank=1)
# This placeholder has a 2 x 1 dimension.
default_value = array_ops.placeholder_with_default([[5], [6]], shape=None)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=default_value)
expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]]
self.assertAllEqual(actual, expected)
# This fails on the older version of to_tensor.
def test_broadcast_default_no_placeholder(self):
# Again, this functionality is not supported. It fails more gracefully
# when creating the op.
input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
ragged_rank=1)
# default_value has a 2 x 1 dimension.
default_value = constant_op.constant([[5], [6]], shape=None)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=default_value)
expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]]
self.assertAllEqual(actual, expected)
def test_shape_expand_second_dim(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[3, 4])
self.assertAllEqual(actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0]])
def test_empty_tensor_with_shape(self):
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([], dtype=dtypes.int64),
value_rowids=constant_op.constant([], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=3, shape=[2, 3])
self.assertAllEqual(actual, [[3, 3, 3], [3, 3, 3]])
if __name__ == '__main__':
googletest.main()
| 10,166 |
763 |
<reponame>zabrewer/batfish
package org.batfish.representation.juniper;
import static org.junit.Assert.assertEquals;
import org.batfish.common.Warnings;
import org.batfish.datamodel.EmptyIpSpace;
import org.batfish.datamodel.HeaderSpace;
import org.batfish.datamodel.IpSpaceReference;
import org.batfish.datamodel.IpWildcard;
import org.junit.Test;
/** Test for {@link FwFromSourceAddressBookEntry} */
public class FwFromSourceAddressBookEntryTest {
@Test
public void testToHeaderspace() {
String addressBookEntryName = "addressBookEntry";
AddressBook globalAddressBook = new AddressBook("addressBook", null);
globalAddressBook
.getEntries()
.put(
addressBookEntryName,
new AddressAddressBookEntry(addressBookEntryName, IpWildcard.parse("1.1.1.0/24")));
Warnings w = new Warnings();
FwFromSourceAddressBookEntry from =
new FwFromSourceAddressBookEntry(null, globalAddressBook, addressBookEntryName);
assertEquals(
from.toHeaderspace(w),
HeaderSpace.builder()
.setSrcIps(new IpSpaceReference("addressBook~addressBookEntry"))
.build());
}
@Test
public void testToHeaderspace_noEntry() {
String addressBookEntryName = "addressBookEntry";
AddressBook globalAddressBook = new AddressBook("addressBook", null);
Warnings w = new Warnings();
FwFromSourceAddressBookEntry from =
new FwFromSourceAddressBookEntry(null, globalAddressBook, addressBookEntryName);
assertEquals(
from.toHeaderspace(w), HeaderSpace.builder().setSrcIps(EmptyIpSpace.INSTANCE).build());
}
}
| 595 |
752 |
from elegantrl.train.run_tutorial import *
from elegantrl.train.config import Arguments
from elegantrl.agents.AgentSAC import AgentModSAC
# demo for MuJoCo environment, using 1 GPU
args = Arguments(env=build_env('AntBulletEnv-v0', if_print=True), agent=AgentModSAC())
GPU_ID = 0
args.learner_gpus = (GPU_ID, )
args.agent.if_use_act_target = False
args.net_dim = 2 ** 9
args.max_memo = 2 ** 22
args.repeat_times = 2 ** 1
args.reward_scale = 2 ** -2
args.batch_size = args.net_dim * 2
args.target_step = args.env.max_step * 2
args.eval_gap = 2 ** 8
args.eval_times1 = 2 ** 1
args.eval_times2 = 2 ** 4
args.break_step = int(8e7)
args.if_allow_break = False
args.worker_num = 4
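# Note: hyperparameters above follow the project's power-of-two convention (net_dim = 2**9,
# batch_size = 2 * net_dim, ...). With if_allow_break=False the run only stops once
# break_step (8e7 environment steps) is reached, so expect a long training session.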
train_and_evaluate(args)
| 270 |
4,218 |
<reponame>Verchasve/conductor_test
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.sqs.config;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.sqs.AmazonSQSClient;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.contribs.queue.sqs.SQSObservableQueue.Builder;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import rx.Scheduler;
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
@Configuration
@EnableConfigurationProperties(SQSEventQueueProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.sqs.enabled", havingValue = "true")
public class SQSEventQueueConfiguration {
@ConditionalOnMissingBean
@Bean
public AmazonSQSClient getSQSClient(AWSCredentialsProvider credentialsProvider) {
return new AmazonSQSClient(credentialsProvider);
}
@Bean
public EventQueueProvider sqsEventQueueProvider(AmazonSQSClient sqsClient, SQSEventQueueProperties properties,
Scheduler scheduler) {
return new SQSEventQueueProvider(sqsClient, properties, scheduler);
}
@ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "sqs", matchIfMissing = true)
@Bean
public Map<Status, ObservableQueue> getQueues(ConductorProperties conductorProperties,
SQSEventQueueProperties properties, AmazonSQSClient sqsClient) {
String stack = "";
if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) {
stack = conductorProperties.getStack() + "_";
}
Status[] statuses = new Status[]{Status.COMPLETED, Status.FAILED};
Map<Status, ObservableQueue> queues = new HashMap<>();
for (Status status : statuses) {
String queuePrefix = StringUtils.isBlank(properties.getListenerQueuePrefix())
? conductorProperties.getAppId() + "_sqs_notify_" + stack
: properties.getListenerQueuePrefix();
String queueName = queuePrefix + status.name();
Builder builder = new Builder().withClient(sqsClient).withQueueName(queueName);
String auth = properties.getAuthorizedAccounts();
String[] accounts = auth.split(",");
for (String accountToAuthorize : accounts) {
accountToAuthorize = accountToAuthorize.trim();
if (accountToAuthorize.length() > 0) {
builder.addAccountToAuthorize(accountToAuthorize.trim());
}
}
ObservableQueue queue = builder.build();
queues.put(status, queue);
}
return queues;
}
}
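// Illustrative application properties wiring this configuration (property names taken from
// the annotations above):
//   conductor.event-queues.sqs.enabled=true
//   conductor.default-event-queue.type=sqs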
| 1,339 |
2,482 |
package com.argusapm.android.debug.storage;
import android.os.Build;
import com.argusapm.android.Env;
import com.argusapm.android.core.Manager;
import com.argusapm.android.debug.config.DebugConfig;
import com.argusapm.android.utils.LogX;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.text.SimpleDateFormat;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
 * Writes the useful data produced by sample analysis to a file, making it easier to
 * track down performance problems. To keep the file from growing without bound, a
 * maximum file size is enforced; once exceeded, old data is deleted.
*
* @author ArgusAPM Team
*/
public class TraceWriter {
private static final String SUB_TAG = "trace";
private static final long MAX_IDLE_TIME = 1 * 60 * 1000L;
private static final long MAX_LOG_SIZE = 3 * 1024 * 1024; // 3 MB
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("MM-dd HH:mm:ss.SSS");
private static final Object LOCKER_WRITE_THREAD = new Object();
private static WriteFileRun sWriteThread = null;
private static ConcurrentLinkedQueue<Object[]> sQueuePool = new ConcurrentLinkedQueue<Object[]>();
public static void log(String tagName, String content) {
log(tagName, content, true);
}
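    /* Illustrative call site (hypothetical tag/content):
     *   TraceWriter.log("ActivityTrace", "MainActivity.onCreate took 35 ms");
     * The first call lazily starts the daemon writer thread and logs a device banner line.
     */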
private synchronized static void log(String tagName, String content, boolean forceFlush) {
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "tagName = " + tagName + " content = " + content);
}
if (sWriteThread == null) {
sWriteThread = new WriteFileRun();
Thread t = new Thread(sWriteThread);
t.setName("ApmTrace.Thread");
t.setDaemon(true);
t.setPriority(Thread.MIN_PRIORITY);
t.start();
String initContent = "---- Phone=" + Build.BRAND + "/" + Build.MODEL + "/verName:" + " ----";
sQueuePool.offer(new Object[]{tagName, initContent, Boolean.valueOf(forceFlush)});
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "init offer content = " + content);
}
}
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "offer content = " + content);
}
sQueuePool.offer(new Object[]{tagName, content, Boolean.valueOf(forceFlush)});
synchronized (LOCKER_WRITE_THREAD) {
LOCKER_WRITE_THREAD.notify();
}
}
public synchronized static void stop() {
if (sWriteThread != null) {
sWriteThread.stopRun();
sWriteThread = null;
}
}
static class WriteFileRun implements Runnable {
private volatile boolean mRunning = true;
private File fLog;
public void stopRun() {
mRunning = false;
synchronized (LOCKER_WRITE_THREAD) {
LOCKER_WRITE_THREAD.notify();
}
}
@Override
public void run() {
while (mRunning) {
try {
Object[] oneLog = sQueuePool.poll();
if (oneLog == null) {
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "oneLog == null");
}
synchronized (LOCKER_WRITE_THREAD) {
LOCKER_WRITE_THREAD.wait(MAX_IDLE_TIME);
}
} else {
String tagName = (String) oneLog[0];
String content = (String) oneLog[1];
Boolean forceFlush = (Boolean) oneLog[2];
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "oneLog = [ " + tagName + ", " + content + ", " + forceFlush + " ]");
}
if (fLog == null) {
fLog = ensureFile();
}
if (fLog != null) {
FileWriter fos = null;
Writer mWriter = null;
try {
fos = new FileWriter(fLog, true);
mWriter = new BufferedWriter(fos, 1024);
handleWrite(mWriter, tagName, content, forceFlush);
} catch (Exception e) {
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "ex : " + e);
}
} finally {
IOUtil.safeClose(mWriter);
IOUtil.safeClose(fos);
}
if (fLog.length() > MAX_LOG_SIZE) {
long time = System.currentTimeMillis();
File fTmp = copyLogTail(fLog);
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "WriteFileRun timeuse : " + (System.currentTimeMillis() - time));
}
if (fTmp != null) {
fLog.delete();
fTmp.renameTo(fLog);
}
}
} else {
if (Env.DEBUG) {
LogX.d(Env.TAG, SUB_TAG, "WriteFileRun file null");
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
/**
 * Trims the log file once it grows beyond MAX_LOG_SIZE (3 MB): the last 3000 lines
 * are kept and everything older is discarded.
 *
 * @param f the current log file
 * @return a temporary file holding the retained tail, or null on failure
*/
private File copyLogTail(File f) {
File fTmp = new File(f.getParent(), f.getName() + "_tmp");
BufferedReader br = null;
BufferedWriter bw = null;
try {
br = new BufferedReader(new FileReader(f));
LinkedList<String> list = new LinkedList<String>();
String sTmp = null;
while ((sTmp = br.readLine()) != null) {
list.add(sTmp);
if (list.size() > 3000) {
list.poll();
}
}
bw = new BufferedWriter(new FileWriter(fTmp, true));
Iterator<String> itr = list.iterator();
while (itr.hasNext()) {
bw.write(itr.next());
bw.write('\n');
}
bw.flush();
return fTmp;
} catch (Throwable e) {
e.printStackTrace();
} finally {
IOUtil.safeClose(br);
IOUtil.safeClose(bw);
}
return null;
}
}
private static File getTraceDir() {
return new File(Manager.getInstance().getBasePath() + Manager.getContext().getPackageName() + File.separator);
}
private static File ensureFile() {
File logDir = getTraceDir();
if (!logDir.exists() && !logDir.mkdirs()) {
return null;
}
File f = new File(logDir, DebugConfig.OUTPUT_FILE);
try {
if (!f.exists() && !f.createNewFile()) {
return null;
}
return f;
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
private static String getTime(long timeMillis) {
return DATE_FORMAT.format(timeMillis);
}
private static void handleWrite(Writer os, String tagName, String content, Boolean forceFlush) {
if (os != null) {
try {
os.append(getTime(System.currentTimeMillis())).append(' ').append(tagName).append(" ");
if (content != null) {
os.append(content);
}
os.append('\n');
if (forceFlush) {
os.flush();
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
| 4,805 |
2,296 |
<gh_stars>1000+
/*
* ir_Sony.hpp
*
* Contains functions for receiving and sending SIRCS/Sony IR Protocol in "raw" and standard format with 5 bit address 7 bit command
*
* This file is part of Arduino-IRremote https://github.com/Arduino-IRremote/Arduino-IRremote.
*
************************************************************************************
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
************************************************************************************
*/
#ifndef _IR_SONY_HPP
#define _IR_SONY_HPP
#include <Arduino.h>
//#define DEBUG // Activate this for lots of lovely debug output from this decoder.
#include "IRremoteInt.h" // evaluates the DEBUG for IR_DEBUG_PRINT
/** \addtogroup Decoder Decoders and encoders for different protocols
* @{
*/
//==============================================================================
// SSSS OOO N N Y Y
// S O O NN N Y Y
// SSS O O N N N Y
// S O O N NN Y
// SSSS OOO N N Y
//==============================================================================
// see https://www.sbprojects.net/knowledge/ir/sirc.php
// Here http://picprojects.org.uk/projects/sirc/ it is claimed that many Sony remotes repeat each frame a minimum of 3 times
// LSB first, start bit + 7 command + 5 to 13 address, no stop bit
//
#define SONY_ADDRESS_BITS 5
#define SONY_COMMAND_BITS 7
#define SONY_EXTRA_BITS 8
#define SONY_BITS_MIN (SONY_COMMAND_BITS + SONY_ADDRESS_BITS) // 12 bits
#define SONY_BITS_15 (SONY_COMMAND_BITS + SONY_ADDRESS_BITS + 3) // 15 bits
#define SONY_BITS_MAX (SONY_COMMAND_BITS + SONY_ADDRESS_BITS + SONY_EXTRA_BITS) // 20 bits == SIRCS_20_PROTOCOL
#define SONY_UNIT 600 // 24 periods of 40kHz
#define SONY_HEADER_MARK (4 * SONY_UNIT) // 2400
#define SONY_ONE_MARK (2 * SONY_UNIT) // 1200
#define SONY_ZERO_MARK SONY_UNIT
#define SONY_SPACE SONY_UNIT
#define SONY_AVERAGE_DURATION 21000 // SONY_HEADER_MARK + SONY_SPACE + 12 * 2,5 * SONY_UNIT // 2.5 because we assume more zeros than ones
#define SONY_REPEAT_PERIOD 45000 // Commands are repeated every 45 ms (measured from start to start) for as long as the key on the remote control is held down.
#define SONY_REPEAT_SPACE (SONY_REPEAT_PERIOD - SONY_AVERAGE_DURATION) // 24 ms
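// Worked example: SONY_AVERAGE_DURATION for a 12 bit frame assumes an average bit cost of
// 2.5 units (mark + space, zeros assumed more frequent than ones):
// 2400 (header mark) + 600 (header space) + 12 * 2.5 * 600 = 21000 us.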
/*
* Repeat commands should be sent in a 45 ms raster.
* There is NO delay after the last sent command / repeat!
* @param numberOfBits if == 20 send 13 address bits otherwise only 5 address bits
*/
void IRsend::sendSony(uint16_t aAddress, uint8_t aCommand, uint_fast8_t aNumberOfRepeats, uint8_t numberOfBits) {
// Set IR carrier frequency
enableIROut(SONY_KHZ); // 40 kHz
uint_fast8_t tNumberOfCommands = aNumberOfRepeats + 1;
while (tNumberOfCommands > 0) {
// Header
mark(SONY_HEADER_MARK);
space(SONY_SPACE);
// send 7 command bits LSB first
sendPulseDistanceWidthData(SONY_ONE_MARK, SONY_SPACE, SONY_ZERO_MARK, SONY_SPACE, aCommand, SONY_COMMAND_BITS,
PROTOCOL_IS_LSB_FIRST);
// send 5, 8, 13 address bits LSB first
sendPulseDistanceWidthData(SONY_ONE_MARK, SONY_SPACE, SONY_ZERO_MARK, SONY_SPACE, aAddress,
(numberOfBits - SONY_COMMAND_BITS), PROTOCOL_IS_LSB_FIRST);
tNumberOfCommands--;
// skip last delay!
if (tNumberOfCommands > 0) {
// send repeated command in a 45 ms raster
delay(SONY_REPEAT_SPACE / MICROS_IN_ONE_MILLI);
}
}
IrReceiver.restartAfterSend();
}
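/*
 * Illustrative call (hypothetical address/command values):
 *   IrSender.sendSony(0x11, 0x2E, 2, SONY_BITS_MIN); // 12 bit SIRCS frame plus 2 repeats
 * Repeats are rastered so that frames start every 45 ms (SONY_REPEAT_PERIOD).
 */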
//+=============================================================================
bool IRrecv::decodeSony() {
// Check header "mark"
if (!matchMark(decodedIRData.rawDataPtr->rawbuf[1], SONY_HEADER_MARK)) {
return false;
}
// Check we have enough data. +2 for initial gap and start bit mark and space minus the last/MSB space. NO stop bit! 26, 32, 42
if (decodedIRData.rawDataPtr->rawlen != (2 * SONY_BITS_MIN) + 2 && decodedIRData.rawDataPtr->rawlen != (2 * SONY_BITS_MAX) + 2
&& decodedIRData.rawDataPtr->rawlen != (2 * SONY_BITS_15) + 2) {
// Consider downgrading to IR_TRACE_PRINT, since this case was seen too often
IR_DEBUG_PRINT(F("Sony: "));
IR_DEBUG_PRINT(F("Data length="));
IR_DEBUG_PRINT(decodedIRData.rawDataPtr->rawlen);
IR_DEBUG_PRINTLN(F(" is not 12, 15 or 20"));
return false;
}
// Check header "space"
if (!matchSpace(decodedIRData.rawDataPtr->rawbuf[2], SONY_SPACE)) {
IR_DEBUG_PRINT(F("Sony: "));
IR_DEBUG_PRINTLN(F("Header space length is wrong"));
return false;
}
if (!decodePulseWidthData((decodedIRData.rawDataPtr->rawlen - 1) / 2, 3, SONY_ONE_MARK, SONY_ZERO_MARK, SONY_SPACE,
PROTOCOL_IS_LSB_FIRST)) {
IR_DEBUG_PRINT(F("Sony: "));
IR_DEBUG_PRINTLN(F("Decode failed"));
return false;
}
// Success
// decodedIRData.flags = IRDATA_FLAGS_IS_LSB_FIRST; // Not required, since this is the start value
uint8_t tCommand = decodedIRData.decodedRawData & 0x7F; // first 7 bits
uint16_t tAddress = decodedIRData.decodedRawData >> 7; // next 5 or 8 or 13 bits
/*
* Check for repeat
*/
if (decodedIRData.rawDataPtr->rawbuf[0] < (SONY_REPEAT_PERIOD / MICROS_PER_TICK)) {
decodedIRData.flags = IRDATA_FLAGS_IS_REPEAT | IRDATA_FLAGS_IS_LSB_FIRST;
}
decodedIRData.command = tCommand;
decodedIRData.address = tAddress;
decodedIRData.numberOfBits = (decodedIRData.rawDataPtr->rawlen - 1) / 2;
decodedIRData.protocol = SONY;
return true;
}
#define SONY_DOUBLE_SPACE_USECS 500 // usually see 713 - not using ticks, to avoid number wrap-around
bool IRrecv::decodeSonyMSB(decode_results *aResults) {
long data = 0;
uint8_t bits = 0;
    unsigned int offset = 0; // Don't skip the first space; check its size
if (aResults->rawlen < (2 * SONY_BITS_MIN) + 2) {
return false;
}
    // Some Sony devices deliver repeats quickly after the first frame;
    // unfortunately a repeat cannot be distinguished from two fast clicks
if (aResults->rawbuf[0] < (SONY_DOUBLE_SPACE_USECS / MICROS_PER_TICK)) {
IR_DEBUG_PRINTLN(F("IR Gap found"));
aResults->bits = 0;
aResults->value = 0xFFFFFFFF;
decodedIRData.flags = IRDATA_FLAGS_IS_REPEAT;
decodedIRData.protocol = SONY;
return true;
}
offset++;
// Check header "mark"
if (!matchMark(aResults->rawbuf[offset], SONY_HEADER_MARK)) {
return false;
}
offset++;
// MSB first - Not compatible to standard, which says LSB first :-(
while (offset + 1 < aResults->rawlen) {
// First check for the constant space length, we do not have a space at the end of raw data
// we are lucky, since the start space is equal the data space.
if (!matchSpace(aResults->rawbuf[offset], SONY_SPACE)) {
return false;
}
offset++;
// bit value is determined by length of the mark
if (matchMark(aResults->rawbuf[offset], SONY_ONE_MARK)) {
data = (data << 1) | 1;
} else if (matchMark(aResults->rawbuf[offset], SONY_ZERO_MARK)) {
data = (data << 1) | 0;
} else {
return false;
}
offset++;
bits++;
}
aResults->bits = bits;
aResults->value = data;
aResults->decode_type = SONY;
decodedIRData.protocol = SONY;
return true;
}
/**
* Old version with MSB first data
*/
void IRsend::sendSony(unsigned long data, int nbits) {
// Set IR carrier frequency
enableIROut(SONY_KHZ);
// Header
mark(SONY_HEADER_MARK);
space(SONY_SPACE);
// Old version with MSB first Data
sendPulseDistanceWidthData(SONY_ONE_MARK, SONY_SPACE, SONY_ZERO_MARK, SONY_SPACE, data, nbits, PROTOCOL_IS_MSB_FIRST);
IrReceiver.restartAfterSend();
}
/** @}*/
#endif // _IR_SONY_HPP
| 3,728 |
892 |
{
"schema_version": "1.2.0",
"id": "GHSA-6r8p-cw9m-43r8",
"modified": "2022-05-01T02:20:58Z",
"published": "2022-05-01T02:20:58Z",
"aliases": [
"CVE-2005-3764"
],
"details": "The image gallery (imagegallery) component in Exponent CMS 0.96.3 and later versions does not properly check the MIME type of uploaded files, with unknown impact from the preview icon, possibly involving injection of HTML.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2005-3764"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/17655"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/417218"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
}
| 387 |
461 |
<filename>系统综合能力—虚拟机/ics2019/nexus-am/tests/cputest/tests/bubble-sort.c<gh_stars>100-1000
#include "trap.h"
#define N 20
int a[N] = {2, 12, 14, 6, 13, 15, 16, 10, 0, 18, 11, 19, 9, 1, 7, 5, 4, 3, 8, 17};
void bubble_sort() {
int i, j, t;
for(j = 0; j < N; j ++) {
for(i = 0; i < N - 1 - j; i ++) {
if(a[i] > a[i + 1]) {
t = a[i];
a[i] = a[i + 1];
a[i + 1] = t;
}
}
}
}
int main() {
bubble_sort();
int i;
for(i = 0; i < N; i ++) {
nemu_assert(a[i] == i);
}
nemu_assert(i == N);
bubble_sort();
for(i = 0; i < N; i ++) {
nemu_assert(a[i] == i);
}
nemu_assert(i == N);
return 0;
}
| 374 |
459 |
<reponame>jose-villegas/VCT_Engine
/*
Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation. Threading Building Blocks is
distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of
the GNU General Public License along with Threading Building Blocks; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
As a special exception, you may use this file as part of a free software library without
restriction. Specifically, if other files instantiate templates or use macros or inline
functions from this file, or you compile this file and link it with other files to produce
an executable, this file does not by itself cause the resulting executable to be covered
by the GNU General Public License. This exception does not however invalidate any other
reasons why the executable file might be covered by the GNU General Public License.
*/
#define TBB_PREVIEW_WAITING_FOR_WORKERS 1
#define TBB_PREVIEW_GLOBAL_CONTROL 1
#include "tbb/global_control.h"
#include "harness.h"
const size_t MB = 1024*1024;
const double BARRIER_TIMEOUT = 10.;
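// Note: global_control objects aggregate while alive - the active value is the
// maximum of all live settings for thread_stack_size and the minimum for
// max_allowed_parallelism - and the previous value is restored on destruction.
// The tests below rely on exactly that behavior.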
void TestStackSizeSimpleControl()
{
{
tbb::global_control s0(tbb::global_control::thread_stack_size, 1*MB);
{
tbb::global_control s1(tbb::global_control::thread_stack_size, 8*MB);
ASSERT(8*MB == tbb::global_control::active_value(tbb::global_control::thread_stack_size), NULL);
}
ASSERT(1*MB == tbb::global_control::active_value(tbb::global_control::thread_stack_size), NULL);
}
}
#include "harness_concurrency_checker.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/blocked_range.h"
#include "tbb/combinable.h"
#include <limits.h> // for UINT_MAX
#include <functional> // for std::plus
struct StackSizeRun: NoAssign {
int num_threads;
Harness::SpinBarrier *barr1, *barr2;
StackSizeRun(int threads, Harness::SpinBarrier *b1, Harness::SpinBarrier *b2) :
num_threads(threads), barr1(b1), barr2(b2) {}
void operator()( int id ) const {
tbb::global_control s1(tbb::global_control::thread_stack_size, (1+id)*MB);
barr1->timed_wait(BARRIER_TIMEOUT);
ASSERT(num_threads*MB == tbb::global_control::active_value(tbb::global_control::thread_stack_size), NULL);
barr2->timed_wait(BARRIER_TIMEOUT);
}
};
void TestStackSizeThreadsControl()
{
int threads = 4;
Harness::SpinBarrier barr1(threads), barr2(threads);
NativeParallelFor( threads, StackSizeRun(threads, &barr1, &barr2) );
}
class CheckWorkersNum {
static tbb::atomic<Harness::SpinBarrier*> barrier;
// count unique worker threads
static tbb::combinable<size_t> uniqThreads;
public:
CheckWorkersNum(Harness::SpinBarrier *barr) {
barrier = barr;
}
void operator()(const tbb::blocked_range<int>&) const {
uniqThreads.local() = 1;
if (barrier) {
barrier->timed_wait(BARRIER_TIMEOUT);
Harness::Sleep(10);
barrier = NULL;
}
}
static void check(size_t expected) {
size_t seen = uniqThreads.combine(std::plus<size_t>());
ASSERT(seen == expected, NULL);
}
static void clear() { uniqThreads.clear(); }
static const size_t LOOP_ITERS = 10*1000;
};
tbb::atomic<Harness::SpinBarrier*> CheckWorkersNum::barrier;
tbb::combinable<size_t> CheckWorkersNum::uniqThreads;
void RunWorkersLimited(int tsi_max_threads, size_t parallelism, bool wait)
{
tbb::global_control s(tbb::global_control::max_allowed_parallelism, parallelism);
    // try both configurations: with already-sleeping workers and with workers not yet sleeping
if (wait)
Harness::Sleep(100);
// current implementation can't have effective active value below 2
const unsigned active_parallelism = max(2U, (unsigned)parallelism);
const unsigned expected_threads = tsi_max_threads>0?
min( (unsigned)tsi_max_threads, active_parallelism )
: ( tbb::tbb_thread::hardware_concurrency()==1? 1 : active_parallelism );
Harness::SpinBarrier barr(expected_threads);
CheckWorkersNum::clear();
tbb::parallel_for(tbb::blocked_range<int>(0, CheckWorkersNum::LOOP_ITERS, 1),
CheckWorkersNum(&barr), tbb::simple_partitioner());
CheckWorkersNum::check(expected_threads);
}
void TSI_and_RunWorkers(int tsi_max_threads, size_t parallelism, size_t max_value)
{
tbb::task_scheduler_init tsi(tsi_max_threads, 0, /*blocking=*/true);
size_t active = tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
ASSERT(active == max(2U, max_value), "active_value must not be changed by task_scheduler_init");
RunWorkersLimited(tsi_max_threads, parallelism, /*wait=*/false);
}
#include "tbb/tbb_thread.h"
void TestWorkers(size_t curr_par)
{
const size_t max_parallelism =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
ASSERT(max(2U, tbb::tbb_thread::hardware_concurrency()) == max_parallelism, NULL);
{
const unsigned h_c = tbb::tbb_thread::hardware_concurrency();
tbb::global_control c(tbb::global_control::max_allowed_parallelism, curr_par);
size_t v = tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
ASSERT(!curr_par || max((size_t)2, curr_par) == v, NULL);
if (h_c > 1)
TSI_and_RunWorkers(tbb::task_scheduler_init::automatic, min(h_c, curr_par), curr_par);
if (curr_par) // do not call task_scheduler_init t(0);
TSI_and_RunWorkers((int)curr_par, curr_par, curr_par);
if (curr_par > 2) { // check that min(tsi, parallelism) is active
TSI_and_RunWorkers((int)curr_par-1, curr_par, curr_par);
TSI_and_RunWorkers((int)curr_par, curr_par-1, curr_par);
}
        // check constraints on the control's value: it can't be increased
tbb::global_control c1(tbb::global_control::max_allowed_parallelism, curr_par+1);
v = tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
if (curr_par)
ASSERT(max(2U, curr_par) == v, "It's impossible to increase maximal parallelism.");
else
ASSERT(2 == v, NULL);
}
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== max_parallelism,
"max parallelism has been restored successfully after decreasing/increasing");
}
void TestWorkersConstraints() {
const size_t max_parallelism =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
tbb::task_scheduler_init tsi(tbb::task_scheduler_init::automatic, 0, /*blocking=*/true);
if (max_parallelism > 3) {
tbb::global_control c(tbb::global_control::max_allowed_parallelism, max_parallelism-1);
ASSERT(max_parallelism-1 ==
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism),
"Allowed parallelism must be decreasable.");
tbb::global_control c1(tbb::global_control::max_allowed_parallelism, max_parallelism-2);
ASSERT(max_parallelism-2 ==
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism),
"Allowed parallelism must be decreasable.");
}
const size_t limit_par = min(max_parallelism, 4U);
    // check that the constraints are really met
for (int wait=0; wait<2; wait++) {
for (size_t num=2; num<limit_par; num++)
RunWorkersLimited(tbb::task_scheduler_init::automatic, num, wait==1);
for (size_t num=limit_par; num>1; num--)
RunWorkersLimited(tbb::task_scheduler_init::automatic, num, wait==1);
}
}
struct SetUseRun: NoAssign {
Harness::SpinBarrier *barr;
SetUseRun(Harness::SpinBarrier *b) : barr(b) {}
void operator()( int id ) const {
if (id == 0) {
for (int i=0; i<10; i++) {
tbb::task_scheduler_init tsi(tbb::task_scheduler_init::automatic, 0,
/*blocking=*/true);
tbb::parallel_for(tbb::blocked_range<int>(0, CheckWorkersNum::LOOP_ITERS, 1),
CheckWorkersNum(NULL), tbb::simple_partitioner());
barr->timed_wait(BARRIER_TIMEOUT);
}
} else {
for (int i=0; i<10; i++) {
tbb::global_control c(tbb::global_control::max_allowed_parallelism, 8);
barr->timed_wait(BARRIER_TIMEOUT);
}
}
}
};
void TestConcurrentSetUseConcurrency()
{
Harness::SpinBarrier barr(2);
NativeParallelFor( 2, SetUseRun(&barr) );
}
// check number of workers after autoinitialization
void TestAutoInit()
{
const size_t max_parallelism =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
const unsigned expected_threads = tbb::tbb_thread::hardware_concurrency()==1?
1 : (unsigned)max_parallelism;
Harness::SpinBarrier barr(expected_threads);
CheckWorkersNum::clear();
tbb::parallel_for(tbb::blocked_range<int>(0, CheckWorkersNum::LOOP_ITERS, 1),
CheckWorkersNum(&barr), tbb::simple_partitioner());
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== max_parallelism, "max_allowed_parallelism must not be changed after auto init");
CheckWorkersNum::check(expected_threads);
if (max_parallelism > 2) {
        // after autoinit it's possible to decrease the number of workers
tbb::global_control s(tbb::global_control::max_allowed_parallelism, max_parallelism-1);
const unsigned expected_threads_1 = max(2U, (unsigned)max_parallelism-1);
barr.initialize(expected_threads_1);
CheckWorkersNum::clear();
tbb::parallel_for(tbb::blocked_range<int>(0, CheckWorkersNum::LOOP_ITERS, 1),
CheckWorkersNum(&barr), tbb::simple_partitioner());
CheckWorkersNum::check(expected_threads_1);
}
}
// need this to use TRY_BAD_EXPR_ENABLED when TBB_USE_ASSERT is not defined
#undef TBB_USE_ASSERT
#define TBB_USE_ASSERT 1
#include "harness_bad_expr.h"
void TestInvalidParallelism()
{
#if TRY_BAD_EXPR_ENABLED
const size_t max_parallelism =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
for (size_t par = 0; par<=1; par++) {
{
tbb::set_assertion_handler( AssertionFailureHandler );
TRY_BAD_EXPR( tbb::global_control c(tbb::global_control::max_allowed_parallelism, par), "Values of 1 and 0 are not supported for max_allowed_parallelism." );
tbb::set_assertion_handler( ReportError );
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== max_parallelism, NULL);
}
{
const size_t P = 2;
tbb::global_control c(tbb::global_control::max_allowed_parallelism, P);
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== P, NULL);
tbb::set_assertion_handler( AssertionFailureHandler );
TRY_BAD_EXPR( tbb::global_control cZ(tbb::global_control::max_allowed_parallelism, par), "Values of 1 and 0 are not supported for max_allowed_parallelism." );
tbb::set_assertion_handler( ReportError );
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== P, NULL);
}
ASSERT(tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism)
== max_parallelism, NULL);
}
#endif /* TRY_BAD_EXPR_ENABLED */
}
void TestTooBigStack()
{
#if __TBB_x86_32
const size_t stack_sizes[] = {512*MB, 2*1024*MB, UINT_MAX};
#else
const size_t stack_sizes[] = {512*MB, 2*1024*MB, UINT_MAX, 10LU*1024*MB};
#endif
#if __TBB_WIN8UI_SUPPORT
size_t default_ss = tbb::global_control::active_value(tbb::global_control::thread_stack_size);
#endif
for (unsigned i = 0; i<Harness::array_length(stack_sizes); i++) {
        // as there is no stack size setting for Windows Store* apps, skip it
#if TRY_BAD_EXPR_ENABLED && __TBB_x86_64 && (_WIN32 || _WIN64) && !__TBB_WIN8UI_SUPPORT
if (stack_sizes[i] != (unsigned)stack_sizes[i]) {
size_t curr_ss = tbb::global_control::active_value(tbb::global_control::thread_stack_size);
tbb::set_assertion_handler( AssertionFailureHandler );
TRY_BAD_EXPR( tbb::global_control s1(tbb::global_control::thread_stack_size, stack_sizes[i]), "Stack size is limited to unsigned int range" );
tbb::set_assertion_handler( ReportError );
ASSERT(curr_ss == tbb::global_control::active_value(tbb::global_control::thread_stack_size), "Changing of stack size is not expected.");
continue;
}
#endif
tbb::global_control s1(tbb::global_control::thread_stack_size, stack_sizes[i]);
size_t actual_stack_sz = tbb::global_control::active_value(tbb::global_control::thread_stack_size);
#if __TBB_WIN8UI_SUPPORT
ASSERT(actual_stack_sz == default_ss, "It's ignored for Windows Store* apps");
#else
ASSERT(actual_stack_sz==stack_sizes[i], NULL);
#endif
}
}
int TestMain()
{
const unsigned h_c = tbb::tbb_thread::hardware_concurrency();
bool excessHC;
{
tbb::task_scheduler_init t(h_c+1);
excessHC = Harness::CanReachConcurrencyLevel(h_c+1);
}
if (h_c>2)
TestWorkers(h_c-1);
if (excessHC) // this requires hardware concurrency +1, and hang if not provided
TestWorkers(h_c+1);
if (excessHC || h_c >= 2)
TestWorkers(2);
if (excessHC || h_c >= 3)
TestWorkers(3);
TestWorkersConstraints();
TestConcurrentSetUseConcurrency();
TestInvalidParallelism();
TestAutoInit(); // auto-initialization done at this point
size_t default_ss = tbb::global_control::active_value(tbb::global_control::thread_stack_size);
ASSERT(default_ss, NULL);
#if !__TBB_WIN8UI_SUPPORT
// it's impossible to change stack size for Windows Store* apps, so skip the tests
TestStackSizeSimpleControl();
TestStackSizeThreadsControl();
#endif
TestTooBigStack();
ASSERT(default_ss == tbb::global_control::active_value(tbb::global_control::thread_stack_size), NULL);
return Harness::Done;
}
| 6,305 |
6,098 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# Given alpha array and using default lambda, build two cross-validation models, one with validation dataset
# and one without for binomial. Since they use the metrics from cross-validation, they should come up with
# the same models.
def glm_alpha_arrays_null_lambda_cv():
print("Testing glm cross-validation with alpha array, default lambda values for binomial models.")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
enum_columns = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10"]
    for cname in enum_columns:
        h2o_data[cname] = h2o_data[cname].asfactor()  # convert the categorical columns to factors
myY = "C21"
h2o_data["C21"] = h2o_data["C21"].asfactor()
    myX = [name for name in h2o_data.names if name != myY]  # list.remove() returns None, so build the predictor list explicitly
data_frames = h2o_data.split_frame(ratios=[0.8])
training_data = data_frames[0]
test_data = data_frames[1]
# build model with CV but no validation dataset
cv_model = glm(family='binomial',alpha=[0.1,0.5,0.9], nfolds = 3, fold_assignment="modulo")
cv_model.train(training_frame=training_data,x=myX,y=myY)
cv_r = glm.getGLMRegularizationPath(cv_model)
# build model with CV and with validation dataset
cv_model_valid = glm(family='binomial',alpha=[0.1,0.5,0.9], nfolds = 3, fold_assignment="modulo")
cv_model_valid.train(training_frame=training_data, validation_frame = test_data, x=myX,y=myY)
cv_r_valid = glm.getGLMRegularizationPath(cv_model_valid)
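    # Both models derive their metrics from cross-validation, so every submodel
    # along the regularization path should produce identical coefficients.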
for l in range(0,len(cv_r['lambdas'])):
print("comparing coefficients for submodel {0}".format(l))
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients'][l], cv_r_valid['coefficients'][l], tol=1e-6)
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients_std'][l], cv_r_valid['coefficients_std'][l], tol=1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_alpha_arrays_null_lambda_cv)
else:
glm_alpha_arrays_null_lambda_cv()
| 861 |
345 |
import tensorflow as tf
from agent.forward import Forward
from config import *
class Access(object):
def __init__(self, state_size, action_size):
with tf.variable_scope('Access'):
# placeholder
self.inputs = tf.placeholder(tf.float32, [None] + state_size, "states")
self.actions = tf.placeholder(tf.int32, [None], "actions")
self.targets = tf.placeholder(tf.float32, [None], "discounted_rewards")
# network interface
self.actor = Forward('actor')
self.critic = Forward('critic')
self.policy = tf.nn.softmax(self.actor(self.inputs, action_size))
self.value = self.critic(self.inputs, 1)
# global optimizer
self.optimizer_actor = tf.train.RMSPropOptimizer(
LEARNING_RATE, DECAY_RATE, name='optimizer_actor')
self.optimizer_critic = tf.train.RMSPropOptimizer(
LEARNING_RATE, DECAY_RATE, name='optimizer_critic')
# saver
var_list = self.get_trainable()
var_list = list(var_list[0] + var_list[1])
self.saver = tf.train.Saver(var_list=var_list)
def get_trainable(self):
return [self.actor.get_variables(), self.critic.get_variables()]
def save(self, sess, path):
self.saver.save(sess, path)
def restore(self, sess, path):
var_list = list(self.get_trainable()[0] + self.get_trainable()[1])
saver = tf.train.Saver(var_list=var_list)
saver.restore(sess, path)
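# A minimal usage sketch (hypothetical shapes for illustration: an 84x84x3 state
# and 4 discrete actions; Forward, LEARNING_RATE and DECAY_RATE come from this
# project's own modules):
#
#   access = Access(state_size=[84, 84, 3], action_size=4)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       access.save(sess, "checkpoints/model.ckpt")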
| 692 |
1,811 |
<filename>packages/cucumber-runner/schema/cucumber-runner-options.json
{
"$schema": "http://json-schema.org/draft-07/schema",
"title": "CucumberRunnerOptions",
"type": "object",
"additionalProperties": false,
"properties": {
"cucumber": {
"description": "Configuration for @stryker-mutator/cucumber-runner",
"title": "CucumberSetup",
"additionalProperties": false,
"type": "object",
"default": {},
"properties": {
"tags": {
"description": "Use to run specific features or scenarios. See https://github.com/cucumber/cucumber-js/blob/main/docs/cli.md#tags",
"type": "array",
"items": { "type": "string" }
},
"features": {
"description": "Run these feature files. See https://github.com/cucumber/cucumber-js/blob/main/docs/cli.md#running-specific-features",
"type":"array",
"items": { "type": "string" }
},
"profile": {
"description": "Configure which profile to use. The default is `undefined`, which results in the default profile being used. See https://github.com/cucumber/cucumber-js/blob/main/docs/profiles.md#profiles.",
"type":"string"
}
}
}
},
"definitions": {
"karmaProjectKind": {
"title": "ProjectKind",
"description": "Specify which kind of project you're using. This determines which command is used to start karma\n* `custom`: configure @stryker-mutator/karma-runner to use `karma start`\n* `angular-cli`: configure @stryker-mutator/karma-runner to use `ng test`",
"enum": [
"custom",
"angular-cli"
]
},
"karmaNgConfigOptions": {
"title": "NgConfigOptions",
"type": "object",
"additionalProperties": false,
"properties": {
"testArguments": {
"description": "Add [ng test arguments](https://github.com/angular/angular-cli/wiki/test#options). For example, specify an alternative project with: `\"testArguments\": { \"project\": \"my-lib\" }",
"title": "NgTestArguments",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
}
| 935 |
837 |
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2015, Rice University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Rice University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: <NAME>, <NAME>, <NAME> */
#include <ompl/base/spaces/RealVectorStateSpace.h>
#include <ompl/tools/thunder/Thunder.h>
#include <ompl/tools/lightning/Lightning.h>
#include <ompl/util/PPM.h>
#include <ompl/config.h>
#include <boost/filesystem.hpp>
#include <iostream>
namespace ob = ompl::base;
namespace og = ompl::geometric;
namespace ot = ompl::tools;
class Plane2DEnvironment
{
public:
Plane2DEnvironment(const char *ppm_file, bool useThunder = true)
{
bool ok = false;
try
{
ppm_.loadFile(ppm_file);
ok = true;
}
catch(ompl::Exception &ex)
{
OMPL_ERROR("Unable to load %s.\n%s", ppm_file, ex.what());
}
if (ok)
{
auto space(std::make_shared<ob::RealVectorStateSpace>());
space->addDimension(0.0, ppm_.getWidth());
space->addDimension(0.0, ppm_.getHeight());
maxWidth_ = ppm_.getWidth() - 1;
maxHeight_ = ppm_.getHeight() - 1;
if (useThunder)
{
expPlanner_ = std::make_shared<ot::Thunder>(space);
expPlanner_->setFilePath("thunder.db");
}
else
{
expPlanner_ = std::make_shared<ot::Lightning>(space);
expPlanner_->setFilePath("lightning.db");
}
// set state validity checking for this space
expPlanner_->setStateValidityChecker([this](const ob::State *state)
{ return isStateValid(state); });
space->setup();
expPlanner_->getSpaceInformation()->setStateValidityCheckingResolution(1.0 / space->getMaximumExtent());
vss_ = expPlanner_->getSpaceInformation()->allocValidStateSampler();
// DTC
//experience_setup_->setPlanner(std::make_shared<og::RRTConnect>(si_));
// Set the repair planner
// experience_setup_->setRepairPlanner(std::make_shared<og::RRTConnect>(si_));
}
}
~Plane2DEnvironment()
{
expPlanner_->save();
}
bool plan()
{
std::cout << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
if (!expPlanner_)
{
OMPL_ERROR("Simple setup not loaded");
return false;
}
expPlanner_->clear();
ob::ScopedState<> start(expPlanner_->getStateSpace());
vss_->sample(start.get());
ob::ScopedState<> goal(expPlanner_->getStateSpace());
vss_->sample(goal.get());
expPlanner_->setStartAndGoalStates(start, goal);
bool solved = expPlanner_->solve(10.);
if (solved)
OMPL_INFORM("Found solution in %g seconds",
expPlanner_->getLastPlanComputationTime());
else
OMPL_INFORM("No solution found");
expPlanner_->doPostProcessing();
        return solved; // report whether a solution was actually found
}
private:
bool isStateValid(const ob::State *state) const
{
const int w = std::min((int)state->as<ob::RealVectorStateSpace::StateType>()->values[0], maxWidth_);
const int h = std::min((int)state->as<ob::RealVectorStateSpace::StateType>()->values[1], maxHeight_);
const ompl::PPM::Color &c = ppm_.getPixel(h, w);
return c.red > 127 && c.green > 127 && c.blue > 127;
}
ot::ExperienceSetupPtr expPlanner_;
ob::ValidStateSamplerPtr vss_;
int maxWidth_;
int maxHeight_;
ompl::PPM ppm_;
};
int main(int argc, char **)
{
std::cout << "OMPL version: " << OMPL_VERSION << std::endl;
boost::filesystem::path path(TEST_RESOURCES_DIR);
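    // With no extra command-line arguments the Thunder framework is used;
    // passing any argument selects Lightning instead.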
Plane2DEnvironment env((path / "ppm" / "floor.ppm").string().c_str(), argc==1);
for (unsigned int i = 0; i < 100; ++i)
env.plan();
return 0;
}
| 2,300 |
314 |
#pragma once
#include <Arduino.h>
#include <Service.h>
#ifdef __USENSOR__
#include <HCSR04.h>
#else
#include <Rotary.h>
#endif
typedef enum {
CCW,
CW,
OFF
} MotorState;
typedef enum {
UNCALIBRATED, // nothing calibrated
SEMICALIBRATED, // bottom calibrated
CALIBRATED // bottom-top calibrated
} MotorMode;
#ifdef __EEPROM__
#define ADDRESS_POSITION 0
#define ADDRESS_END_STOP_0 (ADDRESS_POSITION + sizeof(unsigned int))
#define ADDRESS_END_STOP_1 (ADDRESS_END_STOP_0 + sizeof(unsigned int))
#define ADDRESS_MODE (ADDRESS_END_STOP_1 + sizeof(MotorMode))
#endif
#ifdef __USENSOR__
#define SENSOR_TIMEOUT 125 // USensor sample rate in milliseconds
#define SENSOR_DELTA_ON 10 // USensor min change when ON in millimeters
#define SENSOR_DELTA_OFF 75 // USensor min change when OFF in millimeters
#endif
class Motor : public TimedService {
private:
#ifndef __USENSOR__
Rotary sensor;
#else
UltraSonicDistanceSensor sensor;
#endif
const uint8_t sensor_pin_1 = 0, sensor_pin_2 = 0;
const uint8_t pos_diff = 0, min_change = 0;
bool disabled = false;
long next_position = -1;
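    // End stops default to the full unsigned range until calibration narrows them.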
unsigned int end_stop[2] = {0u, ~0u};
MotorState state = OFF;
MotorMode mode = UNCALIBRATED;
volatile unsigned int position = 0u, position_change = 0u;
protected:
bool reverse = false;
#ifndef __USENSOR__
void update_position(unsigned char result);
#endif
void initPin(uint8_t pin, uint8_t val = LOW);
bool check_end_stops(const unsigned int end_stop_down, const unsigned int end_stop_up) const;
virtual void _off() = 0;
virtual void _dir_cw() = 0;
virtual void _dir_ccw() = 0;
public:
Motor(uint8_t _pin1, uint8_t _pin2, uint8_t stop_diff, uint8_t min_change, bool reverse);
bool begin() override;
void off();
void dir_cw();
void dir_ccw();
void set_end_stop(unsigned int end_stop, unsigned int offset = 0);
unsigned int get_position() const;
unsigned int get_position_change();
void reset_position();
void set_position(unsigned int pos);
MotorState get_state() const;
MotorMode get_mode() const;
unsigned int get_end_stop_low();
unsigned int get_end_stop_high();
void set_mode(MotorMode state);
void disable() override;
void cycle() override;
};
| 910 |
5,813 |
<reponame>RomaKoks/druid<filename>processing/src/main/java/org/apache/druid/segment/virtual/ExpressionPlanner.java<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.segment.virtual;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.apache.druid.math.expr.Expr;
import org.apache.druid.math.expr.ExpressionType;
import org.apache.druid.math.expr.Parser;
import org.apache.druid.segment.ColumnInspector;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.ValueType;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
public class ExpressionPlanner
{
private ExpressionPlanner()
{
// No instantiation.
}
/**
* Druid tries to be chill to expressions to make up for not having a well defined table schema across segments. This
* method performs some analysis to determine what sort of selectors can be constructed on top of an expression,
   * whether or not the expression will need to be implicitly mapped across multi-valued inputs, whether the expression
   * produces multi-valued outputs, whether it is vectorizable, and everything else interesting when making a selector.
*
* Results are stored in a {@link ExpressionPlan}, which can be examined to do whatever is necessary to make things
* function properly.
*/
public static ExpressionPlan plan(ColumnInspector inspector, Expr expression)
{
final Expr.BindingAnalysis analysis = expression.analyzeInputs();
Parser.validateExpr(expression, analysis);
EnumSet<ExpressionPlan.Trait> traits = EnumSet.noneOf(ExpressionPlan.Trait.class);
Set<String> noCapabilities = new HashSet<>();
Set<String> maybeMultiValued = new HashSet<>();
List<String> needsApplied = ImmutableList.of();
ColumnType singleInputType = null;
ExpressionType outputType = null;
final Set<String> columns = analysis.getRequiredBindings();
// check and set traits which allow optimized selectors to be created
if (columns.isEmpty()) {
traits.add(ExpressionPlan.Trait.CONSTANT);
} else if (expression.isIdentifier()) {
traits.add(ExpressionPlan.Trait.IDENTIFIER);
} else if (columns.size() == 1) {
final String column = Iterables.getOnlyElement(columns);
final ColumnCapabilities capabilities = inspector.getColumnCapabilities(column);
// These flags allow for selectors that wrap a single underlying column to be optimized, through caching results
// and via allowing deferred execution in the case of building dimension selectors.
// SINGLE_INPUT_SCALAR
// is set if an input is single valued, and the output is definitely single valued, with an additional requirement
// for strings that the column is dictionary encoded.
// SINGLE_INPUT_MAPPABLE
// is set when a single input string column, which can be multi-valued, but if so, it must be implicitly mappable
// (i.e. the expression is not treating its input as an array and not wanting to output an array)
if (capabilities != null && !analysis.hasInputArrays() && !analysis.isOutputArray()) {
boolean isSingleInputMappable = false;
boolean isSingleInputScalar = capabilities.hasMultipleValues().isFalse();
if (capabilities.is(ValueType.STRING)) {
isSingleInputScalar &= capabilities.isDictionaryEncoded().isTrue();
isSingleInputMappable = capabilities.isDictionaryEncoded().isTrue() &&
!capabilities.hasMultipleValues().isUnknown();
}
// if satisfied, set single input output type and flags
if (isSingleInputScalar || isSingleInputMappable) {
singleInputType = capabilities.toColumnType();
if (isSingleInputScalar) {
traits.add(ExpressionPlan.Trait.SINGLE_INPUT_SCALAR);
}
if (isSingleInputMappable) {
traits.add(ExpressionPlan.Trait.SINGLE_INPUT_MAPPABLE);
}
}
}
}
// if we didn't eliminate this expression as a single input scalar or mappable expression, it might need
// automatic transformation to map across multi-valued inputs (or row by row detection in the worst case)
if (
ExpressionPlan.none(
traits,
ExpressionPlan.Trait.SINGLE_INPUT_SCALAR,
ExpressionPlan.Trait.CONSTANT,
ExpressionPlan.Trait.IDENTIFIER
)
) {
final Set<String> definitelyMultiValued = new HashSet<>();
final Set<String> definitelyArray = new HashSet<>();
for (String column : analysis.getRequiredBindings()) {
final ColumnCapabilities capabilities = inspector.getColumnCapabilities(column);
if (capabilities != null) {
if (capabilities.isArray()) {
definitelyArray.add(column);
} else if (capabilities.is(ValueType.STRING) && capabilities.hasMultipleValues().isTrue()) {
definitelyMultiValued.add(column);
} else if (capabilities.is(ValueType.STRING) &&
capabilities.hasMultipleValues().isMaybeTrue() &&
!analysis.getArrayBindings().contains(column)
) {
maybeMultiValued.add(column);
}
} else {
noCapabilities.add(column);
}
}
// find any inputs which will need implicitly mapped across multi-valued rows
needsApplied =
columns.stream()
.filter(
c -> !definitelyArray.contains(c)
&& definitelyMultiValued.contains(c)
&& !analysis.getArrayBindings().contains(c)
)
.collect(Collectors.toList());
// if any multi-value inputs, set flag for non-scalar inputs
if (analysis.hasInputArrays()) {
traits.add(ExpressionPlan.Trait.NON_SCALAR_INPUTS);
}
if (!noCapabilities.isEmpty()) {
traits.add(ExpressionPlan.Trait.UNKNOWN_INPUTS);
}
if (!maybeMultiValued.isEmpty()) {
traits.add(ExpressionPlan.Trait.INCOMPLETE_INPUTS);
}
// if expression needs transformed, lets do it
if (!needsApplied.isEmpty()) {
traits.add(ExpressionPlan.Trait.NEEDS_APPLIED);
}
}
// only set output type if we are pretty confident about input types
final boolean shouldComputeOutput = ExpressionPlan.none(
traits,
ExpressionPlan.Trait.UNKNOWN_INPUTS,
ExpressionPlan.Trait.INCOMPLETE_INPUTS
);
if (shouldComputeOutput) {
outputType = expression.getOutputType(inspector);
}
// if analysis predicts output, or inferred output type, is array, output will be arrays
if (analysis.isOutputArray() || (outputType != null && outputType.isArray())) {
traits.add(ExpressionPlan.Trait.NON_SCALAR_OUTPUT);
// single input mappable may not produce array output explicitly, only through implicit mapping
traits.remove(ExpressionPlan.Trait.SINGLE_INPUT_SCALAR);
traits.remove(ExpressionPlan.Trait.SINGLE_INPUT_MAPPABLE);
}
// vectorized expressions do not support incomplete, multi-valued inputs or outputs, or implicit mapping
// they also do not support unknown inputs, but they also do not currently have to deal with them, as missing
    // capabilities is indicative of a non-existent column instead of an unknown schema. If this ever changes,
// this check should also change
boolean supportsVector = ExpressionPlan.none(
traits,
ExpressionPlan.Trait.INCOMPLETE_INPUTS,
ExpressionPlan.Trait.NEEDS_APPLIED,
ExpressionPlan.Trait.NON_SCALAR_INPUTS,
ExpressionPlan.Trait.NON_SCALAR_OUTPUT
);
if (supportsVector && expression.canVectorize(inspector)) {
// make sure to compute the output type for a vector expression though, because we might have skipped it earlier
      // due to unknown inputs, but that's ok here since it just means it doesn't exist
outputType = expression.getOutputType(inspector);
traits.add(ExpressionPlan.Trait.VECTORIZABLE);
}
return new ExpressionPlan(
inspector,
expression,
analysis,
traits,
outputType,
singleInputType,
Sets.union(noCapabilities, maybeMultiValued),
needsApplied
);
}
}
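/*
 * A minimal usage sketch (hypothetical and for illustration only; it assumes an
 * ExprMacroTable and a ColumnInspector for the target segment are in scope):
 *
 *   Expr expr = Parser.parse("concat(dim1, '-', dim2)", macroTable);
 *   ExpressionPlan plan = ExpressionPlanner.plan(inspector, expr);
 *   if (plan.is(ExpressionPlan.Trait.VECTORIZABLE)) {
 *     // safe to build a vectorized selector for this expression
 *   }
 */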
| 3,332 |
1,318 |
<filename>tensorflow_model_optimization/python/core/internal/tensor_encoding/testing/__init__.py
# Copyright 2019, The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing utilities for the `tensor_encoding` package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import AdaptiveNormalizeEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import aggregate_state_update_tensors
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import BaseEncodingStageTest
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import get_tensor_with_random_shape
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import is_adaptive_stage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import PlusOneEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import PlusOneOverNEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import PlusRandomNumEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import RandomAddSubtractOneEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import ReduceMeanEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import SignIntFloatEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import SimpleLinearEncodingStage
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import TestData
from tensorflow_model_optimization.python.core.internal.tensor_encoding.testing.test_utils import TimesTwoEncodingStage
| 728 |
1,968 |
#include "Rtt_AndroidBuildDialog.h"
#include "Rtt_LuaContext.h"
#include "Rtt_LuaFile.h"
#include "Rtt_MPlatform.h"
#include "Rtt_PlatformAppPackager.h"
#include "Rtt_PlatformPlayer.h"
#include "Rtt_PlatformSimulator.h"
#include "Rtt_RenderingStream.h"
#include "Rtt_Runtime.h"
#include "Rtt_SimulatorAnalytics.h"
#include "Rtt_LinuxPlatform.h"
#include "Rtt_LinuxSimulatorServices.h"
#include "Rtt_LinuxSimulatorView.h"
#include "Rtt_LinuxUtils.h"
#include "Rtt_LinuxBuildDialog.h"
#include "Rtt_SimulatorRecents.h"
#include "Rtt_AndroidAppPackager.h"
#include "Rtt_TargetAndroidAppStore.h"
#include "ListKeyStore.h"
#include "Core/Rtt_FileSystem.h"
#include <string.h>
#include <wx/valtext.h>
#include <wx/stdpaths.h>
#include <future>
#define GOOGLE_PLAY_STORE_TARGET "Google Play"
#define AMAZON_STORE_TARGET "Amazon"
#define NO_STORE_TARGET "None"
class wxRegEx;
namespace Rtt
{
AndroidBuildDialog::AndroidBuildDialog(wxWindow *parent, wxWindowID id, const wxString &title, const wxPoint &pos, const wxSize &size, long style):
wxDialog(parent, id, title, pos, size, wxCAPTION)
{
wxTextValidator appNameValidator(wxFILTER_ALPHANUMERIC);
wxTextValidator appVersionCodeValidator(wxFILTER_DIGITS);
wxTextValidator packageNameValidator(wxFILTER_ALPHANUMERIC | wxFILTER_INCLUDE_CHAR_LIST);
packageNameValidator.AddCharIncludes(".");
appNameTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0, appNameValidator);
appVersionTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0, packageNameValidator);
appVersionCodeTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0, appVersionCodeValidator);
appPackageNameTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0, packageNameValidator);
appPathTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, wxTE_READONLY);
appBuildPathTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, wxTE_READONLY);
appBuildPathButton = new wxButton(this, wxID_OPEN, wxT("..."));
const wxString targetAppStoreComboBoxChoices[] =
{
wxT(GOOGLE_PLAY_STORE_TARGET),
wxT(AMAZON_STORE_TARGET),
wxT(NO_STORE_TARGET),
};
targetAppStoreComboBox = new wxComboBox(this, wxID_ANY, wxT(""), wxDefaultPosition, wxDefaultSize, 3, targetAppStoreComboBoxChoices, wxCB_DROPDOWN | wxCB_READONLY);
keystorePathTextCtrl = new wxTextCtrl(this, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, wxTE_READONLY);
keystorePathButton = new wxButton(this, wxID_FILE1, wxT("..."));
const wxString keyAliasComboBoxChoices[] =
{
wxT("androiddebugkey"),
};
keyAliasComboBox = new wxComboBox(this, wxID_ANY, wxT(""), wxDefaultPosition, wxDefaultSize, 0, keyAliasComboBoxChoices, wxCB_DROPDOWN | wxCB_READONLY);
installToDeviceCheckbox = new wxCheckBox(this, wxID_ANY, wxEmptyString);
buildButton = new wxButton(this, wxID_OK, wxT("Build"));
cancelButton = new wxButton(this, wxID_CANCEL, wxT("Cancel"));
SetProperties();
DoLayout();
}
void AndroidBuildDialog::SetProperties()
{
SetTitle(wxT("Build For Android"));
appNameTextCtrl->SetToolTip(wxT("The name to give your projects output binary."));
appVersionTextCtrl->SetToolTip(wxT("Your projects version number/string."));
appVersionCodeTextCtrl->SetToolTip(wxT("Your projects version code (integer)."));
appPackageNameTextCtrl->SetToolTip(wxT("A unique Java style package identifier for your proejct (e.g. com.mycompany.myapp)."));
appPathTextCtrl->SetToolTip(wxT("The path to your projects main folder."));
appBuildPathTextCtrl->SetToolTip(wxT("The path to your projects generated binary."));
appBuildPathButton->SetMinSize(wxSize(40, 30));
appBuildPathButton->SetToolTip(wxT("Click here to set a new path for your projects ouput binary."));
targetAppStoreComboBox->SetToolTip(wxT("The app store you wish to target."));
targetAppStoreComboBox->SetSelection(0);
keystorePathTextCtrl->SetToolTip(wxT("The path to your projects android keystore."));
keystorePathButton->SetMinSize(wxSize(40, 30));
keystorePathButton->SetToolTip(wxT("Click here to set a new path for your projects ouput binary."));
keyAliasComboBox->SetToolTip(wxT("Your projects android keystore alias."));
keyAliasComboBox->SetSelection(0);
installToDeviceCheckbox->SetToolTip(wxT("Installs this app to your connected android device via adb after a build."));
keystorePassword = "<PASSWORD>";
keystorePasswordValid = true;
}
void AndroidBuildDialog::DoLayout()
{
wxBoxSizer *parentGridSizer = new wxBoxSizer(wxVERTICAL);
wxFlexGridSizer *buttonGridSizer = new wxFlexGridSizer(1, 2, 0, 0);
wxFlexGridSizer *optionsGridSizer = new wxFlexGridSizer(10, 3, 4, 10);
wxStaticText *appNameLabel = new wxStaticText(this, wxID_ANY, wxT("Application Name:"));
optionsGridSizer->Add(appNameLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appNameTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *appVersionLabel = new wxStaticText(this, wxID_ANY, wxT("Application Version:"));
optionsGridSizer->Add(appVersionLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appVersionTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *appVersionCodeLabel = new wxStaticText(this, wxID_ANY, wxT("Application Version Code:"));
optionsGridSizer->Add(appVersionCodeLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appVersionCodeTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *appPackageNameLabel = new wxStaticText(this, wxID_ANY, wxT("Package Name:"));
optionsGridSizer->Add(appPackageNameLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appPackageNameTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *appPathLabel = new wxStaticText(this, wxID_ANY, wxT("Application Path:"));
optionsGridSizer->Add(appPathLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appPathTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *appBuildPathLabel = new wxStaticText(this, wxID_ANY, wxT("Build Ouput Path:"));
optionsGridSizer->Add(appBuildPathLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(appBuildPathTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(appBuildPathButton, 0, 0, 0);
wxStaticText *targetAppStoreLabel = new wxStaticText(this, wxID_ANY, wxT("Target App Store:"));
optionsGridSizer->Add(targetAppStoreLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(targetAppStoreComboBox, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *keyStoreLabel = new wxStaticText(this, wxID_ANY, wxT("Keystore:"));
optionsGridSizer->Add(keyStoreLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(keystorePathTextCtrl, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(keystorePathButton, 0, 0, 0);
wxStaticText *keyAliasLabel = new wxStaticText(this, wxID_ANY, wxT("Key Alias:"));
optionsGridSizer->Add(keyAliasLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(keyAliasComboBox, 0, wxEXPAND | wxRIGHT, 10);
optionsGridSizer->Add(0, 0, 0, 0, 0);
wxStaticText *installToDeviceLabel = new wxStaticText(this, wxID_ANY, wxT("Install After Build?"));
optionsGridSizer->Add(installToDeviceLabel, 0, wxLEFT, 10);
optionsGridSizer->Add(installToDeviceCheckbox, 0, 0, 0);
optionsGridSizer->Add(0, 0, 0, 0, 0);
optionsGridSizer->AddGrowableCol(1);
parentGridSizer->Add(optionsGridSizer, 0, wxALL | wxEXPAND, 10);
wxStaticLine *staticLineSeparator = new wxStaticLine(this, wxID_ANY);
parentGridSizer->Add(staticLineSeparator, 0, wxBOTTOM | wxEXPAND | wxTOP, 5);
buttonGridSizer->Add(buildButton, 1, wxRIGHT | wxTOP, 10);
buttonGridSizer->Add(cancelButton, 1, wxRIGHT | wxTOP, 10);
buttonGridSizer->AddGrowableCol(0);
buttonGridSizer->AddGrowableCol(1);
parentGridSizer->Add(buttonGridSizer, 1, wxALIGN_CENTER_HORIZONTAL, 0);
SetSizer(parentGridSizer);
Layout();
Centre();
}
void AndroidBuildDialog::SetAppContext(SolarAppContext *appContext)
{
fAppContext = appContext;
appNameTextCtrl->SetValue(fAppContext->GetAppName());
appPathTextCtrl->SetValue(fAppContext->GetAppPath());
appBuildPathTextCtrl->SetValue(fAppContext->GetSaveFolder());
appVersionTextCtrl->SetValue("1.0");
appVersionCodeTextCtrl->SetValue("1");
char uname[256] = {0};
int rc = getlogin_r(uname, sizeof(uname));
std::string package("com.solar2d.");
package.append(uname).append(".");
package.append(fAppContext->GetAppName());
appPackageNameTextCtrl->SetValue(package);
std::string keystorePath(GetStartupPath(NULL));
keystorePath.append("/Resources/debug.keystore");
keystorePathTextCtrl->SetValue(keystorePath);
ReadKeystore(keystorePath, keystorePassword.ToStdString().c_str(), true);
// Get the version code from build.settings
const char kBuildSettings[] = "build.settings";
String filePath(&fAppContext->GetPlatform()->GetAllocator());
fAppContext->GetPlatform()->PathForFile(kBuildSettings, MPlatform::kResourceDir, MPlatform::kTestFileExists, filePath);
lua_State *L = fAppContext->GetRuntime()->VMContext().L();
const char *buildSettingsPath = filePath.GetString();
if (buildSettingsPath && 0 == luaL_loadfile(L, buildSettingsPath) && 0 == lua_pcall(L, 0, 0, 0))
{
lua_getglobal(L, "settings");
if (lua_istable(L, -1))
{
lua_getfield(L, -1, "android");
if (lua_istable(L, -1))
{
lua_getfield(L, -1, "versionCode");
if (lua_isstring(L, -1))
{
const char *versionCode = lua_tostring(L, -1);
appVersionCodeTextCtrl->SetValue(versionCode);
}
lua_pop(L, 1);
}
lua_pop(L, 1);
}
}
}
bool AndroidBuildDialog::ReadKeystore(std::string keystorePath, std::string password, bool showErrors)
{
keystorePasswordValid = false;
ListKeyStore listKeyStore; // uses Java to read keystore
if (listKeyStore.GetAliasList(keystorePath.c_str(), password.c_str()))
{
keystorePasswordValid = true;
if (listKeyStore.GetSize() < 1)
{
if (showErrors)
{
wxMessageDialog *errorDialog = new wxMessageDialog(NULL, wxT("The selected keystore doesn't have any aliases."), wxT("Solar2D Simulator"), wxOK | wxICON_ERROR);
}
}
else
{
// Succeeded and there is at least one alias - add aliases to alias dropdown
keyAliasComboBox->Clear();
for (int i = 0; i < listKeyStore.GetSize(); i++)
{
keyAliasComboBox->Append(listKeyStore.GetAlias(i));
}
keyAliasComboBox->SetSelection(0);
}
}
else // didn't get valid password, or keystore bad format
{
if (showErrors)
{
wxMessageDialog *errorDialog = new wxMessageDialog(NULL, wxT("The selected keystore is either invalid or an incorrect password was entered."), wxT("Solar2D Simulator"), wxOK | wxICON_ERROR);
}
}
return keystorePasswordValid;
}
BEGIN_EVENT_TABLE(AndroidBuildDialog, wxDialog)
EVT_BUTTON(wxID_OPEN, AndroidBuildDialog::OnSelectOutputPathClicked)
EVT_BUTTON(wxID_FILE1, AndroidBuildDialog::OnSelectKeyStorePathClicked)
EVT_BUTTON(wxID_OK, AndroidBuildDialog::OnBuildClicked)
EVT_BUTTON(wxID_CANCEL, AndroidBuildDialog::OnCancelClicked)
END_EVENT_TABLE();
void AndroidBuildDialog::OnSelectOutputPathClicked(wxCommandEvent &event)
{
wxDirDialog openDirDialog(this, _("Choose Output Directory"), GetHomePath(), 0, wxDefaultPosition);
if (openDirDialog.ShowModal() == wxID_OK)
{
appBuildPathTextCtrl->SetValue(openDirDialog.GetPath());
}
}
void AndroidBuildDialog::OnSelectKeyStorePathClicked(wxCommandEvent &event)
{
wxFileDialog openFileDialog(this, _("Choose Keystore Path"), GetHomePath(), wxEmptyString, "KeyStore files |*.keystore", wxFD_OPEN | wxFD_FILE_MUST_EXIST);
if (openFileDialog.ShowModal() == wxID_OK)
{
keystorePathTextCtrl->SetValue(openFileDialog.GetPath());
keystorePassword = "<PASSWORD>";
// this isn't the default "debug.keystore". request the password from the user
if (keystorePathTextCtrl->GetValue().EndsWith("debug.keystore"))
{
wxPasswordEntryDialog *passEntryDialog = new wxPasswordEntryDialog(this, "Please enter the password for this keystore", wxGetPasswordFromUserPromptStr);
if (passEntryDialog->ShowModal() == wxID_OK)
{
keystorePassword = passEntryDialog->GetValue();
}
passEntryDialog->Destroy();
}
ReadKeystore(keystorePathTextCtrl->GetValue().ToStdString(), keystorePassword.ToStdString().c_str(), true);
}
}
int AndroidBuildDialog::fetchBuildResult(AndroidAppPackager* packager, AndroidAppPackagerParams* androidBuilderParams, const std::string& tmp)
{
	// build the app (warning: this is a blocking call)
return packager->Build(androidBuilderParams, tmp.c_str());
}
void AndroidBuildDialog::OnBuildClicked(wxCommandEvent &event)
{
LinuxPlatform *platform = wxGetApp().GetPlatform();
MPlatformServices *service = new LinuxPlatformServices(platform);
Rtt::Runtime *runtimePointer = fAppContext->GetRuntime();
wxString appName(appNameTextCtrl->GetValue());
wxString sourceDir(appPathTextCtrl->GetValue());
wxString outputDir(appBuildPathTextCtrl->GetValue());
wxString appVersion(appVersionTextCtrl->GetValue());
wxString packageName(appPackageNameTextCtrl->GetValue());
wxString appVersionCode(appVersionCodeTextCtrl->GetValue());
wxString keystore(keystorePathTextCtrl->GetValue());
wxString keystoreAlias(keyAliasComboBox->GetValue());
std::string androidTemplate(platform->getInstallDir());
std::string tmp = Rtt_GetSystemTempDirectory();
std::string targetAppStoreName(TargetAndroidAppStore::kGoogle.GetStringId());
const char *identity = "no-identity";
const char *bundleId = "bundleId";
const char *provisionFile = "";
int versionCode = wxAtoi(appVersionCode);
bool installAfterBuild = installToDeviceCheckbox->GetValue() == true;
const TargetDevice::Platform targetPlatform(TargetDevice::Platform::kAndroidPlatform);
bool isDistribution = true;
const char kBuildSettings[] = "build.settings";
Rtt::String buildSettingsPath;
wxMessageDialog *resultDialog = new wxMessageDialog(wxGetApp().GetFrame(), wxEmptyString, wxT("Build Error"), wxOK | wxICON_WARNING);
// setup paths
androidTemplate.append("/Resources");
tmp.append("/CoronaLabs");
AndroidAppPackager packager(*service, androidTemplate.c_str());
bool foundBuildSettings = packager.ReadBuildSettings(sourceDir.c_str());
const char *customBuildId = packager.GetCustomBuildId();
bool checksPassed = foundBuildSettings && !appVersion.IsEmpty() && !appName.IsEmpty()
&& !appVersionCode.IsEmpty() && !packageName.IsEmpty() && versionCode != 0 && keystorePasswordValid;
// pre-build validation
if (!foundBuildSettings)
{
resultDialog->SetMessage(wxT("build.settings file not found in project path."));
}
if (appName.IsEmpty())
{
resultDialog->SetMessage(wxT("App name cannot be empty."));
}
if (appVersion.IsEmpty())
{
resultDialog->SetMessage(wxT("App version cannot be empty."));
}
if (appVersion.StartsWith(".") || appVersion.EndsWith("."))
{
resultDialog->SetMessage(wxT("App version cannot start or end with a period (dot)."));
checksPassed = false;
}
if (appVersionCode.IsEmpty())
{
resultDialog->SetMessage(wxT("App version code cannot be empty."));
}
if (versionCode <= 0)
{
resultDialog->SetMessage(wxT("App version code must be an integer."));
}
if (packageName.IsEmpty())
{
resultDialog->SetMessage(wxT("App package name cannot be empty."));
}
if (!packageName.Contains("."))
{
resultDialog->SetMessage(wxT("App package name must contain at least one period (dot)."));
checksPassed = false;
}
if (packageName.StartsWith(".") || packageName.EndsWith("."))
{
resultDialog->SetMessage(wxT("App package name cannot start or end with a period (dot)."));
checksPassed = false;
}
if (!keystorePasswordValid)
{
resultDialog->SetMessage(wxT("The keystore password is invalid."));
}
// ensure unzip is installed on the users system
if (!wxFileName::Exists("/usr/bin/unzip"))
{
checksPassed = false;
resultDialog->SetMessage(wxT("/usr/bin/unzip not found"));
}
// ensure Solar2DBuilder exists at the correct location
wxString exePath = wxStandardPaths::Get().GetExecutablePath();
size_t k = exePath.find_last_of("/\\");
Rtt_ASSERT(k > 0);
exePath.Remove(k + 1, exePath.size() - k - 1);
wxString solar2DBuilderPath = exePath;
solar2DBuilderPath.append("Solar2DBuilder");
if (!wxFileName::Exists(solar2DBuilderPath))
{
checksPassed = false;
resultDialog->SetMessage(solar2DBuilderPath + " not found");
}
// ensure Resource is not a link
{
wxString resourcesPath = exePath;
resourcesPath.append("Resources");
struct stat buf;
if (lstat(resourcesPath.c_str(), &buf) == 0)
{
if (S_ISLNK(buf.st_mode))
{
checksPassed = false;
resultDialog->SetMessage(resourcesPath + " is a link, it must be regular folder");
}
}
else
{
checksPassed = false;
resultDialog->SetMessage(resourcesPath + " failed to stat");
}
}
outputDir.append('/');
outputDir.append(appName);
outputDir.append(".Android");
// ensure we have write access to the target output directory
if (wxDirExists(outputDir))
{
if (!wxFileName::IsDirWritable(outputDir))
{
resultDialog->SetMessage(wxT("No write access to the selected output directory."));
checksPassed = false;
}
}
else
{
if (!Rtt_MakeDirectory(outputDir))
{
resultDialog->SetMessage(wxT("Failed to create the selected output directory."));
checksPassed = false;
}
}
// checks failed, show failure popup
if (!checksPassed)
{
resultDialog->SetTitle("Invalid Settings");
resultDialog->ShowModal();
return;
}
Rtt_Log("Starting Android build...\n");
// check if a custom build ID has been assigned
if (!Rtt_StringIsEmpty(customBuildId))
{
Rtt_Log("Using custom Build Id %s\n", customBuildId);
}
// set the target app store
if (targetAppStoreComboBox->GetValue().IsSameAs(GOOGLE_PLAY_STORE_TARGET))
{
targetAppStoreName = TargetAndroidAppStore::kGoogle.GetStringId();
}
else if (targetAppStoreComboBox->GetValue().IsSameAs(AMAZON_STORE_TARGET))
{
targetAppStoreName = TargetAndroidAppStore::kAmazon.GetStringId();
}
else if (targetAppStoreComboBox->GetValue().IsSameAs(NO_STORE_TARGET))
{
targetAppStoreName = TargetAndroidAppStore::kNone.GetStringId();
}
AndroidAppPackagerParams androidBuilderParams(
appName.ToStdString().c_str(), appVersion.ToStdString().c_str(), identity, provisionFile,
sourceDir.ToStdString().c_str(), outputDir.ToStdString().c_str(), androidTemplate.c_str(),
targetPlatform, targetAppStoreName.c_str(),
(S32)Rtt::TargetDevice::VersionForPlatform(Rtt::TargetDevice::kAndroidPlatform),
customBuildId, NULL,
packageName.ToStdString().c_str(), isDistribution, keystore.ToStdString().c_str(), keystorePassword.ToStdString().c_str(), keystoreAlias.ToStdString().c_str(), keystorePassword.ToStdString().c_str()/*alias_pwd.c_str()*/, versionCode);
// select build template
fAppContext->GetPlatform()->PathForFile(kBuildSettings, Rtt::MPlatform::kResourceDir, Rtt::MPlatform::kTestFileExists, buildSettingsPath);
androidBuilderParams.SetBuildSettingsPath(buildSettingsPath.GetString());
platform->SetActivityIndicator(true);
SetCursor(wxCURSOR_WAIT); // cursor on dialog window
	// the only goal of using async here is to keep showing the wait cursor on the build dialog window
std::future<int> futureBuildResult = std::async(std::launch::async, fetchBuildResult, &packager, &androidBuilderParams, tmp);
// wait for result
while (futureBuildResult.wait_for(std::chrono::milliseconds(100)) != std::future_status::ready)
{
wxYield();
}
int buildResult = futureBuildResult.get();
SetCursor(wxCURSOR_ARROW); // restore cursor
platform->SetActivityIndicator(false);
if (buildResult == 0)
{
Rtt_Log("Android build succeeded.\n");
}
else
{
Rtt_Log("Android build failed. Check the log for more details.\n");
}
EndModal(wxID_OK);
wxGetApp().GetFrame()->RemoveSuspendedPanel();
int dialogResultFlags = buildResult == 0 ? wxOK | wxICON_INFORMATION : wxOK | wxICON_ERROR;
resultDialog->SetTitle("Build Result");
resultDialog->SetMessage(buildResult == 0 ? "Your application was built successfully." : "Failed to build the application.\nSee the console for more info.");
resultDialog->SetMessageDialogStyle(dialogResultFlags);
resultDialog->ShowModal();
wxYield();
// install after build
if (buildResult == 0 && installAfterBuild)
{
const char *adbPath = "/opt/Solar2D/Android/platform-tools/adb";
if (wxFileName::Exists(adbPath))
{
std::string cmd(adbPath);
cmd.append(" install -r \"");
cmd.append(outputDir.ToStdString().c_str());
cmd.append("/").append(appName.ToStdString().c_str());
cmd.append(".apk").append("\"");
wxExecute(cmd);
}
else
{
Rtt_LogException("adb not found at the expected location of %s\n. Not copying to device as a result.\n", adbPath);
}
}
}
void AndroidBuildDialog::OnCancelClicked(wxCommandEvent &event)
{
wxGetApp().GetFrame()->RemoveSuspendedPanel();
EndModal(wxID_CLOSE);
}
};
| 8,023 |
571 |
<gh_stars>100-1000
/**
* @file ellipse.cpp
* @brief mex interface for cv::ellipse
* @ingroup imgproc
* @author <NAME>
* @date 2012
*/
#include "mexopencv.hpp"
#include "opencv2/imgproc.hpp"
using namespace std;
using namespace cv;
/**
* Main entry called from Matlab
* @param nlhs number of left-hand-side arguments
* @param plhs pointers to mxArrays in the left-hand-side
* @param nrhs number of right-hand-side arguments
* @param prhs pointers to mxArrays in the right-hand-side
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
// Check the number of arguments
nargchk(nrhs>=2 && nlhs<=1);
// Argument vector
vector<MxArray> rhs(prhs, prhs+nrhs);
// cv::ellipse has two overloaded variants
bool rrect_variant = rhs[1].isStruct();
nargchk(rrect_variant ? ((nrhs%2)==0) : (nrhs>=3 && (nrhs%2)==1));
// Option processing
double angle = 0;
double startAngle = 0;
double endAngle = 360;
Scalar color;
vector<Vec4d> colors;
int thickness = 1;
int lineType = cv::LINE_8;
int shift = 0;
for (int i=(rrect_variant ? 2 : 3); i<nrhs; i+=2) {
string key(rhs[i].toString());
if (key == "Angle" && !rrect_variant)
angle = rhs[i+1].toDouble();
else if (key == "StartAngle" && !rrect_variant)
startAngle = rhs[i+1].toDouble();
else if (key == "EndAngle" && !rrect_variant)
endAngle = rhs[i+1].toDouble();
else if (key == "Color")
color = (rhs[i+1].isChar()) ?
ColorType[rhs[i+1].toString()] : rhs[i+1].toScalar();
else if (key == "Colors")
colors = MxArrayToVectorVec<double,4>(rhs[i+1]);
else if (key == "Thickness")
thickness = (rhs[i+1].isChar()) ?
ThicknessType[rhs[i+1].toString()] : rhs[i+1].toInt();
else if (key == "LineType")
lineType = (rhs[i+1].isChar()) ?
LineType[rhs[i+1].toString()] : rhs[i+1].toInt();
else if (key == "Shift" && !rrect_variant)
shift = rhs[i+1].toInt();
else
mexErrMsgIdAndTxt("mexopencv:error",
"Unrecognized option %s", key.c_str());
}
// Process
Mat img(rhs[0].toMat());
if (!rrect_variant) {
Point center(rhs[1].toPoint());
Size axes(rhs[2].toSize());
ellipse(img, center, axes, angle, startAngle, endAngle,
color, thickness, lineType, shift);
}
else {
if (rhs[1].numel() == 1) {
RotatedRect box(rhs[1].toRotatedRect());
ellipse(img, box, color, thickness, lineType);
}
else {
vector<RotatedRect> box(rhs[1].toVector<RotatedRect>());
if (!colors.empty() && colors.size() != box.size())
mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch");
for (size_t i = 0; i < box.size(); ++i)
ellipse(img, box[i],
(colors.empty() ? color : Scalar(colors[i])),
thickness, lineType);
}
}
plhs[0] = MxArray(img);
}
| 1,541 |
2,141 |
<reponame>MarcoJHB/ploomber
import os
from pathlib import Path
import pytest
from ploomber.util import default
from ploomber.exceptions import DAGSpecInvalidError
def create_package_with_name(name, base='.'):
Path('setup.py').touch()
parent = Path(base, 'src', name)
parent.mkdir(parents=True)
pkg_location = (parent / 'pipeline.yaml')
pkg_location.touch()
return pkg_location
@pytest.fixture
def pkg_location():
location = create_package_with_name('package_a')
return str(location)
def test_package_location(tmp_directory, pkg_location):
assert default._package_location(root_path='.') == str(
Path('src', 'package_a', 'pipeline.yaml'))
def test_package_location_with_root_path(tmp_directory):
create_package_with_name('package_a', 'some-dir')
assert default._package_location(root_path='some-dir') == str(
Path('some-dir', 'src', 'package_a', 'pipeline.yaml'))
def test_package_location_with_custom_name(tmp_directory):
create_package_with_name('package_b')
assert default._package_location(root_path='.') == str(
Path('src', 'package_b', 'pipeline.yaml'))
def test_no_package_location(tmp_directory):
assert default._package_location(root_path='.') is None
def test_package_location_warns_if_more_than_one(tmp_directory):
create_package_with_name('package_a')
create_package_with_name('package_b')
with pytest.warns(UserWarning) as record:
out = default._package_location(root_path='.')
assert len(record) == 1
assert 'Found more than one package' in record[0].message.args[0]
    # must return the first one in alphabetical order
assert out == str(Path('src', 'package_a', 'pipeline.yaml'))
def test_entry_point_env_var(monkeypatch, tmp_directory):
Path('pipeline.yaml').touch()
Path('pipeline-custom.yaml').touch()
monkeypatch.setenv('ENTRY_POINT', 'pipeline-custom.yaml')
assert default.entry_point() == 'pipeline-custom.yaml'
def test_entry_point_env_var_in_pkg(monkeypatch, tmp_directory, pkg_location):
Path('src', 'package_a', 'pipeline-custom.yaml').touch()
monkeypatch.setenv('ENTRY_POINT', 'pipeline-custom.yaml')
assert default.entry_point() == str(
Path('src', 'package_a', 'pipeline-custom.yaml'))
def test_error_if_env_var_contains_directories(monkeypatch):
monkeypatch.setenv('ENTRY_POINT', 'path/to/pipeline.yaml')
with pytest.raises(ValueError) as excinfo:
default.entry_point()
assert 'must be a filename' in str(excinfo.value)
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_pkg_location(tmp_directory, pkg_location, method):
assert getattr(default, method)() == str(pkg_location)
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_pkg_location_ignore_egg_info(tmp_directory, method):
Path('setup.py').touch()
for pkg in ['package_a.egg-info', 'package_b']:
parent = Path('src', pkg)
parent.mkdir(parents=True)
pkg_location = (parent / 'pipeline.yaml')
pkg_location.touch()
assert getattr(default,
method)() == str(Path('src', 'package_b', 'pipeline.yaml'))
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_pkg_location_multiple_pkgs(tmp_directory, method):
Path('setup.py').touch()
for pkg in ['package_a', 'package_b']:
parent = Path('src', pkg)
parent.mkdir(parents=True)
pkg_location = (parent / 'pipeline.yaml')
pkg_location.touch()
assert getattr(default,
method)() == str(Path('src', 'package_a', 'pipeline.yaml'))
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_error_if_not_exists(method):
with pytest.raises(DAGSpecInvalidError):
getattr(default, method)()
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_in_parent_folder(tmp_directory, method):
Path('dir').mkdir()
Path('pipeline.yaml').touch()
os.chdir('dir')
assert getattr(default, method)() == str(Path('..', 'pipeline.yaml'))
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_in_src_while_in_sibling_folder(tmp_directory, method):
Path('setup.py').touch()
pkg = Path('src', 'package')
pkg.mkdir(parents=True)
(pkg / 'pipeline.yaml').touch()
Path('tests').mkdir()
os.chdir('tests')
assert getattr(default, method)() == str(
Path('..', 'src', 'package', 'pipeline.yaml'))
@pytest.mark.parametrize('method', ['entry_point', 'entry_point_with_name'])
def test_entry_point_from_root_path(tmp_directory, method):
Path('dir').mkdir()
Path('dir', 'pipeline.yaml').touch()
assert getattr(default, method)(root_path='dir')
def test_entry_point_with_name(tmp_directory):
Path('pipeline.yaml').touch()
Path('pipeline.train.yaml').touch()
assert default.entry_point_with_name(
name='pipeline.train.yaml') == 'pipeline.train.yaml'
def test_entry_point_with_name_uses_pipeline_yaml_if_src_one_is_missing(
        tmp_directory):
Path('setup.py').touch()
Path('pipeline.yaml').touch()
assert default.entry_point_with_name() == 'pipeline.yaml'
@pytest.mark.parametrize('spec_name, env_name', [
['pipeline.yaml', 'env.yaml'],
['pipeline.train.yaml', 'env.yaml'],
])
def test_path_to_env_local(tmp_directory, spec_name, env_name):
Path(env_name).touch()
Path('dir').mkdir()
Path('dir', spec_name).touch()
assert default.path_to_env_from_spec(Path('dir', spec_name)) == str(
Path(env_name).resolve())
def test_path_to_env_loads_file_with_same_name(tmp_directory):
Path('env.train.yaml').touch()
Path('dir').mkdir()
Path('dir', 'pipeline.train.yaml').touch()
assert default.path_to_env_from_spec(
Path('dir',
'pipeline.train.yaml')) == str(Path('env.train.yaml').resolve())
def test_path_to_env_prefers_file_wih_name_over_plain_env_yaml(tmp_directory):
Path('env.train.yaml').touch()
Path('env.yaml').touch()
Path('dir').mkdir()
Path('dir', 'pipeline.train.yaml').touch()
assert default.path_to_env_from_spec(
Path('dir',
'pipeline.train.yaml')) == str(Path('env.train.yaml').resolve())
def test_path_to_env_prefers_env_variable(tmp_directory, monkeypatch):
monkeypatch.setenv('PLOOMBER_ENV_FILENAME', 'env.local.yaml')
Path('env.local.yaml').touch()
Path('env.train.yaml').touch()
Path('env.yaml').touch()
Path('dir').mkdir()
Path('dir', 'pipeline.train.yaml').touch()
assert default.path_to_env_from_spec(
Path('dir',
'pipeline.train.yaml')) == str(Path('env.local.yaml').resolve())
def test_error_if_env_var_has_directories(monkeypatch):
monkeypatch.setenv('PLOOMBER_ENV_FILENAME', 'path/to/env.local.yaml')
with pytest.raises(ValueError):
default.path_to_env_from_spec('pipeline.yaml')
def test_error_if_env_var_file_missing(monkeypatch):
monkeypatch.setenv('PLOOMBER_ENV_FILENAME', 'env.local.yaml')
with pytest.raises(FileNotFoundError):
default.path_to_env_from_spec('pipeline.yaml')
def test_path_to_parent_sibling(tmp_directory):
Path('dir').mkdir()
Path('dir', 'env.yaml').touch()
assert default.path_to_env_from_spec('dir/pipeline.yaml') == str(
Path('dir', 'env.yaml').resolve())
@pytest.mark.parametrize('arg', ['dir/pipeline.yaml', None])
def test_path_to_env_none(tmp_directory, arg):
Path('dir').mkdir()
assert default.path_to_env_from_spec(arg) is None
def test_path_to_env_error_if_no_extension():
with pytest.raises(ValueError) as excinfo:
default.path_to_env_from_spec('pipeline')
expected = ("Expected path to spec to have a file extension "
"but got: 'pipeline'")
assert str(excinfo.value) == expected
def test_path_to_env_error_if_dir(tmp_directory):
Path('pipeline.yaml').mkdir()
with pytest.raises(ValueError) as excinfo:
default.path_to_env_from_spec('pipeline.yaml')
expected = ("Expected path to spec 'pipeline.yaml' to be a file "
"but got a directory instead")
assert str(excinfo.value) == expected
def test_finds_pipeline_yaml(tmp_directory):
expected = Path(tmp_directory).resolve()
pip = Path('pipeline.yaml').resolve()
pip.touch()
dir_ = Path('path', 'to', 'dir')
dir_.mkdir(parents=True)
os.chdir(dir_)
assert expected == default.find_root_recursively().resolve()
def test_finds_pipeline_with_name(tmp_directory):
expected = Path(tmp_directory).resolve()
pip = Path('pipeline.serve.yaml').resolve()
pip.touch()
dir_ = Path('path', 'to', 'dir')
dir_.mkdir(parents=True)
os.chdir(dir_)
assert expected == default.find_root_recursively(
filename='pipeline.serve.yaml').resolve()
def test_finds_setup_py(tmp_directory):
expected = Path(tmp_directory).resolve()
pip = Path('setup.py').resolve()
pip.touch()
Path('src', 'package').mkdir(parents=True)
Path('src', 'package', 'pipeline.yaml').touch()
dir_ = Path('path', 'to', 'dir')
dir_.mkdir(parents=True)
os.chdir(dir_)
assert expected == default.find_root_recursively().resolve()
def test_ignores_src_package_pipeline_if_setup_py(tmp_directory):
expected = Path(tmp_directory).resolve()
pip = Path('setup.py').resolve()
pip.touch()
dir_ = Path('src', 'package')
dir_.mkdir(parents=True)
os.chdir(dir_)
Path('pipeline.yaml').touch()
assert expected == default.find_root_recursively().resolve()
def test_error_if_no_pipeline_yaml_and_no_setup_py(tmp_directory):
with pytest.raises(DAGSpecInvalidError) as excinfo:
default.find_root_recursively()
assert ('Looked recursively for a setup.py or '
'pipeline.yaml in parent folders') in str(excinfo.value)
def test_error_if_setup_py_but_no_src_package_pipeline(tmp_directory):
pip = Path('setup.py').resolve()
pip.touch()
dir_ = Path('path', 'to', 'dir')
dir_.mkdir(parents=True)
os.chdir(dir_)
with pytest.raises(DAGSpecInvalidError) as excinfo:
default.find_root_recursively()
assert 'expected to find a pipeline.yaml file' in str(excinfo.value)
def test_error_if_both_setup_py_and_pipeline_yaml_exist(tmp_directory):
Path('setup.py').touch()
Path('pipeline.yaml').touch()
Path('src', 'package').mkdir(parents=True)
Path('src', 'package', 'pipeline.yaml').touch()
dir_ = Path('path', 'to', 'dir')
dir_.mkdir(parents=True)
os.chdir(dir_)
with pytest.raises(DAGSpecInvalidError):
default.find_root_recursively()
def test_find_root_recursively_error_pipeline_yaml_under_src_but_no_setup_py(
tmp_directory):
parent = Path('src', 'package')
parent.mkdir(parents=True)
(parent / 'pipeline.yaml').touch()
os.chdir(parent)
with pytest.raises(DAGSpecInvalidError) as excinfo:
default.find_root_recursively()
assert 'Invalid project layout' in str(excinfo.value)
assert str(Path('src', 'package')) in str(excinfo.value)
@pytest.mark.parametrize('root_location, working_dir, to_create, filename', [
['some/dir/pipeline.yaml', 'some/dir', 'some/pipeline.yaml', None],
[
'some/dir/pipeline.x.yaml', 'some/dir', 'some/pipeline.x.yaml',
'pipeline.x.yaml'
],
[
'some/dir/another/dir/pipeline.yaml', 'some/dir/another/dir',
'some/pipeline.yaml', None
],
])
def test_warns_if_other_pipeline_yaml_as_parents_of_root_path(
tmp_directory, root_location, working_dir, to_create, filename):
Path(root_location).parent.mkdir(parents=True, exist_ok=True)
pip = Path(root_location).resolve()
pip.touch()
to_create = Path(to_create)
to_create.parent.mkdir(parents=True, exist_ok=True)
to_create.touch()
os.chdir(working_dir)
with pytest.warns(UserWarning) as record:
default.find_root_recursively(filename=filename)
assert len(record) == 1
assert 'Found project root with filename' in record[0].message.args[0]
def test_doesnt_warn_if_pipeline_yaml_in_the_same_directory(tmp_directory):
Path('pipeline.yaml').touch()
Path('pipeline.serve.yaml').touch()
with pytest.warns(None) as record:
default.find_root_recursively()
assert not len(record)
def test_doesnt_warn_if_there_arent_nested_pipeline_yaml(tmp_directory):
p = Path('path', 'to', 'dir', 'pipeline.yaml')
p.parent.mkdir(parents=True)
p.touch()
os.chdir(p.parent)
with pytest.warns(None) as record:
default.find_root_recursively()
assert not len(record)
def test_returns_spec_location_if_setup_py_is_in_a_parent_folder(
tmp_directory):
p = Path('path', 'to', 'dir', 'pipeline.yaml')
p.parent.mkdir(parents=True)
p = p.resolve()
p.touch()
Path('setup.py').touch()
os.chdir(p.parent)
assert Path(
default.find_root_recursively()).resolve() == p.parent.resolve()
def test_error_if_filename_contains_directories():
with pytest.raises(ValueError) as excinfo:
default.find_root_recursively(filename='a/b')
assert "'a/b' should be a filename" in str(excinfo.value)
@pytest.mark.parametrize('to_create, to_move', [
[
['setup.py', 'src/my_package/pipeline.yaml'],
'.',
],
])
def test_find_package_name(tmp_directory, to_create, to_move):
for f in to_create:
Path(f).parent.mkdir(exist_ok=True, parents=True)
if f.endswith('/'):
Path(f).mkdir()
else:
Path(f).touch()
os.chdir(to_move)
assert default.find_package_name() == 'my_package'
def test_error_if_no_project_root(tmp_directory):
Path('setup.py').touch()
with pytest.raises(DAGSpecInvalidError) as excinfo:
default.find_package_name()
expected = "Failed to determine project root"
assert expected in str(excinfo.value)
def test_error_if_no_package(tmp_directory):
Path('pipeline.yaml').touch()
with pytest.raises(ValueError) as excinfo:
default.find_package_name()
expected = "Could not find a valid package."
assert expected in str(excinfo.value)
@pytest.mark.parametrize('filename, name', [
['pipeline.yaml', None],
['pipeline.serve.yaml', 'serve'],
[Path('src', 'my_pkg', 'pipeline.yaml'), None],
[Path('src', 'my_pkg', 'pipeline.serve.yaml'), 'serve'],
])
def test_entry_point_relative(tmp_directory, filename, name):
Path(filename).parent.mkdir(parents=True, exist_ok=True)
Path(filename).touch()
assert default.entry_point_relative(name=name) == str(filename)
def test_entry_point_relative_error_if_both_exist(tmp_directory):
Path('pipeline.yaml').touch()
dir_ = Path('src', 'some_pkg')
dir_.mkdir(parents=True)
(dir_ / 'pipeline.yaml').touch()
with pytest.raises(DAGSpecInvalidError):
default.entry_point_relative()
def test_entry_point_relative_error_if_doesnt_exist(tmp_directory):
with pytest.raises(DAGSpecInvalidError):
default.entry_point_relative()
@pytest.mark.parametrize('arg, expected', [
['env.x.yaml', 'x'],
['env.x.y.yaml', 'x'],
['env.yaml', None],
['env', None],
])
def test_extract_name(arg, expected):
assert default.extract_name(arg) == expected
| 6,389 |
669 |
<reponame>Dig-Doug/runtime<gh_stars>100-1000
/*
* Copyright 2020 The TensorFlow Runtime Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Compact pointer to HostContext
//
// This file declares HostContextPtr, a compact pointer representation for
// HostContext.
#ifndef TFRT_HOST_CONTEXT_HOST_CONTEXT_PTR_H_
#define TFRT_HOST_CONTEXT_HOST_CONTEXT_PTR_H_
#include <array>
#include <cassert>
#include <cstdint>
#include "tfrt/support/mutex.h"
namespace tfrt {
class HostContext;
class HostContextPtr;
// HostContextPool manages all the live HostContext instances. It limits the
// number of live instances to 256 to allow referencing a HostContext with a
// 1-byte int, which keeps sizeof(HostContextPtr) at 1 byte.
class HostContextPool {
public:
static constexpr int kCompacity = 256;
static HostContextPool& instance() {
static HostContextPool* pool = new HostContextPool();
return *pool;
}
HostContextPtr AllocateForHostContext(HostContext* host);
void FreeHostContext(HostContext* host);
HostContext* GetHostContextByIndex(int index) const;
private:
HostContextPool() = default;
mutable mutex mutex_;
std::array<HostContext*, kCompacity> all_host_contexts_;
};
// HostContextPtr implements a compact pointer for a HostContext by storing the
// instance index of the HostContext object. It is intended to be used in places
// where saving the memory space is important, otherwise, HostContext* should be
// used.
class HostContextPtr {
public:
// Implicitly convert HostContext* to HostContextPtr.
HostContextPtr(HostContext* host); // NOLINT
HostContext* operator->() const { return get(); }
HostContext& operator*() const { return *get(); }
HostContext* get() const;
private:
friend class HostContextPool;
friend class ReadyChain;
explicit HostContextPtr(int index) : index_{static_cast<uint8_t>(index)} {
assert(index < HostContextPool::kCompacity);
}
uint8_t index() const { return index_; }
const uint8_t index_ = 0;
};
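// Illustrative usage (hypothetical caller code, not part of the original file):
//   HostContext* host = ...;        // obtained from the runtime
//   HostContextPtr ptr = host;      // stores a 1-byte pool index, not the pointer
//   HostContext* same = ptr.get();  // resolved via HostContextPool
//   static_assert(sizeof(HostContextPtr) == 1, "compact by design");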
} // namespace tfrt
#endif // TFRT_HOST_CONTEXT_HOST_CONTEXT_PTR_H_
| 780 |
45,293 |
<gh_stars>1000+
package test.Boo.SubBoo;
public class C {
public interface Nested {}
}
| 34 |
8,966 |
<reponame>tkmikan/pwntools
#ifndef _STRING_H
#define _STRING_H
#include <sys/cdefs.h>
#include <sys/types.h>
__BEGIN_DECLS
__readmem__(2) __writemem__(1)
char *strcpy(char* __restrict__ dest, const char* __restrict__ src) __THROW __nonnull((1,2));
__readmemsz__(2,4) __writememsz__(1,4)
void *memccpy(void* __restrict__ dest, const void* __restrict__ src, int c, size_t n) __THROW __nonnull((1,2));
__readmemsz__(2,3) __writememsz__(1,3)
void *memmove(void* dest, const void *src, size_t n) __THROW __nonnull((1,2));
__readmemsz__(1,3) __readmemsz__(2,3)
int memccmp(const void* s1, const void* s2, int c, size_t n) __THROW __pure __nonnull((1,2));
__writememsz__(1,3)
void* memset(void* s, int c, size_t n) __THROW __nonnull((1));
__readmemsz__(1,3) __readmemsz__(2,3)
int memcmp(const void* s1, const void* s2, size_t n) __THROW __pure __nonnull((1,2));
__writememsz__(1,3) __readmemsz__(2,3)
void* memcpy(void* __restrict__ dest, const void* __restrict__ src, size_t n) __THROW __nonnull((1,2));
__writememsz__(1,3) __readmem__(2)
char *strncpy(char* __restrict__ dest, const char* __restrict__ src, size_t n) __THROW __nonnull((1,2));
__readmemsz__(1,3) __readmemsz__(2,3)
int strncmp(const char* s1, const char* s2, size_t n) __THROW __pure __nonnull((1,2));
__writemem__(1) __readmem__(2)
char *strcat(char* __restrict__ dest, const char* __restrict__ src) __THROW __nonnull((1,2));
__writememsz__(1,3) __readmem__(2)
char *strncat(char* __restrict__ dest, const char* __restrict__ src, size_t n) __THROW __nonnull((1,2));
__readmem__(1) __readmem__(2)
int strcmp(const char *s1, const char *s2) __THROW __pure __nonnull((1,2));
__readmem__(1)
size_t strlen(const char *s) __THROW __pure __nonnull((1));
__readmemsz__(1,2)
size_t strnlen(const char *s,size_t maxlen) __THROW __pure __nonnull((1));
#ifdef _GNU_SOURCE
__readmem__(1) __readmem__(2)
int strverscmp(const char* s1,const char* s2) __THROW __pure __nonnull((1,2));
#endif
__readmem__(1) __readmem__(2)
char *strstr(const char *haystack, const char *needle) __THROW __pure __nonnull((1,2));
__readmem__(1)
char *strdup(const char *s) __THROW __attribute_malloc__ __nonnull((1));
__readmem__(1)
char *strchr(const char *s, int c) __THROW __pure __nonnull((1));
__readmem__(1)
char *strrchr(const char *s, int c) __THROW __pure __nonnull((1));
__readmem__(1) __readmem__(2)
size_t strspn(const char *s, const char *_accept) __THROW __nonnull((1,2));
__readmem__(1) __readmem__(2)
size_t strcspn(const char *s, const char *reject) __THROW __nonnull((1,2));
__readmem__(1) __readmem__(2)
char *strpbrk(const char *s, const char *_accept) __THROW __nonnull((1,2));
__readmem__(1) __readmem__(2)
char *strsep(char ** __restrict__ stringp, const char * __restrict__ delim) __THROW __nonnull((1,2));
__readmemsz__(1,3)
void* memchr(const void *s, int c, size_t n) __THROW __pure __nonnull((1));
#ifdef _GNU_SOURCE
__readmemsz__(1,3)
void* memrchr(const void *s, int c, size_t n) __THROW __pure __nonnull((1));
#endif
/* I would like to make this const, but <NAME> points out it has to
* be char* :-( */
char *strerror(int errnum) __THROW __attribute_const__;
/* work around b0rken GNU crapware like tar 1.13.19 */
#define strerror strerror
__writememsz__(2,3)
int strerror_r(int errnum,char* buf,size_t n) __THROW __attribute_dontuse__;
#ifdef _GNU_SOURCE
char *strsignal(int signum) __THROW __attribute_const__;
__readmemsz__(1,2) __readmemsz__(3,4)
void *memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) __THROW __nonnull((1,3));
__writememsz__(1,3) __readmemsz__(2,3)
void* mempcpy(void* __restrict__ dest,const void* __restrict__ src,size_t n) __THROW __nonnull((1,2));
__readmemsz__(1,2)
char *strndup(const char *s,size_t n) __THROW __attribute_malloc__ __nonnull((1));
#define strdupa(s) ({ const char* tmp=s; size_t l=strlen(tmp)+1; char* x=alloca(l); memcpy(x,tmp,l); })
#define strndupa(s,n) ({ const char* tmp=s; const char* y=memchr(tmp,0,(n)); size_t l=y?y-tmp:n; char* x=alloca(l+1); x[l]=0; memcpy(x,tmp,l); })
#endif
__readwritemem__(1)
char *strtok(char * __restrict__ s, const char * __restrict__ delim) __THROW __nonnull((2));
__readwritemem__(1)
char *strtok_r(char * __restrict__ s, const char * __restrict__ delim, char ** __restrict__ ptrptr) __THROW __nonnull((2,3));
__writememsz__(1,3) __readmemsz__(2,3)
size_t strlcpy(char * __restrict__ dst, const char * __restrict__ src, size_t size) __THROW __nonnull((1,2));
__writememsz__(1,3) __readmem__(2)
size_t strlcat(char * __restrict__ dst, const char * __restrict__ src, size_t size) __THROW __nonnull((1,2));
__readmem__(1) __readmem__(2)
int strcoll(const char *s1, const char *s2) __THROW __nonnull((1,2));
__writememsz__(1,3) __readmemsz__(2,3)
size_t strxfrm(char *dest, const char * __restrict__ src, size_t n) __THROW __nonnull((1,2));
#ifdef _BSD_SOURCE
#include <strings.h>
#endif
__writemem__(1) __readmem__(2)
char* stpcpy(char * __restrict__ dest, const char * __restrict__ src) __THROW __nonnull((1,2));
__writememsz__(1,3) __readmemsz__(2,3)
char* stpncpy(char* __restrict__ dest, const char* __restrict__ src, size_t n) __THROW __nonnull((1,2));
#ifdef _GNU_SOURCE
int ffsl(long i) __THROW __attribute_const__;
int ffsll(long long i) __THROW __attribute_const__;
#endif
/* This is an OpenBSD extension that acts like bzero but is hopefully
* not removed from by the compiler's dead store optimization pass.
* It is meant for scrubbing crypto keys and passwords from memory after
* use, so they don't show up in swap files or core dumps. */
__writememsz__(1,2)
void explicit_bzero(void*, size_t) __THROW __dontinline__;
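/* Illustrative use (hypothetical key buffer and helper, not part of the
 * original header):
 *   char key[32];
 *   derive_session_key(key, sizeof(key));   // hypothetical helper
 *   ... use the key ...
 *   explicit_bzero(key, sizeof(key));       // wipe it once no longer needed
 */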
/* More OpenBSD extensions. These are for comparing passwords and hashes
* without leaking timing information on how long the common prefix was.
* The comparison always compares all the bytes, even if there is a
* mismatch early on. */
__readmemsz__(1,3) __readmemsz__(2,3)
int timingsafe_memcmp(const void* s1, const void* s2, size_t n) __THROW __nonnull((1,2)) __dontinline__;
__readmemsz__(1,3) __readmemsz__(2,3)
int timingsafe_bcmp(const void* s1, const void* s2, size_t n) __THROW __nonnull((1,2)) __dontinline__;
/* NetBSD has its own extension that is happened to be mentioned in the
* Linux man page for memcmp, so we support it, too */
__readmemsz__(1,3) __readmemsz__(2,3)
int consttime_memequal(void *b1, void *b2, size_t len) __THROW __nonnull((1,2)) __dontinline__;
/* NetBSD also has its own extension for memset :-( */
__writememsz__(1,3)
void* explicit_memset(void *b, int c, size_t len) __THROW __nonnull((1)) __dontinline__;
__END_DECLS
#endif
| 2,780 |
14,668 |
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_REPORTING_STORAGE_STORAGE_QUEUE_H_
#define COMPONENTS_REPORTING_STORAGE_STORAGE_QUEUE_H_
#include <list>
#include <map>
#include <memory>
#include <string>
#include "base/callback.h"
#include "base/containers/flat_set.h"
#include "base/files/file.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
#include "base/memory/ref_counted_delete_on_sequence.h"
#include "base/memory/scoped_refptr.h"
#include "base/strings/string_piece.h"
#include "base/task/sequenced_task_runner.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "components/reporting/compression/compression_module.h"
#include "components/reporting/encryption/encryption_module_interface.h"
#include "components/reporting/proto/synced/record.pb.h"
#include "components/reporting/storage/storage_configuration.h"
#include "components/reporting/storage/storage_uploader_interface.h"
#include "components/reporting/util/status.h"
#include "components/reporting/util/statusor.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace reporting {
namespace test {
// Storage Queue operation kind used to associate operations with failures for
// testing purposes.
enum class StorageQueueOperationKind {
kReadBlock,
kWriteBlock,
kWriteMetadata
};
} // namespace test
// Storage queue represents a single queue of data to be collected and stored
// persistently. It allows adding whole data records as necessary, flushing
// previously collected records, and confirming records up to a certain
// sequencing id so they can be eliminated.
class StorageQueue : public base::RefCountedDeleteOnSequence<StorageQueue> {
public:
// Callback type for UploadInterface provider for this queue.
using AsyncStartUploaderCb = base::RepeatingCallback<void(
UploaderInterface::UploadReason,
UploaderInterface::UploaderInterfaceResultCb)>;
// Creates StorageQueue instance with the specified options, and returns it
// with the |completion_cb| callback. |async_start_upload_cb| is a factory
// callback that instantiates UploaderInterface every time the queue starts
// uploading records - periodically or immediately after Write (and in the
// near future - upon explicit Flush request).
static void Create(
const QueueOptions& options,
AsyncStartUploaderCb async_start_upload_cb,
scoped_refptr<EncryptionModuleInterface> encryption_module,
scoped_refptr<CompressionModule> compression_module,
base::OnceCallback<void(StatusOr<scoped_refptr<StorageQueue>>)>
completion_cb);
// Wraps and serializes Record (taking ownership of it), encrypts and writes
// the resulting blob into the StorageQueue (the last file of it) with the
// next sequencing id assigned. The write is a non-blocking operation -
// caller can "fire and forget" it (|completion_cb| allows to verify that
// record has been successfully enqueued). If file is going to become too
// large, it is closed and new file is created.
// Helper methods: AssignLastFile, WriteHeaderAndBlock, OpenNewWriteableFile,
// WriteMetadata, DeleteOutdatedMetadata.
void Write(Record record, base::OnceCallback<void(Status)> completion_cb);
// Confirms acceptance of the records up to |sequencing_id| (inclusively).
// All records with sequencing ids <= this one can be removed from
// the StorageQueue, and can no longer be uploaded.
// If |force| is false (which is used in most cases), |sequencing_id| is
// only accepted if no higher ids were confirmed before; otherwise it is
// accepted unconditionally.
// Helper methods: RemoveConfirmedData.
void Confirm(absl::optional<int64_t> sequencing_id,
bool force,
base::OnceCallback<void(Status)> completion_cb);
// Initiates upload of collected records. Called periodically by timer, based
// on upload_period of the queue, and can also be called explicitly - for
// a queue with an infinite or very large upload period. Multiple |Flush|
// calls can safely run in parallel.
// Starts by calling |async_start_upload_cb_| that instantiates
// |UploaderInterface uploader|. Then repeatedly reads EncryptedRecord(s) one
// by one from the StorageQueue starting from |first_sequencing_id_|, handing
// each one over to |uploader|->ProcessRecord (keeping ownership of the
// buffer) and resuming after result callback returns 'true'. Only files that
// have been closed are included in reading; |Upload| makes sure to close the
// last writeable file and create a new one before starting to send records to
// the |uploader|. If some records are not available or corrupt,
// |uploader|->ProcessGap is called. If the monotonic order of sequencing is
// broken, INTERNAL error Status is reported. |Upload| can be stopped after
// any record by returning 'false' to |processed_cb| callback - in that case
// |Upload| will behave as if the end of data has been reached. While one or
// more |Upload|s are active, files can be added to the StorageQueue but
// cannot be deleted. If processing of the record takes significant time,
// |uploader| implementation should be offset to another thread to avoid
// locking StorageQueue. Helper methods: SwitchLastFileIfNotEmpty,
// CollectFilesForUpload.
void Flush();
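  // Minimal usage sketch (illustrative only; assumes suitable options, modules
  // and callbacks exist - not part of the original header):
  //   StorageQueue::Create(
  //       options, async_start_upload_cb, encryption_module, compression_module,
  //       base::BindOnce([](StatusOr<scoped_refptr<StorageQueue>> result) {
  //         scoped_refptr<StorageQueue> queue = std::move(result.ValueOrDie());
  //         queue->Write(std::move(record), write_done_cb);
  //         queue->Flush();  // or rely on the periodic upload timer
  //       }));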
// Test only: makes specified records fail on specified operation kind.
void TestInjectErrorsForOperation(
const test::StorageQueueOperationKind operation_kind,
std::initializer_list<int64_t> sequencing_ids);
// Access queue options.
const QueueOptions& options() const { return options_; }
StorageQueue(const StorageQueue& other) = delete;
StorageQueue& operator=(const StorageQueue& other) = delete;
protected:
virtual ~StorageQueue();
private:
friend class base::RefCountedDeleteOnSequence<StorageQueue>;
friend class base::DeleteHelper<StorageQueue>;
// Private data structures for Read and Write (need access to the private
// StorageQueue fields).
class WriteContext;
class ReadContext;
class ConfirmContext;
// Private envelope class for single file in a StorageQueue.
class SingleFile : public base::RefCountedThreadSafe<SingleFile> {
public:
    // Factory method creates a SingleFile object for an existing
    // or new file (of zero size). In case of any error (e.g. insufficient disk
    // space) returns an error status.
static StatusOr<scoped_refptr<SingleFile>> Create(
const base::FilePath& filename,
int64_t size);
Status Open(bool read_only); // No-op if already opened.
void Close(); // No-op if not opened.
Status Delete();
// Attempts to read |size| bytes from position |pos| and returns
// reference to the data that were actually read (no more than |size|).
// End of file is indicated by empty data.
// |max_buffer_size| specifies the largest allowed buffer, which
// must accommodate the largest possible data block plus header and
// overhead.
    // |expect_readonly| must match is_readonly() (when set to false, the
    // file is expected to be writeable; this only happens when scanning
    // files while restarting the queue).
StatusOr<base::StringPiece> Read(uint32_t pos,
uint32_t size,
size_t max_buffer_size,
bool expect_readonly = true);
// Appends data to the file.
StatusOr<uint32_t> Append(base::StringPiece data);
bool is_opened() const { return handle_.get() != nullptr; }
bool is_readonly() const {
DCHECK(is_opened());
return is_readonly_.value();
}
uint64_t size() const { return size_; }
std::string name() const { return filename_.MaybeAsASCII(); }
protected:
virtual ~SingleFile();
private:
friend class base::RefCountedThreadSafe<SingleFile>;
// Private constructor, called by factory method only.
SingleFile(const base::FilePath& filename, int64_t size);
// Flag (valid for opened file only): true if file was opened for reading
// only, false otherwise.
absl::optional<bool> is_readonly_;
const base::FilePath filename_; // relative to the StorageQueue directory
uint64_t size_ = 0; // tracked internally rather than by filesystem
std::unique_ptr<base::File> handle_; // Set only when opened/created.
    // When reading the file, this is the buffer and data positions.
    // If the data is read sequentially, buffered portions are reused,
    // improving performance. When the sequential order is broken (e.g.
    // we start reading the same file in parallel from a different position),
    // the buffer is reset.
size_t data_start_ = 0;
size_t data_end_ = 0;
uint64_t file_position_ = 0;
size_t buffer_size_ = 0;
std::unique_ptr<char[]> buffer_;
};
// Private constructor, to be called by Create factory method only.
StorageQueue(scoped_refptr<base::SequencedTaskRunner> sequenced_task_runner,
const QueueOptions& options,
AsyncStartUploaderCb async_start_upload_cb,
scoped_refptr<EncryptionModuleInterface> encryption_module,
scoped_refptr<CompressionModule> compression_module);
// Initializes the object by enumerating files in the assigned directory
// and determines the sequence information of the last record.
// Must be called once and only once after construction.
// Returns OK or error status, if anything failed to initialize.
// Called once, during initialization.
// Helper methods: EnumerateDataFiles, ScanLastFile, RestoreMetadata.
Status Init();
// Retrieves last record digest (does not exist at a generation start).
absl::optional<std::string> GetLastRecordDigest() const;
// Helper method for Init(): process single data file.
// Return sequencing_id from <prefix>.<sequencing_id> file name, or Status
// in case there is any error.
StatusOr<int64_t> AddDataFile(
const base::FilePath& full_name,
const base::FileEnumerator::FileInfo& file_info);
// Helper method for Init(): sets generation id based on data file name.
// For backwards compatibility, accepts file name without generation too.
Status SetGenerationId(const base::FilePath& full_name);
// Helper method for Init(): enumerates all data files in the directory.
// Valid file names are <prefix>.<sequencing_id>, any other names are ignored.
// Adds used data files to the set.
Status EnumerateDataFiles(base::flat_set<base::FilePath>* used_files_set);
// Helper method for Init(): scans the last file in StorageQueue, if there are
// files at all, and learns the latest sequencing id. Otherwise (if there
// are no files) sets it to 0.
Status ScanLastFile();
// Helper method for Write(): increments sequencing id and assigns last
// file to place record in. |size| parameter indicates the size of data that
// comprise the record expected to be appended; if appending the record will
// make the file too large, the current last file will be closed, and a new
// file will be created and assigned to be the last one.
StatusOr<scoped_refptr<SingleFile>> AssignLastFile(size_t size);
// Helper method for Write() and Read(): creates and opens a new empty
// writeable file, adding it to |files_|.
StatusOr<scoped_refptr<SingleFile>> OpenNewWriteableFile();
// Helper method for Write(): stores a file with metadata to match the
// incoming new record. Synchronously composes metadata to record, then
// asynchronously writes it into a file with next sequencing id and then
// notifies the Write operation that it can now complete. After that it
// asynchronously deletes all other files with lower sequencing id
// (multiple Writes can see the same files and attempt to delete them, and
// that is not an error).
Status WriteMetadata(base::StringPiece current_record_digest);
// Helper method for RestoreMetadata(): loads and verifies metadata file
// contents. If accepted, adds the file to the set.
Status ReadMetadata(const base::FilePath& meta_file_path,
size_t size,
int64_t sequencing_id,
base::flat_set<base::FilePath>* used_files_set);
// Helper method for Init(): locates file with metadata that matches the
// last sequencing id and loads metadata from it.
// Adds used metadata file to the set.
Status RestoreMetadata(base::flat_set<base::FilePath>* used_files_set);
// Delete all files except those listed in the set.
void DeleteUnusedFiles(
      const base::flat_set<base::FilePath>& used_files_set);
// Helper method for Write(): deletes meta files up to, but not including
// |sequencing_id_to_keep|. Any errors are ignored.
void DeleteOutdatedMetadata(int64_t sequencing_id_to_keep);
// Helper method for Write(): composes record header and writes it to the
// file, followed by data. Stores record digest in the queue, increments
// next sequencing id.
Status WriteHeaderAndBlock(base::StringPiece data,
base::StringPiece current_record_digest,
scoped_refptr<SingleFile> file);
// Helper method for Upload: if the last file is not empty (has at least one
// record), close it and create the new one, so that its records are also
// included in the reading.
Status SwitchLastFileIfNotEmpty();
// Helper method for Upload: collects and sets aside |files| in the
// StorageQueue that have data for the Upload (all files that have records
// with sequencing ids equal or higher than |sequencing_id|).
std::map<int64_t, scoped_refptr<SingleFile>> CollectFilesForUpload(
int64_t sequencing_id) const;
// Helper method for Confirm: Moves |first_sequencing_id_| to
// (|sequencing_id|+1) and removes files that only have records with seq
// ids below or equal to |sequencing_id| (below |first_sequencing_id_|).
Status RemoveConfirmedData(int64_t sequencing_id);
// Helper method to release all file instances held by the queue.
// Files on the disk remain as they were.
void ReleaseAllFileInstances();
// Helper method to retry upload if prior one failed or if some events below
// |next_sequencing_id| were not uploaded.
void CheckBackUpload(Status status, int64_t next_sequencing_id);
  // Helper method called by the periodic timer to upload data.
void PeriodicUpload();
// Sequential task runner for all activities in this StorageQueue
// (must be first member in class).
scoped_refptr<base::SequencedTaskRunner> sequenced_task_runner_;
SEQUENCE_CHECKER(storage_queue_sequence_checker_);
// Immutable options, stored at the time of creation.
const QueueOptions options_;
// Current generation id, unique per device and queue.
// Set up once during initialization by reading from the 'gen_id.NNNN' file
// matching the last sequencing id, or generated anew as a random number if no
  // such file is found (files do not match the id).
int64_t generation_id_ = 0;
// Digest of the last written record (loaded at queue initialization, absent
  // if the new generation has just started, and no records were stored yet).
absl::optional<std::string> last_record_digest_;
// Queue of the write context instances in the order of creation, sequencing
// ids and record digests. Context is always removed from this queue before
// being destructed. We use std::list rather than std::queue, because
// if the write fails, it needs to be removed from the queue regardless of
// whether it is at the head, tail or middle.
std::list<WriteContext*> write_contexts_queue_;
// Next sequencing id to store (not assigned yet).
int64_t next_sequencing_id_ = 0;
// First sequencing id store still has (no records with lower
// sequencing id exist in store).
int64_t first_sequencing_id_ = 0;
// First unconfirmed sequencing id (no records with lower
// sequencing id will be ever uploaded). Set by the first
// Confirm call.
// If first_unconfirmed_sequencing_id_ < first_sequencing_id_,
// [first_unconfirmed_sequencing_id_, first_sequencing_id_) is a gap
// that cannot be filled in and is uploaded as such.
absl::optional<int64_t> first_unconfirmed_sequencing_id_;
// Latest metafile. May be null.
scoped_refptr<SingleFile> meta_file_;
// Ordered map of the files by ascending sequencing id.
std::map<int64_t, scoped_refptr<SingleFile>> files_;
// Counter of the Read operations. When not 0, none of the files_ can be
// deleted. Incremented by |ReadContext::OnStart|, decremented by
// |ReadContext::OnComplete|. Accessed by |RemoveConfirmedData|.
// All accesses take place on sequenced_task_runner_.
int32_t active_read_operations_ = 0;
// Upload timer (active only if options_.upload_period() is not 0).
base::RepeatingTimer upload_timer_;
// Upload provider callback.
const AsyncStartUploaderCb async_start_upload_cb_;
// Encryption module.
scoped_refptr<EncryptionModuleInterface> encryption_module_;
// Compression module.
scoped_refptr<CompressionModule> compression_module_;
// Test only: records specified to fail for a given operation kind.
base::flat_map<test::StorageQueueOperationKind, base::flat_set<int64_t>>
test_injected_failures_;
// Weak pointer factory (must be last member in class).
base::WeakPtrFactory<StorageQueue> weakptr_factory_{this};
};
} // namespace reporting
#endif // COMPONENTS_REPORTING_STORAGE_STORAGE_QUEUE_H_
| 5,367 |
450 |
package uk.co.adventuregamestudio.runtime;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Properties;
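// Illustrative usage (hypothetical caller code, not part of the original file):
//   ReadOnlyINI ini = new ReadOnlyINI(context.getFilesDir().getAbsolutePath());
//   if (ini.load()) {
//       String rotation = ini.get("rotation"); // get() returns "0" when a key is absent
//   }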
public class ReadOnlyINI {
private Properties cfg;
private String dir = "/";
public ReadOnlyINI(String directory) {
cfg = new Properties();
dir = directory;
}
public boolean load() {
boolean retval = false;
FileInputStream fis = null;
try {
retval = true;
String cfg_filename = "android.cfg";
String filePath = dir + "/" + cfg_filename;
fis = new FileInputStream(filePath);
cfg.load(fis);
} catch (FileNotFoundException e) {
retval = false;
System.out.println("Configuration error FileNotFound: " + e.getMessage());
} catch (IOException e) {
retval = false;
System.out.println("Configuration error IOException: " + e.getMessage());
} finally {
if (null != fis)
{
try
{
fis.close();
}
catch (Exception e)
{
System.out.println("Configuration error Exception: " + e.getMessage());
}
}
}
return retval;
}
public String get(String key) {
String retval = "0";
String tmp = cfg.getProperty(key);
if(tmp != null) {
retval = tmp;
}
return retval;
}
}
| 772 |
440 |
<filename>tools/GUIEditor/CGUIDummyEditorStub.h<gh_stars>100-1000
/*
This is a custom editor for stubbing problematic elements out,
for example elements which include modal screens
*/
#ifndef __C_GUI_DUMMY_EDITOR_STUB_H_INCLUDED__
#define __C_GUI_DUMMY_EDITOR_STUB_H_INCLUDED__
#include "IGUIElement.h"
#include "IGUIEnvironment.h"
#include "IGUIStaticText.h"
namespace irr
{
namespace gui
{
class CGUIDummyEditorStub : public IGUIElement
{
public:
//! constructor
CGUIDummyEditorStub(IGUIEnvironment* environment, IGUIElement *parent, const char *text) :
IGUIElement(EGUIET_ELEMENT, environment, parent, -1, core::rect<s32>(0, 0, 100, 100) ),
TextBox(0), TypeName(text)
{
#ifdef _DEBUG
setDebugName("CGUIDummyEditorStub");
#endif
core::dimension2du d = Environment->getSkin()->getFont()->getDimension(L"A");
s32 h = d.Height / 2;
s32 w = d.Width / 2;
TextBox = environment->addStaticText(core::stringw(text).c_str(),
core::rect<s32>(50-w, 50-h, 50+w, 50+h),
false, false, this, -1, false);
TextBox->grab();
TextBox->setSubElement(true);
TextBox->setAlignment(EGUIA_CENTER, EGUIA_CENTER, EGUIA_CENTER, EGUIA_CENTER);
}
virtual ~CGUIDummyEditorStub()
{
if (TextBox)
TextBox->drop();
}
virtual const c8* getTypeName() const { return TypeName.c_str(); }
protected:
IGUIStaticText* TextBox;
core::stringc TypeName;
};
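// Illustrative registration sketch (hypothetical editor code, not part of this
// file): the stub stands in for an element type the editor cannot instantiate.
//   IGUIElement* stub = new CGUIDummyEditorStub(env, parent, "IGUIFileOpenDialog");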
} // namespace gui
} // namespace irr
#endif
| 669 |
1,346 |
package com.ctrip.platform.dal.dao.helper;
import org.junit.Test;
import java.sql.SQLException;
/**
* @author c7ch23en
*/
public class DalRowMapperExtractorTest {
@Test
public void testOOMForLargeCount() throws SQLException {
DalRowMapperExtractor<Integer> extractor = new DalRowMapperExtractor<>(new DalObjectRowMapper<>(Integer.class), 999999999);
extractor.extract(new MockResultSet());
}
}
| 162 |
441 |
<reponame>vinhig/rbfx<filename>Source/ThirdParty/SLikeNet/Source/src/PS4Includes.cpp
/*
* This file was taken from RakNet 4.082 without any modifications.
* Please see licenses/RakNet license.txt for the underlying license and related copyright.
*/
| 101 |
5,169 |
{
"name": "Sniper",
"version": "0.1.1",
"summary": "Sniper help you to manage localization strings in Google Spread Sheet",
"description": "Sniper provide a easy way to sync your location strings in Google Spread Sheet. You no longer need to build your app again because of fixing a typo. It also included version control for different app version.",
"homepage": "https://github.com/RedSoldierLtd/Sniper",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/RedSoldierLtd/Sniper.git",
"tag": "0.1.1"
},
"platforms": {
"ios": "8.0"
},
"source_files": "Sniper/Classes/**/*",
"dependencies": {
"Alamofire": [
"~> 3.4"
],
"CSwiftV": [
]
}
}
| 306 |
370 |
<reponame>celestehorgan/k8s.io<filename>audit/projects/k8s-infra-e2e-boskos-021/service-accounts/[email protected]/description.json
{
"displayName": "Compute Engine default service account",
"email": "<EMAIL>",
"name": "projects/k8s-infra-e2e-boskos-021/serviceAccounts/<EMAIL>",
"oauth2ClientId": "107935880739977037781",
"projectId": "k8s-infra-e2e-boskos-021",
"uniqueId": "107935880739977037781"
}
| 188 |
461 |
<gh_stars>100-1000
/*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
*
* updateEKFQuatAtt2.h
*
* Code generation for function 'updateEKFQuatAtt2'
*
*/
#ifndef UPDATEEKFQUATATT2_H
#define UPDATEEKFQUATATT2_H
/* Include files */
#include <stddef.h>
#include <stdlib.h>
#include "rtwtypes.h"
#include "updateEKFQuatAtt2_types.h"
/* Function Declarations */
extern void updateEKFQuatAtt2(const double gyr_rps[3], const double acc_mps2[3],
const double mag_unit[3], double Va_mps, double T, double *roll_deg, double
*pitch_deg, double *yaw_deg, double *yaw_mag_deg);
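/* Illustrative call sequence (placeholder sensor values, not part of the
 * generated header):
 *   double gyr[3] = {0.0, 0.0, 0.0};
 *   double acc[3] = {0.0, 0.0, -9.81};
 *   double mag[3] = {1.0, 0.0, 0.0};
 *   double roll, pitch, yaw, yaw_mag;
 *   updateEKFQuatAtt2_initialize();
 *   updateEKFQuatAtt2(gyr, acc, mag, 25.0, 0.01,
 *                     &roll, &pitch, &yaw, &yaw_mag);
 *   updateEKFQuatAtt2_terminate();
 */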
extern void updateEKFQuatAtt2_initialize(void);
extern void updateEKFQuatAtt2_terminate(void);
#endif
/* End of code generation (updateEKFQuatAtt2.h) */
| 317 |
2,041 |
<reponame>altundasbatu/SublimeTextSettings
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os
import re
import webbrowser
import itertools
import threading
from datetime import datetime, tzinfo, timedelta
import time
platform = sublime.platform()
ST3 = int(sublime.version()) >= 3000
if ST3:
from .APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
else:
from APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
sublime_plugin.ViewEventListener = object
# io is not operable in ST2 on Linux, but in all other cases io is better
# https://github.com/SublimeTextIssues/Core/issues/254
if not ST3 and platform == 'linux':
import codecs as io
else:
import io
NT = platform == 'windows'
if NT:
import subprocess
if ST3:
from datetime import timezone
else:
class timezone(tzinfo):
__slots__ = ("_offset", "_name")
def __init__(self, offset, name=None):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
self._offset = offset
self._name = name
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def dst(self, dt):
return timedelta(0)
def tznow():
t = time.time()
d = datetime.fromtimestamp(t)
u = datetime.utcfromtimestamp(t)
return d.replace(tzinfo=timezone(d - u))
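# Illustrative: tznow().strftime('(%y-%m-%d %H:%M)') yields a local, tz-aware
# timestamp matching the plugin's default date format (format string assumed).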
def check_parentheses(date_format, regex_group, is_date=False):
if is_date:
try:
parentheses = regex_group if datetime.strptime(regex_group.strip(), date_format) else ''
except ValueError:
parentheses = ''
else:
try:
parentheses = '' if datetime.strptime(regex_group.strip(), date_format) else regex_group
except ValueError:
parentheses = regex_group
return parentheses
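# Illustrative behaviour, assuming the plugin's default date format
# '(%y-%m-%d %H:%M)' (examples not part of the original source):
#   check_parentheses('(%y-%m-%d %H:%M)', '(16-05-01 12:00)', is_date=True)
#     -> '(16-05-01 12:00)'   # parses as a date, so it is kept
#   check_parentheses('(%y-%m-%d %H:%M)', '(just a note)')
#     -> '(just a note)'      # does not parse as a date, so it is kept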
class PlainTasksNewCommand(PlainTasksBase):
def runCommand(self, edit):
# list for ST3 support;
# reversed because with multiple selections regions would be messed up after first iteration
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
header_to_task = self.view.settings().get('header_to_task', False)
        # ST3 (3080) moves the selection on view.replace only by the delta between
        # the original and new regions, so if the selection is not at EOL and we
        # replace a line with two lines, the cursor won't be on the next line as
        # it should be
sels = self.view.sel()
eol = None
for i, line in enumerate(regions):
line_contents = self.view.substr(line).rstrip()
            not_empty_line = re.match(r'^(\s*)(\S.*)$', self.view.substr(line))
            empty_line = re.match(r'^(\s+)$', self.view.substr(line))
            current_scope = self.view.scope_name(line.a)
            eol = line.b  # needed for ST3 when new content has a line break
if 'item' in current_scope:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
elif 'header' in current_scope and line_contents and not header_to_task:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
elif 'separator' in current_scope:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
            elif ('header' not in current_scope and 'separator' not in current_scope) or header_to_task:
eol = None
if not_empty_line:
grps = not_empty_line.groups()
line_contents = (grps[0] if len(grps[0]) > 0 else self.before_tasks_bullet_spaces) + self.open_tasks_bullet + self.tasks_bullet_space + grps[1]
elif empty_line: # only whitespaces
grps = empty_line.groups()
line_contents = grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
else: # completely empty, no whitespaces
line_contents = self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
else:
print('oops, need to improve PlainTasksNewCommand')
if eol:
# move cursor to eol of original line, workaround for ST3
sels.subtract(sels[~i])
sels.add(sublime.Region(eol, eol))
self.view.replace(edit, line, line_contents)
# convert each selection to single cursor, ready to type
new_selections = []
for sel in list(self.view.sel()):
eol = self.view.line(sel).b
new_selections.append(sublime.Region(eol, eol))
self.view.sel().clear()
for sel in new_selections:
self.view.sel().add(sel)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksNewWithDateCommand(PlainTasksBase):
def runCommand(self, edit):
self.view.run_command('plain_tasks_new')
sels = list(self.view.sel())
suffix = ' @created%s' % tznow().strftime(self.date_format)
points = []
for s in reversed(sels):
if self.view.substr(sublime.Region(s.b - 2, s.b)) == ' ':
point = s.b - 2 # keep double whitespace at eol
else:
point = s.b
self.view.insert(edit, point, suffix)
points.append(point)
self.view.sel().clear()
offset = len(suffix)
for i, sel in enumerate(sels):
self.view.sel().add(sublime.Region(points[~i] + i*offset, points[~i] + i*offset))
class PlainTasksCompleteCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
done_line_end, now = self.format_line_end(self.done_tag, tznow())
offset = len(done_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'''
(?x)^(\s*)(\[x\]|.) # 0,1 indent & bullet
(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*) # 2 very task
(?=
((?:\s@done|@project|@[wl]asted|$).*) # 3 ending either w/ done or w/o it & no date
| # or
(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$ # 4 date & possible project tag after
)
''' # rcm is the same, except bullet & ending
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
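        # Illustrative: for a completed line like ' - fix parser @done (16-05-01 12:00)'
        # rdm captures the indent, the bullet, and the task text, leaving the trailing
        # @done/date part to the lookahead groups (line format assumed).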
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
done_line_end = done_line_end.rstrip()
if line_contents.endswith(' '):
done_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
replacement = u'%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), done_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol}
)
                indent = re.match(r'^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.done_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
grps = done_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip())
offset = -offset
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksCancelCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
canc_line_end, now = self.format_line_end(self.canc_tag, tznow())
offset = len(canc_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'^(\s*)(\[x\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@done|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
canc_line_end = canc_line_end.rstrip()
if line_contents.endswith(' '):
canc_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_cle = self.view.insert(edit, line.end(), canc_line_end)
replacement = u'%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_cle,
'tag': 'wasted'}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), canc_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol,
'tag': 'wasted'}
)
                indent = re.match(r'^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.canc_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
                sublime.status_message('You cannot cancel what has been done, can you?')
# grps = done_matches.groups()
# parentheses = check_parentheses(self.date_format, grps[4] or '')
# replacement = u'%s%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2], parentheses)
# self.view.replace(edit, line, replacement.rstrip())
# offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksArchiveCommand(PlainTasksBase):
def runCommand(self, edit, partial=False):
rds = 'meta.item.todo.completed'
rcs = 'meta.item.todo.cancelled'
# finding archive section
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if partial:
all_tasks = self.get_archivable_tasks_within_selections()
else:
all_tasks = self.get_all_archivable_tasks(archive_pos, rds, rcs)
if not all_tasks:
sublime.status_message('Nothing to archive')
else:
if archive_pos and archive_pos.a > 0:
line = self.view.full_line(archive_pos).end()
else:
create_archive = u'\n\n___________________\n%s\n' % self.archive_name
self.view.insert(edit, self.view.size(), create_archive)
line = self.view.size()
projects = get_all_projects_and_separators(self.view)
# adding tasks to archive section
for task in all_tasks:
line_content = self.view.substr(task)
match_task = re.match(r'^\s*(\[[x-]\]|.)(\s+.*$)', line_content, re.U)
current_scope = self.view.scope_name(task.a)
if rds in current_scope or rcs in current_scope:
pr = self.get_task_project(task, projects)
if self.project_postfix:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
line_content.strip(),
(u' @project(%s)' % pr) if pr else '',
' ' if line_content.endswith(' ') else '')
else:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
match_task.group(1), # bullet
(u'%s%s:' % (self.tasks_bullet_space, pr)) if pr else '',
match_task.group(2)) # very task
else:
eol = u'{0}{1}\n'.format(self.before_tasks_bullet_spaces * 2, line_content.lstrip())
line += self.view.insert(edit, line, eol)
            # remove moved tasks (starting from the last one, otherwise deletion would invalidate the regions that follow)
for task in reversed(all_tasks):
self.view.erase(edit, self.view.full_line(task))
self.view.run_command('plain_tasks_sort_by_date')
def get_task_project(self, task, projects):
index = -1
for ind, pr in enumerate(projects):
if task < pr:
if ind > 0:
index = ind-1
break
        # if there are no projects for the task, return an empty string
if index == -1:
return ''
prog = re.compile(r'^\n*(\s*)(.+):(?=\s|$)\s*(\@[^\s]+(\(.*?\))?\s*)*')
hierarhProject = ''
if index >= 0:
depth = re.match(r"\s*", self.view.substr(self.view.line(task))).group()
while index >= 0:
strProject = self.view.substr(projects[index])
if prog.match(strProject):
spaces = prog.match(strProject).group(1)
if len(spaces) < len(depth):
hierarhProject = prog.match(strProject).group(2) + ((" / " + hierarhProject) if hierarhProject else '')
depth = spaces
if len(depth) == 0:
break
else:
sep = re.compile(r'(^\s*)---.{3,5}---+$')
spaces = sep.match(strProject).group(1)
if len(spaces) < len(depth):
depth = spaces
if len(depth) == 0:
break
index -= 1
if not hierarhProject:
return ''
else:
return hierarhProject
def get_task_note(self, task, tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
if note not in tasks:
tasks.append(note)
note_line = self.view.line(note_line).end() + 1
def get_all_archivable_tasks(self, archive_pos, rds, rcs):
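        # Tasks qualify only if they appear before the archive section, or
        # anywhere in the file when no archive section exists yet.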
done_tasks = [i for i in self.view.find_by_selector(rds) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in done_tasks:
self.get_task_note(i, done_tasks)
canc_tasks = [i for i in self.view.find_by_selector(rcs) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in canc_tasks:
self.get_task_note(i, canc_tasks)
all_tasks = done_tasks + canc_tasks
all_tasks.sort()
return all_tasks
def get_archivable_tasks_within_selections(self):
all_tasks = []
for region in self.view.sel():
for l in self.view.lines(region):
line = self.view.line(l)
if ('completed' in self.view.scope_name(line.a)) or ('cancelled' in self.view.scope_name(line.a)):
all_tasks.append(line)
self.get_task_note(line, all_tasks)
return all_tasks
class PlainTasksNewTaskDocCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.new_file()
view.settings().add_on_change('color_scheme', lambda: self.set_proper_scheme(view))
view.set_syntax_file('Packages/PlainTasks/PlainTasks.sublime-syntax' if ST3 else
'Packages/PlainTasks/PlainTasks.tmLanguage')
def set_proper_scheme(self, view):
if view.id() != sublime.active_window().active_view().id():
return
pts = sublime.load_settings('PlainTasks.sublime-settings')
if view.settings().get('color_scheme') == pts.get('color_scheme'):
return
        # Since we cannot create a file with its syntax preset, there is a
        # moment when the view has no settings but is already activated, so
        # some plugins (e.g. Color Highlighter) may set the wrong color scheme
view.settings().set('color_scheme', pts.get('color_scheme'))
class PlainTasksOpenUrlCommand(sublime_plugin.TextCommand):
    # It is a horrible regex, but it works perfectly
URL_REGEX = r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))
+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))"""
def run(self, edit):
s = self.view.sel()[0]
start, end = s.a, s.b
if 'url' in self.view.scope_name(start):
while self.view.substr(start) != '<': start -= 1
while self.view.substr(end) != '>': end += 1
rgn = sublime.Region(start + 1, end)
# optional select URL
self.view.sel().add(rgn)
url = self.view.substr(rgn)
if NT and all([ST3, ':' in url]):
                # webbrowser uses os.startfile() under the hood, which is unreliable in py3;
                # so call the start command instead for urls with a scheme (eg skype:nick) and full paths (eg c:\b)
subprocess.Popen(['start', url], shell=True)
else:
webbrowser.open_new_tab(url)
else:
self.search_bare_weblink_and_open(start, end)
def search_bare_weblink_and_open(self, start, end):
# expand selection to nearest stopSymbols
view_size = self.view.size()
stopSymbols = ['\t', ' ', '\"', '\'', '>', '<', ',']
# move the selection back to the start of the url
while (start > 0
and not self.view.substr(start - 1) in stopSymbols
and self.view.classify(start) & sublime.CLASS_LINE_START == 0):
start -= 1
# move end of selection forward to the end of the url
while (end < view_size
and not self.view.substr(end) in stopSymbols
and self.view.classify(end) & sublime.CLASS_LINE_END == 0):
end += 1
# grab the URL
url = self.view.substr(sublime.Region(start, end))
# optional select URL
self.view.sel().add(sublime.Region(start, end))
exp = re.search(self.URL_REGEX, url, re.X)
if exp and exp.group(0):
strUrl = exp.group(0)
if strUrl.find("://") == -1:
strUrl = "http://" + strUrl
webbrowser.open_new_tab(strUrl)
else:
sublime.status_message("Looks like there is nothing to open")
class PlainTasksOpenLinkCommand(sublime_plugin.TextCommand):
LINK_PATTERN = re.compile( # simple ./path/
r'''(?ixu)(?:^|[ \t])\.[\\/]
(?P<fn>
(?:[a-z]\:[\\/])? # special case for Windows full path
(?:[^\\/:">]+[\\/]?)+) # the very path (single filename/relative/full)
(?=[\\/:">]) # stop matching path
# options:
(>(?P<sym>\w+))?(\:(?P<line>\d+))?(\:(?P<col>\d+))?(\"(?P<text>[^\n]*)\")?
''')
MD_LINK = re.compile( # markdown [](path)
r'''(?ixu)\][ \t]*\(\<?(?:file\:///?)?
(?P<fn>.*?((\\\))?.*?)*)
(?:\>?[ \t]*
\"((\:(?P<line>\d+))?(\:(?P<col>\d+))?|(\>(?P<sym>\w+))?|(?P<text>[^\n]*))
\")?
\)
''')
WIKI_LINK = re.compile( # ORGMODE, NV, and all similar formats [[link][opt-desc]]
r'''(?ixu)\[\[(?:file(?:\+(?:sys|emacs))?\:)?(?:\.[\\/])?
(?P<fn>.*?((\\\])?.*?)*)
(?# options for orgmode link [[path::option]])
(?:\:\:(((?P<line>\d+))?(\:(?P<col>\d+))?|(\*(?P<sym>\w+))?|(?P<text>.*?((\\\])?.*?)*)))?
\](?:\[(.*?)\])?
\]
        (?# options for NV [[path]] "option"; NV does not support it, but PT should, so it won't break NV)
(?:[ \t]*
\"((\:(?P<linen>\d+))?(\:(?P<coln>\d+))?|(\>(?P<symn>\w+))?|(?P<textn>[^\n]*))
\")?
''')
def _format_res(self, res):
if res[3] == 'f':
return [res[0], "line: %d column: %d" % (int(res[1]), int(res[2]))]
elif res[3] == 'd':
return [res[0], 'Add folder to project' if ST3 else 'Folders are supported only in Sublime 3']
else:
return [res[0], res[1]]
def _on_panel_selection(self, selection, text=None, line=0):
if selection < 0:
self.panel_hidden = True
return
self.stop_thread = True
self.thread.join()
win = sublime.active_window()
win.run_command('hide_overlay')
res = self._current_res[selection]
if not res[3]:
return # user chose to stop search
if not ST3 and res[3] == "d":
return sublime.status_message('Folders are supported only in Sublime 3')
elif res[3] == "d":
data = win.project_data()
if not data:
data = {}
if "folders" not in data:
data["folders"] = []
data["folders"].append({'follow_symlinks': True,
'path': res[0]})
win.set_project_data(data)
else:
self.opened_file = win.open_file('%s:%s:%s' % res[:3],
sublime.ENCODED_POSITION)
if text:
sublime.set_timeout(lambda: self.find_text(self.opened_file, text, line), 300)
def search_files(self, all_folders, fn, sym, line, col, text):
        '''Worker; runs in a separate thread walking the given folders.'''
fn = fn.replace('/', os.sep)
if os.path.isfile(fn): # check for full path
self._current_res.append((fn, line, col, "f"))
elif os.path.isdir(fn):
self._current_res.append((fn, 0, 0, "d"))
seen_folders = []
for folder in sorted(set(all_folders)):
for root, subdirs, _ in os.walk(folder):
if self.stop_thread:
return
if root in seen_folders:
continue
else:
seen_folders.append(root)
                subdirs[:] = [f for f in subdirs if os.path.join(root, f) not in seen_folders]  # prune in place so os.walk() skips them
tname = '%s at %s' % (fn, root)
self.thread.name = tname if ST3 else tname.encode('utf8')
name = os.path.normpath(os.path.abspath(os.path.join(root, fn)))
if os.path.isfile(name):
item = (name, line, col, "f")
if item not in self._current_res:
self._current_res.append(item)
if os.path.isdir(name):
item = (name, 0, 0, "d")
if item not in self._current_res:
self._current_res.append(item)
self._current_res = self._current_res[1:] # remove 'Stop search' item
if not self._current_res:
return sublime.error_message('File was not found\n\n\t%s' % fn)
if len(self._current_res) == 1:
sublime.set_timeout(lambda: self._on_panel_selection(0), 1)
else:
entries = [self._format_res(res) for res in self._current_res]
sublime.set_timeout(lambda: self.window.show_quick_panel(entries, lambda i: self._on_panel_selection(i, text=text, line=line)), 1)
def run(self, edit):
if hasattr(self, 'thread'):
            if self.thread.is_alive():
self.stop_thread = True
self.thread.join()
point = self.view.sel()[0].begin()
line = self.view.substr(self.view.line(point))
fn, sym, line, col, text = self.parse_link(line)
if not fn:
            sublime.status_message('Line does not contain a valid link to a file')
return
self.window = win = sublime.active_window()
self._current_res = [('Stop search', '', '', '')]
# init values to update quick panel
self.items = 0
self.panel_hidden = True
if sym:
for name, _, pos in win.lookup_symbol_in_index(sym):
if name.endswith(fn):
line, col = pos
self._current_res.append((name, line, col, "f"))
all_folders = win.folders() + [os.path.dirname(v.file_name()) for v in win.views() if v.file_name()]
self.stop_thread = False
self.thread = threading.Thread(target=self.search_files, args=(all_folders, fn, sym, line, col, text))
self.thread.setName('is starting')
self.thread.start()
self.progress_bar()
def find_text(self, view, text, line):
result = view.find(text, view.sel()[0].a if line else 0, sublime.LITERAL)
view.sel().clear()
view.sel().add(result.a)
view.set_viewport_position(view.text_to_layout(view.size()), False)
view.show_at_center(result)
def progress_bar(self, i=0, dir=1):
if not self.thread.is_alive():
PlainTasksStatsStatus.set_stats(self.view)
return
if self._current_res and sublime.active_window().active_view().id() == self.view.id():
items = len(self._current_res)
if items != self.items:
self.window.run_command('hide_overlay')
self.items = items
if self.panel_hidden:
entries = [self._format_res(res) for res in self._current_res]
self.window.show_quick_panel(entries, self._on_panel_selection)
self.panel_hidden = False
# This animates a little activity indicator in the status area
before = i % 8
        after = 7 - before
if not after: dir = -1
if not before: dir = 1
i += dir
self.view.set_status('PlainTasks', u'Please wait%s…%ssearching %s' %
(' ' * before, ' ' * after, self.thread.name if ST3 else self.thread.name.decode('utf8')))
sublime.set_timeout(lambda: self.progress_bar(i, dir), 100)
return
def parse_link(self, line):
match_link = self.LINK_PATTERN.search(line)
match_md = self.MD_LINK.search(line)
match_wiki = self.WIKI_LINK.search(line)
if match_link:
fn, sym, line, col, text = match_link.group('fn', 'sym', 'line', 'col', 'text')
elif match_md:
fn, sym, line, col, text = match_md.group('fn', 'sym', 'line', 'col', 'text')
# unescape some chars
fn = (fn.replace('\\(', '(').replace('\\)', ')'))
elif match_wiki:
fn = match_wiki.group('fn')
sym = match_wiki.group('sym') or match_wiki.group('symn')
line = match_wiki.group('line') or match_wiki.group('linen')
col = match_wiki.group('col') or match_wiki.group('coln')
text = match_wiki.group('text') or match_wiki.group('textn')
# unescape some chars
fn = (fn.replace('\\[', '[').replace('\\]', ']'))
if text:
text = (text.replace('\\[', '[').replace('\\]', ']'))
return fn, sym, line or 0, col or 0, text
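    # Hedged examples (illustrative strings, not from real task files) of
    # the link shapes the three patterns above target:
    #   LINK_PATTERN: ./docs/readme.txt:12:3"search text"
    #   MD_LINK:      [description](docs/readme.md ":12")
    #   WIKI_LINK:    [[docs/readme.org::12]]
    # parse_link() normalizes all of them into (fn, sym, line, col, text).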
class PlainTasksSortByDate(PlainTasksBase):
def runCommand(self, edit):
if not re.search(r'(?su)%[Yy][-./ ]*%m[-./ ]*%d\s*%H.*%M', self.date_format):
            # TODO: sort with dateutil so we won't depend on a specific date_format
return
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if archive_pos:
have_date = r'(^\s*[^\n]*?\s\@(?:done|cancelled)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)'
all_tasks_prefixed_date = []
all_tasks = self.view.find_all(have_date, 0, u"\\2\\1", all_tasks_prefixed_date)
tasks_prefixed_date = []
tasks = []
for ind, task in enumerate(all_tasks):
if task.a > archive_pos.b:
tasks.append(task)
tasks_prefixed_date.append(all_tasks_prefixed_date[ind])
notes = []
for ind, task in enumerate(tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
notes.append(note)
tasks_prefixed_date[ind] += u'\n' + self.view.substr(note)
note_line = note.end() + 1
to_remove = tasks+notes
to_remove.sort()
for i in reversed(to_remove):
self.view.erase(edit, self.view.full_line(i))
tasks_prefixed_date.sort(reverse=self.view.settings().get('new_on_top', True))
eol = archive_pos.end()
for a in tasks_prefixed_date:
eol += self.view.insert(edit, eol, u'\n' + re.sub(r'^\([\d\w,\.:\-\/ ]*\)([^\b]*$)', u'\\1', a))
else:
sublime.status_message("Nothing to sort")
class PlainTasksRemoveBold(sublime_plugin.TextCommand):
def run(self, edit):
for s in reversed(list(self.view.sel())):
a, b = s.begin(), s.end()
for r in sublime.Region(b + 2, b), sublime.Region(a - 2, a):
self.view.erase(edit, r)
class PlainTasksStatsStatus(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
self.set_stats(view)
def on_post_save(self, view):
self.on_activated(view)
@staticmethod
def set_stats(view):
view.set_status('PlainTasks', PlainTasksStatsStatus.get_stats(view))
@staticmethod
def get_stats(view):
msgf = view.settings().get('stats_format', '$n/$a done ($percent%) $progress Last task @done $last')
special_interest = re.findall(r'{{.*?}}', msgf)
for i in special_interest:
matches = view.find_all(i.strip('{}'))
pend, done, canc = [], [], []
for t in matches:
                # one task may contain the same tag/word several times; we count the number of tasks, not tags
t = view.line(t).a
scope = view.scope_name(t)
if 'pending' in scope and t not in pend:
pend.append(t)
elif 'completed' in scope and t not in done:
done.append(t)
elif 'cancelled' in scope and t not in canc:
canc.append(t)
msgf = msgf.replace(i, '%d/%d/%d'%(len(pend), len(done), len(canc)))
ignore_archive = view.settings().get('stats_ignore_archive', False)
if ignore_archive:
archive_pos = view.find(view.settings().get('archive_name', 'Archive:'), 0, sublime.LITERAL)
pend = len([i for i in view.find_by_selector('meta.item.todo.pending') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
done = len([i for i in view.find_by_selector('meta.item.todo.completed') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
canc = len([i for i in view.find_by_selector('meta.item.todo.cancelled') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
else:
pend = len(view.find_by_selector('meta.item.todo.pending'))
done = len(view.find_by_selector('meta.item.todo.completed'))
canc = len(view.find_by_selector('meta.item.todo.cancelled'))
allt = pend + done + canc
percent = ((done+canc)/float(allt))*100 if allt else 0
factor = int(round(percent/10)) if percent<90 else int(percent/10)
barfull = view.settings().get('bar_full', u'■')
barempty = view.settings().get('bar_empty', u'□')
progress = '%s%s' % (barfull*factor, barempty*(10-factor)) if factor else ''
tasks_dates = []
view.find_all('(^\s*[^\n]*?\s\@(?:done)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)', 0, "\\2", tasks_dates)
date_format = view.settings().get('date_format', '(%y-%m-%d %H:%M)')
tasks_dates = [check_parentheses(date_format, t, is_date=True) for t in tasks_dates]
tasks_dates.sort(reverse=True)
last = tasks_dates[0] if tasks_dates else '(UNKNOWN)'
msg = (msgf.replace('$o', str(pend))
.replace('$d', str(done))
.replace('$c', str(canc))
.replace('$n', str(done+canc))
.replace('$a', str(allt))
.replace('$percent', str(int(percent)))
.replace('$progress', progress)
.replace('$last', last)
)
return msg
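    # A hedged illustration of the placeholder expansion above with made-up
    # counts pend=2, done=3, canc=1 (so allt == 6):
    #   '$n/$a done ($percent%)'  ->  '4/6 done (66%)'
    # since $n counts done+cancelled tasks and $percent is truncated to int.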
class PlainTasksCopyStats(sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.score_selector(0, "text.todo") > 0
def run(self, edit):
msg = self.view.get_status('PlainTasks')
replacements = self.view.settings().get('replace_stats_chars', [])
if replacements:
for o, r in replacements:
msg = msg.replace(o, r)
sublime.set_clipboard(msg)
class PlainTasksArchiveOrgCommand(PlainTasksBase):
def runCommand(self, edit):
        # Archive the current subtree to our archive file, not just completed tasks.
# For now, it's mapped to ctrl-shift-o or super-shift-o
# TODO: Mark any tasks found as complete, or maybe warn.
# Get our archive filename
archive_filename = self.__createArchiveFilename()
# Figure out our subtree
region = self.__findCurrentSubtree()
if region.empty():
# How can we get here?
sublime.error_message("Error:\n\nCould not find a tree to archive.")
return
        # Write our region to our archive file
success = self.__writeArchive(archive_filename, region)
# only erase our region if the write was successful
if success:
            self.view.erase(edit, region)
return
def __writeArchive(self, filename, region):
# Write out the given region
sublime.status_message(u'Archiving tree to {0}'.format(filename))
try:
            # Have to use io.open because Windows doesn't like writing
            # utf8 to regular filehandles
with io.open(filename, 'a', encoding='utf8') as fh:
data = self.view.substr(region)
# Is there a way to read this in?
fh.write(u"--- ✄ -----------------------\n")
fh.write(u"Archived {0}:\n".format(tznow().strftime(
self.date_format)))
# And, finally, write our data
fh.write(u"{0}\n".format(data))
return True
except Exception as e:
sublime.error_message(u"Error:\n\nUnable to append to {0}\n{1}".format(
filename, str(e)))
return False
def __createArchiveFilename(self):
# Create our archive filename, from the mask in our settings.
        # Split filename into dir, base, and extension, then apply our mask
path_base, extension = os.path.splitext(self.view.file_name())
dir = os.path.dirname(path_base)
base = os.path.basename(path_base)
sep = os.sep
# Now build our new filename
try:
            # This could fail if someone messed up the mask in the
            # settings, so fall back to our default if it does.
archive_filename = self.archive_org_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
except:
# Use our default mask
archive_filename = self.archive_org_default_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
# Display error, letting the user know
sublime.error_message(u"Error:\n\nInvalid filemask:{0}\nUsing default: {1}".format(
self.archive_org_filemask, self.archive_org_default_filemask))
return archive_filename
def __findCurrentSubtree(self):
# Return the region that starts at the cursor, or starts at
# the beginning of the selection
line = self.view.line(self.view.sel()[0].begin())
# Start finding the region at the beginning of the next line
region = self.view.indented_region(line.b + 2)
if region.contains(line.b):
# there is no subtree
return sublime.Region(-1, -1)
if not region.empty():
region = sublime.Region(line.a, region.b)
return region
class PlainTasksFoldToTags(PlainTasksFold):
TAG = r'(?u)@\w+'
def run(self, edit):
tag_sels = [s for s in list(self.view.sel()) if 'tag.todo' in self.view.scope_name(s.a)]
if not tag_sels:
sublime.status_message('Cursor(s) must be placed on tag(s)')
return
tags = self.extract_tags(tag_sels)
tasks = [self.view.line(f) for f in self.view.find_all(r'[ \t](%s)' % '|'.join(tags)) if 'pending' in self.view.scope_name(f.a)]
if not tasks:
            sublime.status_message('No pending tasks with the given tags were found')
print(tags, tag_sels)
return
self.exec_folding(self.add_projects_and_notes(tasks))
def extract_tags(self, tag_sels):
tags = []
for s in tag_sels:
start = end = s.a
limit = self.view.size()
while all(self.view.substr(start) != c for c in '@ \n'):
start -= 1
if start == 0:
break
while all(self.view.substr(end) != c for c in '( @\n'):
end += 1
if end == limit:
break
match = re.match(self.TAG, self.view.substr(sublime.Region(start, end)))
tag = match.group(0) if match else False
if tag and tag not in tags:
tags.append(tag)
return tags
class PlainTasksAddGutterIconsForTags(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
view.erase_regions('critical')
view.erase_regions('high')
view.erase_regions('low')
view.erase_regions('today')
icon_critical = view.settings().get('icon_critical', '')
icon_high = view.settings().get('icon_high', '')
icon_low = view.settings().get('icon_low', '')
icon_today = view.settings().get('icon_today', '')
if not any((icon_critical, icon_high, icon_low, icon_today)):
return
critical = 'string.other.tag.todo.critical'
high = 'string.other.tag.todo.high'
low = 'string.other.tag.todo.low'
today = 'string.other.tag.todo.today'
r_critical = view.find_by_selector(critical)
r_high = view.find_by_selector(high)
r_low = view.find_by_selector(low)
r_today = view.find_by_selector(today)
if not any((r_critical, r_high, r_low, r_today)):
return
view.add_regions('critical', r_critical, critical, icon_critical, sublime.HIDDEN)
view.add_regions('high', r_high, high, icon_high, sublime.HIDDEN)
view.add_regions('low', r_low, low, icon_low, sublime.HIDDEN)
view.add_regions('today', r_today, today, icon_today, sublime.HIDDEN)
def on_post_save(self, view):
self.on_activated(view)
def on_load(self, view):
self.on_activated(view)
class PlainTasksHover(sublime_plugin.ViewEventListener):
    '''Show a popup with actions when hovering over a bullet'''
    msg = ('<style>' # four curly braces because the string is run through .format() twice
'html {{{{background-color: color(var(--background) blenda(white 75%))}}}}'
'body {{{{margin: .1em .3em}}}}'
'p {{{{margin: .5em 0}}}}'
'a {{{{text-decoration: none}}}}'
'span.icon {{{{font-weight: bold; font-size: 1.3em}}}}'
'#icon-done {{{{color: var(--greenish)}}}}'
'#icon-cancel {{{{color: var(--redish)}}}}'
'#icon-archive {{{{color: var(--bluish)}}}}'
'#icon-outside {{{{color: var(--purplish)}}}}'
'#done {{{{color: var(--greenish)}}}}'
'#cancel {{{{color: var(--redish)}}}}'
'#archive {{{{color: var(--bluish)}}}}'
'#outside {{{{color: var(--purplish)}}}}'
'</style><body>'
'{actions}'
)
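    # Brace-escaping illustration: '{{{{x}}}}'.format() yields '{{x}}' and a
    # second .format() yields '{x}', so the CSS braces above survive both
    # format passes applied to msg.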
complete = '<a href="complete\v{point}"><span class="icon" id="icon-done">✔</span> <span id="done">Toggle complete</span></a>'
cancel = '<a href="cancel\v{point}"><span class="icon" id="icon-cancel">✘</span> <span id="cancel">Toggle cancel</span></a>'
archive = '<a href="archive\v{point}"><span class="icon" id="icon-archive">📚</span> <span id="archive">Archive</span></a>'
archivetofile = '<a href="tofile\v{point}"><span class="icon" id="icon-outside">📤</span> <span id="outside">Archive to file</span></a>'
actions = {
'text.todo meta.item.todo.pending': '<p>{complete}</p><p>{cancel}</p>'.format(complete=complete, cancel=cancel),
'text.todo meta.item.todo.completed': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete),
'text.todo meta.item.todo.cancelled': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p><p>{cancel}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete, cancel=cancel)
}
@classmethod
def is_applicable(cls, settings):
return settings.get('syntax') == 'Packages/PlainTasks/PlainTasks.sublime-syntax'
def on_hover(self, point, hover_zone):
self.view.hide_popup()
if hover_zone != sublime.HOVER_TEXT:
return
line = self.view.line(point)
line_scope_name = self.view.scope_name(line.a).strip()
if 'meta.item.todo' not in line_scope_name:
return
bullet = any(('bullet' in self.view.scope_name(p) for p in (point, point - 1)))
if not bullet:
return
width, height = self.view.viewport_extent()
self.view.show_popup(self.msg.format(actions=self.actions.get(line_scope_name)).format(point=point), 0, point or self.view.sel()[0].begin() or 1, width, height / 2, self.exec_action)
def exec_action(self, msg):
action, at = msg.split('\v')
case = {
'complete': lambda: self.view.run_command('plain_tasks_complete'),
'cancel': lambda: self.view.run_command('plain_tasks_cancel'),
'archive': lambda: self.view.run_command("plain_tasks_archive", {"partial": True}),
'tofile': lambda: self.view.run_command('plain_tasks_org_archive'),
}
self.view.sel().clear()
self.view.sel().add(sublime.Region(int(at)))
case[action]()
self.view.hide_popup()
class PlainTasksGotoTag(sublime_plugin.TextCommand):
def run(self, edit):
self.initial_viewport = self.view.viewport_position()
self.initial_sels = list(self.view.sel())
self.tags = sorted(
[r for r in self.view.find_by_selector('meta.tag.todo')
if not any(s in self.view.scope_name(r.a) for s in ('completed', 'cancelled'))
] +
self.view.find_by_selector('string.other.tag.todo.critical') +
self.view.find_by_selector('string.other.tag.todo.high') +
self.view.find_by_selector('string.other.tag.todo.low') +
self.view.find_by_selector('string.other.tag.todo.today')
)
window = self.view.window() or sublime.active_window()
items = [[self.view.substr(t), u'{0}: {1}'.format(self.view.rowcol(t.a)[0], self.view.substr(self.view.line(t)).strip())] for t in self.tags]
if ST3:
from bisect import bisect_left
# find the closest tag after current position of viewport, to avoid scrolling
closest_index = bisect_left([r.a for r in self.tags], self.view.layout_to_text(self.initial_viewport))
llen = len(self.tags)
selected_index = closest_index if closest_index < llen else llen - 1
window.show_quick_panel(items, self.on_done, 0, selected_index, self.on_highlighted)
else:
window.show_quick_panel(items, self.on_done)
def on_done(self, index):
if index < 0:
self.view.sel().clear()
self.view.sel().add_all(self.initial_sels)
self.view.set_viewport_position(self.initial_viewport)
return
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.tags[index].a))
self.view.show_at_center(self.tags[index])
def on_highlighted(self, index):
self.view.sel().clear()
self.view.sel().add(self.tags[index])
self.view.show(self.tags[index], True)
| 25,296 |
879 |
package com.bookstore.repository;
import com.bookstore.entity.Author;
import com.bookstore.entity.Book;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
@Repository
@Transactional(readOnly = true)
public interface BookRepository extends JpaRepository<Book, Long> {
Book findByTitle(String title);
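    // The bulk deletes below execute as single JPQL statements;
    // flushAutomatically synchronizes pending changes before the delete and
    // clearAutomatically evicts the persistence context afterwards, so stale
    // Book instances are not served from the first-level cache.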
@Transactional
@Modifying(flushAutomatically = true, clearAutomatically = true)
@Query("DELETE FROM Book b WHERE b.author.id=?1")
public int deleteByAuthorIdentifier(Long id);
@Transactional
@Modifying(flushAutomatically = true, clearAutomatically = true)
@Query("DELETE FROM Book b WHERE b.author.id IN ?1")
public int deleteBulkByAuthorIdentifier(List<Long> id);
@Transactional
@Modifying(flushAutomatically = true, clearAutomatically = true)
@Query("DELETE FROM Book b WHERE b.author IN ?1")
public int deleteBulkByAuthors(List<Author> authors);
}
| 392 |
796 |
<reponame>harmonyos-mirror/OpenArkCompiler-test
#!/usr/bin/python3
# -*- coding:utf-8 -*-
#
# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
#
# http://license.coscl.org.cn/MulanPSL
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v1 for more details.
#
import shlex
from functools import total_ordering
from maple_test.utils import PASS, EXEC_FLAG, ERRCHECK_FLAG, EXPECT_FLAG, DEPENDENCE_FLAG
from maple_test.utils import read_file
from maple_test.utils import split_comment, filter_line, filter_command_line
from maple_test.utils import FAIL, UNRESOLVED
class Case:
def __init__(self, path, test_path, comment):
if path != test_path:
self.name = str(path).replace(".", "_")
self.path = test_path / path
self.test_name = test_path.name
else:
self.name = "{}_{}".format(path.parent.name, path.name).replace(".", "_")
self.path = path
self.test_name = path.parent.name
self.test_path = test_path
self.relative_path = path
self.comment = comment
try:
            _, comment_lines = split_comment(comment, read_file(self.path))
except UnicodeDecodeError as e:
print(e)
self.commands = []
self.expect = []
self.dependence = {}
else:
self.commands = extract_commands(comment_lines)
self.expect = extract_expect(comment_lines)
self.dependence = extract_dependence(comment_lines)
def __repr__(self):
return str(self.relative_path)
def extract_expect(comment_lines):
expect_line = [filter_line(line, EXPECT_FLAG) for line in comment_lines]
expect_line = [line for line in expect_line if line]
if not expect_line:
return PASS
return expect_line[-1]
def extract_dependence(comment_lines):
support_separartor = ",; "
dependence = []
for line in comment_lines:
line = filter_line(line, DEPENDENCE_FLAG)
if not line:
continue
parser = shlex.shlex(line)
        parser.whitespace += support_separator
parser.whitespace_split = True
dependence += list(parser)
return set(dependence)
def extract_commands(comment_lines):
commands = []
flag = False
merge_command = ""
for command in comment_lines:
command = filter_command_line(command)
if not command:
continue
if command.strip()[-1] == "\\":
flag = True
merge_command += "{} ".format(command.strip()[:-1])
else:
if flag:
merge_command += "{} ".format(command)
flag = False
if merge_command == "":
commands.append(command)
else:
commands.append(merge_command)
merge_command = ""
if not commands and merge_command.strip():
commands.append(merge_command)
return commands
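# Hedged sketch of the merge logic above: once a filtered command line ends
# with a backslash, it is buffered and glued to the following command lines
# until one arrives without a trailing backslash, e.g.
#   '%CC% -c test.c \' followed by '-o test.o'
# collapses into the single command '%CC% -c test.c -o test.o ' (the
# trailing space comes from the '{} '.format() joins).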
def read_list(path):
if not path.exists():
return {"*"}, {}
valid_lines, _ = split_comment("#", read_file(path))
include_flag = "[ALL-TEST-CASE]"
exclude_flag = "[EXCLUDE-TEST-CASE]"
case_list = set()
exclude_case_list = set()
is_exclude = False
for line in valid_lines:
if line.find(include_flag) != -1:
is_exclude = False
elif line.find(exclude_flag) != -1:
is_exclude = True
elif is_exclude:
exclude_case_list.add(line)
else:
case_list.add(line)
if not case_list:
case_list = {"*"}
return case_list, exclude_case_list
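# A hedged sketch of the testlist format read_list() parses (the flag names
# come from the code above; the case names are made up):
#
#   [ALL-TEST-CASE]
#   UNIT0001-add
#   UNIT0002-sub
#   [EXCLUDE-TEST-CASE]
#   UNIT0002-sub
#
# Such a file yields ({'UNIT0001-add', 'UNIT0002-sub'}, {'UNIT0002-sub'});
# a missing file falls back to ({'*'}, {}).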
@total_ordering
class Result:
def __init__(self, case, task, cfg, status, commands, commands_result):
self.case = case
self.task = task
self.cfg = cfg
self.time = None
self.status = status
self.commands = commands
self.commands_result = commands_result
def gen_xml(self, root):
from xml.etree import ElementTree
case = ElementTree.SubElement(
root,
"testcase",
name=str(self.case),
classname="{}.{}".format(self.task, self.cfg),
)
if self.status == FAIL:
failure = ElementTree.SubElement(case, "failure")
if isinstance(self.commands_result, str):
failure.text = "Test case preparation failed, "
failure.text += self.commands_result
else:
failure.text = "List of commands:\n"
for cmd in self.commands:
failure.text += "EXEC: {}\n".format(cmd)
failure.text += "----\n"
failure.text += self.command_result_to_text(self.commands_result[-1])
elif self.status == UNRESOLVED:
skipped = ElementTree.SubElement(case, "skipped")
skipped.text = "No valid command statement was found."
def command_result_to_text(self, result):
text = "EXEC: {}\n".format(result.get("cmd"))
text += "Return code: {}\n".format(result.get("return_code"))
text += "Stdout: \n{}\n".format(result.get("stdout"))
text += "Stderr: \n{}\n".format(result.get("stderr"))
return text
def gen_json_result(self):
from collections import OrderedDict
result = OrderedDict()
result["name"] = "{}/{} :: {}".format(self.task, self.case, self.cfg)
result["result"] = self.status
result["commands"] = self.commands
result["output"] = self.commands_result
return result
def __lt__(self, other):
return self.case < other.case
| 2,685 |
12,940 |
{
"$schema": "https://aka.ms/azure-quickstart-templates-metadata-schema#",
"type": "QuickStart",
"itemDisplayName": "Create a Service Bus Topic with Subscription and SQL Filter",
"description": "This template creates a Service Bus Namespace and Topic with a Subscription using a SQL Filter expression to recieve only the messages that match the defined SQL Filter Expression.",
"summary": "This template creates a Service Bus Namespace and Topic with a Subscription that has a SQL Filter defined.",
"githubUsername": "crpietschmann",
"dateUpdated": "2021-05-11"
}
| 154 |
353 |
CSEXPORT IPluginManager& CSCONV Export_IPluginManager_Get()
{
return IPluginManager::Get();
}
CSEXPORT void CSCONV Export_IPluginManager_RefreshPluginsList(IPluginManager* instance)
{
instance->RefreshPluginsList();
}
CSEXPORT csbool CSCONV Export_IPluginManager_LoadModulesForEnabledPlugins(IPluginManager* instance, /*ELoadingPhase::Type*/int32 LoadingPhase)
{
return instance->LoadModulesForEnabledPlugins((ELoadingPhase::Type)LoadingPhase);
}
CSEXPORT void CSCONV Export_IPluginManager_GetLocalizationPathsForEnabledPlugins(IPluginManager* instance, TArray<FString>& OutLocResPaths)
{
instance->GetLocalizationPathsForEnabledPlugins(OutLocResPaths);
}
CSEXPORT csbool CSCONV Export_IPluginManager_AreRequiredPluginsAvailable(IPluginManager* instance)
{
return instance->AreRequiredPluginsAvailable();
}
#if !IS_MONOLITHIC
CSEXPORT csbool CSCONV Export_IPluginManager_CheckModuleCompatibility(IPluginManager* instance, TArray<FString>& OutIncompatibleModules)
{
return instance->CheckModuleCompatibility(OutIncompatibleModules);
}
#endif
CSEXPORT void CSCONV Export_IPluginManager_FindPlugin(IPluginManager* instance, const FString& Name, TSharedPtr<IPlugin>& result)
{
result = instance->FindPlugin(Name);
}
CSEXPORT void CSCONV Export_IPluginManager_GetEnabledPlugins(IPluginManager* instance, TArray<TSharedRef<IPlugin>>& result)
{
result = instance->GetEnabledPlugins();
}
CSEXPORT void CSCONV Export_IPluginManager_GetEnabledPluginsWithContent(IPluginManager* instance, TArray<TSharedRef<IPlugin>>& result)
{
result = instance->GetEnabledPluginsWithContent();
}
CSEXPORT void CSCONV Export_IPluginManager_GetDiscoveredPlugins(IPluginManager* instance, TArray<TSharedRef<IPlugin>>& result)
{
result = instance->GetDiscoveredPlugins();
}
CSEXPORT void CSCONV Export_IPluginManager_AddPluginSearchPath(IPluginManager* instance, const FString& ExtraDiscoveryPath, csbool bRefresh)
{
instance->AddPluginSearchPath(ExtraDiscoveryPath, (bool)bRefresh);
}
CSEXPORT void CSCONV Export_IPluginManager_GetPluginsWithPakFile(IPluginManager* instance, TArray<TSharedRef<IPlugin>>& result)
{
result = instance->GetPluginsWithPakFile();
}
CSEXPORT void CSCONV Export_IPluginManager_MountNewlyCreatedPlugin(IPluginManager* instance, const FString& PluginName)
{
instance->MountNewlyCreatedPlugin(PluginName);
}
CSEXPORT void CSCONV Export_IPluginManager(RegisterFunc registerFunc)
{
REGISTER_FUNC(Export_IPluginManager_Get);
REGISTER_FUNC(Export_IPluginManager_RefreshPluginsList);
REGISTER_FUNC(Export_IPluginManager_LoadModulesForEnabledPlugins);
REGISTER_FUNC(Export_IPluginManager_GetLocalizationPathsForEnabledPlugins);
REGISTER_FUNC(Export_IPluginManager_AreRequiredPluginsAvailable);
#if !IS_MONOLITHIC
REGISTER_FUNC(Export_IPluginManager_CheckModuleCompatibility);
#endif
REGISTER_FUNC(Export_IPluginManager_FindPlugin);
REGISTER_FUNC(Export_IPluginManager_GetEnabledPlugins);
REGISTER_FUNC(Export_IPluginManager_GetEnabledPluginsWithContent);
REGISTER_FUNC(Export_IPluginManager_GetDiscoveredPlugins);
REGISTER_FUNC(Export_IPluginManager_AddPluginSearchPath);
REGISTER_FUNC(Export_IPluginManager_GetPluginsWithPakFile);
REGISTER_FUNC(Export_IPluginManager_MountNewlyCreatedPlugin);
}
| 1,054 |
302 |
<reponame>vadkasevas/BAS
#ifndef WRITEFILE_H
#define WRITEFILE_H
#include <string>
void WriteStringToFile(const std::string& filename,const std::string& data);
#endif // WRITEFILE_H
| 68 |
5,454 |
<filename>source/.eslintrc.json
{
"extends": ["../.eslintrc.json", "plugin:node/recommended"],
"plugins": ["node"],
"parserOptions": {
"ecmaVersion": 2020,
"parser": "babel-eslint",
"sourceType": "module"
},
"rules": {
"no-control-regex": "off", // Control characters are used to split git messages
"no-process-exit": "off"
}
}
| 142 |
617 |
/**
* Copyright 2011-2013 FoundationDB, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* The original from which this derives bore the following: */
/*
Derby - Class org.apache.derby.impl.sql.compile.BinaryComparisonOperatorNode
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.bj58.sql.parser;
import com.bj58.sql.StandardException;
import com.bj58.sql.types.ValueClassName;
/**
* This node is the superclass for all binary comparison operators, such as =,
* <>, <, etc.
*
*/
public abstract class BinaryComparisonOperatorNode extends BinaryOperatorNode
{
private boolean forQueryRewrite;
/**
* Initializer for a BinaryComparisonOperatorNode
*
* @param leftOperand The left operand of the comparison
* @param rightOperand The right operand of the comparison
* @param operator The name of the operator
* @param methodName The name of the method to call in the generated class
*/
public void init(Object leftOperand,
Object rightOperand,
Object operator,
Object methodName) {
super.init(leftOperand, rightOperand, operator, methodName,
ValueClassName.DataValueDescriptor, ValueClassName.DataValueDescriptor);
}
/**
* Fill this node with a deep copy of the given node.
*/
public void copyFrom(QueryTreeNode node) throws StandardException {
super.copyFrom(node);
BinaryComparisonOperatorNode other = (BinaryComparisonOperatorNode)node;
this.forQueryRewrite = other.forQueryRewrite;
}
/**
* This node was generated as part of a query rewrite. Bypass the
* normal comparability checks.
* @param val true if this was for a query rewrite
*/
public void setForQueryRewrite(boolean val) {
forQueryRewrite=val;
}
/**
* Was this node generated in a query rewrite?
*
* @return true if it was generated in a query rewrite.
*/
public boolean isForQueryRewrite() {
return forQueryRewrite;
}
}
| 1,065 |
1,256 |
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef nsUTF8Prober_h__
#define nsUTF8Prober_h__
#include "nsCharSetProber.h"
#include "nsCodingStateMachine.h"
class nsUTF8Prober: public nsCharSetProber {
public:
  nsUTF8Prober() {
    mNumOfMBChar = 0;
    mCodingSM = new nsCodingStateMachine(&UTF8SMModel);
    Reset();
  }
virtual ~nsUTF8Prober(){delete mCodingSM;}
nsProbingState HandleData(const char* aBuf, uint32_t aLen);
const char* GetCharSetName() {return "UTF-8";}
nsProbingState GetState(void) {return mState;}
void Reset(void);
float GetConfidence(void);
protected:
nsCodingStateMachine* mCodingSM;
nsProbingState mState;
uint32_t mNumOfMBChar;
};
#endif /* nsUTF8Prober_h__ */
| 386 |
302 |
from builtins import map
import re
def split_name(name):
dot = name.rfind('.')
if dot >= 0:
return name[:dot], name[dot + 1:]
else:
return '', name
def get_package(name):
return split_name(name)[0]
_escape_re = re.compile('\W')
def escape(name):
return _escape_re.sub('_', name)
_id_re = re.compile(r'[^a-zA-Z0-9_]')
def escape_id(name):
return _id_re.sub('_', name)
def escape_package(name):
package = name.split('.')
return ".".join(map(escape_id, package))
def mangle_package(name):
package = name.split('.')
package = list(map(escape_id, package))
if package[0] == '_globals':
package.pop(0)
package = [''] + package
return "$".join(package)
from compiler.js.component import component_generator
from compiler.js.generator import generator
| 302 |
397 |
<gh_stars>100-1000
/**
* Copyright (c) <NAME> <Centrum Wiskunde & Informatica> and Contributors.
* All rights reserved.
*
* This file is licensed under the BSD 2-Clause License, which accompanies this project
* and is available under https://opensource.org/licenses/BSD-2-Clause.
*/
package io.usethesource.capsule.util.iterator;
import java.util.NoSuchElementException;
public class EmptySupplierIterator<K, V> implements SupplierIterator<K, V> {
private static final SupplierIterator EMPTY_ITERATOR = new EmptySupplierIterator();
public static <K, V> SupplierIterator<K, V> emptyIterator() {
return EMPTY_ITERATOR;
}
@Override
public boolean hasNext() {
return false;
}
@Override
public K next() {
throw new NoSuchElementException();
}
@Override
public V get() {
throw new NoSuchElementException();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
| 297 |
3,702 |
<reponame>mondeique/metatron-discovery
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package app.metatron.discovery.common;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collector;
import java.util.stream.Collectors;
/**
* Created by kyungtaak on 2016. 10. 30..
*/
public class CustomCollectors {
public static <T, K, U> Collector<T, ?, Map<K,U>> toLinkedMap(
Function<? super T, ? extends K> keyMapper,
Function<? super T, ? extends U> valueMapper) {
return Collectors.toMap(keyMapper, valueMapper,
(u, v) -> {
throw new IllegalStateException(String.format("Duplicate key %s", u));
},
LinkedHashMap::new);
}
}
| 412 |
377 |
/*******************************************************************************
* * Copyright 2013 Impetus Infotech.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
******************************************************************************/
package com.impetus.client.cassandra.thrift.cql;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import javax.persistence.Query;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.impetus.client.cassandra.common.CassandraConstants;
import com.impetus.kundera.PersistenceProperties;
import com.impetus.kundera.client.cassandra.persistence.CassandraCli;
/**
* @author impadmin
*
*/
public class LoggingConfigurationTest
{
private EntityManagerFactory emf;
private EntityManager em;
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception
{
CassandraCli.cassandraSetUp();
Map propertyMap = new HashMap();
propertyMap.put(CassandraConstants.CQL_VERSION, CassandraConstants.CQL_VERSION_3_0);
propertyMap.put(PersistenceProperties.KUNDERA_DDL_AUTO_PREPARE, "create");
emf = Persistence.createEntityManagerFactory("twissandraTest", propertyMap);
em = emf.createEntityManager();
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception
{
em.close();
emf.close();
}
@Test
public void test()
{
LoggingConfiguration lc = new LoggingConfiguration();
lc.setId("lc1");
lc.setLable("one");
lc.setLongName("first lc");
LoggingConfiguration lc1 = new LoggingConfiguration();
lc1.setId("lc2");
lc1.setLable("two");
lc1.setLongName("second lc");
em.persist(lc);
em.persist(lc1);
em.clear();
em.createNativeQuery(
"insert into \"LoggingConfiguration\" (id,label,logname,nnow) values ('1','one','first lc','now')",
LoggingConfiguration.class).executeUpdate();
em.createNativeQuery(
"insert into \"LoggingConfiguration\" (id,label,logname,nnow) values ('2','two','second log','now')",
LoggingConfiguration.class).executeUpdate();
Query query = em.createQuery("SELECT lc FROM LoggingConfiguration lc WHERE lc.logname = :logname",
LoggingConfiguration.class);
query.setParameter("logname", "first lc");
List<LoggingConfiguration> matchingConfigurations = query.getResultList();
int count = 0;
Assert.assertNotNull(matchingConfigurations);
for (LoggingConfiguration configuration : matchingConfigurations)
{
if (configuration.getId().equals("1"))
{
Assert.assertNotNull(configuration.getNnow());
Assert.assertEquals("one", configuration.getLable());
Assert.assertEquals("first lc", configuration.getLongName());
count++;
}
else
{
Assert.assertNull(configuration.getNnow());
Assert.assertEquals("one", configuration.getLable());
Assert.assertEquals("lc1", configuration.getId());
Assert.assertEquals("first lc", configuration.getLongName());
count++;
}
}
Assert.assertEquals(2, count);
matchingConfigurations = em.createNativeQuery("SELECT * FROM \"LoggingConfiguration\"",
LoggingConfiguration.class).getResultList();
Assert.assertNotNull(matchingConfigurations);
for (LoggingConfiguration configuration : matchingConfigurations)
{
if (configuration.getId().equals("1"))
{
Assert.assertNotNull(configuration.getNnow());
Assert.assertEquals("one", configuration.getLable());
Assert.assertEquals("first lc", configuration.getLongName());
count++;
}
else if (configuration.getId().equals("2"))
{
Assert.assertNotNull(configuration.getNnow());
Assert.assertEquals("two", configuration.getLable());
Assert.assertEquals("second log", configuration.getLongName());
count++;
}
else if (configuration.getId().equals("lc1"))
{
Assert.assertNull(configuration.getNnow());
Assert.assertEquals("one", configuration.getLable());
Assert.assertEquals("lc1", configuration.getId());
Assert.assertEquals("first lc", configuration.getLongName());
count++;
}
else
{
Assert.assertNull(configuration.getNnow());
Assert.assertEquals("two", configuration.getLable());
Assert.assertEquals("lc2", configuration.getId());
Assert.assertEquals("second lc", configuration.getLongName());
count++;
}
}
Assert.assertEquals(6, count);
}
}
| 2,472 |
4,457 |
{
"main": "dist/trpc-server-adapters-express.cjs.js",
"module": "dist/trpc-server-adapters-express.esm.js"
}
| 49 |
1,178 |
<reponame>leozz37/makani
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for parsing and analyzing AVL files."""
import collections
import importlib
import logging
import sys
import warnings
import numpy
class AvlReader(object):
"""Parses and analyzes AVL files.
Attributes:
filename: Filename of the AVL file to be processed.
avl: Ordered dict that represents the parsed structure of the AVL file.
properties: Dict that represents properties of the aircraft,
surfaces, and surface sections. Its structure mimics that of
the AVL file itself.
"""
def __init__(self, filename):
"""Initializes the class by parsing the AVL file."""
self.filename = filename
with open(filename, 'r') as f:
self.avl = self.Parse(f.read())
self.properties = self.Analyze(self.avl)
def Analyze(self, avl):
"""Analyze properties of the AVL geometry.
Args:
avl: Ordered dict representing a parsed AVL file.
Returns:
Dict that represents properties of the aircraft, surfaces, and
surface sections. Its structure mimics that of the AVL file
itself.
"""
properties = dict()
properties['surfaces'] = []
for avl_surface in avl['surfaces']:
transform = self._GetSurfaceTransformation(avl_surface)
sections = []
for avl_section in avl_surface['sections']:
sections.append(self._CalcSectionProperties(avl_section, transform))
panels = []
for section1, section2 in zip(sections[0:-1], sections[1:]):
panels.append(self._CalcPanelProperties(section1, section2))
surface = self._CalcSurfaceProperties(sections, panels)
surface['name'] = avl_surface['name']
surface['sections'] = sections
surface['panels'] = panels
properties['surfaces'].append(surface)
return properties
def _CalcSectionProperties(self, avl_section, transform=lambda x: x):
"""Calculates the properties of sections, i.e. stations along the span."""
# Apply the scaling and offset parameters, if any, from the AVL
# file.
chord = avl_section['Chord'] * transform([0.0, 1.0, 0.0])[1]
leading_edge_avl = transform([avl_section['Xle'],
avl_section['Yle'],
avl_section['Zle']])
return {
'chord': chord,
'incidence': numpy.pi / 180.0 * avl_section['Ainc'],
'leading_edge_b': numpy.array([-leading_edge_avl[0],
leading_edge_avl[1],
-leading_edge_avl[2]]),
'quarter_chord_b': numpy.array([-leading_edge_avl[0] - chord / 4.0,
leading_edge_avl[1],
-leading_edge_avl[2]])
}
def _CalcPanelProperties(self, section1, section2):
"""Calculates properties of the areas between sections."""
span = numpy.sqrt(
(section2['leading_edge_b'][1] - section1['leading_edge_b'][1])**2.0 +
(section2['leading_edge_b'][2] - section1['leading_edge_b'][2])**2.0)
area = (section1['chord'] + section2['chord']) * span / 2.0
taper_ratio = section2['chord'] / section1['chord']
c = ((2.0 * section1['chord'] + section2['chord']) /
(section1['chord'] + section2['chord']) / 3.0)
mean_incidence = (c * section1['incidence'] +
(1.0 - c) * section2['incidence'])
aerodynamic_center_b = (c * section1['quarter_chord_b'] +
(1.0 - c) * section2['quarter_chord_b'])
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'mean_aerodynamic_chord': (2.0 / 3.0 * section1['chord'] *
(1.0 + taper_ratio + taper_ratio**2.0) /
(1.0 + taper_ratio)),
'mean_incidence': mean_incidence,
'taper_ratio': taper_ratio,
'span': span,
'standard_mean_chord': area / span
}
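  # Hedged sanity check of the mean-aerodynamic-chord expression above: for
  # an untapered panel (taper_ratio == 1.0) it collapses to the section
  # chord, e.g. 2.0 / 3.0 * 1.2 * (1 + 1 + 1) / (1 + 1) == 1.2.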
def _CalcSurfaceProperties(self, sections, panels):
"""Calculates properties of full surfaces."""
area = 0.0
aerodynamic_center_b = numpy.array([0.0, 0.0, 0.0])
mean_aerodynamic_chord = 0.0
mean_incidence = 0.0
for panel in panels:
area += panel['area']
aerodynamic_center_b += panel['area'] * panel['aerodynamic_center_b']
mean_aerodynamic_chord += panel['area'] * panel['mean_aerodynamic_chord']
mean_incidence += panel['area'] * panel['mean_incidence']
aerodynamic_center_b /= area
mean_aerodynamic_chord /= area
mean_incidence /= area
# Set the span vector from the leading edge of the first section
# to the leading edge of the last section. Ignore the x
# component. Choose the direction such that the span is along the
# surface coordinate y axis.
span_b = sections[0]['leading_edge_b'] - sections[-1]['leading_edge_b']
span_b[0] = 0.0
if abs(span_b[1]) > abs(span_b[2]):
if span_b[1] < 0.0:
span_b *= -1.0
else:
if span_b[2] < 0.0:
span_b *= -1.0
span = numpy.linalg.norm(span_b)
# Surface coordinates are defined such that they are aligned with
# body coordinates for horizontal surfaces and are rotated about
# body x such that surface z is aligned with the *negative* body y
# for vertical surfaces. The negative is required to match the
# convention in AVL.
surface_x_b = [1.0, 0.0, 0.0]
surface_y_b = span_b / span
surface_z_b = numpy.cross(surface_x_b, surface_y_b)
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'aspect_ratio': span * span / area,
'dcm_b2surface': numpy.array([surface_x_b, surface_y_b, surface_z_b]),
'mean_aerodynamic_chord': mean_aerodynamic_chord,
'mean_incidence': mean_incidence,
'span': span,
'standard_mean_chord': area / span
}
def _GetSurfaceTransformation(self, surface):
"""Returns surface scaling and offset transformation function."""
if all([k in surface for k in ['Xscale', 'Yscale', 'Zscale']]):
scale = [surface['Xscale'], surface['Yscale'], surface['Zscale']]
else:
scale = [1.0, 1.0, 1.0]
if all([k in surface for k in ['dX', 'dY', 'dZ']]):
offset = [surface['dX'], surface['dY'], surface['dZ']]
else:
offset = [0.0, 0.0, 0.0]
return lambda coord: [x * m + b for x, m, b in zip(coord, scale, offset)]
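  # For example (made-up numbers): with Xscale=2.0 and dX=0.5 the returned
  # function maps [1.0, 0.0, 0.0] to [2.5, 0.0, 0.0]; coordinates are
  # scaled first, then offset, matching the lambda above.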
def PlotGeometry(self):
"""Plots 3-D line drawing of surfaces."""
    # b/120081442: These imports are deferred out of module initialization
    # because loading matplotlib at import time was causing a bazel
    # pip-installed package issue on batch sim workers.
pyplot = importlib.import_module('matplotlib.pyplot')
mplot_3d = importlib.import_module('mpl_toolkits.mplot3d')
# Importing Axes3D has the side effect of enabling 3D projections, but
# it is not directly used, so we remove it here.
del mplot_3d.Axes3D
axes = pyplot.figure().add_subplot(1, 1, 1, projection='3d')
axes.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_yaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_zaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_xaxis.gridlines.set_color(('blue'))
axes.w_yaxis.gridlines.set_color(('blue'))
axes.w_zaxis.gridlines.set_color(('blue'))
# The _axinfo update requires additional specification of linestyle and
# linewidth on our linux distributions in order to function properly.
axes.w_xaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_yaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_zaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.set_aspect('equal')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
half_span = self.avl['Bref'] / 2.0
axes.set_xlim((-half_span * 0.5, half_span * 1.5))
axes.set_ylim((-half_span, half_span))
axes.set_zlim((-half_span, half_span))
color_order = ['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue',
'violet', 'gray']
legend_plots = []
legend_labels = []
for i, surface in enumerate(self.avl['surfaces']):
transform = self._GetSurfaceTransformation(surface)
leading_edge_xs = []
leading_edge_ys = []
leading_edge_zs = []
trailing_edge_xs = []
trailing_edge_ys = []
trailing_edge_zs = []
for section in surface['sections']:
coord = transform([section['Xle'], section['Yle'], section['Zle']])
leading_edge_xs.append(coord[0])
leading_edge_ys.append(coord[1])
leading_edge_zs.append(coord[2])
coord = transform([section['Xle'] + section['Chord'],
section['Yle'],
section['Zle']])
trailing_edge_xs.append(coord[0])
trailing_edge_ys.append(coord[1])
trailing_edge_zs.append(coord[2])
xs = leading_edge_xs + list(reversed(trailing_edge_xs))
ys = leading_edge_ys + list(reversed(trailing_edge_ys))
zs = leading_edge_zs + list(reversed(trailing_edge_zs))
surface_line, = axes.plot(xs + [xs[0]], ys + [ys[0]], zs + [zs[0]],
color=color_order[i])
legend_plots.append(surface_line)
legend_labels.append(surface['name'])
# Plot symmetric surfaces.
if self.avl['iYsym']:
axes.plot(xs + [xs[0]], -numpy.array(ys + [ys[0]]), zs + [zs[0]], '--',
color=color_order[i])
elif 'Ydupl' in surface:
y_scale = surface['Yscale'] if 'Yscale' in surface else 1.0
axes.plot(xs + [xs[0]],
-numpy.array(ys + [ys[0]]) + 2.0 * surface['Ydupl'] * y_scale,
zs + [zs[0]], '--',
color=color_order[i])
axes.legend(legend_plots, legend_labels, loc='lower left',
prop={'size': 10})
pyplot.show()
def Parse(self, avl_file):
"""Parses AVL file.
Args:
avl_file: String of the read AVL file.
Returns:
Dictionary representing the information stored in the AVL file.
"""
    # Make an iterator over the lines in the file, automatically removing
    # comments and blank lines. Terminate the file with an END keyword
    # (this isn't mentioned in the AVL documentation, but at least one of
    # the example files uses this convention, and it makes the parsing
    # more natural).
lines = iter([l.split('!', 1)[0].strip()
for l in avl_file.splitlines()
if l.strip() and l[0] not in '#!'] + ['END'])
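    # For example (illustrative), the raw lines
    #   ['! header comment', 'Cruise', '', '0.3   ! Mach']
    # reduce to iter(['Cruise', '0.3', 'END']).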
# Parse the AVL header for information on the case name, reference
# areas, etc.
avl, line = self._ParseHeader(lines)
# Loop through the rest of the file, which should only be composed
# of surfaces and bodies.
while True:
tokens = line.split()
keyword = tokens[0][0:4]
if keyword == 'SURFACE'[0:4]:
surface, line = self._ParseSurface(lines)
avl.setdefault('surfaces', []).append(surface)
elif keyword == 'BODY':
body, line = self._ParseBody(lines)
avl.setdefault('body', []).append(body)
else:
if keyword != 'END':
logging.error('Encountered unexpected keyword: %s', tokens[0])
break
return avl
def _ParseHeader(self, lines):
"""Parses header information."""
header = collections.OrderedDict()
header['case'] = lines.next()
tokens = lines.next().split()
header['Mach'] = float(tokens[0])
tokens = lines.next().split()
header['iYsym'] = int(tokens[0])
header['iZsym'] = int(tokens[1])
header['Zsym'] = float(tokens[2])
tokens = lines.next().split()
header['Sref'] = float(tokens[0])
header['Cref'] = float(tokens[1])
header['Bref'] = float(tokens[2])
tokens = lines.next().split()
header['Xref'] = float(tokens[0])
header['Yref'] = float(tokens[1])
header['Zref'] = float(tokens[2])
line = lines.next()
try:
# CDp is optional.
header['CDp'] = float(line.split()[0])
line = lines.next()
except (IndexError, ValueError):
pass
return header, line
def _ParseAirfoil(self, lines):
"""Parses airfoil camber line definition."""
    # Collect [x/c, camber] coordinate pairs.
    airfoil = []
while True:
line = lines.next()
tokens = line.split()
try:
airfoil.append([float(tokens[0]), float(tokens[1])])
except (IndexError, ValueError):
break
return airfoil, line
def _ParseFilename(self, lines):
"""Parses filename of airfoil definition."""
line = lines.next()
# The file name may either be quoted or not.
if line[0] == '"':
filename = line.split()[0][1:-1]
else:
filename = line
return filename
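  # Example (illustrative): a line of '"n0012.dat"' yields 'n0012.dat', while
  # an unquoted line such as 'n0012.dat' is returned verbatim; trailing
  # comments have already been stripped by Parse().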
def _ParseSection(self, lines):
"""Parses information describing cross-section of surface along span."""
section = collections.OrderedDict()
tokens = lines.next().split()
section['Xle'] = float(tokens[0])
section['Yle'] = float(tokens[1])
section['Zle'] = float(tokens[2])
section['Chord'] = float(tokens[3])
section['Ainc'] = float(tokens[4])
try:
# Nspan and Sspace are optional.
section['Nspan'] = int(tokens[5])
section['Sspace'] = float(tokens[6])
except (IndexError, ValueError):
pass
next_line = None
first_keyword = True
while True:
line = next_line if next_line else lines.next()
next_line = None
tokens = line.split()
keyword = tokens[0][0:4]
      # Issue warnings if there is a suspicious ordering of the camber line
      # keywords. According to the AVL documentation, the camber line
      # keywords must immediately follow the data line of the SECTION
      # keyword, and later camber line keywords overwrite earlier ones.
if keyword in ['NACA', 'AIRFOIL'[0:4], 'AFILE'[0:4]]:
if not first_keyword:
logging.warning('%s did not immediately follow the data line of the '
'SECTION keyword.', tokens[0])
if any([k in section for k in ['naca', 'airfoil', 'afile']]):
logging.warning('Another camber line definition exists. This will '
'overwrite it.')
if keyword == 'NACA':
# Parse NACA camber line.
section['naca'] = int(lines.next().split()[0])
        assert 0 <= section['naca'] <= 9999
elif keyword == 'AIRFOIL'[0:4]:
# Parse airfoil coordinates.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['airfoil'], next_line = self._ParseAirfoil(lines)
elif keyword == 'AFILE'[0:4]:
# Parse airfoil filename.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['afile'] = self._ParseFilename(lines)
elif keyword == 'DESIGN'[0:4]:
# Parse design variable.
tokens = lines.next().split()
design = collections.OrderedDict()
design['DName'] = tokens[0]
try:
design['Wdes'] = float(tokens[1])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for Wdes.
logging.warning('Wdes value is missing for %s.', design['DName'])
section.setdefault('designs', []).append(design)
elif keyword == 'CONTROL'[0:4]:
# Parse control variable.
tokens = lines.next().split()
control = collections.OrderedDict()
control['name'] = tokens[0]
control['gain'] = float(tokens[1])
control['Xhinge'] = float(tokens[2])
control['XYZhvec'] = [float(tokens[3]),
float(tokens[4]),
float(tokens[5])]
try:
control['SgnDup'] = float(tokens[6])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for SgnDup.
logging.warning('SgnDup value is missing for %s.', control['name'])
section.setdefault('controls', []).append(control)
elif keyword == 'CLAF':
# Parse dCL/da scaling factor.
section['CLaf'] = float(lines.next().split()[0])
elif keyword == 'CDCL':
# Parse CD(CL) function parameters.
tokens = lines.next().split()
section['CL1'] = float(tokens[0])
section['CD1'] = float(tokens[1])
section['CL2'] = float(tokens[2])
section['CD2'] = float(tokens[3])
section['CL3'] = float(tokens[4])
section['CD3'] = float(tokens[5])
else:
break
first_keyword = False
return section, line
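  # Illustrative SECTION block (hypothetical values) handled by this method,
  # starting from its data line (the SECTION keyword itself is consumed by
  # _ParseSurface):
  #   0.0  0.0  0.0  1.0  2.0   ! Xle Yle Zle Chord Ainc
  #   NACA
  #   2412
  # which yields Xle=0.0, Yle=0.0, Zle=0.0, Chord=1.0, Ainc=2.0, naca=2412.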
def _ParseSurface(self, lines):
"""Parses definition of a lifting surface."""
surface = collections.OrderedDict()
surface['name'] = lines.next()
tokens = lines.next().split()
surface['Nchord'] = int(tokens[0])
surface['Cspace'] = float(tokens[1])
try:
# Nspan and Sspace are optional.
surface['Nspan'] = int(tokens[2])
surface['Sspace'] = float(tokens[3])
except (IndexError, ValueError):
pass
next_line = None
while True:
line = next_line if next_line else lines.next()
next_line = None
keyword = line.split()[0][0:4]
if keyword in ['COMPONENT'[0:4], 'INDEX'[0:4]]:
# Parse component grouping.
surface['Lcomp'] = int(lines.next().split()[0])
elif keyword == 'YDUPLICATE'[0:4]:
# Parse duplicated surface y-plane.
surface['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse surface scaling.
tokens = lines.next().split()
surface['Xscale'] = float(tokens[0])
surface['Yscale'] = float(tokens[1])
surface['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse surface translation.
tokens = lines.next().split()
surface['dX'] = float(tokens[0])
surface['dY'] = float(tokens[1])
surface['dZ'] = float(tokens[2])
elif keyword == 'ANGLE'[0:4]:
# Parse surface incidence angle.
surface['dAinc'] = float(lines.next().split()[0])
elif keyword == 'NOWAKE'[0:4]:
surface['nowake'] = True
elif keyword == 'NOALBE'[0:4]:
surface['noalbe'] = True
elif keyword == 'NOLOAD'[0:4]:
surface['noload'] = True
elif keyword == 'SECTION'[0:4]:
# Parse airfoil section camber line along span.
section, next_line = self._ParseSection(lines)
surface.setdefault('sections', []).append(section)
else:
break
return surface, line
def _ParseBody(self, lines):
"""Parses description of non-lifting bodies shape."""
body = collections.OrderedDict()
body['name'] = lines.next()
tokens = lines.next().split()
body['Nbody'] = int(tokens[0])
body['Bspace'] = float(tokens[1])
while True:
line = lines.next()
keyword = line.split()[0][0:4]
if keyword == 'YDUPLICATE'[0:4]:
body['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse body scaling.
tokens = lines.next().split()
body['Xscale'] = float(tokens[0])
body['Yscale'] = float(tokens[1])
body['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse body translation.
tokens = lines.next().split()
body['dX'] = float(tokens[0])
body['dY'] = float(tokens[1])
body['dZ'] = float(tokens[2])
elif keyword == 'BFILE'[0:4]:
# Parse body shape filename.
body['bfile'] = self._ParseFilename(lines)
else:
break
return body, line
def main(argv):
# Internal matplotlib functions currently trigger the following
# warnings.
warnings.filterwarnings('ignore', 'elementwise comparison failed; returning '
'scalar instead, but in the future will perform '
'elementwise comparison')
warnings.filterwarnings('ignore', 'comparison to `None` will result in an '
'elementwise object comparison in the future.')
logging.basicConfig(stream=sys.stdout,
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO)
avl = AvlReader(argv[1])
avl.PlotGeometry()
logging.shutdown()
if __name__ == '__main__':
main(sys.argv)
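# Example invocation (illustrative; the module filename is an assumption):
#   python avl_reader.py wing.avl
# This parses the given AVL geometry file and opens the 3-D surface plot.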
| 9,536 |
2,461 |
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.kubernetes.discovery;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.cloud.client.ConditionalOnDiscoveryEnabled;
import org.springframework.cloud.client.ConditionalOnDiscoveryHealthIndicatorEnabled;
import org.springframework.cloud.client.ConditionalOnReactiveDiscoveryEnabled;
import org.springframework.cloud.client.discovery.DiscoveryClient;
import org.springframework.cloud.client.discovery.ReactiveDiscoveryClient;
import org.springframework.cloud.client.discovery.event.InstanceRegisteredEvent;
import org.springframework.cloud.client.discovery.health.DiscoveryClientHealthIndicatorProperties;
import org.springframework.cloud.client.discovery.health.reactive.ReactiveDiscoveryClientHealthIndicator;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.reactive.function.client.WebClient;
/**
* @author <NAME>
*/
@Configuration(proxyBeanMethods = false)
@ConditionalOnDiscoveryEnabled
@ConditionalOnProperty(value = { "spring.cloud.kubernetes.enabled", "spring.cloud.kubernetes.discovery.enabled" },
matchIfMissing = true)
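// Illustrative: discovery can be disabled per deployment by setting, e.g.,
//   spring.cloud.kubernetes.discovery.enabled=false
// in application.properties; matchIfMissing = true keeps it enabled when the
// property is absent.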
@EnableConfigurationProperties({ DiscoveryClientHealthIndicatorProperties.class,
KubernetesDiscoveryClientProperties.class })
public class KubernetesDiscoveryClientAutoConfiguration {
@Configuration(proxyBeanMethods = false)
public static class Servlet {
@Bean
@ConditionalOnMissingClass("org.springframework.web.reactive.function.client.WebClient")
public RestTemplate restTemplate() {
return new RestTemplateBuilder().build();
}
@Bean
@ConditionalOnMissingClass("org.springframework.web.reactive.function.client.WebClient")
public DiscoveryClient kubernetesDiscoveryClient(RestTemplate restTemplate,
KubernetesDiscoveryClientProperties properties) {
return new KubernetesDiscoveryClient(restTemplate, properties);
}
@Bean
@ConditionalOnClass({ HealthIndicator.class })
@ConditionalOnDiscoveryHealthIndicatorEnabled
public InitializingBean indicatorInitializer(ApplicationEventPublisher applicationEventPublisher,
ApplicationContext applicationContext) {
return () -> applicationEventPublisher
.publishEvent(new InstanceRegisteredEvent<>(applicationContext.getId(), null));
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnReactiveDiscoveryEnabled
public static class Reactive {
@Bean
@ConditionalOnClass(name = { "org.springframework.web.reactive.function.client.WebClient" })
@ConditionalOnMissingBean(WebClient.Builder.class)
public WebClient.Builder webClientBuilder() {
return WebClient.builder();
}
@Bean
@ConditionalOnClass(name = { "org.springframework.web.reactive.function.client.WebClient" })
public ReactiveDiscoveryClient kubernetesReactiveDiscoveryClient(WebClient.Builder webClientBuilder,
KubernetesDiscoveryClientProperties properties) {
return new KubernetesReactiveDiscoveryClient(webClientBuilder, properties);
}
@Bean
@ConditionalOnClass(name = "org.springframework.boot.actuate.health.ReactiveHealthIndicator")
@ConditionalOnDiscoveryHealthIndicatorEnabled
public ReactiveDiscoveryClientHealthIndicator kubernetesReactiveDiscoveryClientHealthIndicator(
KubernetesReactiveDiscoveryClient client, DiscoveryClientHealthIndicatorProperties properties,
ApplicationContext applicationContext) {
ReactiveDiscoveryClientHealthIndicator healthIndicator = new ReactiveDiscoveryClientHealthIndicator(client,
properties);
			InstanceRegisteredEvent<?> event = new InstanceRegisteredEvent<>(applicationContext.getId(), null);
healthIndicator.onApplicationEvent(event);
return healthIndicator;
}
}
}
| 1,412 |
777 |
<filename>tools/cygprofile/cygprofile_utils_unittest.py
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import cygprofile_utils
class TestCygprofileUtils(unittest.TestCase):
def testInvertMapping(self):
inputMap = {'1': ['2', '3'],
'4': ['2', '5']}
self.assertEqual(cygprofile_utils.InvertMapping(inputMap),
{'2': ['1', '4'],
'3': ['1'],
'5': ['4']})
if __name__ == '__main__':
unittest.main()
| 251 |
1,025 |
<reponame>jeongjoonyoo/CodeXL
//==================================================================================
// Copyright (c) 2016 , Advanced Micro Devices, Inc. All rights reserved.
//
/// \author AMD Developer Tools Team
/// \file gwApplicationCommands.h
///
//==================================================================================
//------------------------------ gwApplicationCommands.h ---------------------------
#ifndef __GWGAPPLICATIONCOMMANDS_H
#define __GWGAPPLICATIONCOMMANDS_H
// AMDTGpuDebuggingComponents:
#include <AMDTGpuDebuggingComponents/Include/gdApplicationCommands.h>
// Local:
#include <AMDTGpuDebugging/Include/gwgDEBuggerAppWrapperDLLBuild.h>
// ----------------------------------------------------------------------------------
// Class Name: gwApplicationCommands : public gdApplicationCommands
// General Description: This class handles application commands for the CodeXL
//                      standalone application.
//                      The class contains only commands that have different
//                      implementations in the standalone and VS package builds
//                      and that are used from somewhere other than the
//                      application menu.
// Author: <NAME>
// Creation Date: 20/7/2011
// ----------------------------------------------------------------------------------
class GW_API gwApplicationCommands : public gdApplicationCommands
{
friend class gwgDEBuggerAppWrapper;
public:
virtual ~gwApplicationCommands();
// The following functions should be implemented for each inherited class of this class:
// Breakpoints:
virtual bool openBreakpointsDialog();
virtual bool isBreakpointsDialogCommandEnabled();
virtual void displayOpenCLProgramSourceCode(afApplicationTreeItemData* pProgramItemData);
virtual void displayOpenGLSLShaderCode(afApplicationTreeItemData* pShaderItemData);
virtual bool displayImageBufferObject(afApplicationTreeItemData* pItemData, const gtString& itemText);
// Accessors for the single instance view objects:
virtual gdPropertiesEventObserver* propertiesEventObserver();
virtual gdMemoryView* memoryView();
virtual gdStatisticsPanel* statisticsPanel();
virtual gdAPICallsHistoryPanel* callsHistoryPanel();
virtual gdCallStackView* callStackView();
virtual gdDebuggedProcessEventsView* debuggedProcessEventsView();
virtual gdStateVariablesView* stateVariablesView();
virtual gdCommandQueuesView* commandQueuesView();
virtual gdBreakpointsView* breakpointsView();
virtual gdWatchView* watchView();
virtual gdLocalsView* localsView();
// Update UI:
virtual void updateToolbarCommands();
virtual void updateToolbars();
// Raise view commands:
virtual bool raiseStatisticsView();
virtual bool raiseCommandQueuesView();
virtual bool raiseMemoryView();
protected:
// Do not allow the use of my constructor:
gwApplicationCommands();
};
#endif // __GWGAPPLICATIONCOMMANDS_H
| 902 |
348 |
<filename>docs/data/t2/076/76501.json
{"nom":"Pierrefiques","dpt":"Seine-Maritime","inscrits":108,"abs":22,"votants":86,"blancs":13,"nuls":4,"exp":69,"res":[{"panneau":"1","voix":44},{"panneau":"2","voix":25}]}
| 89 |
652 |
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "token.h"
#include "graminit.h"
#include "code.h"
#include "symtable.h"
#define UNDEFINED_FUTURE_FEATURE "future feature %.100s is not defined"
#define ERR_LATE_FUTURE \
"from __future__ imports must occur at the beginning of the file"
static int
future_check_features(PyFutureFeatures *ff, stmt_ty s, const char *filename)
{
int i;
asdl_seq *names;
assert(s->kind == ImportFrom_kind);
names = s->v.ImportFrom.names;
for (i = 0; i < asdl_seq_LEN(names); i++) {
alias_ty name = (alias_ty)asdl_seq_GET(names, i);
const char *feature = _PyUnicode_AsString(name->name);
if (!feature)
return 0;
if (strcmp(feature, FUTURE_NESTED_SCOPES) == 0) {
continue;
} else if (strcmp(feature, FUTURE_GENERATORS) == 0) {
continue;
} else if (strcmp(feature, FUTURE_DIVISION) == 0) {
continue;
} else if (strcmp(feature, FUTURE_ABSOLUTE_IMPORT) == 0) {
continue;
} else if (strcmp(feature, FUTURE_WITH_STATEMENT) == 0) {
continue;
} else if (strcmp(feature, FUTURE_PRINT_FUNCTION) == 0) {
continue;
} else if (strcmp(feature, FUTURE_UNICODE_LITERALS) == 0) {
continue;
} else if (strcmp(feature, FUTURE_BARRY_AS_BDFL) == 0) {
ff->ff_features |= CO_FUTURE_BARRY_AS_BDFL;
} else if (strcmp(feature, "braces") == 0) {
PyErr_SetString(PyExc_SyntaxError,
"not a chance");
PyErr_SyntaxLocationEx(filename, s->lineno, s->col_offset);
return 0;
} else {
PyErr_Format(PyExc_SyntaxError,
UNDEFINED_FUTURE_FEATURE, feature);
PyErr_SyntaxLocationEx(filename, s->lineno, s->col_offset);
return 0;
}
}
return 1;
}
static int
future_parse(PyFutureFeatures *ff, mod_ty mod, const char *filename)
{
int i, found_docstring = 0, done = 0, prev_line = 0;
if (!(mod->kind == Module_kind || mod->kind == Interactive_kind))
return 1;
    /* A subsequent pass will detect future imports that don't
       appear at the beginning of the file. There's one case,
       however, that is easier to handle here: a series of imports
       joined by semicolons, where the first import is a future
       statement but a later import in future form is preceded by
       a regular import.
    */
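    /* Illustrative instance of that case (hypothetical source line):
     *     from __future__ import division; import os; from __future__ import generators
     * All three statements share one line number, so the early return on
     * `done && s->lineno > prev_line` below never fires, and the trailing
     * future import is rejected with ERR_LATE_FUTURE. */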
for (i = 0; i < asdl_seq_LEN(mod->v.Module.body); i++) {
stmt_ty s = (stmt_ty)asdl_seq_GET(mod->v.Module.body, i);
if (done && s->lineno > prev_line)
return 1;
prev_line = s->lineno;
/* The tests below will return from this function unless it is
still possible to find a future statement. The only things
that can precede a future statement are another future
statement and a doc string.
*/
if (s->kind == ImportFrom_kind) {
identifier modname = s->v.ImportFrom.module;
if (modname &&
!PyUnicode_CompareWithASCIIString(modname, "__future__")) {
if (done) {
PyErr_SetString(PyExc_SyntaxError,
ERR_LATE_FUTURE);
PyErr_SyntaxLocationEx(filename, s->lineno, s->col_offset);
return 0;
}
if (!future_check_features(ff, s, filename))
return 0;
ff->ff_lineno = s->lineno;
}
else
done = 1;
}
else if (s->kind == Expr_kind && !found_docstring) {
expr_ty e = s->v.Expr.value;
if (e->kind != Str_kind)
done = 1;
else
found_docstring = 1;
}
else
done = 1;
}
return 1;
}
PyFutureFeatures *
PyFuture_FromAST(mod_ty mod, const char *filename)
{
PyFutureFeatures *ff;
ff = (PyFutureFeatures *)PyObject_Malloc(sizeof(PyFutureFeatures));
if (ff == NULL) {
PyErr_NoMemory();
return NULL;
}
ff->ff_features = 0;
ff->ff_lineno = -1;
if (!future_parse(ff, mod, filename)) {
PyObject_Free(ff);
return NULL;
}
return ff;
}
| 2,156 |