max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,755 | /*=========================================================================
Program: Visualization Toolkit
Module: vtkCollectionRange.h
Copyright (c) <NAME>, <NAME>, <NAME>
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#ifndef vtkCollectionRange_h
#define vtkCollectionRange_h
#ifndef __VTK_WRAP__
#include "vtkCollection.h"
#include "vtkMeta.h"
#include "vtkRange.h"
#include "vtkSmartPointer.h"
#include <cassert>
namespace vtk
{
namespace detail
{
// Forward declarations; defined below.
template <typename CollectionType>
struct CollectionRange;
template <typename CollectionType>
struct CollectionIterator;
//------------------------------------------------------------------------------
// Detect vtkCollection types
// True when T derives from (or is) vtkCollection.
template <typename T>
struct IsCollection : std::is_base_of<vtkCollection, T>
{
};
// SFINAE helper: resolves to T only when CollectionType is a vtkCollection
// subclass; removes the overload/alias from consideration otherwise.
template <typename CollectionType, typename T = CollectionType>
using EnableIfIsCollection = typename std::enable_if<IsCollection<CollectionType>::value, T>::type;
//------------------------------------------------------------------------------
// Detect the type of items held by the collection by checking the return type
// of GetNextItem(), or GetNextItemAsObject() as a fallback.
// Resolves the element type stored by CollectionType (see the overload
// resolution trick explained in the comment inside).
template <typename CollectionType>
struct GetCollectionItemType
{
  static_assert(IsCollection<CollectionType>::value, "Invalid vtkCollection subclass.");

private:
  // The GetType methods are only used in a decltype context and are left
  // unimplemented as we only care about their signatures. They are used to
  // determine the type of object held by the collection.
  //
  // By passing literal 0 as the argument, the overload taking `int` is
  // preferred and returns the same type as CollectionType::GetNextItem, which
  // is usually the exact type held by the collection (e.g.
  // vtkRendererCollection::GetNextItem returns vtkRenderer*).
  //
  // If the collection class does not define GetNextItem, SFINAE removes the
  // preferred `int` overload, and the `...` overload is used instead. This
  // method returns the same type as vtkCollection::GetNextItemAsObject, which
  // is vtkObject*. This lets us define a more derived collection item type
  // when possible, while falling back to the general vtkObject if a more
  // refined type is not known.

  // Fallback overload -- yields vtkObject* via GetNextItemAsObject. Not implemented.
  template <typename T>
  static auto GetType(...) -> decltype(std::declval<T>().GetNextItemAsObject());
  // Preferred overload -- yields the derived item type via GetNextItem. Not implemented.
  template <typename T>
  static auto GetType(int) -> decltype(std::declval<T>().GetNextItem());
  // Pointer-to-item type chosen by the overload resolution above.
  using PointerType = decltype(GetType<CollectionType>(0));

public:
  // Just use std::remove pointer, vtk::detail::StripPointer is overkill.
  using Type = typename std::remove_pointer<PointerType>::type;
};
//------------------------------------------------------------------------------
// Collection iterator. Reference, value, and pointer types are all ItemType
// pointers, since:
// a) values: ItemType* instead of ItemType because vtkObjects can't be
// copied/assigned.
// b) references: No good usecase to change the pointers held by the collection
// by returning ItemType*&, nor would returning ItemType& be useful, since
// it'd have to be dereferenced anyway to pass it anywhere, and vtkObjects
// are conventionally held by address.
// c) pointers: Returning ItemType** from operator-> would be useless.
//
// There are no const_reference, etc, since VTK is not const correct and marking
// vtkObjects consts makes them unusable.
// Forward iterator over the items of a vtkCollection. See the comment above
// for why value/reference/pointer are all ItemType*.
template <typename CollectionType>
struct CollectionIterator
{
  static_assert(IsCollection<CollectionType>::value, "Invalid vtkCollection subclass.");

private:
  using ItemType = typename GetCollectionItemType<CollectionType>::Type;

public:
  // Standard iterator traits. These were previously obtained by inheriting
  // from std::iterator, which is deprecated in C++17; declaring the typedefs
  // directly is the drop-in replacement and keeps std::iterator_traits (and
  // therefore the standard algorithms) working unchanged.
  using iterator_category = std::forward_iterator_tag;
  using value_type = ItemType*;
  using difference_type = int;
  using pointer = ItemType*;
  using reference = ItemType*;

  // Constructs an end/invalid iterator.
  CollectionIterator() noexcept
    : Element(nullptr)
  {
  }
  CollectionIterator(const CollectionIterator& o) noexcept = default;
  CollectionIterator& operator=(const CollectionIterator& o) noexcept = default;
  CollectionIterator& operator++() noexcept // prefix
  {
    this->Increment();
    return *this;
  }
  CollectionIterator operator++(int) noexcept // postfix
  {
    auto elem = this->Element;
    this->Increment();
    return CollectionIterator{ elem };
  }
  reference operator*() const noexcept { return this->GetItem(); }
  pointer operator->() const noexcept { return this->GetItem(); }
  friend bool operator==(const CollectionIterator& lhs, const CollectionIterator& rhs) noexcept
  {
    return lhs.Element == rhs.Element;
  }
  friend bool operator!=(const CollectionIterator& lhs, const CollectionIterator& rhs) noexcept
  {
    return lhs.Element != rhs.Element;
  }
  friend void swap(CollectionIterator& lhs, CollectionIterator& rhs) noexcept
  {
    using std::swap;
    swap(lhs.Element, rhs.Element);
  }
  friend struct CollectionRange<CollectionType>;

protected:
  // Only CollectionRange constructs valid iterators from linked-list nodes.
  CollectionIterator(vtkCollectionElement* element) noexcept
    : Element(element)
  {
  }

private:
  void Increment() noexcept
  { // incrementing an invalid iterator is UB, no need to check for non-null.
    this->Element = this->Element->Next;
  }
  ItemType* GetItem() const noexcept { return static_cast<ItemType*>(this->Element->Item); }
  // Current node of the collection's linked list; nullptr marks the end.
  vtkCollectionElement* Element;
};
//------------------------------------------------------------------------------
// Collection range proxy.
// The const_iterators/references are the same as the non-const versions, since
// vtkObjects marked const are unusable.
// Lightweight range proxy exposing begin()/end() iterators over a collection.
// Holds a vtkSmartPointer, so the range keeps a reference on the collection
// for its own lifetime.
template <typename CollectionType>
struct CollectionRange
{
  static_assert(IsCollection<CollectionType>::value, "Invalid vtkCollection subclass.");

  using ItemType = typename GetCollectionItemType<CollectionType>::Type;

  // NOTE: The const items are the same as the mutable ones, since const
  // vtkObjects are generally unusable.
  using size_type = int; // int is used by the vtkCollection API.
  using iterator = CollectionIterator<CollectionType>;
  using const_iterator = CollectionIterator<CollectionType>;
  using reference = ItemType*;
  using const_reference = ItemType*;
  using value_type = ItemType*;

  // The collection must be non-null; asserted below.
  CollectionRange(CollectionType* coll) noexcept
    : Collection(coll)
  {
    assert(this->Collection);
  }
  CollectionType* GetCollection() const noexcept { return this->Collection; }
  size_type size() const noexcept { return this->Collection->GetNumberOfItems(); }
  iterator begin() const
  {
    vtkCollectionSimpleIterator cookie;
    this->Collection->InitTraversal(cookie);
    // The cookie is a linked list node pointer, vtkCollectionElement:
    return iterator{ static_cast<vtkCollectionElement*>(cookie) };
  }
  // End iterator: a null element pointer (see CollectionIterator::Element).
  iterator end() const { return iterator{ nullptr }; }
  // Note: These return mutable objects because const vtkObject are unusable.
  const_iterator cbegin() const
  {
    vtkCollectionSimpleIterator cookie;
    this->Collection->InitTraversal(cookie);
    // The cookie is a linked list node pointer, vtkCollectionElement:
    return const_iterator{ static_cast<vtkCollectionElement*>(cookie) };
  }
  // Note: These return mutable objects because const vtkObjects are unusable.
  const_iterator cend() const { return const_iterator{ nullptr }; }

private:
  vtkSmartPointer<CollectionType> Collection;
};
}
} // end namespace vtk::detail
#endif // __VTK_WRAP__
#endif // vtkCollectionRange_h
// VTK-HeaderTest-Exclude: vtkCollectionRange.h
| 2,286 |
372 | <gh_stars>100-1000
//
// WalkinClient.h
// ISPDemo1
//
// Created by J_Knight_ on 2018/8/26.
// Copyright © 2018年 J_Knight_. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "RestaurantProtocol.h"
// Client conforming to RestaurantProtocol.
// NOTE(review): presumably models a walk-in (no-reservation) customer in the
// ISP demo -- confirm against RestaurantProtocol.h and the demo's usage.
@interface WalkinClient : NSObject<RestaurantProtocol>
@end
| 108 |
649 | package net.serenitybdd.screenplay;
/**
 * Assertion error raised when an enum holds a value the caller did not expect.
 */
public class UnexpectedEnumValueException extends AssertionError {
    public UnexpectedEnumValueException(String message) {
        super(message);
    }
}
| 66 |
852 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
// #include "Utilities/Configuration/interface/Architecture.h"
/*
* See header file for a description of this class.
*
* \author <NAME> - INFN Torino
*/
#include "MagneticField/Layers/interface/MagBSector.h"
#include "MagneticField/Layers/interface/MagBRod.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include <iostream>
using namespace std;
// Builds a sector from its rods (the sector takes ownership of the raw
// pointers and deletes them in its destructor) and records the lower phi bound.
MagBSector::MagBSector(vector<MagBRod*>& rods, Geom::Phi<float> phiMin) : theRods(rods), thePhiMin(phiMin) {}
// The sector owns its rods as raw pointers; release them here.
MagBSector::~MagBSector() {
  for (auto* rod : theRods) {
    delete rod;
  }
}
// Returns the first volume among this sector's rods containing gp within the
// given tolerance, or nullptr if no rod claims the point.
const MagVolume* MagBSector::findVolume(const GlobalPoint& gp, double tolerance) const {
  Geom::Phi<float> phi = gp.phi();
  // FIXME : use a binfinder
  for (auto* rod : theRods) {
    LogTrace("MagGeometry") << " Trying rod at phi " << rod->minPhi() << " " << phi << endl;
    const MagVolume* result = rod->findVolume(gp, tolerance);
    if (result != nullptr)
      return result;
  }
  return nullptr;
}
| 454 |
364 | package rsc.publisher;
import org.junit.Test;
import rsc.test.TestSubscriber;
/** Unit tests for PublisherNext: emits only the first element of its source. */
public class PublisherNextTest {
    /** A null source must be rejected eagerly at construction time. */
    @Test(expected = NullPointerException.class)
    public void source1Null() {
        new PublisherNext<>(null);
    }

    /** Single-element source: the element is emitted and the stream completes. */
    @Test
    public void normal() {
        TestSubscriber<Integer> ts = new TestSubscriber<>();
        new PublisherNext<>(new PublisherJust<>(1)).subscribe(ts);
        ts.assertValue(1)
          .assertNoError()
          .assertComplete();
    }

    /** Nothing is emitted until the subscriber requests demand. */
    @Test
    public void normalBackpressured() {
        // TestSubscriber(0): start with zero demand.
        TestSubscriber<Integer> ts = new TestSubscriber<>(0);
        new PublisherNext<>(new PublisherJust<>(1)).subscribe(ts);
        ts.assertNoValues()
          .assertNoError()
          .assertNotComplete();
        ts.request(1);
        ts.assertValue(1)
          .assertNoError()
          .assertComplete();
    }

    /** Empty source: completes with no values. */
    @Test
    public void empty() {
        TestSubscriber<Integer> ts = new TestSubscriber<>();
        new PublisherNext<>(PublisherEmpty.<Integer>instance()).subscribe(ts);
        ts.assertNoValues()
          .assertComplete();
    }

    /**
     * NOTE(review): this test body is identical to {@link #empty()}; it looks
     * like it was meant to exercise a default-value variant of PublisherNext
     * (e.g. a (source, defaultValue) constructor) -- confirm against the
     * PublisherNext API and strengthen if such an overload exists.
     */
    @Test
    public void emptyDefault() {
        TestSubscriber<Integer> ts = new TestSubscriber<>();
        new PublisherNext<>(PublisherEmpty.<Integer>instance()).subscribe(ts);
        ts.assertNoError()
          .assertComplete();
    }

    /** Multi-element source: only the first element is emitted. */
    @Test
    public void multi() {
        TestSubscriber<Integer> ts = new TestSubscriber<>();
        new PublisherNext<>(new PublisherRange(1, 10)).subscribe(ts);
        ts.assertValue(1)
          .assertComplete();
    }

    /** Multi-element source honors backpressure before emitting the first element. */
    @Test
    public void multiBackpressured() {
        TestSubscriber<Integer> ts = new TestSubscriber<>(0);
        new PublisherNext<>(new PublisherRange(1, 10)).subscribe(ts);
        ts.assertNoValues()
          .assertNoError()
          .assertNotComplete();
        ts.request(1);
        ts.assertValue(1)
          .assertComplete();
    }
}
| 839 |
575 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for media/test/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _CheckTestDataReadmeUpdated(input_api, output_api):
"""
Checks to make sure the README.md file is updated when changing test files.
"""
test_data_dir = input_api.os_path.join('media', 'test', 'data')
readme_path = input_api.os_path.join('media', 'test', 'data', 'README.md')
test_files = []
readme_updated = False
errors = []
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
if input_api.os_path.dirname(local_path) == test_data_dir:
test_files.append(f)
if local_path == readme_path:
readme_updated = True
break
if test_files and not readme_updated:
errors.append(output_api.PresubmitPromptWarning(
'When updating files in ' + test_data_dir + ', please also update '
+ readme_path + ':', test_files))
return errors
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run on upload; delegates to the README freshness check."""
  return _CheckTestDataReadmeUpdated(input_api, output_api)
| 449 |
698 | <filename>src/test/python/test_autocloseable.py
import unittest
import jep
TestAutoCloseable = jep.findClass('jep.test.closeable.TestAutoCloseable')
class TestAutoCloseables(unittest.TestCase):
    """Tests that Java AutoCloseable objects honor Python's `with` protocol via jep."""

    def test_with(self):
        # Leaving the `with` block must close the Java writer.
        with TestAutoCloseable() as writer:
            writer.write("abc")
            self.assertFalse(writer.isClosed())
        self.assertTrue(writer.isClosed())

    def test_io_exception(self):
        # Writing after close must surface as an IOError on the Python side.
        with TestAutoCloseable() as writer:
            writer.write("abc")
        with self.assertRaises(IOError):
            writer.write("def")

    def test_inner_exception(self):
        # Even when the body raises (here a failing import from a fake Java
        # package), the writer must still be closed on exit.
        try:
            with TestAutoCloseable() as writer:
                writer.write("abc")
                from java.fake import ArrayList
                writer.write("def")
        except ImportError as exc:
            # Expected; the exception itself is irrelevant to this test.
            pass
        self.assertTrue(writer.isClosed())
| 416 |
1,679 | <filename>run_spec_tests.py
import os
import sys
import subprocess
def collect_wast(dir):
    """Return the paths of the regular *.wast files directly under ``dir``."""
    return [
        os.path.join(dir, entry)
        for entry in os.listdir(dir)
        if entry.endswith(".wast") and os.path.isfile(os.path.join(dir, entry))
    ]
# Driver: run the wasm spec test suite found in the directory given as argv[1].
wast_files = collect_wast(sys.argv[1])
success_list = []
failure_list = []
for name in wast_files:
    try:
        # Convert the .wast into the JSON + wasm form the runner consumes.
        json_name = name + ".json"
        ret = subprocess.call(["wast2json", name, "-o", json_name ])
        if ret != 0:
            raise Exception("wast2json")
        ret = subprocess.call(["./test_runner", json_name])
        if ret != 0:
            raise Exception("test_runner")
        success_list.append(name)
    except Exception as e:
        # Either conversion or execution failed; record and keep going.
        print(e)
        failure_list.append(name)
# Summary report.
print("Successes:")
print(success_list)
print("Failures:")
print(failure_list)
num_successes = len(success_list)
num_failures = len(failure_list)
print("{} successes, {} failures".format(num_successes, num_failures))
| 447 |
652 | /*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.builder.search.condition;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* A {@link Condition} implementation that matches a field within an range of values.
*
* @author <NAME> {@literal <<EMAIL>>}
*/
public class DateRangeCondition extends Condition<DateRangeCondition> {

    /** The name of the field to be matched. */
    @JsonProperty("field")
    final String field;

    /** The lower accepted date. May be {@code null}, meaning no lower limit. */
    @JsonProperty("from")
    Object from;

    /** The upper accepted date. May be {@code null}, meaning no upper limit. */
    @JsonProperty("to")
    Object to;

    /** The spatial operation to be performed. */
    @JsonProperty("operation")
    String operation;

    /**
     * Returns a new {@link DateRangeCondition} with the specified field reference point.
     *
     * @param field the name of the field to be matched
     */
    @JsonCreator
    public DateRangeCondition(@JsonProperty("field") String field) {
        this.field = field;
    }

    /**
     * Sets the lower accepted date. May be {@code null}, meaning no lower limit.
     *
     * @param from the lower accepted date, or {@code null} if there is no lower limit
     * @return this with the specified lower accepted date
     */
    public DateRangeCondition from(Object from) {
        this.from = from;
        return this;
    }

    /**
     * Sets the upper accepted date. May be {@code null}, meaning no upper limit.
     *
     * @param to the upper accepted date, or {@code null} if there is no upper limit
     * @return this with the specified upper accepted date
     */
    public DateRangeCondition to(Object to) {
        this.to = to;
        return this;
    }

    /**
     * Sets the spatial operation to be performed. Possible values are {@code intersects}, {@code is_within} and {@code
     * contains}. Defaults to {@code intersects}.
     *
     * @param operation the operation
     * @return this with the specified operation
     */
    public DateRangeCondition operation(String operation) {
        this.operation = operation;
        return this;
    }
}
| 920 |
1,262 | <gh_stars>1000+
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.type.ArrayType;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Data type converter for Cassandra.
*
* @see <a href="http://cassandra.apache.org/doc/latest/cql/types.html">Cassandra Data Types</a>
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class CassandraTypeConverter implements ConnectorTypeConverter {

    // Matches "<typename> [< ...params... >]" and captures the base type and
    // the raw (unsplit) parameter text.
    private static final Pattern TYPE_PATTERN = Pattern.compile("^\\s*?(\\w*)\\s*?(?:<\\s*?(.*)\\s*?>)?\\s*?$");
    private static final int TYPE_GROUP = 1;
    private static final int PARAM_GROUP = 2;

    // Splits a map's "<key>, <value>" parameter text into its two type strings.
    private static final Pattern MAP_PARAM_PATTERN = Pattern
        .compile("^\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?,\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?$");
    private static final int MAP_KEY_GROUP = 1;
    private static final int MAP_VALUE_GROUP = 2;

    // Iteratively matches each element type inside a tuple's parameter text.
    private static final Pattern TUPLE_PARAM_PATTERN
        = Pattern.compile("(?:(\\w[\\w\\s]+(?:<[\\w+,\\s]+>\\s*?)?),?\\s*?)");
    private static final int TUPLE_GROUP = 1;

    /**
     * {@inheritDoc}
     */
    @Override
    public Type toMetacatType(@Nonnull @NonNull final String type) {
        final Matcher matcher = TYPE_PATTERN.matcher(type.toLowerCase());
        // TODO: Escape case from recursion may be needed to avoid potential infinite
        if (matcher.matches()) {
            final String cqlType = matcher.group(TYPE_GROUP);
            switch (cqlType) {
                case "ascii":
                    return BaseType.STRING;
                case "bigint":
                    return BaseType.BIGINT;
                case "blob":
                    return VarbinaryType.createVarbinaryType(Integer.MAX_VALUE);
                case "boolean":
                    return BaseType.BOOLEAN;
                case "counter":
                    return BaseType.BIGINT;
                case "date":
                    return BaseType.DATE;
                case "decimal":
                    return DecimalType.createDecimalType();
                case "double":
                    return BaseType.DOUBLE;
                case "float":
                    return BaseType.FLOAT;
                case "frozen":
                    // frozen<X> is transparent for type-mapping purposes: recurse on X.
                    // NOTE(review): PARAM_GROUP may be null here (bare "frozen"),
                    // which would NPE inside the recursive call -- confirm inputs.
                    return this.toMetacatType(matcher.group(PARAM_GROUP));
                case "int":
                    return BaseType.INT;
                case "list":
                    // The possible null for the PARAM_GROUP should be handled on recursive call throwing exception
                    return new ArrayType(this.toMetacatType(matcher.group(PARAM_GROUP)));
                case "map":
                    final Matcher mapMatcher = MAP_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP));
                    if (mapMatcher.matches()) {
                        return new MapType(
                            this.toMetacatType(mapMatcher.group(MAP_KEY_GROUP)),
                            this.toMetacatType(mapMatcher.group(MAP_VALUE_GROUP))
                        );
                    } else {
                        throw new IllegalArgumentException("Unable to parse map params " + matcher.group(PARAM_GROUP));
                    }
                case "smallint":
                    return BaseType.SMALLINT;
                case "text":
                    return BaseType.STRING;
                case "time":
                    return BaseType.TIME;
                case "timestamp":
                    return BaseType.TIMESTAMP;
                case "tinyint":
                    return BaseType.TINYINT;
                case "tuple":
                    if (matcher.group(PARAM_GROUP) == null) {
                        throw new IllegalArgumentException("Empty tuple param group. Unable to parse");
                    }
                    // Tuple fields are anonymous in CQL; synthesize field0, field1, ...
                    final Matcher tupleMatcher = TUPLE_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP));
                    final ImmutableList.Builder<RowType.RowField> tupleFields = ImmutableList.builder();
                    int rowFieldNumber = 0;
                    while (tupleMatcher.find()) {
                        tupleFields.add(
                            new RowType.RowField(
                                this.toMetacatType(tupleMatcher.group(TUPLE_GROUP)),
                                "field" + rowFieldNumber++
                            )
                        );
                    }
                    return new RowType(tupleFields.build());
                case "varchar":
                    return BaseType.STRING;
                case "varint":
                    return BaseType.INT;
                case "inet":
                case "set":
                case "timeuuid":
                case "uuid":
                default:
                    log.info("Currently unsupported type {}, returning Unknown type", cqlType);
                    return BaseType.UNKNOWN;
            }
        } else {
            throw new IllegalArgumentException("Unable to parse CQL type " + type);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String fromMetacatType(@Nonnull @NonNull final Type type) {
        switch (type.getTypeSignature().getBase()) {
            case ARRAY:
                if (!(type instanceof ArrayType)) {
                    throw new IllegalArgumentException("Expected an ArrayType and got " + type.getClass());
                }
                final ArrayType arrayType = (ArrayType) type;
                return "list<" + this.getElementTypeString(arrayType.getElementType()) + ">";
            case BIGINT:
                return "bigint";
            case BOOLEAN:
                return "boolean";
            case CHAR:
                // TODO: Should we make this unsupported?
                return "text";
            case DATE:
                return "date";
            case DECIMAL:
                return "decimal";
            case DOUBLE:
                return "double";
            case FLOAT:
                return "float";
            case INT:
                return "int";
            case INTERVAL_DAY_TO_SECOND:
                throw new UnsupportedOperationException("Cassandra doesn't support intervals.");
            case INTERVAL_YEAR_TO_MONTH:
                throw new UnsupportedOperationException("Cassandra doesn't support intervals.");
            case JSON:
                throw new UnsupportedOperationException("Cassandra doesn't support JSON natively.");
            case MAP:
                if (!(type instanceof MapType)) {
                    throw new IllegalArgumentException("Was expecting MapType instead it is " + type.getClass());
                }
                final MapType mapType = (MapType) type;
                final Type keyType = mapType.getKeyType();
                final Type valueType = mapType.getValueType();
                return "map<" + this.getElementTypeString(keyType) + ", " + this.getElementTypeString(valueType) + ">";
            case ROW:
                if (!(type instanceof RowType)) {
                    throw new IllegalArgumentException("Was expecting RowType instead it is " + type.getClass());
                }
                final RowType rowType = (RowType) type;
                final StringBuilder tupleBuilder = new StringBuilder();
                tupleBuilder.append("tuple<");
                // Tuple fields don't need to be frozen
                boolean putComma = false;
                for (final RowType.RowField field : rowType.getFields()) {
                    if (putComma) {
                        tupleBuilder.append(", ");
                    } else {
                        putComma = true;
                    }
                    tupleBuilder.append(this.fromMetacatType(field.getType()));
                }
                tupleBuilder.append(">");
                return tupleBuilder.toString();
            case SMALLINT:
                return "smallint";
            case STRING:
                return "text";
            case TIME:
                return "time";
            case TIME_WITH_TIME_ZONE:
                throw new UnsupportedOperationException("Cassandra doesn't support time with timezone");
            case TIMESTAMP:
                return "timestamp";
            case TIMESTAMP_WITH_TIME_ZONE:
                throw new UnsupportedOperationException("Cassandra doesn't support time with timezone");
            case TINYINT:
                return "tinyint";
            case UNKNOWN:
                throw new UnsupportedOperationException("Cassandra doesn't support an unknown type");
            case VARBINARY:
                return "blob";
            case VARCHAR:
                return "text";
            default:
                throw new IllegalArgumentException("Unknown type: " + type.getTypeSignature().getBase());
        }
    }

    /**
     * Renders a collection element type, marking nested collections frozen.
     */
    private String getElementTypeString(final Type elementType) {
        // Nested collections must be frozen in Cassandra.
        // NOTE(review): this emits "frozen map<...>" whereas the CQL grammar
        // (and TYPE_PATTERN above) expects "frozen<map<...>>" -- the output
        // may not round-trip through toMetacatType; confirm intended format.
        if (elementType instanceof MapType || elementType instanceof ArrayType) {
            return "frozen " + this.fromMetacatType(elementType);
        } else {
            return this.fromMetacatType(elementType);
        }
    }
}
| 5,150 |
6,201 | {
"recommendations": [
"codezombiech.gitignore",
"dbaeumer.vscode-eslint",
"esbenp.prettier-vscode",
"mikestead.dotenv",
"ms-azuretools.vscode-docker",
"ms-vscode.vscode-typescript-tslint-plugin",
"msjsdiag.debugger-for-chrome",
"prisma.prisma",
"graphql.vscode-graphql",
"redhat.vscode-yaml",
"streetsidesoftware.code-spell-checker",
"hashicorp.terraform",
"sleistner.vscode-fileutils"
]
}
| 221 |
1,768 | <gh_stars>1000+
package org.lamport.tla.toolbox.editor.basic.pcal;
import org.eclipse.jface.text.rules.IWordDetector;
import org.lamport.tla.toolbox.editor.basic.tla.TLAWordDetector;
/**
 * Word detector for PlusCal that extends the TLA+ detector by also treating
 * the characters of the assignment (":=") and multi-assignment ("||")
 * operators as word parts.
 */
public class PCalWordDetector extends TLAWordDetector implements IWordDetector {
    /* (non-Javadoc)
     * @see org.lamport.tla.toolbox.editor.basic.tla.TLAWordDetector#isWordPart(char)
     */
    @Override
    public boolean isWordPart(final char character) {
        switch (character) {
            case ':':
            case '=':
                // Detect assignment ":=" as word.
            case '|':
                // Detect multi-assignment "||" as word.
                return true;
            default:
                return super.isWordPart(character);
        }
    }
}
| 254 |
402 | <gh_stars>100-1000
/*
* Copyright (c) 2016, <NAME>, ASL, ETH Zurich, Switzerland
* Copyright (c) 2016, <NAME>, ASL, ETH Zurich, Switzerland
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAV_VISUALIZATION_HELPERS_H_
#define MAV_VISUALIZATION_HELPERS_H_
#include <Eigen/Eigenvalues>
#include <eigen_conversions/eigen_msg.h>
#include <std_msgs/ColorRGBA.h>
#include <visualization_msgs/MarkerArray.h>
namespace mav_visualization {
// Thin wrapper around std_msgs::ColorRGBA adding convenience constructors and
// named color presets. Alpha defaults to fully opaque (1.0).
class Color : public std_msgs::ColorRGBA {
 public:
  Color() : std_msgs::ColorRGBA() {}
  Color(double red, double green, double blue) : Color(red, green, blue, 1.0) {}
  Color(double red, double green, double blue, double alpha) : Color() {
    r = red;
    g = green;
    b = blue;
    a = alpha;
  }

  // Named presets.
  // NOTE(review): returning `const Color` by value inhibits move semantics;
  // a plain `Color` return would be the conventional signature.
  static const Color White() { return Color(1.0, 1.0, 1.0); }
  static const Color Black() { return Color(0.0, 0.0, 0.0); }
  static const Color Gray() { return Color(0.5, 0.5, 0.5); }
  static const Color Red() { return Color(1.0, 0.0, 0.0); }
  static const Color Green() { return Color(0.0, 1.0, 0.0); }
  static const Color Blue() { return Color(0.0, 0.0, 1.0); }
  static const Color Yellow() { return Color(1.0, 1.0, 0.0); }
  static const Color Orange() { return Color(1.0, 0.5, 0.0); }
  static const Color Purple() { return Color(0.5, 0.0, 1.0); }
  static const Color Chartreuse() { return Color(0.5, 1.0, 0.0); }
  static const Color Teal() { return Color(0.0, 1.0, 1.0); }
  static const Color Pink() { return Color(1.0, 0.0, 0.5); }
};
/// helper function to create a geometry_msgs::Point
/// Convenience builder: returns a geometry_msgs::Point at (x, y, z).
inline geometry_msgs::Point createPoint(double x, double y, double z) {
  geometry_msgs::Point point;
  point.x = x;
  point.y = y;
  point.z = z;
  return point;
}
// Draws a covariance ellipsoid
// Input: mu = static 3 element vector, specifying the ellipsoid center
// Input: cov = static 3x3 covariance matrix
// Input: color = RGBA color of the ellipsoid
// Input: n_sigma = confidence area / scale of the ellipsoid
// Output: marker = The marker in which the ellipsoid should be drawn
inline void drawCovariance3D(const Eigen::Vector3d& mu,
                             const Eigen::Matrix3d& cov,
                             const std_msgs::ColorRGBA& color, double n_sigma,
                             visualization_msgs::Marker* marker) {
  // Averaging cov with its transpose enforces exact symmetry so the
  // self-adjoint eigen solver below is well defined.
  // TODO(helenol): What does this do???? Does anyone know?
  const Eigen::Matrix3d changed_covariance = (cov + cov.transpose()) * 0.5;
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver(
      changed_covariance, Eigen::ComputeEigenvectors);
  Eigen::Matrix3d V = solver.eigenvectors();
  // make sure it's a rotation matrix (det +1, not a reflection)
  V.col(2) = V.col(0).cross(V.col(1));
  // Half-axes are sqrt(eigenvalues) scaled by the requested confidence.
  const Eigen::Vector3d sigma = solver.eigenvalues().cwiseSqrt() * n_sigma;
  tf::pointEigenToMsg(mu, marker->pose.position);
  tf::quaternionEigenToMsg(Eigen::Quaterniond(V), marker->pose.orientation);
  tf::vectorEigenToMsg(sigma * 2.0, marker->scale);  // diameter, not half axis
  marker->type = visualization_msgs::Marker::SPHERE;
  marker->color = color;
  marker->action = visualization_msgs::Marker::ADD;
}
// Renders the coordinate frame at pose (p, q) as a LINE_LIST marker with one
// red/green/blue segment per X/Y/Z axis.
inline void drawAxes(const Eigen::Vector3d& p, const Eigen::Quaterniond& q,
                     double scale, double line_width,
                     visualization_msgs::Marker* marker) {
  marker->type = visualization_msgs::Marker::LINE_LIST;
  marker->action = visualization_msgs::Marker::ADD;
  marker->color = Color::Black();
  marker->scale.x = line_width;  // Only scale.x is used for LINE_LIST width.
  marker->points.clear();
  marker->colors.clear();
  const Color axis_colors[3] = {Color::Red(), Color::Green(), Color::Blue()};
  for (int axis = 0; axis < 3; ++axis) {
    // Each axis is a segment from the local origin to the scaled unit axis.
    marker->points.push_back(createPoint(0, 0, 0));
    marker->points.push_back(createPoint(axis == 0 ? scale : 0,
                                         axis == 1 ? scale : 0,
                                         axis == 2 ? scale : 0));
    // Both endpoints of a segment share the axis color.
    marker->colors.push_back(axis_colors[axis]);
    marker->colors.push_back(axis_colors[axis]);
  }
  tf::pointEigenToMsg(p, marker->pose.position);
  tf::quaternionEigenToMsg(q, marker->pose.orientation);
}
// Configures *marker as an ARROW of the given length and diameter posed at
// (p, q) with the given color.
inline void drawArrowPositionOrientation(const Eigen::Vector3d& p,
                                         const Eigen::Quaterniond& q,
                                         const std_msgs::ColorRGBA& color,
                                         double length, double diameter,
                                         visualization_msgs::Marker* marker) {
  tf::pointEigenToMsg(p, marker->pose.position);
  tf::quaternionEigenToMsg(q, marker->pose.orientation);
  marker->scale.x = length;
  marker->scale.y = diameter;
  marker->scale.z = diameter;
  marker->color = color;
  marker->type = visualization_msgs::Marker::ARROW;
  marker->action = visualization_msgs::Marker::ADD;
}
// Configures *marker as an ARROW running from p1 to p2.
inline void drawArrowPoints(const Eigen::Vector3d& p1,
                            const Eigen::Vector3d& p2,
                            const std_msgs::ColorRGBA& color, double diameter,
                            visualization_msgs::Marker* marker) {
  marker->points.resize(2);
  tf::pointEigenToMsg(p1, marker->points[0]);
  tf::pointEigenToMsg(p2, marker->points[1]);
  // Shaft/head widths derive from the requested diameter; scale.z is unused
  // for a point-to-point arrow.
  marker->scale.x = diameter * 0.1;
  marker->scale.y = diameter * 2 * 0.1;
  marker->scale.z = 0;
  marker->color = color;
  marker->type = visualization_msgs::Marker::ARROW;
  marker->action = visualization_msgs::Marker::ADD;
}
// Draws the X/Y/Z axes of pose (p, q) as three arrows (red = X, green = Y,
// blue = Z) appended to marker_array (resized to exactly three markers).
inline void drawAxesArrows(const Eigen::Vector3d& p,
                           const Eigen::Quaterniond& q, double scale,
                           double diameter,
                           visualization_msgs::MarkerArray* marker_array) {
  marker_array->markers.resize(3);
  const Eigen::Vector3d tips[3] = {q * Eigen::Vector3d::UnitX() * scale + p,
                                   q * Eigen::Vector3d::UnitY() * scale + p,
                                   q * Eigen::Vector3d::UnitZ() * scale + p};
  const Color tip_colors[3] = {Color::Red(), Color::Green(), Color::Blue()};
  for (int i = 0; i < 3; ++i) {
    drawArrowPoints(p, tips[i], tip_colors[i], diameter,
                    &marker_array->markers[i]);
  }
}
} // namespace mav_visualization
#endif // MAV_VISUALIZATION_HELPERS_H_
| 2,730 |
1,259 | <reponame>isobelLiu/appium<filename>sample-code/examples/java/junit/src/test/java/com/saucelabs/appium/page_object/PageObjectWithCustomizedTimeOuts.java<gh_stars>1000+
package com.saucelabs.appium.page_object;
import io.appium.java_client.MobileElement;
import io.appium.java_client.pagefactory.WithTimeout;
import org.openqa.selenium.support.FindBy;
import java.util.List;
import java.util.concurrent.TimeUnit;
public class PageObjectWithCustomizedTimeOuts {

    /**
     * Page Object best practice is to describe interactions with target
     * elements by methods. These methods describe the business logic of the
     * page/screen. Here the lazily instantiated elements are public; that was
     * done purely to make the example obvious.
     */
    @FindBy(className = "OneClassWhichDoesNotExist")
    public List<MobileElement> stubElements;

    /* Any timeout of the waiting for an element/list of elements
    can be customized if the general time duration is
    not suitable, e.g. when the element takes a long time to render
    or is used just for instant checks/assertions.
    */
    @WithTimeout(time = 5, unit = TimeUnit.SECONDS)
    @FindBy(className = "OneAnotherClassWhichDoesNotExist")
    public List<MobileElement> stubElements2;
}
| 410 |
12,004 | # Copyright 2016-2020, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pulumi._types import resource_types
import pulumi
class Resource1(pulumi.Resource):
    """Fixture: no output attributes at all."""
    pass


class Resource2(pulumi.Resource):
    # Fixture: plain class-level Output annotation.
    foo: pulumi.Output[str]


class Resource3(pulumi.Resource):
    # Fixture: Output whose element type is a forward reference to a class
    # declared later in this module.
    nested: pulumi.Output['Nested']


class Resource4(pulumi.Resource):
    # Fixture: pulumi.property maps the Python attribute name to the
    # camelCase Pulumi schema name "nestedValue".
    nested_value: pulumi.Output['Nested'] = pulumi.property("nestedValue")


class Resource5(pulumi.Resource):
    # Fixture: output declared via a property getter's return annotation.
    @property
    @pulumi.getter
    def foo(self) -> pulumi.Output[str]:
        ...


class Resource6(pulumi.Resource):
    # Fixture: getter-style output with a forward-referenced element type.
    @property
    @pulumi.getter
    def nested(self) -> pulumi.Output['Nested']:
        ...


class Resource7(pulumi.Resource):
    # Fixture: getter with an explicit camelCase schema name.
    @property
    @pulumi.getter(name="nestedValue")
    def nested_value(self) -> pulumi.Output['Nested']:
        ...
class Resource8(pulumi.Resource):
    # Fixture: bare (non-generic) Output annotation — carries no element
    # type, so it contributes nothing to resource_types().
    foo: pulumi.Output


class Resource9(pulumi.Resource):
    # Fixture: bare Output as a getter return annotation.
    @property
    @pulumi.getter
    def foo(self) -> pulumi.Output:
        ...


class Resource10(pulumi.Resource):
    # Fixture: annotation not wrapped in Output at all.
    foo: str


class Resource11(pulumi.Resource):
    # Fixture: getter annotated with a plain (non-Output) type.
    @property
    @pulumi.getter
    def foo(self) -> str:
        ...


class Resource12(pulumi.Resource):
    # Fixture: getter with no return annotation — no type information.
    @property
    @pulumi.getter
    def foo(self):
        ...


@pulumi.output_type
class Nested:
    # Target of the 'Nested' forward references in the fixtures above.
    first: str
    second: str
class ResourceTypesTests(unittest.TestCase):
    def test_resource_types(self):
        """resource_types() maps Pulumi schema property names to Python types.

        Exercises every declaration style in the fixture classes above:
        class-level annotations, pulumi.property aliases, and property
        getters (with and without return annotations).
        """
        self.assertEqual({}, resource_types(Resource1))
        self.assertEqual({"foo": str}, resource_types(Resource2))
        self.assertEqual({"nested": Nested}, resource_types(Resource3))
        self.assertEqual({"nestedValue": Nested}, resource_types(Resource4))
        self.assertEqual({"foo": str}, resource_types(Resource5))
        self.assertEqual({"nested": Nested}, resource_types(Resource6))
        self.assertEqual({"nestedValue": Nested}, resource_types(Resource7))
        # Non-generic Output excluded from types.
        self.assertEqual({}, resource_types(Resource8))
        self.assertEqual({}, resource_types(Resource9))
        # Type annotations not using Output.
        self.assertEqual({"foo": str}, resource_types(Resource10))
        self.assertEqual({"foo": str}, resource_types(Resource11))
        # No return type annotation from the property getter.
        self.assertEqual({}, resource_types(Resource12))
| 1,063 |
317 | /*!
* Copyright (c) 2016 by Contributors
* \file op_suppl.h
* \brief A supplement and amendment of the operators from op.h
* \author <NAME>, zhubuntu
*/
#ifndef OP_SUPPL_H
#define OP_SUPPL_H
#include <string>
#include <vector>
#include "base.h"
#include "shape.h"
#include "operator.h"
#include "MxNetCpp.h"
namespace mxnet {
namespace cpp {
// Element-wise binary symbol helpers. Each one wraps the MXNet operator of
// the same name, wiring |lhs| and |rhs| as its two named inputs.
inline Symbol _Plus(Symbol lhs, Symbol rhs) {
  Operator op("_Plus");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Mul(Symbol lhs, Symbol rhs) {
  Operator op("_Mul");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Minus(Symbol lhs, Symbol rhs) {
  Operator op("_Minus");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Div(Symbol lhs, Symbol rhs) {
  Operator op("_Div");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Power(Symbol lhs, Symbol rhs) {
  Operator op("_Power");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Maximum(Symbol lhs, Symbol rhs) {
  Operator op("_Maximum");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
inline Symbol _Minimum(Symbol lhs, Symbol rhs) {
  Operator op("_Minimum");
  op.SetInput("lhs", lhs);
  op.SetInput("rhs", rhs);
  return op.CreateSymbol();
}
// Symbol-vs-scalar binary helpers. |scalar_on_left| tells the operator on
// which side of the expression the scalar operand sits.
inline Symbol _PlusScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_PlusScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _MinusScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_MinusScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _MulScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_MulScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _DivScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_DivScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _PowerScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_PowerScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _MaximumScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_MaximumScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
inline Symbol _MinimumScalar(Symbol lhs, mx_float scalar, bool scalar_on_left) {
  Operator op("_MinimumScalar");
  op.SetParam("scalar", scalar);
  op.SetParam("scalar_on_left", scalar_on_left);
  op.SetInput("lhs", lhs);
  return op.CreateSymbol();
}
// TODO(zhangcheng-qinyinghua)
// make crop function run in op.h
// This function is due to [zhubuntu](https://github.com/zhubuntu)
// Crops |data| to the spatial size of |crop_like| (or to |h_w| at |offset|
// when given); |num_args| mirrors the underlying operator's argument count.
inline Symbol Crop(const std::string& symbol_name,
                   int num_args,
                   Symbol data,
                   Symbol crop_like,
                   Shape offset = Shape(0, 0),
                   Shape h_w = Shape(0, 0),
                   bool center_crop = false) {
  Operator op("Crop");
  op.SetParam("num_args", num_args)
      .SetParam("offset", offset)
      .SetParam("h_w", h_w)
      .SetParam("center_crop", center_crop);
  op.SetInput("arg0", data);
  op.SetInput("arg1", crop_like);
  return op.CreateSymbol(symbol_name);
}
/*!
 * \brief Slice input equally along specified axis.
* \param symbol_name name of the resulting symbol.
* \param data input symbol.
* \param num_outputs Number of outputs to be sliced.
* \param axis Dimension along which to slice.
* \param squeeze_axis If true AND the sliced dimension becomes 1, squeeze that dimension.
* \return new symbol
*/
// Splits |data| into |num_outputs| equal slices along |axis|; when
// |squeeze_axis| is set and the sliced dimension becomes 1 it is removed.
inline Symbol SliceChannel(const std::string& symbol_name,
                           Symbol data,
                           int num_outputs,
                           int axis = 1,
                           bool squeeze_axis = false) {
  Operator op("SliceChannel");
  op.SetParam("num_outputs", num_outputs)
      .SetParam("axis", axis)
      .SetParam("squeeze_axis", squeeze_axis);
  // NOTE(review): Operator::operator() appears to attach |data| as the
  // op's input symbol (mirrors the original chained "(data)" call) —
  // confirm against the Operator API.
  op(data);
  return op.CreateSymbol(symbol_name);
}
// Convolution without a bias term: identical to the "Convolution" operator
// but with "no_bias" forced to true, so only |data| and |weight| are used.
inline Symbol ConvolutionNoBias(const std::string& symbol_name,
                                Symbol data,
                                Symbol weight,
                                Shape kernel,
                                int num_filter,
                                Shape stride = Shape(1, 1),
                                Shape dilate = Shape(1, 1),
                                Shape pad = Shape(0, 0),
                                int num_group = 1,
                                int64_t workspace = 512) {
  Operator conv("Convolution");
  conv.SetParam("kernel", kernel)
      .SetParam("num_filter", num_filter)
      .SetParam("stride", stride)
      .SetParam("dilate", dilate)
      .SetParam("pad", pad)
      .SetParam("num_group", num_group)
      .SetParam("workspace", workspace)
      .SetParam("no_bias", true);
  conv.SetInput("data", data);
  conv.SetInput("weight", weight);
  return conv.CreateSymbol(symbol_name);
}
} // namespace cpp
} // namespace mxnet
#endif /* end of include guard: OP_SUPPL_H */
| 2,803 |
585 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Tests for the {!term} query parser: exact, non-analyzed term matching
 * against text, string (single- and multi-valued) and numeric fields.
 */
public class TestTermQParserPlugin extends SolrTestCaseJ4 {

  @BeforeClass
  public static void beforeClass() throws Exception {
    initCore("solrconfig.xml", "schema.xml");
    // Fantasy titles, ids 1-4; ids 2 and 3 also carry the extra
    // multi-valued "childrens" value in cat_s.
    assertU(adoc("id","1", "author_s1", "<NAME>", "t_title", "The Magicians", "cat_s", "fantasy", "pubyear_i", "2009"));
    assertU(adoc("id", "2", "author_s1", "<NAME>", "t_title", "The Eye of the World", "cat_s", "fantasy", "cat_s", "childrens", "pubyear_i", "1990"));
    assertU(adoc("id", "3", "author_s1", "<NAME>", "t_title", "The Great Hunt", "cat_s", "fantasy", "cat_s", "childrens", "pubyear_i", "1990"));
    assertU(adoc("id", "4", "author_s1", "<NAME>", "t_title", "The Fifth Season", "cat_s", "fantasy", "pubyear_i", "2015"));
    assertU(commit());
    // Sci-fi titles, ids 5-7, added in a second commit.
    assertU(adoc("id", "5", "author_s1", "<NAME>", "t_title", "The Dispossessed", "cat_s", "scifi", "pubyear_i", "1974"));
    assertU(adoc("id", "6", "author_s1", "<NAME>", "t_title", "The Left Hand of Darkness", "cat_s", "scifi", "pubyear_i", "1969"));
    assertU(adoc("id", "7", "author_s1", "<NAME>", "t_title", "Foundation", "cat_s", "scifi", "pubyear_i", "1951"));
    assertU(commit());
  }

  @Test
  public void testTextTermsQuery() {
    // Single term value matching one analyzed t_title token.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "{!term f=t_title}left");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=1]",
        "//result/doc[1]/str[@name='id'][.='6']"
    );
    // Single term value with no hits — presumably "the" is absent from the
    // index (stopword removal at index time?); confirm against the schema.
    params = new ModifiableSolrParams();
    params.add("q", "{!term f=t_title}the");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=0]");
  }

  @Test
  public void testMissingField() {
    // Omitting the mandatory f= local param must be a 400, not a parse crash.
    assertQEx("Expecting bad request", "Missing field to query", req("q", "{!term}childrens"), SolrException.ErrorCode.BAD_REQUEST);
  }

  @Test
  public void testTermsMethodEquivalency() {
    // Single-valued field
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q","{!term f=author_s1}<NAME>");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=2]",
        "//result/doc[1]/str[@name='id'][.='2']",
        "//result/doc[2]/str[@name='id'][.='3']"
    );
    // Multi-valued field
    params = new ModifiableSolrParams();
    params.add("q", "{!term f=cat_s}childrens");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=2]",
        "//result/doc[1]/str[@name='id'][.='2']",
        "//result/doc[2]/str[@name='id'][.='3']"
    );
    // Numeric field
    params = new ModifiableSolrParams();
    params.add("q", "{!term f=pubyear_i}2009");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=1]", "//result/doc[1]/str[@name='id'][.='1']");
    // NOTE(review): the block below repeats the identical numeric-field
    // assertion above verbatim — possibly meant to exercise a different
    // query form; confirm intent.
    params = new ModifiableSolrParams();
    params.add("q", "{!term f=pubyear_i}2009");
    params.add("sort", "id asc");
    assertQ(req(params, "indent", "on"), "*[count(//doc)=1]", "//result/doc[1]/str[@name='id'][.='1']");
  }
}
| 1,605 |
1,405 | package com.tencent.tmsecure.module.aresengine;
/**
 * Abstract filter over outgoing SMS messages. (The class name "Fiter" and
 * the method name "setSystDao" are misspellings, but they are published API
 * and must not be renamed.) Concrete implementations receive their
 * collaborators through the abstract setters below before filtering.
 */
public abstract class OutgoingSmsFiter extends DataFilter<SmsEntity> {
    // Filter-action flag; presumably requests removal of private/secret SMS
    // entries — confirm against callers.
    public static final int REMOVE_PRIVATE_SMS = 1;

    /** Injects the converter used to translate between entity types. */
    public abstract void setEntityConvertor(IEntityConverter iEntityConverter);

    /** Injects the DAO for the private (secret) contact list. */
    public abstract void setPrivateListDao(IContactDao<? extends ContactEntity> iContactDao);

    /** Injects the DAO for privately stored SMS messages. */
    public abstract void setPrivateSmsDao(ISmsDao<? extends SmsEntity> iSmsDao);

    /** Injects the system SMS DAO ("Syst" presumably = system — confirm). */
    public abstract void setSystDao(AbsSysDao absSysDao);
}
| 163 |
777 | /*
* Copyright (C) 2004, 2005, 2006 Apple Computer, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "core/editing/VisibleSelection.h"
#include "bindings/core/v8/ExceptionState.h"
#include "core/dom/Document.h"
#include "core/dom/Element.h"
#include "core/dom/Range.h"
#include "core/editing/EditingUtilities.h"
#include "core/editing/SelectionAdjuster.h"
#include "core/editing/iterators/CharacterIterator.h"
#include "platform/geometry/LayoutPoint.h"
#include "wtf/Assertions.h"
#include "wtf/text/CString.h"
#include "wtf/text/CharacterNames.h"
#include "wtf/text/StringBuilder.h"
namespace blink {
// Constructs an empty (no selection) instance with default downstream
// affinity and character granularity.
template <typename Strategy>
VisibleSelectionTemplate<Strategy>::VisibleSelectionTemplate()
    : m_affinity(TextAffinity::Downstream),
      m_selectionType(NoSelection),
      m_baseIsFirst(true),
      m_isDirectional(false),
      m_granularity(CharacterGranularity),
      m_hasTrailingWhitespace(false) {}

// Constructs from a raw SelectionTemplate and immediately canonicalizes the
// endpoints via validate() at the selection's own granularity.
template <typename Strategy>
VisibleSelectionTemplate<Strategy>::VisibleSelectionTemplate(
    const SelectionTemplate<Strategy>& selection)
    : m_base(selection.base()),
      m_extent(selection.extent()),
      m_affinity(selection.affinity()),
      m_selectionType(NoSelection),
      m_isDirectional(selection.isDirectional()),
      m_granularity(selection.granularity()),
      m_hasTrailingWhitespace(selection.hasTrailingWhitespace()) {
  validate(m_granularity);
}
// Factory: builds a canonicalized visible selection from |selection|.
template <typename Strategy>
VisibleSelectionTemplate<Strategy> VisibleSelectionTemplate<Strategy>::create(
    const SelectionTemplate<Strategy>& selection) {
  return VisibleSelectionTemplate(selection);
}

// Convenience overloads dispatching to the DOM-tree / flat-tree variants.
VisibleSelection createVisibleSelection(const SelectionInDOMTree& selection) {
  return VisibleSelection::create(selection);
}

VisibleSelectionInFlatTree createVisibleSelection(
    const SelectionInFlatTree& selection) {
  return VisibleSelectionInFlatTree::create(selection);
}
// Classifies the (start, end) pair: NoSelection when both are null,
// CaretSelection when they denote the same caret location, RangeSelection
// otherwise.
template <typename Strategy>
static SelectionType computeSelectionType(
    const PositionTemplate<Strategy>& start,
    const PositionTemplate<Strategy>& end) {
  if (start.isNull()) {
    DCHECK(end.isNull());
    return NoSelection;
  }
  if (start == end)
    return CaretSelection;
  // TODO(yosin) We should call |Document::updateStyleAndLayout()| here for
  // |mostBackwardCaretPosition()|. However, we are here during
  // |Node::removeChild()|.
  start.anchorNode()->updateDistribution();
  end.anchorNode()->updateDistribution();
  // Distinct positions may still collapse to the same caret after backward
  // normalization; treat that as a caret rather than a range.
  if (mostBackwardCaretPosition(start) == mostBackwardCaretPosition(end))
    return CaretSelection;
  return RangeSelection;
}
// Copy constructor: plain memberwise copy of the canonicalized state.
// NOTE(review): this and operator= below look equivalent to the
// compiler-generated defaults — confirm before simplifying to "= default".
template <typename Strategy>
VisibleSelectionTemplate<Strategy>::VisibleSelectionTemplate(
    const VisibleSelectionTemplate<Strategy>& other)
    : m_base(other.m_base),
      m_extent(other.m_extent),
      m_start(other.m_start),
      m_end(other.m_end),
      m_affinity(other.m_affinity),
      m_selectionType(other.m_selectionType),
      m_baseIsFirst(other.m_baseIsFirst),
      m_isDirectional(other.m_isDirectional),
      m_granularity(other.m_granularity),
      m_hasTrailingWhitespace(other.m_hasTrailingWhitespace) {}

// Copy assignment: memberwise copy; self-assignment is harmless since every
// member is a value type.
template <typename Strategy>
VisibleSelectionTemplate<Strategy>& VisibleSelectionTemplate<Strategy>::
operator=(const VisibleSelectionTemplate<Strategy>& other) {
  m_base = other.m_base;
  m_extent = other.m_extent;
  m_start = other.m_start;
  m_end = other.m_end;
  m_affinity = other.m_affinity;
  m_selectionType = other.m_selectionType;
  m_baseIsFirst = other.m_baseIsFirst;
  m_isDirectional = other.m_isDirectional;
  m_granularity = other.m_granularity;
  m_hasTrailingWhitespace = other.m_hasTrailingWhitespace;
  return *this;
}
// Converts back to a plain SelectionTemplate, preserving base/extent (when
// present), affinity, granularity, directionality and the
// trailing-whitespace flag.
template <typename Strategy>
SelectionTemplate<Strategy> VisibleSelectionTemplate<Strategy>::asSelection()
    const {
  typename SelectionTemplate<Strategy>::Builder builder;
  if (m_base.isNotNull())
    builder.setBaseAndExtent(m_base, m_extent);
  return builder.setAffinity(m_affinity)
      .setGranularity(m_granularity)
      .setIsDirectional(m_isDirectional)
      .setHasTrailingWhitespace(m_hasTrailingWhitespace)
      .build();
}
// Endpoint setters. Each assignment re-runs validate() (at its declared
// default granularity) so the canonical start/end stay consistent; callers
// must therefore have an up-to-date layout tree.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setBase(
    const PositionTemplate<Strategy>& position) {
  DCHECK(!needsLayoutTreeUpdate(position));
  m_base = position;
  validate();
}

template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setBase(
    const VisiblePositionTemplate<Strategy>& visiblePosition) {
  DCHECK(visiblePosition.isValid());
  m_base = visiblePosition.deepEquivalent();
  validate();
}

template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setExtent(
    const PositionTemplate<Strategy>& position) {
  DCHECK(!needsLayoutTreeUpdate(position));
  m_extent = position;
  validate();
}

template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setExtent(
    const VisiblePositionTemplate<Strategy>& visiblePosition) {
  DCHECK(visiblePosition.isValid());
  m_extent = visiblePosition.deepEquivalent();
  validate();
}
// Returns the selection's [start, end] as an EphemeralRange with both ends
// parent-anchored, or a null range when there is no selection.
EphemeralRange firstEphemeralRangeOf(const VisibleSelection& selection) {
  if (selection.isNone())
    return EphemeralRange();
  return EphemeralRange(selection.start().parentAnchoredEquivalent(),
                        selection.end().parentAnchoredEquivalent());
}

// Same range, materialized as a heap-allocated Range object.
Range* firstRangeOf(const VisibleSelection& selection) {
  return createRange(firstEphemeralRangeOf(selection));
}
// Returns the selection as a parent-anchored EphemeralRange: collapsed and
// moved upstream for carets, normalized to the minimal equivalent range for
// range selections; null range when there is no selection.
template <typename Strategy>
EphemeralRangeTemplate<Strategy>
VisibleSelectionTemplate<Strategy>::toNormalizedEphemeralRange() const {
  if (isNone())
    return EphemeralRangeTemplate<Strategy>();

  // Make sure we have an updated layout since this function is called
  // in the course of running edit commands which modify the DOM.
  // Failing to ensure this can result in equivalentXXXPosition calls returning
  // incorrect results.
  DCHECK(!m_start.document()->needsLayoutTreeUpdate());

  if (isCaret()) {
    // If the selection is a caret, move the range start upstream. This
    // helps us match the conventions of text editors tested, which make
    // style determinations based on the character before the caret, if any.
    const PositionTemplate<Strategy> start =
        mostBackwardCaretPosition(m_start).parentAnchoredEquivalent();
    return EphemeralRangeTemplate<Strategy>(start, start);
  }
  // If the selection is a range, select the minimum range that encompasses
  // the selection. Again, this is to match the conventions of text editors
  // tested, which make style determinations based on the first character of
  // the selection. For instance, this operation helps to make sure that the
  // "X" selected below is the only thing selected. The range should not be
  // allowed to "leak" out to the end of the previous text node, or to the
  // beginning of the next text node, each of which has a different style.
  //
  // On a treasure map, <b>X</b> marks the spot.
  //                       ^ selected
  //
  DCHECK(isRange());
  return normalizeRange(EphemeralRangeTemplate<Strategy>(m_start, m_end));
}
// Builds the range from |pos| to the end of its enclosing block-flow
// element (used as the search space for trailing-whitespace scanning).
// Returns a null range when |pos| is not anchored in a usable document.
template <typename Strategy>
static EphemeralRangeTemplate<Strategy> makeSearchRange(
    const PositionTemplate<Strategy>& pos) {
  using SearchRange = EphemeralRangeTemplate<Strategy>;
  Node* const anchor = pos.anchorNode();
  if (!anchor)
    return SearchRange();
  if (!anchor->document().documentElement())
    return SearchRange();
  Element* const blockFlowRoot = enclosingBlockFlowElement(*anchor);
  if (!blockFlowRoot)
    return SearchRange();
  return SearchRange(
      pos, PositionTemplate<Strategy>::lastPositionInNode(blockFlowRoot));
}
// For a word-granularity range selection, extends the end forward over any
// immediately-following whitespace (stopping at '\n' and at the first
// non-space character) and records the extension in m_hasTrailingWhitespace.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::appendTrailingWhitespace() {
  if (isNone())
    return;
  DCHECK_EQ(m_granularity, WordGranularity);
  if (!isRange())
    return;
  const EphemeralRangeTemplate<Strategy> searchRange = makeSearchRange(end());
  if (searchRange.isNull())
    return;

  CharacterIteratorAlgorithm<Strategy> charIt(
      searchRange.startPosition(), searchRange.endPosition(),
      TextIteratorEmitsCharactersBetweenAllVisiblePositions);
  bool changed = false;
  for (; charIt.length(); charIt.advance(1)) {
    UChar c = charIt.characterAt(0);
    // Consume spaces and no-break spaces; stop at newlines and at the
    // first non-whitespace character.
    if ((!isSpaceOrNewline(c) && c != noBreakSpaceCharacter) || c == '\n')
      break;
    m_end = charIt.endPosition();
    changed = true;
  }
  if (!changed)
    return;
  m_hasTrailingWhitespace = true;
}
// Maps base/extent to their VisiblePosition deep equivalents (rendered
// positions), repairs a dangling endpoint by mirroring the other one, and
// establishes m_baseIsFirst (document order of base vs. extent).
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setBaseAndExtentToDeepEquivalents() {
  // Move the selection to rendered positions, if possible.
  bool baseAndExtentEqual = m_base == m_extent;
  if (m_base.isNotNull()) {
    m_base = createVisiblePosition(m_base, m_affinity).deepEquivalent();
    if (baseAndExtentEqual)
      m_extent = m_base;
  }
  if (m_extent.isNotNull() && !baseAndExtentEqual)
    m_extent = createVisiblePosition(m_extent, m_affinity).deepEquivalent();

  // Make sure we do not have a dangling base or extent.
  if (m_base.isNull() && m_extent.isNull()) {
    m_baseIsFirst = true;
  } else if (m_base.isNull()) {
    m_base = m_extent;
    m_baseIsFirst = true;
  } else if (m_extent.isNull()) {
    m_extent = m_base;
    m_baseIsFirst = true;
  } else {
    m_baseIsFirst = m_base.compareTo(m_extent) <= 0;
  }
}
// Expands |passedStart| backward to the boundary dictated by |granularity|
// (word / sentence / line / paragraph / document). CharacterGranularity is
// the identity. Used by validate() to canonicalize the selection start.
template <typename Strategy>
static PositionTemplate<Strategy> computeStartRespectingGranularity(
    const PositionWithAffinityTemplate<Strategy>& passedStart,
    TextGranularity granularity) {
  DCHECK(passedStart.isNotNull());
  switch (granularity) {
    case CharacterGranularity:
      // Don't do any expansion.
      return passedStart.position();
    case WordGranularity: {
      // General case: Select the word the caret is positioned inside of.
      // If the caret is on the word boundary, select the word according to
      // |wordSide|.
      // Edge case: If the caret is after the last word in a soft-wrapped line
      // or the last word in the document, select that last word
      // (LeftWordIfOnBoundary).
      // Edge case: If the caret is after the last word in a paragraph, select
      // from the end of the last word to the line break (also
      // RightWordIfOnBoundary);
      const VisiblePositionTemplate<Strategy> visibleStart =
          createVisiblePosition(passedStart);
      if (isEndOfEditableOrNonEditableContent(visibleStart) ||
          (isEndOfLine(visibleStart) && !isStartOfLine(visibleStart) &&
           !isEndOfParagraph(visibleStart))) {
        return startOfWord(visibleStart, LeftWordIfOnBoundary).deepEquivalent();
      }
      return startOfWord(visibleStart, RightWordIfOnBoundary).deepEquivalent();
    }
    case SentenceGranularity:
      return startOfSentence(createVisiblePosition(passedStart))
          .deepEquivalent();
    case LineGranularity:
      return startOfLine(createVisiblePosition(passedStart)).deepEquivalent();
    case LineBoundary:
      return startOfLine(createVisiblePosition(passedStart)).deepEquivalent();
    case ParagraphGranularity: {
      const VisiblePositionTemplate<Strategy> pos =
          createVisiblePosition(passedStart);
      // At a start-of-line position that is also the very end of editable
      // content, back up to the previous paragraph's start.
      if (isStartOfLine(pos) && isEndOfEditableOrNonEditableContent(pos))
        return startOfParagraph(previousPositionOf(pos)).deepEquivalent();
      return startOfParagraph(pos).deepEquivalent();
    }
    case DocumentBoundary:
      return startOfDocument(createVisiblePosition(passedStart))
          .deepEquivalent();
    case ParagraphBoundary:
      return startOfParagraph(createVisiblePosition(passedStart))
          .deepEquivalent();
    case SentenceBoundary:
      return startOfSentence(createVisiblePosition(passedStart))
          .deepEquivalent();
  }
  NOTREACHED();
  return passedStart.position();
}
// Expands |passedEnd| forward to the boundary dictated by |granularity|,
// mirroring computeStartRespectingGranularity(). |start| (the already
// canonicalized selection start) is consulted for the empty-table-cell
// special case. The word/line/paragraph cases deliberately absorb the
// following "paragraph break" unless a table boundary intervenes.
template <typename Strategy>
static PositionTemplate<Strategy> computeEndRespectingGranularity(
    const PositionTemplate<Strategy>& start,
    const PositionWithAffinityTemplate<Strategy>& passedEnd,
    TextGranularity granularity) {
  DCHECK(passedEnd.isNotNull());
  switch (granularity) {
    case CharacterGranularity:
      // Don't do any expansion.
      return passedEnd.position();
    case WordGranularity: {
      // General case: Select the word the caret is positioned inside of.
      // If the caret is on the word boundary, select the word according to
      // |wordSide|.
      // Edge case: If the caret is after the last word in a soft-wrapped line
      // or the last word in the document, select that last word
      // (|LeftWordIfOnBoundary|).
      // Edge case: If the caret is after the last word in a paragraph, select
      // from the end of the last word to the line break (also
      // |RightWordIfOnBoundary|);
      const VisiblePositionTemplate<Strategy> originalEnd =
          createVisiblePosition(passedEnd);
      EWordSide side = RightWordIfOnBoundary;
      if (isEndOfEditableOrNonEditableContent(originalEnd) ||
          (isEndOfLine(originalEnd) && !isStartOfLine(originalEnd) &&
           !isEndOfParagraph(originalEnd)))
        side = LeftWordIfOnBoundary;
      const VisiblePositionTemplate<Strategy> wordEnd =
          endOfWord(originalEnd, side);
      if (!isEndOfParagraph(originalEnd))
        return wordEnd.deepEquivalent();
      if (isEmptyTableCell(start.anchorNode()))
        return wordEnd.deepEquivalent();
      // Select the paragraph break (the space from the end of a paragraph
      // to the start of the next one) to match TextEdit.
      const VisiblePositionTemplate<Strategy> end = nextPositionOf(wordEnd);
      Element* const table = tableElementJustBefore(end);
      if (!table) {
        if (end.isNull())
          return wordEnd.deepEquivalent();
        return end.deepEquivalent();
      }
      if (!isEnclosingBlock(table))
        return wordEnd.deepEquivalent();
      // The paragraph break after the last paragraph in the last cell
      // of a block table ends at the start of the paragraph after the
      // table.
      const VisiblePositionTemplate<Strategy> next =
          nextPositionOf(end, CannotCrossEditingBoundary);
      if (next.isNull())
        return wordEnd.deepEquivalent();
      return next.deepEquivalent();
    }
    case SentenceGranularity:
      return endOfSentence(createVisiblePosition(passedEnd)).deepEquivalent();
    case LineGranularity: {
      const VisiblePositionTemplate<Strategy> end =
          endOfLine(createVisiblePosition(passedEnd));
      if (!isEndOfParagraph(end))
        return end.deepEquivalent();
      // If the end of this line is at the end of a paragraph, include the
      // space after the end of the line in the selection.
      const VisiblePositionTemplate<Strategy> next = nextPositionOf(end);
      if (next.isNull())
        return end.deepEquivalent();
      return next.deepEquivalent();
    }
    case LineBoundary:
      return endOfLine(createVisiblePosition(passedEnd)).deepEquivalent();
    case ParagraphGranularity: {
      const VisiblePositionTemplate<Strategy> visibleParagraphEnd =
          endOfParagraph(createVisiblePosition(passedEnd));
      // Include the "paragraph break" (the space from the end of this
      // paragraph to the start of the next one) in the selection.
      const VisiblePositionTemplate<Strategy> end =
          nextPositionOf(visibleParagraphEnd);
      Element* const table = tableElementJustBefore(end);
      if (!table) {
        if (end.isNull())
          return visibleParagraphEnd.deepEquivalent();
        return end.deepEquivalent();
      }
      if (!isEnclosingBlock(table)) {
        // There is no paragraph break after the last paragraph in the
        // last cell of an inline table.
        return visibleParagraphEnd.deepEquivalent();
      }
      // The paragraph break after the last paragraph in the last cell of
      // a block table ends at the start of the paragraph after the table,
      // not at the position just after the table.
      const VisiblePositionTemplate<Strategy> next =
          nextPositionOf(end, CannotCrossEditingBoundary);
      if (next.isNull())
        return visibleParagraphEnd.deepEquivalent();
      return next.deepEquivalent();
    }
    case DocumentBoundary:
      return endOfDocument(createVisiblePosition(passedEnd)).deepEquivalent();
    case ParagraphBoundary:
      return endOfParagraph(createVisiblePosition(passedEnd)).deepEquivalent();
    case SentenceBoundary:
      return endOfSentence(createVisiblePosition(passedEnd)).deepEquivalent();
  }
  NOTREACHED();
  return passedEnd.position();
}
// Recomputes m_selectionType from the current start/end and normalizes
// affinity: only carets carry a meaningful affinity, so every other
// selection type is forced back to Downstream.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::updateSelectionType() {
  m_selectionType = computeSelectionType(m_start, m_end);
  const bool isCaret = m_selectionType == CaretSelection;
  if (!isCaret)
    m_affinity = TextAffinity::Downstream;
}
// Canonicalizes the selection: maps base/extent to rendered deep
// equivalents, derives start/end expanded per |granularity|, then clamps
// the result so it crosses neither shadow-DOM nor editing boundaries.
// Requires an up-to-date layout tree for both endpoints.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::validate(TextGranularity granularity) {
  DCHECK(!needsLayoutTreeUpdate(m_base));
  DCHECK(!needsLayoutTreeUpdate(m_extent));
  // TODO(xiaochengh): Add a DocumentLifecycle::DisallowTransitionScope here.
  m_granularity = granularity;
  m_hasTrailingWhitespace = false;
  setBaseAndExtentToDeepEquivalents();
  if (m_base.isNull() || m_extent.isNull()) {
    // No usable endpoints: reset to the empty selection.
    m_base = m_extent = m_start = m_end = PositionTemplate<Strategy>();
    updateSelectionType();
    return;
  }
  const PositionTemplate<Strategy> start = m_baseIsFirst ? m_base : m_extent;
  const PositionTemplate<Strategy> newStart = computeStartRespectingGranularity(
      PositionWithAffinityTemplate<Strategy>(start, m_affinity), granularity);
  m_start = newStart.isNotNull() ? newStart : start;
  const PositionTemplate<Strategy> end = m_baseIsFirst ? m_extent : m_base;
  const PositionTemplate<Strategy> newEnd = computeEndRespectingGranularity(
      m_start, PositionWithAffinityTemplate<Strategy>(end, m_affinity),
      granularity);
  m_end = newEnd.isNotNull() ? newEnd : end;
  adjustSelectionToAvoidCrossingShadowBoundaries();
  adjustSelectionToAvoidCrossingEditingBoundaries();
  updateSelectionType();
  if (getSelectionType() == RangeSelection) {
    // "Constrain" the selection to be the smallest equivalent range of
    // nodes. This is a somewhat arbitrary choice, but experience shows that
    // it is useful to make the selection "canonical" (if only for
    // purposes of comparing selections). This is an ideal point of the code
    // to do this operation, since all selection changes that result in a
    // RANGE come through here before anyone uses it.
    // TODO(yosin) Canonicalizing is good, but haven't we already done it
    // (when we set these two positions to |VisiblePosition|
    // |deepEquivalent()|s above)?
    m_start = mostForwardCaretPosition(m_start);
    m_end = mostBackwardCaretPosition(m_end);
  }
}
// A selection is valid for |document| when it is empty, or when it belongs
// to that document and none of its four positions has been orphaned.
template <typename Strategy>
bool VisibleSelectionTemplate<Strategy>::isValidFor(
    const Document& document) const {
  if (isNone())
    return true;
  if (m_base.document() != &document)
    return false;
  return !(m_base.isOrphan() || m_extent.isOrphan() || m_start.isOrphan() ||
           m_end.isOrphan());
}
// TODO(yosin) This function breaks the invariant of this class.
// But because we use VisibleSelection to store values in editing commands for
// use when undoing the command, we need to be able to create a selection that
// while currently invalid, will be valid once the changes are undone. This is a
// design problem. To fix it we either need to change the invariants of
// |VisibleSelection| or create a new class for editing to use that can
// manipulate selections that are not currently valid.
// Sets base/extent directly, bypassing validate(); see the note above for
// why editing commands need this to store not-currently-valid selections
// for undo. Start/end and the selection type are still derived here.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::setWithoutValidation(
    const PositionTemplate<Strategy>& base,
    const PositionTemplate<Strategy>& extent) {
  if (base.isNull() || extent.isNull()) {
    // A missing endpoint collapses the whole selection to empty.
    m_base = m_extent = m_start = m_end = PositionTemplate<Strategy>();
    updateSelectionType();
    return;
  }
  m_base = base;
  m_extent = extent;
  m_baseIsFirst = base.compareTo(extent) <= 0;
  if (m_baseIsFirst) {
    m_start = base;
    m_end = extent;
  } else {
    m_start = extent;
    m_end = base;
  }
  m_selectionType = base == extent ? CaretSelection : RangeSelection;
  if (m_selectionType != CaretSelection) {
    // Since |m_affinity| for non-|CaretSelection| is always |Downstream|,
    // we should keep this invariant. Note: This function can be called with
    // |m_affinity| is |TextAffinity::Upstream|.
    m_affinity = TextAffinity::Downstream;
  }
}
// Delegates to SelectionAdjuster once all endpoints are known to be non-null.
template <typename Strategy>
void VisibleSelectionTemplate<
    Strategy>::adjustSelectionToAvoidCrossingShadowBoundaries() {
  // Bug fix: the third operand used to re-test |m_base| instead of |m_end|,
  // so a null |m_end| slipped through to the adjuster. This now mirrors the
  // guard in adjustSelectionToAvoidCrossingEditingBoundaries().
  if (m_base.isNull() || m_start.isNull() || m_end.isNull())
    return;
  SelectionAdjuster::adjustSelectionToAvoidCrossingShadowBoundaries(this);
}
// Walks up from |node| and returns the editable root of the first editable
// node encountered; stops (returning null) at <body> or the tree top.
static Element* lowestEditableAncestor(Node* node) {
  for (Node* runner = node; runner; runner = runner->parentNode()) {
    if (hasEditableStyle(*runner))
      return rootEditableElement(*runner);
    if (isHTMLBodyElement(*runner))
      return nullptr;
  }
  return nullptr;
}
// Clamps |m_start|/|m_end| (and possibly |m_extent|) so the selection does
// not span across editability boundaries relative to |m_base|'s region.
// NOTE(review): statement order here is load-bearing; the two symmetric walks
// below (backward from end, forward from start) must run in this order since
// the second walk reads the possibly-updated |m_start|/|m_end|.
template <typename Strategy>
void VisibleSelectionTemplate<
    Strategy>::adjustSelectionToAvoidCrossingEditingBoundaries() {
  if (m_base.isNull() || m_start.isNull() || m_end.isNull())
    return;
  ContainerNode* baseRoot = highestEditableRoot(m_base);
  ContainerNode* startRoot = highestEditableRoot(m_start);
  ContainerNode* endRoot = highestEditableRoot(m_end);
  Element* baseEditableAncestor =
      lowestEditableAncestor(m_base.computeContainerNode());
  // The base, start and end are all in the same region. No adjustment
  // necessary.
  if (baseRoot == startRoot && baseRoot == endRoot)
    return;
  // The selection is based in editable content.
  if (baseRoot) {
    // If the start is outside the base's editable root, cap it at the start of
    // that root.
    // If the start is in non-editable content that is inside the base's
    // editable root, put it at the first editable position after start inside
    // the base's editable root.
    if (startRoot != baseRoot) {
      const VisiblePositionTemplate<Strategy> first =
          firstEditableVisiblePositionAfterPositionInRoot(m_start, *baseRoot);
      m_start = first.deepEquivalent();
      if (m_start.isNull()) {
        NOTREACHED();
        m_start = m_end;
      }
    }
    // If the end is outside the base's editable root, cap it at the end of that
    // root.
    // If the end is in non-editable content that is inside the base's root, put
    // it at the last editable position before the end inside the base's root.
    if (endRoot != baseRoot) {
      const VisiblePositionTemplate<Strategy> last =
          lastEditableVisiblePositionBeforePositionInRoot(m_end, *baseRoot);
      m_end = last.deepEquivalent();
      if (m_end.isNull())
        m_end = m_start;
    }
    // The selection is based in non-editable content.
  } else {
    // FIXME: Non-editable pieces inside editable content should be atomic, in
    // the same way that editable pieces in non-editable content are atomic.
    // The selection ends in editable content or non-editable content inside a
    // different editable ancestor, move backward until non-editable content
    // inside the same lowest editable ancestor is reached.
    Element* endEditableAncestor =
        lowestEditableAncestor(m_end.computeContainerNode());
    if (endRoot || endEditableAncestor != baseEditableAncestor) {
      // Walk backward candidate-by-candidate; when a walk dead-ends inside a
      // shadow tree, hop out to just after the shadow host and continue.
      PositionTemplate<Strategy> p = previousVisuallyDistinctCandidate(m_end);
      Element* shadowAncestor = endRoot ? endRoot->ownerShadowHost() : nullptr;
      if (p.isNull() && shadowAncestor)
        p = PositionTemplate<Strategy>::afterNode(shadowAncestor);
      while (p.isNotNull() &&
             !(lowestEditableAncestor(p.computeContainerNode()) ==
                   baseEditableAncestor &&
               !isEditablePosition(p))) {
        Element* root = rootEditableElementOf(p);
        shadowAncestor = root ? root->ownerShadowHost() : nullptr;
        p = isAtomicNode(p.computeContainerNode())
                ? PositionTemplate<Strategy>::inParentBeforeNode(
                      *p.computeContainerNode())
                : previousVisuallyDistinctCandidate(p);
        if (p.isNull() && shadowAncestor)
          p = PositionTemplate<Strategy>::afterNode(shadowAncestor);
      }
      const VisiblePositionTemplate<Strategy> previous =
          createVisiblePosition(p);
      if (previous.isNull()) {
        // The selection crosses an Editing boundary. This is a
        // programmer error in the editing code. Happy debugging!
        NOTREACHED();
        m_base = PositionTemplate<Strategy>();
        m_extent = PositionTemplate<Strategy>();
        validate();
        return;
      }
      m_end = previous.deepEquivalent();
    }
    // The selection starts in editable content or non-editable content inside a
    // different editable ancestor, move forward until non-editable content
    // inside the same lowest editable ancestor is reached.
    Element* startEditableAncestor =
        lowestEditableAncestor(m_start.computeContainerNode());
    if (startRoot || startEditableAncestor != baseEditableAncestor) {
      // Mirror image of the backward walk above, moving forward from start.
      PositionTemplate<Strategy> p = nextVisuallyDistinctCandidate(m_start);
      Element* shadowAncestor =
          startRoot ? startRoot->ownerShadowHost() : nullptr;
      if (p.isNull() && shadowAncestor)
        p = PositionTemplate<Strategy>::beforeNode(shadowAncestor);
      while (p.isNotNull() &&
             !(lowestEditableAncestor(p.computeContainerNode()) ==
                   baseEditableAncestor &&
               !isEditablePosition(p))) {
        Element* root = rootEditableElementOf(p);
        shadowAncestor = root ? root->ownerShadowHost() : nullptr;
        p = isAtomicNode(p.computeContainerNode())
                ? PositionTemplate<Strategy>::inParentAfterNode(
                      *p.computeContainerNode())
                : nextVisuallyDistinctCandidate(p);
        if (p.isNull() && shadowAncestor)
          p = PositionTemplate<Strategy>::beforeNode(shadowAncestor);
      }
      const VisiblePositionTemplate<Strategy> next = createVisiblePosition(p);
      if (next.isNull()) {
        // The selection crosses an Editing boundary. This is a
        // programmer error in the editing code. Happy debugging!
        NOTREACHED();
        m_base = PositionTemplate<Strategy>();
        m_extent = PositionTemplate<Strategy>();
        validate();
        return;
      }
      m_start = next.deepEquivalent();
    }
  }
  // Correct the extent if necessary.
  if (baseEditableAncestor !=
      lowestEditableAncestor(m_extent.computeContainerNode()))
    m_extent = m_baseIsFirst ? m_end : m_start;
}
// True when the selection's start position is in editable content.
template <typename Strategy>
bool VisibleSelectionTemplate<Strategy>::isContentEditable() const {
  return isEditablePosition(start());
}
// Currently identical to isContentEditable(): both test editability of the
// start position.
template <typename Strategy>
bool VisibleSelectionTemplate<Strategy>::hasEditableStyle() const {
  return isEditablePosition(start());
}
// Rich editability is evaluated on the DOM-tree equivalent of the start
// position, regardless of the strategy in use.
template <typename Strategy>
bool VisibleSelectionTemplate<Strategy>::isContentRichlyEditable() const {
  return isRichlyEditablePosition(toPositionInDOMTree(start()));
}
// Editable root containing the selection start, or null when start is not in
// editable content.
template <typename Strategy>
Element* VisibleSelectionTemplate<Strategy>::rootEditableElement() const {
  return rootEditableElementOf(start());
}
// Re-validates this selection against the current (clean) layout tree and
// re-applies trailing-whitespace expansion when it was previously requested.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::updateIfNeeded() {
  Document* document = m_base.document();
  if (!document)
    return;
  DCHECK(!document->needsLayoutTreeUpdate());
  // Snapshot taken before validate(), which may reset the flag — preserves
  // the original ordering.
  const bool shouldAppendTrailingWhitespace = m_hasTrailingWhitespace;
  validate(m_granularity);
  if (shouldAppendTrailingWhitespace)
    appendTrailingWhitespace();
}
template <typename Strategy>
static bool equalSelectionsAlgorithm(
const VisibleSelectionTemplate<Strategy>& selection1,
const VisibleSelectionTemplate<Strategy>& selection2) {
if (selection1.affinity() != selection2.affinity() ||
selection1.isDirectional() != selection2.isDirectional())
return false;
if (selection1.isNone())
return selection2.isNone();
const VisibleSelectionTemplate<Strategy> selectionWrapper1(selection1);
const VisibleSelectionTemplate<Strategy> selectionWrapper2(selection2);
return selectionWrapper1.start() == selectionWrapper2.start() &&
selectionWrapper1.end() == selectionWrapper2.end() &&
selectionWrapper1.base() == selectionWrapper2.base() &&
selectionWrapper1.extent() == selectionWrapper2.extent();
}
// Equality delegates to equalSelectionsAlgorithm(), which compares affinity,
// directionality and the four boundary positions.
template <typename Strategy>
bool VisibleSelectionTemplate<Strategy>::operator==(
    const VisibleSelectionTemplate<Strategy>& other) const {
  return equalSelectionsAlgorithm<Strategy>(*this, other);
}
#ifndef NDEBUG
// Debug-only dump: prints the anchor node's subtree with the start/end
// positions marked "S"/"E", followed by both positions' anchor/offset info.
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::showTreeForThis() const {
  if (!start().anchorNode())
    return;
  LOG(INFO) << "\n"
            << start()
                   .anchorNode()
                   ->toMarkedTreeString(start().anchorNode(), "S",
                                        end().anchorNode(), "E")
                   .utf8()
                   .data()
            << "start: " << start().toAnchorTypeAndOffsetString().utf8().data()
            << "\n"
            << "end: " << end().toAnchorTypeAndOffsetString().utf8().data();
}
#endif
// Streams a human-readable description of |selection| (used by test output).
template <typename Strategy>
void VisibleSelectionTemplate<Strategy>::PrintTo(
    const VisibleSelectionTemplate<Strategy>& selection,
    std::ostream* ostream) {
  if (selection.isNone()) {
    *ostream << "VisibleSelection()";
    return;
  }
  std::ostream& out = *ostream;
  out << "VisibleSelection(base: " << selection.base();
  out << " extent:" << selection.extent();
  out << " start: " << selection.start() << " end: " << selection.end();
  out << ' ' << selection.affinity() << ' ';
  out << (selection.isDirectional() ? "Directional" : "NonDirectional");
  out << ')';
}
// Explicit instantiations for the two strategies used elsewhere in the
// codebase (DOM tree and flat tree).
template class CORE_TEMPLATE_EXPORT VisibleSelectionTemplate<EditingStrategy>;
template class CORE_TEMPLATE_EXPORT
    VisibleSelectionTemplate<EditingInFlatTreeStrategy>;
// Stream insertion for DOM-tree selections; forwards to PrintTo().
std::ostream& operator<<(std::ostream& ostream,
                         const VisibleSelection& selection) {
  VisibleSelection::PrintTo(selection, &ostream);
  return ostream;
}
// Stream insertion for flat-tree selections; forwards to PrintTo().
std::ostream& operator<<(std::ostream& ostream,
                         const VisibleSelectionInFlatTree& selection) {
  VisibleSelectionInFlatTree::PrintTo(selection, &ostream);
  return ostream;
}
} // namespace blink
#ifndef NDEBUG
// Debugger convenience: dump a selection from gdb/lldb by reference.
void showTree(const blink::VisibleSelection& sel) {
  sel.showTreeForThis();
}
// Debugger convenience: pointer overload; null is tolerated.
void showTree(const blink::VisibleSelection* sel) {
  if (sel)
    sel->showTreeForThis();
}
// Debugger convenience: flat-tree selection, by reference.
void showTree(const blink::VisibleSelectionInFlatTree& sel) {
  sel.showTreeForThis();
}
// Debugger convenience: flat-tree selection, pointer overload; null tolerated.
void showTree(const blink::VisibleSelectionInFlatTree* sel) {
  if (sel)
    sel->showTreeForThis();
}
#endif
| 11,314 |
2,338 | <filename>mlir/lib/Dialect/SPIRV/Transforms/RewriteInsertsPass.cpp
//===- RewriteInsertsPass.cpp - MLIR conversion pass ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to rewrite sequential chains of
// `spirv::CompositeInsert` operations into `spirv::CompositeConstruct`
// operations.
//
//===----------------------------------------------------------------------===//
#include "PassDetail.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
#include "mlir/Dialect/SPIRV/Transforms/Passes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
using namespace mlir;
namespace {
/// Replaces sequential chains of `spirv::CompositeInsertOp` operation into
/// `spirv::CompositeConstructOp` operation if possible.
/// Replaces sequential chains of `spirv::CompositeInsertOp` operation into
/// `spirv::CompositeConstructOp` operation if possible.
class RewriteInsertsPass
    : public SPIRVRewriteInsertsPassBase<RewriteInsertsPass> {
public:
  void runOnOperation() override;

private:
  /// Collects a sequential insertion chain by the given
  /// `spirv::CompositeInsertOp` operation, if the given operation is the last
  /// in the chain. On success, `insertions` holds one insert op per element,
  /// ordered by element index.
  LogicalResult
  collectInsertionChain(spirv::CompositeInsertOp op,
                        SmallVectorImpl<spirv::CompositeInsertOp> &insertions);
};
} // anonymous namespace
void RewriteInsertsPass::runOnOperation() {
  // Phase 1: collect complete insertion chains. Collection is separated from
  // rewriting so the walk does not mutate the IR it is traversing.
  SmallVector<SmallVector<spirv::CompositeInsertOp, 4>, 4> workList;
  getOperation().walk([this, &workList](spirv::CompositeInsertOp op) {
    SmallVector<spirv::CompositeInsertOp, 4> insertions;
    if (succeeded(collectInsertionChain(op, insertions)))
      workList.push_back(insertions);
  });
  // Phase 2: replace each chain with a single CompositeConstruct.
  for (const auto &insertions : workList) {
    auto lastCompositeInsertOp = insertions.back();
    auto compositeType = lastCompositeInsertOp.getType();
    auto location = lastCompositeInsertOp.getLoc();
    SmallVector<Value, 4> operands;
    // Collect inserted objects.
    for (auto insertionOp : insertions)
      operands.push_back(insertionOp.object());
    OpBuilder builder(lastCompositeInsertOp);
    auto compositeConstructOp = builder.create<spirv::CompositeConstructOp>(
        location, compositeType, operands);
    lastCompositeInsertOp.replaceAllUsesWith(
        compositeConstructOp->getResult(0));
    // Erase ops. Reverse order so each op's composite operand use is dropped
    // before its producer is considered; ops still used elsewhere are kept.
    for (auto insertOp : llvm::reverse(insertions)) {
      auto *op = insertOp.getOperation();
      if (op->use_empty())
        insertOp.erase();
    }
  }
}
// Walks backward from `op` (which must insert the *last* element,
// numElements - 1) through each op's composite operand, expecting element
// indices numElements-1, numElements-2, ..., 0 in strict sequence.
LogicalResult RewriteInsertsPass::collectInsertionChain(
    spirv::CompositeInsertOp op,
    SmallVectorImpl<spirv::CompositeInsertOp> &insertions) {
  auto indicesArrayAttr = op.indices().cast<ArrayAttr>();
  // TODO: handle nested composite object.
  if (indicesArrayAttr.size() == 1) {
    auto numElements =
        op.composite().getType().cast<spirv::CompositeType>().getNumElements();
    auto index = indicesArrayAttr[0].cast<IntegerAttr>().getInt();
    // Need a last index to collect a sequential chain.
    if (index + 1 != numElements)
      return failure();
    insertions.resize(numElements);
    while (true) {
      // Record the insert for the current element, then step to its producer.
      insertions[index] = op;
      if (index == 0)
        return success();
      op = op.composite().getDefiningOp<spirv::CompositeInsertOp>();
      if (!op)
        return failure();
      --index;
      indicesArrayAttr = op.indices().cast<ArrayAttr>();
      // The producer must insert exactly the next-lower single index.
      if ((indicesArrayAttr.size() != 1) ||
          (indicesArrayAttr[0].cast<IntegerAttr>().getInt() != index))
        return failure();
    }
  }
  return failure();
}
// Public factory for the pass registration machinery.
std::unique_ptr<mlir::OperationPass<spirv::ModuleOp>>
mlir::spirv::createRewriteInsertsPass() {
  return std::make_unique<RewriteInsertsPass>();
}
| 1,349 |
30,023 | """DataUpdateCoordinator for Plugwise."""
from datetime import timedelta
from typing import Any, NamedTuple
from plugwise import Smile
from plugwise.exceptions import PlugwiseException, XMLDataMissingError
from homeassistant.core import HomeAssistant
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN, LOGGER
class PlugwiseData(NamedTuple):
    """Plugwise data stored in the DataUpdateCoordinator."""

    # Gateway-wide state as returned by the Smile API.
    gateway: dict[str, Any]
    # Per-device state; presumably keyed by device id — verify against the
    # plugwise library's async_update() return value.
    devices: dict[str, dict[str, Any]]
class PlugwiseDataUpdateCoordinator(DataUpdateCoordinator[PlugwiseData]):
    """Class to manage fetching Plugwise data from single endpoint."""

    def __init__(self, hass: HomeAssistant, api: Smile) -> None:
        """Initialize the coordinator.

        Args:
            hass: The Home Assistant instance.
            api: Connected Smile API client used for all updates.
        """
        super().__init__(
            hass,
            LOGGER,
            name=api.smile_name or DOMAIN,
            # Poll interval depends on the gateway type; unknown types fall
            # back to 60 seconds.
            update_interval=DEFAULT_SCAN_INTERVAL.get(
                str(api.smile_type), timedelta(seconds=60)
            ),
            # Don't refresh immediately, give the device time to process
            # the change in state before we query it.
            request_refresh_debouncer=Debouncer(
                hass,
                LOGGER,
                cooldown=1.5,
                immediate=False,
            ),
        )
        self.api = api

    async def _async_update_data(self) -> PlugwiseData:
        """Fetch data from Plugwise.

        Raises:
            UpdateFailed: If the Smile returned no XML data, or on any other
                plugwise error.
        """
        try:
            data = await self.api.async_update()
        except XMLDataMissingError as err:
            raise UpdateFailed(
                f"No XML data received for: {self.api.smile_name}"
            ) from err
        except PlugwiseException as err:
            # Fixed typo in the user-visible message ("Updated failed").
            raise UpdateFailed(f"Update failed for: {self.api.smile_name}") from err
        return PlugwiseData(*data)
| 804 |
854 | <reponame>timxor/leetcode-journal
__________________________________________________________________________________________________
sample 4 ms submission
// Greedy partition: record the last index of every letter, then grow the
// current window until the scan index reaches the furthest last-occurrence
// seen; each such point closes a partition.
class Solution {
public:
    vector<int> partitionLabels(string S) {
        vector<int> lastIndex(26, -1);
        for (int pos = 0; pos < static_cast<int>(S.size()); ++pos)
            lastIndex[S[pos] - 'a'] = pos;
        vector<int> cuts;  // cumulative (exclusive) end positions
        int windowEnd = -1;
        for (int pos = 0; pos < static_cast<int>(S.size()); ++pos) {
            windowEnd = max(windowEnd, lastIndex[S[pos] - 'a']);
            if (windowEnd == pos) cuts.push_back(pos + 1);
        }
        // Convert cumulative cut positions into individual partition lengths.
        for (int k = static_cast<int>(cuts.size()) - 1; k > 0; --k)
            cuts[k] -= cuts[k - 1];
        return cuts;
    }
};
__________________________________________________________________________________________________
sample 8852 kb submission
static int fast_io = []() { std::ios::sync_with_stdio(false); cin.tie(nullptr); return 0; }();
// Same greedy partition, but tracks the current window as [windowStart,
// windowEnd] and emits each partition length directly, with no post-pass.
class Solution {
public:
    std::vector<int> partitionLabels(const std::string& S) {
        // Last occurrence of each byte value (zero-initialized, as before).
        std::array<int, 256> last = {0};
        for (int i = 0; i < static_cast<int>(S.size()); ++i) last[S[i]] = i;
        std::vector<int> lengths;
        lengths.reserve(S.size());
        int windowStart = 0;
        int windowEnd = 0;
        for (int i = 0; i < static_cast<int>(S.size()); ++i) {
            windowEnd = std::max(windowEnd, last[S[i]]);
            if (i != windowEnd) continue;
            lengths.push_back(windowEnd - windowStart + 1);
            windowStart = i + 1;
        }
        return lengths;
    }
};
__________________________________________________________________________________________________
| 612 |
350 | package com.github.penfeizhou.animation.apng.decode;
/**
 * @Description: Marker type for the PNG/APNG "IDAT" chunk, which carries the
 *               image data of the default (first) frame.
 * @Author: pengfei.zhou
 * @CreateDate: 2019/3/27
 */
class IDATChunk extends Chunk {
    static final int ID = Chunk.fourCCToInt("IDAT");
}
| 93 |
474 | <reponame>lzwjava/MCAlbum
//
// MCComment.h
// LZAlbum
//
// Created by wangyuansong on 15/3/12.
// Copyright (c) 2015年 lzw. All rights reserved.
//
#import "LZCommon.h"
#import "LCAlbum.h"
#define KEY_ALBUM @"album"
#define KEY_COMMENT_USER @"commentUser"
#define KEY_COMMENT_USERNAME @"commentUsername"
#define KEY_COMMENT_CONTENT @"commentContent"
#define KEY_TO_USER @"toUser"
// A single comment on an album share, persisted via AVOS (AVObject).
// NOTE(review): file banner says "MCComment.h" but the class is LZComment —
// confirm which name is current.
@interface LZComment : AVObject<AVSubclassing>
@property (nonatomic,strong) LCAlbum* album;// Associated album (share)
@property (nonatomic,strong) AVUser* commentUser;// User who wrote the comment
@property (nonatomic,strong) NSString* commentContent;// Comment text
@property (nonatomic,strong)AVUser* toUser;// User the comment is addressed to
// createdAt: time the comment was created
@end
| 299 |
45,293 | package jenum;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
// Presumably a compiler/annotation-processing test fixture: exercises a
// RUNTIME-retained annotation applied to an enum constructor parameter.
public enum JEnum {
  OK("123");

  @Retention(RetentionPolicy.RUNTIME)
  public @interface Foo {}

  JEnum(@Foo String foo) {}
}
| 89 |
404 | {
"type":"fail",
"buildTargetPHID":"PHID-not-real",
"unit":[
{
"name":"TestCreateFile",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestCreateFileOverwriteExisting",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestGenerateFlameGraph",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestGenerateFlameGraphPrintsToStdout",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestGenerateFlameGraphExecError",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestRunPerlScriptDoesExist",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestRunPerlScriptDoesNotExist",
"result":"pass",
"namespace":"visualization",
"engine":"Jenkins",
"duration":0
},
{
"name":"TestNewVisualizer",
"result":"fail",
"namespace":"visualization",
"details":"1. It fails ",
"engine":"Jenkins",
"duration":0
}
],
"__conduit__":{
"token":"<PASSWORD>"
}
} | 657 |
6,717 | <gh_stars>1000+
#include "internal.h"
#if DISPATCH_COCOA_COMPAT
void (*dispatch_begin_thread_4GC)(void) = dummy_function;
void (*dispatch_end_thread_4GC)(void) = dummy_function;
void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function;
void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function;
static dispatch_once_t _dispatch_main_q_port_pred;
static mach_port_t main_q_port;
#endif
#if WINOBJC
void *objc_autoreleasePoolPush(void);
void objc_autoreleasePoolPop(void *pool);
void *(*_dispatch_begin_NSAutoReleasePool)(void) = objc_autoreleasePoolPush;
void(*_dispatch_end_NSAutoReleasePool)(void *) = objc_autoreleasePoolPop;
#endif
#if TARGET_OS_WIN32
static dispatch_once_t _dispatch_window_message_pred;
static UINT _dispatch_main_window_message;
static UINT _dispatch_thread_window_message;
#endif
static bool _dispatch_program_is_probably_callback_driven;
// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
// Statically-initialized main queue: serial (width 1), already "running",
// suspended-locked, with a global refcount so it is never freed.
struct dispatch_queue_s _dispatch_main_q = {
    /*.do_vtable = */ &_dispatch_queue_vtable,
    /*.do_next = */ 0,
    /*.do_ref_cnt = */ DISPATCH_OBJECT_GLOBAL_REFCNT,
    /*.do_xref_cnt = */ DISPATCH_OBJECT_GLOBAL_REFCNT,
    /*.do_suspend_cnt = */ DISPATCH_OBJECT_SUSPEND_LOCK,
    /*.do_targetq = */ &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT / 2],
    /*.do_ctxt = */ 0,
    /*.do_finalizer = */ 0,
    /*.dq_running = */ 1,
    /*.dq_width = */ 1,
    /*.dq_items_tail = */ 0,
    /*.dq_items_head = */ 0,
    /*.dq_serialnum = */ 1,
    /*.dq_finalizer_ctxt = */ 0,
    /*.dq_specific_q = */ NULL,
    /*.dq_specific_list = */ NULL,
    /*.dq_manually_drained = */ 0,
    /*.dq_is_manually_draining = */ false,
    /*.dq_label = */ "com.apple.main-thread",
};
// Provide a real exported function for dispatch_get_main_queue(); the public
// header presumably defines it as a macro/inline, hence the #undef first.
#undef dispatch_get_main_queue
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
dispatch_queue_t dispatch_get_main_queue(void);
dispatch_queue_t
dispatch_get_main_queue(void)
{
	return &_dispatch_main_q;
}
/*
* XXXRW: Work-around for possible clang bug in which __builtin_trap() is not
* marked noreturn, leading to a build error as dispatch_main() *is* marked
* noreturn. Mask by marking __builtin_trap() as noreturn locally.
*/
#ifndef HAVE_NORETURN_BUILTIN_TRAP
void __builtin_trap(void) __attribute__((__noreturn__));
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4702) // unreachable code
#endif
// Parks the main thread forever so the main queue can be drained by the
// runtime: marks the process callback-driven and exits the calling pthread
// without returning. Must be called from the main thread (checked only when
// pthread_main_np() is available).
void
dispatch_main(void)
{
#if HAVE_PTHREAD_MAIN_NP
	if (pthread_main_np()) {
#endif
		_dispatch_program_is_probably_callback_driven = true;
		pthread_exit(NULL);
		DISPATCH_CRASH("pthread_exit() returned");
#if HAVE_PTHREAD_MAIN_NP
	}
	DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread");
#endif
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
// Out-of-line setter for the manual-drain re-entrancy flag (kept
// DISPATCH_NOINLINE — presumably to keep the drain fast path small).
DISPATCH_NOINLINE
static void
_dispatch_queue_set_manual_drain_state(dispatch_queue_t q, bool arg)
{
	q->dq_is_manually_draining = arg;
}
// Drains |q| to empty unless a drain is already in progress on it; the flag
// guards against re-entrant draining.
DISPATCH_NOINLINE
static void
_dispatch_manual_queue_drain(dispatch_queue_t q)
{
	if (!q->dq_is_manually_draining) {
		_dispatch_queue_set_manual_drain_state(q, true);
		_dispatch_queue_serial_drain_till_empty(q);
		_dispatch_queue_set_manual_drain_state(q, false);
	}
}
#if DISPATCH_COCOA_COMPAT
// One-time setup of the mach port used to wake the main queue: allocates a
// receive right and inserts a send right on it.
static void
_dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED)
{
	kern_return_t kr;
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port);
	DISPATCH_VERIFY_MIG(kr);
	(void)dispatch_assume_zero(kr);
	kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, MACH_MSG_TYPE_MAKE_SEND);
	DISPATCH_VERIFY_MIG(kr);
	(void)dispatch_assume_zero(kr);
	// Creating the port implies a callback-driven process; forking is no
	// longer considered safe once mach state exists.
	_dispatch_program_is_probably_callback_driven = true;
	_dispatch_safe_fork = false;
}
// Tears down the main-queue wakeup port, releasing both the send right
// (deallocate) and the receive right (mod_refs -1). Ensures init ran first so
// the once-token is consumed.
void
_dispatch_main_q_port_clean(void)
{
	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
	mach_port_t mp = main_q_port;
	kern_return_t kr;
	main_q_port = 0;
	if (mp) {
		kr = mach_port_deallocate(mach_task_self(), mp);
		DISPATCH_VERIFY_MIG(kr);
		(void)dispatch_assume_zero(kr);
		kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		(void)dispatch_assume_zero(kr);
	}
}
// CoreFoundation hook: drain the main queue when its wakeup message arrives
// (the message contents are unused).
void
_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg DISPATCH_UNUSED)
{
	_dispatch_manual_queue_drain(&_dispatch_main_q);
}
// CoreFoundation hook: lazily create and return the main-queue wakeup port.
mach_port_t
_dispatch_get_main_queue_port_4CF(void)
{
	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
	return main_q_port;
}
#endif
#if TARGET_OS_WIN32
// One-time registration of the Win32 window messages used for queue wakeups.
// Under WINOBJC this is a no-op (wakeups go through the callback instead).
static void
_dispatch_window_message_init(void *ctxt DISPATCH_UNUSED)
{
#if !defined( WINOBJC )
	_dispatch_thread_window_message = RegisterWindowMessageW(L"libdispatch-threadq");
	_dispatch_main_window_message = RegisterWindowMessageW(L"libdispatch-mainq");
#endif
}
// Lazily-initialized Win32 message id used to wake per-thread queues.
UINT dispatch_get_thread_window_message(void)
{
	dispatch_once_f(&_dispatch_window_message_pred, NULL, _dispatch_window_message_init);
	return _dispatch_thread_window_message;
}
// Lazily-initialized Win32 message id used to wake the main queue.
UINT dispatch_get_main_window_message(void)
{
	dispatch_once_f(&_dispatch_window_message_pred, NULL, _dispatch_window_message_init);
	return _dispatch_main_window_message;
}
#endif
#ifdef WINOBJC
// WinObjC path: the host registers a callback that is invoked (instead of a
// mach/Win32 message) whenever the main queue needs to be woken.
static dispatch_wake_main_thread_callback _main_queue_wakeup_callback;
static void *_main_queue_wakeup_callback_param;
DISPATCH_INLINE
void
dispatch_set_wakeup_callback(dispatch_wake_main_thread_callback callback, void *userptr)
{
	_main_queue_wakeup_callback = callback;
	_main_queue_wakeup_callback_param = userptr;
}
#endif
// Wakes the main queue via the platform-appropriate mechanism: a mach send on
// Darwin, the registered callback on WinObjC, or a posted thread message on
// plain Win32. Certain mach send failures are expected and ignored.
DISPATCH_NOINLINE
void
_dispatch_queue_wakeup_main(void)
{
#if DISPATCH_COCOA_COMPAT
	kern_return_t kr;
	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
	kr = _dispatch_send_wakeup_main_thread(main_q_port, 0);
	switch (kr) {
	case MACH_SEND_TIMEOUT:
	case MACH_SEND_TIMED_OUT:
	case MACH_SEND_INVALID_DEST:
		break;
	default:
		(void)dispatch_assume_zero(kr);
		break;
	}
	_dispatch_safe_fork = false;
#elif defined( WINOBJC )
	if ( _main_queue_wakeup_callback ) _main_queue_wakeup_callback(_main_queue_wakeup_callback_param);
#elif TARGET_OS_WIN32
	PostThreadMessage(GetThreadId(_pthread_get_native_handle(_dispatch_main_q.dq_manually_drained)), dispatch_get_main_window_message(), 0, 0);
#endif
}
// Wakes a per-thread queue. Only implemented for plain Win32 (thread message);
// a no-op elsewhere — see the TODO below for the Mac OS X design question.
DISPATCH_NOINLINE
void
_dispatch_queue_wakeup_thread(dispatch_queue_t q)
{
#if TARGET_OS_WIN32 && !defined( WINOBJC )
	PostThreadMessage(GetThreadId(_pthread_get_native_handle(q->dq_manually_drained)), dispatch_get_thread_window_message(), 0, 0);
#endif
	// TODO decide on Mac OS X per-thread queue semantics. A mach port per thread would work nicely enough, I think.
}
// Routes a manual wakeup to the main-queue path for the main queue and to the
// per-thread path for every other queue.
DISPATCH_NOINLINE
void
_dispatch_queue_wakeup_manual(dispatch_queue_t q)
{
	if (q != &_dispatch_main_q) {
		_dispatch_queue_wakeup_thread(q);
		return;
	}
	_dispatch_queue_wakeup_main();
}
#if !TARGET_OS_WIN32
// Blocks its (worker) thread forever in sigsuspend() with an empty mask,
// keeping the process alive for callback-driven programs.
static void
_dispatch_sigsuspend(void *ctxt DISPATCH_UNUSED)
{
	static const sigset_t mask;
	for (;;) {
		sigsuspend(&mask);
	}
}
#endif
// Cleanup path for the main queue; see inline comments for the POSIX and
// Cocoa-compat specifics.
DISPATCH_NOINLINE
void
_dispatch_queue_cleanup_main(void)
{
#if !TARGET_OS_WIN32
	// overload the "probably" variable to mean that dispatch_main() or
	// similar non-POSIX API was called
	// this has to run before the DISPATCH_COCOA_COMPAT below
	if (_dispatch_program_is_probably_callback_driven) {
		dispatch_async_f(dispatch_get_global_queue(0, 0), NULL, _dispatch_sigsuspend);
		sleep(1); // workaround 6778970
	}
#endif
#if DISPATCH_COCOA_COMPAT
	_dispatch_main_q_port_clean();
#endif
	// TODO Nothing to do here on Windows I think.
}
// Intentionally empty: per-thread queues currently need no cleanup (kept as a
// symmetric counterpart to _dispatch_queue_cleanup_main).
DISPATCH_NOINLINE
void
_dispatch_queue_cleanup_thread(void)
{
}
// Routes manual-queue cleanup to the appropriate path for |q|.
DISPATCH_NOINLINE
void
_dispatch_queue_cleanup_manual(dispatch_queue_t q)
{
	if (q != &_dispatch_main_q) {
		_dispatch_queue_cleanup_thread();
		return;
	}
	_dispatch_queue_cleanup_main();
}
// Drains the calling thread's queue, then drops the reference that
// dispatch_get_current_thread_queue() returned.
void
dispatch_thread_queue_callback(void)
{
	dispatch_queue_t q = dispatch_get_current_thread_queue();
	_dispatch_manual_queue_drain(q);
	dispatch_release(as_do(q));
}
// Public entry point for hosts that drive the main queue themselves.
void
dispatch_main_queue_callback(void)
{
	_dispatch_manual_queue_drain(dispatch_get_main_queue());
}
// Thin public wrapper over the internal next-timer-deadline query.
struct timespec *dispatch_get_next_timer_fire(struct timespec *howsoon)
{
	return _dispatch_get_next_timer_fire(howsoon);
}
| 3,575 |
4,234 | #pragma once
#include <mbgl/storage/resource.hpp>
#include <functional>
#include <string>
namespace mbgl {
// Holds an optional user-supplied callback that can rewrite a resource URL
// before a request is made; the rewritten URL is delivered asynchronously via
// FinishedCallback.
class ResourceTransform {
public:
    // Receives the final (possibly rewritten) URL.
    using FinishedCallback = std::function<void(const std::string&)>;
    // Receives the resource kind and original URL; must invoke the finished
    // callback exactly once with the URL to use.
    using TransformCallback = std::function<void(Resource::Kind kind, const std::string& url, FinishedCallback)>;

    ResourceTransform(TransformCallback = {});

    void transform(Resource::Kind, const std::string& url, FinishedCallback);

    // True when a transform callback was provided.
    explicit operator bool() const { return bool(transformCallback); }

private:
    TransformCallback transformCallback;
};
} // namespace mbgl
| 177 |
852 | <reponame>ckamtsikis/cmssw
#ifndef HcalSimAlgos_HcalShapes_h
#define HcalSimAlgos_HcalShapes_h
/** A class which decides which shape to return,
based on the DetId
*/
#include "FWCore/Framework/interface/Frameworkfwd.h"
#include "SimCalorimetry/CaloSimAlgos/interface/CaloShapes.h"
#include "SimCalorimetry/HcalSimAlgos/interface/HcalShape.h"
#include "SimCalorimetry/HcalSimAlgos/interface/ZDCShape.h"
#include "CalibFormats/HcalObjects/interface/HcalDbService.h"
#include <vector>
#include <map>
class CaloVShape;
class DetId;
class HcalShapes : public CaloShapes {
public:
  // Numeric shape ids; presumably detector/readout technology codes used to
  // select a pulse shape from the DB — confirm against HcalMCParam.
  enum { HPD = 101, LONG = 102, ZECOTEK = 201, HAMAMATSU = 202, HE2017 = 203, HE2018 = 206, HF = 301, ZDC = 401 };
  HcalShapes();
  ~HcalShapes() override;
  void setDbService(const HcalDbService* service) { theDbService = service; }
  // Returns the pulse shape for |detId|; |precise| selects the finer-sampled
  // shape map.
  const CaloVShape* shape(const DetId& detId, bool precise = false) const override;

private:
  typedef std::map<int, const CaloVShape*> ShapeMap;
  // hardcoded, if we can't figure it out from the DB
  const CaloVShape* defaultShape(const DetId& detId, bool precise = false) const;
  const ShapeMap& getShapeMap(bool precise) const;
  const HcalDbService* theDbService;
  ShapeMap theShapes;
  ShapeMap theShapesPrecise;
  ZDCShape theZDCShape;
  // list of vShapes.
  std::vector<HcalShape> theHcalShapes;
};
#endif
| 495 |
938 | //===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic JITLinker utility class.
//
//===----------------------------------------------------------------------===//
#include "JITLinkGeneric.h"
#include "EHFrameSupportImpl.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/MemoryBuffer.h"
#define DEBUG_TYPE "jitlink"
namespace llvm {
namespace jitlink {
// Out-of-line destructor (anchors the vtable in this translation unit).
JITLinkerBase::~JITLinkerBase() {}
// Phase 1 of the async link: build/prune the atom graph, lay out and allocate
// segments, then kick off the external-symbol lookup whose continuation runs
// linkPhase2. Ownership of |this| is threaded through |Self| across phases.
void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
  // Build the atom graph.
  if (auto GraphOrErr = buildGraph(Ctx->getObjectBuffer()))
    G = std::move(*GraphOrErr);
  else
    return Ctx->notifyFailed(GraphOrErr.takeError());
  assert(G && "Graph should have been created by buildGraph above");
  // Prune and optimize the graph.
  if (auto Err = runPasses(Passes.PrePrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));
  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" pre-pruning:\n";
    dumpGraph(dbgs());
  });
  prune(*G);
  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" post-pruning:\n";
    dumpGraph(dbgs());
  });
  // Run post-pruning passes.
  if (auto Err = runPasses(Passes.PostPrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));
  // Sort atoms into segments.
  layOutAtoms();
  // Allocate memory for segments.
  if (auto Err = allocateSegments(Layout))
    return Ctx->notifyFailed(std::move(Err));
  // Notify client that the defined atoms have been assigned addresses.
  Ctx->notifyResolved(*G);
  auto ExternalSymbols = getExternalSymbolNames();
  // We're about to hand off ownership of ourself to the continuation. Grab a
  // pointer to the context so that we can call it to initiate the lookup.
  //
  // FIXME: Once callee expressions are defined to be sequenced before argument
  // expressions (c++17) we can simplify all this to:
  //
  // Ctx->lookup(std::move(UnresolvedExternals),
  //             [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
  //               Self->linkPhase2(std::move(Self), std::move(Result));
  //             });
  //
  // FIXME: Use move capture once we have c++14.
  auto *TmpCtx = Ctx.get();
  // |Self| is released here and re-wrapped inside the continuation, so the
  // linker stays alive across the asynchronous lookup.
  auto *UnownedSelf = Self.release();
  auto Phase2Continuation =
      [UnownedSelf](Expected<AsyncLookupResult> LookupResult) {
        std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
        UnownedSelf->linkPhase2(std::move(Self), std::move(LookupResult));
      };
  TmpCtx->lookup(std::move(ExternalSymbols), std::move(Phase2Continuation));
}
// Phase 2: apply the lookup result to external atoms, copy and fix up atom
// content in working memory, then finalize the allocation asynchronously;
// the continuation runs linkPhase3 with ownership passed along as in phase 1.
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
                               Expected<AsyncLookupResult> LR) {
  // If the lookup failed, bail out.
  if (!LR)
    return deallocateAndBailOut(LR.takeError());
  // Assign addresses to external atoms.
  applyLookupResult(*LR);
  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" before copy-and-fixup:\n";
    dumpGraph(dbgs());
  });
  // Copy atom content to working memory and fix up.
  if (auto Err = copyAndFixUpAllAtoms(Layout, *Alloc))
    return deallocateAndBailOut(std::move(Err));
  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" after copy-and-fixup:\n";
    dumpGraph(dbgs());
  });
  if (auto Err = runPasses(Passes.PostFixupPasses, *G))
    return deallocateAndBailOut(std::move(Err));
  // FIXME: Use move capture once we have c++14.
  auto *UnownedSelf = Self.release();
  auto Phase3Continuation = [UnownedSelf](Error Err) {
    std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
    UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
  };
  Alloc->finalizeAsync(std::move(Phase3Continuation));
}
// Phase 3: report finalization failure, or hand the finished allocation to
// the client. |Self| (and thus the linker) dies when this returns.
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
  if (Err)
    return deallocateAndBailOut(std::move(Err));
  Ctx->notifyFinalized(std::move(Alloc));
}
// Runs each pass over |G| in order, stopping at (and returning) the first
// failure.
Error JITLinkerBase::runPasses(AtomGraphPassList &Passes, AtomGraph &G) {
  for (auto &P : Passes)
    if (auto Err = P(G))
      return Err;
  return Error::success();
}
// Compute the in-memory layout: group sections into segments keyed by
// protection flags (separating zero-fill from content sections), order
// sections by ordinal, and order atoms within each section by layout-next
// chains and address.
void JITLinkerBase::layOutAtoms() {
  // Group sections by protections, and whether or not they're zero-fill.
  for (auto &S : G->sections()) {
    // Skip empty sections.
    if (S.atoms_empty())
      continue;
    auto &SL = Layout[S.getProtectionFlags()];
    if (S.isZeroFill())
      SL.ZeroFillSections.push_back(SegmentLayout::SectionLayout(S));
    else
      SL.ContentSections.push_back(SegmentLayout::SectionLayout(S));
  }
  // Sort sections within the layout by ordinal.
  {
    auto CompareByOrdinal = [](const SegmentLayout::SectionLayout &LHS,
                               const SegmentLayout::SectionLayout &RHS) {
      return LHS.S->getSectionOrdinal() < RHS.S->getSectionOrdinal();
    };
    for (auto &KV : Layout) {
      auto &SL = KV.second;
      std::sort(SL.ContentSections.begin(), SL.ContentSections.end(),
                CompareByOrdinal);
      std::sort(SL.ZeroFillSections.begin(), SL.ZeroFillSections.end(),
                CompareByOrdinal);
    }
  }
  // Add atoms to the sections.
  for (auto &KV : Layout) {
    auto &SL = KV.second;
    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections}) {
      for (auto &SI : *SIList) {
        // First build the set of layout-heads (i.e. "heads" of layout-next
        // chains) by copying the section atoms, then eliminating any that
        // appear as layout-next targets.
        DenseSet<DefinedAtom *> LayoutHeads;
        for (auto *DA : SI.S->atoms())
          LayoutHeads.insert(DA);
        for (auto *DA : SI.S->atoms())
          if (DA->hasLayoutNext())
            LayoutHeads.erase(&DA->getLayoutNext());
        // Next, sort the layout heads by address order.
        std::vector<DefinedAtom *> OrderedLayoutHeads;
        OrderedLayoutHeads.reserve(LayoutHeads.size());
        for (auto *DA : LayoutHeads)
          OrderedLayoutHeads.push_back(DA);
        // Now sort the list of layout heads by address.
        // (DenseSet iteration order is unspecified; this sort makes the
        // resulting layout deterministic.)
        std::sort(OrderedLayoutHeads.begin(), OrderedLayoutHeads.end(),
                  [](const DefinedAtom *LHS, const DefinedAtom *RHS) {
                    return LHS->getAddress() < RHS->getAddress();
                  });
        // Now populate the SI.Atoms field by appending each of the chains.
        // Walking the layout-next chain from each head keeps chained atoms
        // contiguous and in chain order.
        for (auto *DA : OrderedLayoutHeads) {
          SI.Atoms.push_back(DA);
          while (DA->hasLayoutNext()) {
            auto &Next = DA->getLayoutNext();
            SI.Atoms.push_back(&Next);
            DA = &Next;
          }
        }
      }
    }
  }
  LLVM_DEBUG({
    dbgs() << "Segment ordering:\n";
    for (auto &KV : Layout) {
      dbgs() << " Segment "
             << static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
      auto &SL = KV.second;
      for (auto &SIEntry :
           {std::make_pair(&SL.ContentSections, "content sections"),
            std::make_pair(&SL.ZeroFillSections, "zero-fill sections")}) {
        auto &SIList = *SIEntry.first;
        dbgs() << " " << SIEntry.second << ":\n";
        for (auto &SI : SIList) {
          dbgs() << " " << SI.S->getName() << ":\n";
          for (auto *DA : SI.Atoms)
            dbgs() << " " << *DA << "\n";
        }
      }
    }
  });
}
// Compute the size and alignment of every segment (content part followed by
// zero-fill part, honoring per-section and per-atom alignment), request
// memory for all segments from the memory manager, then assign each atom its
// target address within the allocation.
// NOTE(review): the `Layout` parameter shadows the `Layout` member used by
// layOutAtoms/linkPhase2 — presumably the member is passed in at the call
// site; confirm.
Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {
  // Compute segment sizes and allocate memory.
  LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
  JITLinkMemoryManager::SegmentsRequestMap Segments;
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SegLayout = KV.second;
    // Calculate segment content size.
    size_t SegContentSize = 0;
    for (auto &SI : SegLayout.ContentSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");
      // Bump to section alignment before processing atoms.
      SegContentSize = alignTo(SegContentSize, SI.S->getAlignment());
      for (auto *DA : SI.Atoms) {
        SegContentSize = alignTo(SegContentSize, DA->getAlignment());
        SegContentSize += DA->getSize();
      }
    }
    // Get segment content alignment.
    // The segment as a whole must satisfy the alignment of its first section
    // and first atom (everything later is aligned relative to that base).
    unsigned SegContentAlign = 1;
    if (!SegLayout.ContentSections.empty()) {
      auto &FirstContentSection = SegLayout.ContentSections.front();
      SegContentAlign =
          std::max(FirstContentSection.S->getAlignment(),
                   FirstContentSection.Atoms.front()->getAlignment());
    }
    // Calculate segment zero-fill size.
    uint64_t SegZeroFillSize = 0;
    for (auto &SI : SegLayout.ZeroFillSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");
      // Bump to section alignment before processing atoms.
      SegZeroFillSize = alignTo(SegZeroFillSize, SI.S->getAlignment());
      for (auto *DA : SI.Atoms) {
        SegZeroFillSize = alignTo(SegZeroFillSize, DA->getAlignment());
        SegZeroFillSize += DA->getSize();
      }
    }
    // Calculate segment zero-fill alignment.
    uint32_t SegZeroFillAlign = 1;
    if (!SegLayout.ZeroFillSections.empty()) {
      auto &FirstZeroFillSection = SegLayout.ZeroFillSections.front();
      SegZeroFillAlign =
          std::max(FirstZeroFillSection.S->getAlignment(),
                   FirstZeroFillSection.Atoms.front()->getAlignment());
    }
    // A segment with no content takes its alignment from the zero-fill part.
    if (SegContentSize == 0)
      SegContentAlign = SegZeroFillAlign;
    // The zero-fill alignment must divide the content alignment (see error
    // message below) for both parts to be satisfiable in one segment.
    if (SegContentAlign % SegZeroFillAlign != 0)
      return make_error<JITLinkError>("First content atom alignment does not "
                                      "accommodate first zero-fill atom "
                                      "alignment");
    Segments[Prot] = {SegContentSize, SegContentAlign, SegZeroFillSize,
                      SegZeroFillAlign};
    LLVM_DEBUG({
      dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
             << static_cast<sys::Memory::ProtectionFlags>(Prot) << ": "
             << SegContentSize << " content bytes (alignment "
             << SegContentAlign << ") + " << SegZeroFillSize
             << " zero-fill bytes (alignment " << SegZeroFillAlign << ")";
    });
  }
  LLVM_DEBUG(dbgs() << " }\n");
  if (auto AllocOrErr = Ctx->getMemoryManager().allocate(Segments))
    Alloc = std::move(*AllocOrErr);
  else
    return AllocOrErr.takeError();
  LLVM_DEBUG({
    dbgs() << "JIT linker got working memory:\n";
    for (auto &KV : Layout) {
      auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
      dbgs() << " " << Prot << ": "
             << (const void *)Alloc->getWorkingMemory(Prot).data() << "\n";
    }
  });
  // Update atom target addresses.
  // Mirrors the size computation above so each atom lands at the offset that
  // was budgeted for it.
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SL = KV.second;
    JITTargetAddress AtomTargetAddr =
        Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections})
      for (auto &SI : *SIList) {
        AtomTargetAddr = alignTo(AtomTargetAddr, SI.S->getAlignment());
        for (auto *DA : SI.Atoms) {
          AtomTargetAddr = alignTo(AtomTargetAddr, DA->getAlignment());
          DA->setAddress(AtomTargetAddr);
          AtomTargetAddr += DA->getSize();
        }
      }
  }
  return Error::success();
}
// Collect the names of all unresolved external atoms in the graph, for
// passing to the context's symbol lookup.
// Returns the set of names; externals must not yet have an address and must
// be named (anonymous externals cannot be looked up).
DenseSet<StringRef> JITLinkerBase::getExternalSymbolNames() const {
  // Identify unresolved external atoms.
  DenseSet<StringRef> UnresolvedExternals;
  for (auto *DA : G->external_atoms()) {
    assert(DA->getAddress() == 0 &&
           "External has already been assigned an address");
    // The original assert tested both `!= StringRef()` and `!= ""`, which are
    // the same (empty) check; a single empty() test is equivalent.
    assert(!DA->getName().empty() && "Externals must be named");
    UnresolvedExternals.insert(DA->getName());
  }
  return UnresolvedExternals;
}
// Record the addresses produced by the external-symbol lookup on the
// corresponding external atoms of the graph. Every external atom must end up
// resolved (non-zero address) once the result has been applied.
void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
  for (auto &ResultEntry : Result) {
    Atom &ExternalAtom = G->getAtomByName(ResultEntry.first);
    assert(ExternalAtom.getAddress() == 0 && "Atom already resolved");
    ExternalAtom.setAddress(ResultEntry.second.getAddress());
  }
  LLVM_DEBUG({
    dbgs() << "Externals after applying lookup result:\n";
    for (auto *EA : G->external_atoms())
      dbgs() << " " << EA->getName() << ": "
             << formatv("{0:x16}", EA->getAddress()) << "\n";
  });
  assert(llvm::all_of(G->external_atoms(),
                      [](Atom *A) { return A->getAddress() != 0; }) &&
         "All atoms should have been resolved by this point");
}
// Abort the link: release the working-memory allocation and report the given
// error (joined with any deallocation error) to the context.
void JITLinkerBase::deallocateAndBailOut(Error Err) {
  assert(Err && "Should not be bailing out on success value");
  assert(Alloc && "can not call deallocateAndBailOut before allocation");
  auto DeallocErr = Alloc->deallocate();
  Ctx->notifyFailed(joinErrors(std::move(Err), std::move(DeallocErr)));
}
// Dump the atom graph, with human-readable edge-kind names, to the given
// stream.
void JITLinkerBase::dumpGraph(raw_ostream &OS) {
  assert(G && "Graph is not set yet");
  // Bug fix: previously this wrote to dbgs() unconditionally, silently
  // ignoring the OS parameter; route the dump to the requested stream.
  G->dump(OS, [this](Edge::Kind K) { return getEdgeKindName(K); });
}
// Dead-strip the graph: propagate liveness along keep-alive edges from the
// initially-live defined atoms, remove dead or should-discard atoms, and
// retarget live edges that pointed at discarded atoms to new external atoms.
void prune(AtomGraph &G) {
  std::vector<DefinedAtom *> Worklist;
  // Maps each should-discard atom to the edges that point at it, so those
  // edges can be retargeted if the atom is replaced by an external.
  DenseMap<DefinedAtom *, std::vector<Edge *>> EdgesToUpdate;
  // Build the initial worklist from all atoms initially live.
  for (auto *DA : G.defined_atoms()) {
    if (!DA->isLive() || DA->shouldDiscard())
      continue;
    for (auto &E : DA->edges()) {
      if (!E.getTarget().isDefined())
        continue;
      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());
      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }
  // Propagate live flags to all atoms reachable from the initial live set.
  while (!Worklist.empty()) {
    DefinedAtom &NextLive = *Worklist.back();
    Worklist.pop_back();
    assert(!NextLive.shouldDiscard() &&
           "should-discard nodes should never make it into the worklist");
    // If this atom has already been marked as live, or is marked to be
    // discarded, then skip it.
    if (NextLive.isLive())
      continue;
    // Otherwise set it as live and add any non-live atoms that it points to
    // to the worklist.
    NextLive.setLive(true);
    for (auto &E : NextLive.edges()) {
      if (!E.getTarget().isDefined())
        continue;
      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());
      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }
  // Collect atoms to remove, then remove them from the graph.
  std::vector<DefinedAtom *> AtomsToRemove;
  for (auto *DA : G.defined_atoms())
    if (DA->shouldDiscard() || !DA->isLive())
      AtomsToRemove.push_back(DA);
  LLVM_DEBUG(dbgs() << "Pruning atoms:\n");
  for (auto *DA : AtomsToRemove) {
    LLVM_DEBUG(dbgs() << " " << *DA << "... ");
    // Check whether we need to replace this atom with an external atom.
    //
    // We replace if all of the following hold:
    //   (1) The atom is marked should-discard,
    //   (2) it has live edges (i.e. edges from live atoms) pointing to it.
    //
    // Otherwise we simply delete the atom.
    // NOTE(review): DA is dereferenced (getName()) after removeDefinedAtom —
    // this assumes removal does not free the atom's storage; confirm.
    G.removeDefinedAtom(*DA);
    auto EdgesToUpdateItr = EdgesToUpdate.find(DA);
    if (EdgesToUpdateItr != EdgesToUpdate.end()) {
      auto &ExternalReplacement = G.addExternalAtom(DA->getName());
      for (auto *EdgeToUpdate : EdgesToUpdateItr->second)
        EdgeToUpdate->setTarget(ExternalReplacement);
      LLVM_DEBUG(dbgs() << "replaced with " << ExternalReplacement << "\n");
    } else
      LLVM_DEBUG(dbgs() << "deleted\n");
  }
  // Finally, discard any absolute symbols that were marked should-discard.
  {
    std::vector<Atom *> AbsoluteAtomsToRemove;
    for (auto *A : G.absolute_atoms())
      // NOTE(review): this condition also removes *live* absolute atoms
      // (shouldDiscard() || isLive()), which contradicts the comment above;
      // confirm whether shouldDiscard() alone was intended.
      if (A->shouldDiscard() || A->isLive())
        AbsoluteAtomsToRemove.push_back(A);
    for (auto *A : AbsoluteAtomsToRemove)
      G.removeAbsoluteAtom(*A);
  }
}
} // end namespace jitlink
} // end namespace llvm
| 6,531 |
2,151 | /* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* From pp_var.idl modified Wed Dec 14 18:08:00 2011. */
#ifndef PPAPI_C_PP_VAR_H_
#define PPAPI_C_PP_VAR_H_
#include "ppapi/c/pp_bool.h"
#include "ppapi/c/pp_macros.h"
#include "ppapi/c/pp_stdint.h"
/**
* @file
* This file defines the API for handling the passing of data types between
* your module and the page.
*/
/**
* @addtogroup Enums
* @{
*/
/**
* The <code>PP_VarType</code> is an enumeration of the different types that
* can be contained within a <code>PP_Var</code> structure.
*/
typedef enum {
  /**
   * An undefined value.
   */
  PP_VARTYPE_UNDEFINED = 0,
  /**
   * A NULL value. This is similar to undefined, but JavaScript differentiates
   * the two so it is exposed here as well.
   */
  PP_VARTYPE_NULL = 1,
  /**
   * A boolean value, use the <code>as_bool</code> member of the var.
   */
  PP_VARTYPE_BOOL = 2,
  /**
   * A 32-bit integer value. Use the <code>as_int</code> member of the var.
   */
  PP_VARTYPE_INT32 = 3,
  /**
   * A double-precision floating point value. Use the <code>as_double</code>
   * member of the var.
   */
  PP_VARTYPE_DOUBLE = 4,
  /**
   * The Var represents a string. The <code>as_id</code> field is used to
   * identify the string, which may be created and retrieved from the
   * <code>PPB_Var</code> interface.
   */
  PP_VARTYPE_STRING = 5,
  /**
   * Represents a JavaScript object. This vartype is not currently usable
   * from modules, although it is used internally for some tasks.
   */
  PP_VARTYPE_OBJECT = 6,
  /**
   * Arrays and dictionaries are not currently supported but will be added
   * in future revisions. These objects are reference counted so be sure
   * to properly AddRef/Release them as you would with strings to ensure your
   * module will continue to work with future versions of the API.
   */
  PP_VARTYPE_ARRAY = 7,
  /**
   * Dictionary type; see the comment on <code>PP_VARTYPE_ARRAY</code> above
   * (dictionaries are likewise reference counted and not yet usable).
   */
  PP_VARTYPE_DICTIONARY = 8,
  /**
   * ArrayBuffer represents a JavaScript ArrayBuffer. This is the type which
   * represents Typed Arrays in JavaScript. Unlike JavaScript 'Array', it is
   * only meant to contain basic numeric types, and is always stored
   * contiguously. See PPB_VarArrayBuffer_Dev for functions special to
   * ArrayBuffer vars.
   */
  PP_VARTYPE_ARRAY_BUFFER = 9
} PP_VarType;
/* Compile-time check that the enum occupies exactly 4 bytes. */
PP_COMPILE_ASSERT_SIZE_IN_BYTES(PP_VarType, 4);
/**
* @}
*/
/**
* @addtogroup Structs
* @{
*/
/**
* The PP_VarValue union stores the data for any one of the types listed
* in the PP_VarType enum.
*/
union PP_VarValue {
  /**
   * If <code>type</code> is <code>PP_VARTYPE_BOOL</code>,
   * <code>as_bool</code> represents the value of this <code>PP_Var</code> as
   * <code>PP_Bool</code>.
   */
  PP_Bool as_bool;
  /**
   * If <code>type</code> is <code>PP_VARTYPE_INT32</code>,
   * <code>as_int</code> represents the value of this <code>PP_Var</code> as
   * <code>int32_t</code>.
   */
  int32_t as_int;
  /**
   * If <code>type</code> is <code>PP_VARTYPE_DOUBLE</code>,
   * <code>as_double</code> represents the value of this <code>PP_Var</code>
   * as <code>double</code>.
   */
  double as_double;
  /**
   * If <code>type</code> is <code>PP_VARTYPE_STRING</code>,
   * <code>PP_VARTYPE_OBJECT</code>, <code>PP_VARTYPE_ARRAY</code>, or
   * <code>PP_VARTYPE_DICTIONARY</code>,
   * <code>as_id</code> represents the value of this <code>PP_Var</code> as
   * an opaque handle assigned by the browser. This handle is guaranteed
   * never to be 0, so a module can initialize this ID to 0 to indicate a
   * "NULL handle."
   * (Presumably <code>PP_VARTYPE_ARRAY_BUFFER</code> uses <code>as_id</code>
   * as well, since it is also reference counted -- confirm.)
   */
  int64_t as_id;
};
/**
* The <code>PP_VAR</code> struct is a variant data type and can contain any
* value of one of the types named in the <code>PP_VarType</code> enum. This
* structure is for passing data between native code which can be strongly
* typed and the browser (JavaScript) which isn't strongly typed.
*
* JavaScript has a "number" type for holding a number, and does not
* differentiate between floating point and integer numbers. The
* JavaScript operations will try to optimize operations by using
* integers when possible, but could end up with doubles. Therefore,
* you can't assume a numeric <code>PP_Var</code> will be the type you expect.
* Your code should be capable of handling either int32_t or double for numeric
* PP_Vars sent from JavaScript.
*/
struct PP_Var {
  /* Discriminant selecting which member of <code>value</code> is valid. */
  PP_VarType type;
  /**
   * The <code>padding</code> ensures <code>value</code> is aligned on an
   * 8-byte boundary relative to the start of the struct. Some compilers
   * align doubles on 8-byte boundaries for 32-bit x86, and some align on
   * 4-byte boundaries.
   */
  int32_t padding;
  /**
   * This <code>value</code> represents the contents of the PP_Var. Only one of
   * the fields of <code>value</code> is valid at a time based upon
   * <code>type</code>.
   */
  union PP_VarValue value;
};
/* Compile-time check of the ABI layout: 4 (type) + 4 (padding) + 8 (value). */
PP_COMPILE_ASSERT_STRUCT_SIZE_IN_BYTES(PP_Var, 16);
/**
* @}
*/
/**
* @addtogroup Functions
* @{
*/
/**
* PP_MakeUndefined() is used to wrap an undefined value into a
* <code>PP_Var</code> struct for passing to the browser.
*
* @return A <code>PP_Var</code> structure.
*/
PP_INLINE struct PP_Var PP_MakeUndefined() {
struct PP_Var result = { PP_VARTYPE_UNDEFINED, 0, {PP_FALSE} };
return result;
}
/**
* PP_MakeNull() is used to wrap a null value into a
* <code>PP_Var</code> struct for passing to the browser.
*
* @return A <code>PP_Var</code> structure,
*/
PP_INLINE struct PP_Var PP_MakeNull() {
struct PP_Var result = { PP_VARTYPE_NULL, 0, {PP_FALSE} };
return result;
}
/**
 * PP_MakeBool() wraps a boolean value in a <code>PP_Var</code> struct for
 * passing to the browser.
 *
 * @param[in] value The <code>PP_Bool</code> enumeration value to wrap.
 *
 * @return A <code>PP_Var</code> structure of type
 * <code>PP_VARTYPE_BOOL</code>.
 */
PP_INLINE struct PP_Var PP_MakeBool(PP_Bool value) {
  struct PP_Var var = { PP_VARTYPE_BOOL, 0, {PP_FALSE} };
  var.value.as_bool = value;
  return var;
}
/**
 * PP_MakeInt32() wraps a 32-bit integer in a <code>PP_Var</code> struct for
 * passing to the browser.
 *
 * @param[in] value The int32 to wrap.
 *
 * @return A <code>PP_Var</code> structure of type
 * <code>PP_VARTYPE_INT32</code>.
 */
PP_INLINE struct PP_Var PP_MakeInt32(int32_t value) {
  struct PP_Var var = { PP_VARTYPE_INT32, 0, {PP_FALSE} };
  var.value.as_int = value;
  return var;
}
/**
* PP_MakeDouble() is used to wrap a double value into a
* <code>PP_Var</code> struct for passing to the browser.
*
* @param[in] value A double to wrap.
*
* @return A <code>PP_Var</code> structure.
*/
PP_INLINE struct PP_Var PP_MakeDouble(double value) {
struct PP_Var result = { PP_VARTYPE_DOUBLE, 0, {PP_FALSE} };
result.value.as_double = value;
return result;
}
/**
* @}
*/
#endif /* PPAPI_C_PP_VAR_H_ */
| 2,451 |
348 | {"nom":"Sarran","circ":"1ère circonscription","dpt":"Corrèze","inscrits":247,"abs":100,"votants":147,"blancs":12,"nuls":7,"exp":128,"res":[{"nuance":"REM","nom":"<NAME>","voix":82},{"nuance":"SOC","nom":"<NAME>","voix":46}]} | 90 |
4,845 | <reponame>zhangzi0291/sa-token
package com.pj.util;
/**
* 用于测试用时
* @author kong
*
*/
public class Ttime {
    private long start = 0; // timestamp (ms) when timing was started
    private long end = 0;   // timestamp (ms) when timing was stopped
    /** Shared instance for quick ad-hoc timing. */
    public static Ttime t = new Ttime();
    /**
     * Start timing.
     * @return this, for call chaining
     */
    public Ttime start() {
        start = System.currentTimeMillis();
        return this;
    }
    /**
     * Stop timing.
     * @return this, for call chaining
     */
    public Ttime end() {
        end = System.currentTimeMillis();
        return this;
    }
    /**
     * Elapsed time in milliseconds between start() and end().
     */
    public long returnMs() {
        return end - start;
    }
    /**
     * Print the formatted elapsed time to stdout.
     */
    public void outTime() {
        System.out.println(this.toString());
    }
    /**
     * Stop timing, then print the formatted elapsed time.
     */
    public void endOutTime() {
        this.end().outTime();
    }
    @Override
    public String toString() {
        // Elapsed time formatted in seconds, e.g. "0.01s".
        return returnMs() / 1000.0 + "s";
    }
}
| 449 |
439 | <filename>portlet/src/main/java/org/apache/struts2/portlet/example/FormResultAction.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.struts2.portlet.example;
import java.util.Collection;
import java.util.Map;
import javax.portlet.RenderRequest;
import org.apache.struts2.portlet.context.PortletActionContext;
import com.opensymphony.xwork2.ActionSupport;
/**
*/
public class FormResultAction extends ActionSupport {
    /** Result text carried from the form submission; echoed by the view. */
    private String result = null;
    public String getResult() {
        return result;
    }
    public void setResult(String value) {
        this.result = value;
    }
    /**
     * Exposes the render parameters of the current portlet request as a
     * collection of map entries for display.
     */
    public Collection getRenderParams() {
        RenderRequest request = PortletActionContext.getRenderRequest();
        return request.getParameterMap().entrySet();
    }
}
| 461 |
862 | <gh_stars>100-1000
/*
* (c) Copyright 2021 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.lock.client;
import com.palantir.atlasdb.timelock.api.Namespace;
import com.palantir.lock.watch.LockWatchCache;
import com.palantir.lock.watch.LockWatchCacheImpl;
import java.util.Optional;
import org.immutables.value.Value;
/**
 * Creates request batchers for a namespace. When multi-client batchers are
 * configured, the returned batchers share them (tracking references);
 * otherwise per-namespace batching implementations are created.
 */
public final class RequestBatchersFactory {
    private final LockWatchCache cache;
    private final Namespace namespace;
    private final Optional<MultiClientRequestBatchers> maybeRequestBatchers;
    private RequestBatchersFactory(
            LockWatchCache cache, Namespace namespace, Optional<MultiClientRequestBatchers> maybeRequestBatchers) {
        this.cache = cache;
        this.namespace = namespace;
        this.maybeRequestBatchers = maybeRequestBatchers;
    }
    public static RequestBatchersFactory create(
            LockWatchCache cache, Namespace namespace, Optional<MultiClientRequestBatchers> maybeRequestBatchers) {
        return new RequestBatchersFactory(cache, namespace, maybeRequestBatchers);
    }
    public static RequestBatchersFactory createForTests() {
        return new RequestBatchersFactory(LockWatchCacheImpl.noOp(), Namespace.of("test-client"), Optional.empty());
    }
    public IdentifiedAtlasDbTransactionStarter createBatchingTransactionStarter(LockLeaseService lockLeaseService) {
        Optional<ReferenceTrackingWrapper<MultiClientTransactionStarter>> maybeStarter =
                maybeRequestBatchers.map(MultiClientRequestBatchers::transactionStarter);
        if (maybeStarter.isPresent()) {
            ReferenceTrackingWrapper<MultiClientTransactionStarter> sharedStarter = maybeStarter.get();
            sharedStarter.recordReference();
            return new NamespacedIdentifiedTransactionStarter(
                    namespace, sharedStarter, cache, new LockCleanupService(lockLeaseService));
        }
        return BatchingIdentifiedAtlasDbTransactionStarter.create(lockLeaseService, cache);
    }
    public CommitTimestampGetter createBatchingCommitTimestampGetter(LockLeaseService lockLeaseService) {
        Optional<ReferenceTrackingWrapper<MultiClientCommitTimestampGetter>> maybeGetter =
                maybeRequestBatchers.map(MultiClientRequestBatchers::commitTimestampGetter);
        if (maybeGetter.isPresent()) {
            ReferenceTrackingWrapper<MultiClientCommitTimestampGetter> sharedGetter = maybeGetter.get();
            sharedGetter.recordReference();
            return new NamespacedCommitTimestampGetter(cache, namespace, sharedGetter);
        }
        return BatchingCommitTimestampGetter.create(lockLeaseService, cache);
    }
    @Value.Immutable
    public interface MultiClientRequestBatchers {
        @Value.Parameter
        ReferenceTrackingWrapper<MultiClientCommitTimestampGetter> commitTimestampGetter();
        @Value.Parameter
        ReferenceTrackingWrapper<MultiClientTransactionStarter> transactionStarter();
    }
}
| 1,166 |
1,337 | /*
* Copyright (c) 2008-2016 Haulmont.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.haulmont.cuba.web.gui.components;
import com.haulmont.bali.events.Subscription;
import com.haulmont.cuba.core.global.AppBeans;
import com.haulmont.cuba.gui.components.Frame;
import com.haulmont.cuba.gui.components.HBoxLayout;
import com.haulmont.cuba.gui.components.ListEditor;
import com.haulmont.cuba.gui.components.data.Options;
import com.haulmont.cuba.gui.components.listeditor.ListEditorDelegate;
import com.vaadin.ui.Component;
import com.vaadin.ui.CustomField;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.InitializingBean;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
 * Web (Vaadin) implementation of {@link ListEditor}. Nearly all behavior is
 * forwarded to a {@link ListEditorDelegate}; this class wires the delegate's
 * layout into a Vaadin CustomField wrapper ({@link CubaListEditor}) and
 * bridges value-change events.
 */
public class WebListEditor<V> extends WebV8AbstractField<WebListEditor.CubaListEditor<V>, List<V>, List<V>>
        implements ListEditor<V>, InitializingBean {
    protected static final String LISTEDITOR_STYLENAME = "c-listeditor";
    // Delegate holding the actual list-editing state, options and UI layout.
    protected ListEditorDelegate<V> delegate;
    public WebListEditor() {
        initDelegate();
        component = createComponent();
    }
    @Override
    public void afterPropertiesSet() {
        initComponent(component);
        // Attach the delegate's layout to this component's hierarchy.
        delegate.getLayout().setParent(this);
    }
    @Override
    public void setFrame(Frame frame) {
        super.setFrame(frame);
        delegate.getLayout().setFrame(frame);
    }
    protected CubaListEditor<V> createComponent() {
        return new CubaListEditor<>(delegate.getLayout());
    }
    protected void initComponent(Component component) {
        component.setStyleName(LISTEDITOR_STYLENAME);
    }
    protected void initDelegate() {
        delegate = createDelegate();
        delegate.setActualField(this);
    }
    protected ListEditorDelegate<V> createDelegate() {
        return AppBeans.get(ListEditorDelegate.NAME);
    }
    @Override
    public String getStyleName() {
        // Hide the internal base style name from callers.
        return StringUtils.normalizeSpace(super.getStyleName().replace(LISTEDITOR_STYLENAME, ""));
    }
    @Override
    public ItemType getItemType() {
        return delegate.getItemType();
    }
    @Override
    public void setItemType(ItemType itemType) {
        delegate.setItemType(itemType);
    }
    @Override
    public boolean isUseLookupField() {
        return delegate.isUseLookupField();
    }
    @Override
    public void setUseLookupField(boolean useLookupField) {
        delegate.setUseLookupField(useLookupField);
    }
    @Override
    public String getLookupScreen() {
        return delegate.getLookupScreen();
    }
    @Override
    public void setLookupScreen(String lookupScreen) {
        delegate.setLookupScreen(lookupScreen);
    }
    @Override
    public String getEntityName() {
        return delegate.getEntityName();
    }
    @Override
    public void setEntityName(String entityName) {
        delegate.setEntityName(entityName);
    }
    @Override
    public String getEntityJoinClause() {
        return delegate.getEntityJoinClause();
    }
    @Override
    public void setEntityJoinClause(String entityJoinClause) {
        delegate.setEntityJoinClause(entityJoinClause);
    }
    @Override
    public String getEntityWhereClause() {
        return delegate.getEntityWhereClause();
    }
    @Override
    public void setEntityWhereClause(String entityWhereClause) {
        delegate.setEntityWhereClause(entityWhereClause);
    }
    @Override
    public Class<? extends Enum> getEnumClass() {
        return delegate.getEnumClass();
    }
    @Override
    public void setEnumClass(Class<? extends Enum> enumClass) {
        delegate.setEnumClass(enumClass);
    }
    @Override
    public void setClearButtonVisible(boolean visible) {
        delegate.setClearButtonVisible(visible);
    }
    @Override
    public boolean isClearButtonVisible() {
        return delegate.isClearButtonVisible();
    }
    @Override
    public void setValue(List<V> newValue) {
        setValueToPresentation(convertToPresentation(newValue));
        // The value lives in the delegate, not the Vaadin field, so the event
        // is fired manually below rather than by the superclass machinery.
        delegate.setValue(newValue);
        List<V> oldValue = internalValue;
        this.internalValue = newValue;
        if (!Objects.equals(oldValue, newValue)) {
            ValueChangeEvent event = new ValueChangeEvent<>(this, oldValue, newValue);
            publish(ValueChangeEvent.class, event);
        }
    }
    @Override
    public List<V> getValue() {
        return delegate.getValue();
    }
    @Override
    public boolean isEmpty() {
        return CollectionUtils.isEmpty(delegate.getValue());
    }
    @Override
    public void focus() {
        component.focus();
    }
    @Override
    public int getTabIndex() {
        return delegate.getDisplayValuesField().getTabIndex();
    }
    @Override
    public void setTabIndex(int tabIndex) {
        delegate.getDisplayValuesField().setTabIndex(tabIndex);
    }
    /**
     * Vaadin CustomField shell whose content is the delegate's layout.
     * Value storage/retrieval is handled entirely by the ListEditorDelegate,
     * hence the no-op doSetValue and null getValue.
     */
    public static class CubaListEditor<V> extends CustomField<List<V>> {
        private final Component content;
        public CubaListEditor(HBoxLayout mainLayout) {
            content = WebComponentsHelper.unwrap(mainLayout);
        }
        @Override
        protected Component initContent() {
            return content;
        }
        @Override
        public boolean isEmpty() {
            return super.isEmpty() || CollectionUtils.isEmpty(getValue());
        }
        @Override
        protected void doSetValue(List<V> value) {
            // delegated to ListEditorDelegate
        }
        @Override
        public List<V> getValue() {
            // delegated to ListEditorDelegate
            return null;
        }
    }
    @Override
    protected void setEditableToComponent(boolean editable) {
        delegate.setEditable(editable);
    }
    @Override
    public boolean isEditable() {
        return delegate.isEditable();
    }
    @Override
    public void setEditorWindowId(String windowId) {
        delegate.setEditorWindowId(windowId);
    }
    @Override
    public String getEditorWindowId() {
        return delegate.getEditorWindowId();
    }
    @Override
    public Subscription addEditorCloseListener(Consumer<EditorCloseEvent> listener) {
        delegate.addEditorCloseListener(listener);
        return () -> removeEditorCloseListener(listener);
    }
    @Override
    public void removeEditorCloseListener(Consumer<EditorCloseEvent> listener) {
        delegate.removeEditorCloseListener(listener);
    }
    @Override
    public void setEditorParamsSupplier(Supplier<Map<String, Object>> paramsSupplier) {
        delegate.setEditorParamsSupplier(paramsSupplier);
    }
    @Override
    public Supplier<Map<String, Object>> getEditorParamsSupplier() {
        return delegate.getEditorParamsSupplier();
    }
    @Override
    public void setTimeZone(TimeZone timeZone) {
        delegate.setTimeZone(timeZone);
    }
    @Override
    public TimeZone getTimeZone() {
        return delegate.getTimeZone();
    }
    @Override
    public void addListItemValidator(Consumer<? super V> validator) {
        delegate.addListItemValidator(validator);
    }
    @Override
    public Collection<Consumer<? super V>> getListItemValidators() {
        return delegate.getListItemValidators();
    }
    @Override
    public void setOptions(Options<V> options) {
        delegate.setOptions(options);
    }
    @Override
    public Options<V> getOptions() {
        return delegate.getOptions();
    }
    @Override
    public void setOptionCaptionProvider(Function<? super V, String> optionCaptionProvider) {
        delegate.setOptionCaptionProvider(optionCaptionProvider);
    }
    @Override
    public Function<? super V, String> getOptionCaptionProvider() {
        return delegate.getOptionCaptionProvider();
    }
    @Override
    public boolean isDisplayValuesFieldEditable() {
        return delegate.isDisplayValuesFieldEditable();
    }
    @Override
    public void setDisplayValuesFieldEditable(boolean displayValuesFieldEditable) {
        delegate.setDisplayValuesFieldEditable(displayValuesFieldEditable);
    }
}
365 | /*******************************************************************************
* Copyright 2009-2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
#pragma once
#ifdef FFMPEG_PLUGIN
#define AUD_BUILD_PLUGIN
#endif
/**
* @file FFMPEG.h
* @ingroup plugin
* The FFMPEG class.
*/
#include "file/IFileInput.h"
#include "file/IFileOutput.h"
AUD_NAMESPACE_BEGIN
/**
* This plugin class reads and writes sounds via ffmpeg.
*/
class AUD_PLUGIN_API FFMPEG : public IFileInput, public IFileOutput
{
private:
	// delete copy constructor and operator=
	FFMPEG(const FFMPEG&) = delete;
	FFMPEG& operator=(const FFMPEG&) = delete;

public:
	/**
	 * Creates a new ffmpeg plugin.
	 */
	FFMPEG();

	/**
	 * Registers this plugin.
	 */
	static void registerPlugin();

	/**
	 * Creates a reader for the file at the given path (IFileInput).
	 * \param filename Path of the file to read.
	 * \return Shared pointer to the new reader.
	 */
	virtual std::shared_ptr<IReader> createReader(std::string filename);

	/**
	 * Creates a reader for an in-memory file image (IFileInput).
	 * \param buffer Buffer holding the file contents.
	 * \return Shared pointer to the new reader.
	 */
	virtual std::shared_ptr<IReader> createReader(std::shared_ptr<Buffer> buffer);

	/**
	 * Creates a writer targeting the given path (IFileOutput).
	 * \param filename Output file path.
	 * \param specs Device/sample specification of the audio to write.
	 * \param format Container format.
	 * \param codec Codec to encode with.
	 * \param bitrate Target bitrate.
	 * \return Shared pointer to the new writer.
	 */
	virtual std::shared_ptr<IWriter> createWriter(std::string filename, DeviceSpecs specs, Container format, Codec codec, unsigned int bitrate);
};
AUD_NAMESPACE_END
| 487 |
1,127 | // Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/visibility.hpp"
/**
* @file lpt_visibility.hpp
* @brief Defines visibility settings for Inference Engine LP Transformations library
*/
#ifdef OPENVINO_STATIC_LIBRARY
# define LP_TRANSFORMATIONS_API
#else
# ifdef IMPLEMENT_OPENVINO_API
# define LP_TRANSFORMATIONS_API OPENVINO_CORE_EXPORTS
# else
# define LP_TRANSFORMATIONS_API OPENVINO_CORE_IMPORTS
# endif // IMPLEMENT_OPENVINO_API
#endif // OPENVINO_STATIC_LIBRARY
| 231 |
852 | #include "EventFilter/EcalRawToDigi/interface/DCCTCCBlock.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "EventFilter/EcalRawToDigi/interface/EcalElectronicsMapper.h"
#include "EventFilter/EcalRawToDigi/interface/DCCDataUnpacker.h"
#include "EventFilter/EcalRawToDigi/interface/DCCEventBlock.h"
// Forwards construction to the generic DCC data block prototype; no
// TCC-specific state is set here — fields are filled in during unpack().
DCCTCCBlock::DCCTCCBlock(DCCDataUnpacker* u, EcalElectronicsMapper* m, DCCEventBlock* e, bool unpack)
    : DCCDataBlockPrototype(u, m, e, unpack) {}
// Unpacks one TCC (Trigger Concentrator Card) block from the raw event buffer.
// Returns BLOCK_UNPACKED on success, SKIP_BLOCK_UNPACKING when only this block
// is dropped, or STOP_EVENT_UNPACKING when the rest of the fed block must be
// skipped because too few data words remain.
int DCCTCCBlock::unpack(const uint64_t** data, unsigned int* dwToEnd, short tccChId) {
  dwToEnd_ = dwToEnd;
  datap_ = data;
  data_ = *data;
  // Need at least 1 dw to find out if pseudo-strips readout is enabled.
  // NOTE(review): a remaining count of 0 would bypass this guard — presumably
  // the caller guarantees at least one data word remains; TODO confirm.
  if (*dwToEnd == 1) {
    if (!DCCDataUnpacker::silentMode_) {
      edm::LogWarning("IncorrectEvent") << "EcalRawToDigi@SUB=DCCTCCBlock:unpack"
                                        << "\n Unable to unpack TCC block for event " << event_->l1A() << " in fed "
                                        << mapper_->getActiveDCC()
                                        << "\n Only 8 bytes are available until the end of event ..."
                                        << "\n => Skipping to next fed block...";
    }
    //todo : add this to error collection
    return STOP_EVENT_UNPACKING;
  }
  blockLength_ = getLength();
  // Make sure the full block fits in what is left of the event buffer.
  if ((*dwToEnd_) < blockLength_) {
    if (!DCCDataUnpacker::silentMode_) {
      edm::LogWarning("IncorrectEvent") << "EcalRawToDigi@SUB=DCCTCCBlock:unpack"
                                        << "\n Unable to unpack TCC block for event " << event_->l1A() << " in fed "
                                        << mapper_->getActiveDCC() << "\n Only " << ((*dwToEnd_) * 8)
                                        << " bytes are available until the end of event while " << (blockLength_ * 8)
                                        << " are needed!"
                                        << "\n => Skipping to next fed block...";
    }
    //todo : add this to error collection
    return STOP_EVENT_UNPACKING;
  }
  if (unpackInternalData_) {
    // Go to the beginning of the tcc block
    data_++;
    // Decode the header fields packed into the first TCC data word.
    tccId_ = (*data_) & TCC_ID_MASK;
    ps_ = (*data_ >> TCC_PS_B) & B_MASK;
    bx_ = (*data_ >> TCC_BX_B) & TCC_BX_MASK;
    l1_ = (*data_ >> TCC_L1_B) & TCC_L1_MASK;
    nTTs_ = (*data_ >> TCC_TT_B) & TCC_TT_MASK;
    nTSamples_ = (*data_ >> TCC_TS_B) & TCC_TS_MASK;
    event_->setTCCSyncNumbers(l1_, bx_, tccChId);
    if (!checkTccIdAndNumbTTs()) {
      updateEventPointers();
      return SKIP_BLOCK_UNPACKING;
    }
    // Check synchronization: the TCC's bx/l1 counters must agree with the DCC
    // header (masked to the TCC field widths).
    if (sync_) {
      const unsigned int dccBx = (event_->bx()) & TCC_BX_MASK;
      const unsigned int dccL1 = (event_->l1A()) & TCC_L1_MASK;
      const unsigned int fov = (event_->fov()) & H_FOV_MASK;
      if (!isSynced(dccBx, bx_, dccL1, l1_, TCC_SRP, fov)) {
        if (!DCCDataUnpacker::silentMode_) {
          edm::LogWarning("IncorrectBlock")
              << "Synchronization error for TCC block"
              << " (L1A " << event_->l1A() << " bx " << event_->bx() << " fed " << mapper_->getActiveDCC() << ")\n"
              << " dccBx = " << dccBx << " bx_ = " << bx_ << " dccL1 = " << dccL1 << " l1_ = " << l1_ << "\n"
              << " => TCC block skipped";
        }
        //Note : add to error collection ?
        updateEventPointers();
        return SKIP_BLOCK_UNPACKING;
      }
    }
    //check numb of samples
    /*
      unsigned int expTriggerTSamples(mapper_->numbTriggerTSamples());
      if( nTSamples_ != expTriggerTSamples ){
        edm::LogWarning("IncorrectBlock")
          <<"Unable to unpack TCC block for event "<<event_->l1A()<<" in fed "<<mapper_->getActiveDCC()
          <<"\n Number of time samples is "<<nTSamples_<<" while "<<expTriggerTSamples<<" is expected"
          <<"\n TCC block skipped..."<<endl;
        //Note : add to error collection ?
        updateEventPointers();
        return SKIP_BLOCK_UNPACKING;
      }
    */
    // debugging
    // display(cout);
    addTriggerPrimitivesToCollection();
  }
  updateEventPointers();
  return BLOCK_UNPACKED;
}
// Pretty-prints the header fields decoded from the first TCC data word.
void DCCTCCBlock::display(std::ostream& o) {
  o << "\n Unpacked Info for DCC TCC Block";
  o << "\n DW1 =============================";
  o << "\n TCC Id " << tccId_;
  o << "\n Bx " << bx_;
  o << "\n L1 " << l1_;
  o << "\n Numb TT " << nTTs_;
  o << "\n Numb Samp " << nTSamples_;
}
| 2,143 |
190,993 | /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
extern "C" {
// Exits immediately with status 0 (simulates a benign child process).
int JustReturnZero(int argc, char** argv) { return 0; }
// Exits with a generic failure status.
int ReturnOne(int argc, char** argv) { return 1; }
// Exits with the mini-benchmark success status code.
int ReturnSuccess(int argc, char** argv) {
  return ::tflite::acceleration::kMinibenchmarkSuccess;
}
// Kills its own process with SIGKILL to simulate a crashing child; the
// trailing return is never reached.
int SigKill(int argc, char** argv) {
  kill(getpid(), SIGKILL);
  return 1;
}
// Writes "ok\n" to stdout and reports success.
int WriteOk(int argc, char** argv) {
  write(1, "ok\n", 3);
  return ::tflite::acceleration::kMinibenchmarkSuccess;
}
// Writes 10000 'A' characters to stdout with a single write(2) call.
// Returns kMinibenchmarkSuccess only when the full buffer was written.
// The buffer size is named once and reused via sizeof() so the fill size,
// write size, and success check cannot drift apart.
int Write10kChars(int argc, char** argv) {
  constexpr size_t kNumChars = 10000;
  char buffer[kNumChars];
  memset(buffer, 'A', sizeof(buffer));
  return write(1, buffer, sizeof(buffer)) == static_cast<ssize_t>(sizeof(buffer))
             ? ::tflite::acceleration::kMinibenchmarkSuccess
             : 1;
}
// Echoes each argument starting from argv[3] to stdout, one per line.
// NOTE(review): argv[0..2] are deliberately skipped — presumably they carry
// the runner's own bookkeeping (binary path, entry point, ...); confirm
// against the process launcher.
int WriteArgs(int argc, char** argv) {
  for (int i = 3; i < argc; i++) {
    write(1, argv[i], strlen(argv[i]));
    write(1, "\n", 1);
  }
  return ::tflite::acceleration::kMinibenchmarkSuccess;
}
} // extern "C"
| 597 |
348 | <reponame>tulumvinh/Raigad
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.HttpModule;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.unit.TimeValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class HealthMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(HealthMonitor.class);

    public static final String METRIC_NAME = "Elasticsearch_HealthMonitor";

    /** Upper bound for waiting on the cluster health request. */
    private static final TimeValue MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(60);

    private final Elasticsearch_HealthReporter healthReporter;
    private final InstanceManager instanceManager;
    private final DiscoveryClient discoveryClient;
    private final HttpModule httpModule;

    /**
     * Periodic task that publishes Elasticsearch cluster health (status color,
     * node-count matches) as Servo gauges.
     */
    @Inject
    public HealthMonitor(IConfiguration config, InstanceManager instanceManager, HttpModule httpModule) {
        super(config);
        this.instanceManager = instanceManager;
        this.httpModule = httpModule;
        healthReporter = new Elasticsearch_HealthReporter();
        discoveryClient = DiscoveryManager.getInstance().getDiscoveryClient();
        Monitors.registerObject(healthReporter);
    }

    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not running, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        // In case we configured only the master node to report metrics and this node is not a master - bail out
        if (config.reportMetricsFromMasterOnly() && !ElasticsearchUtils.amIMasterNode(config, httpModule)) {
            return;
        }
        HealthBean healthBean = new HealthBean();
        try {
            Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
            // Issue a single cluster-health request and reuse it for both the
            // status check and the node-count checks (the previous code sent
            // two identical requests back to back).
            ClusterHealthResponse clusterHealthResponse = esTransportClient.admin().cluster()
                    .prepareHealth().setTimeout(MASTER_NODE_TIMEOUT).execute().actionGet(MASTER_NODE_TIMEOUT);
            ClusterHealthStatus clusterHealthStatus = clusterHealthResponse.getStatus();
            if (clusterHealthStatus == null) {
                logger.info("ClusterHealthStatus is null, hence returning (no health).");
                resetHealthStats(healthBean);
                return;
            }
            // Check if status = GREEN, YELLOW or RED; compare the enum
            // constants directly instead of case-insensitive name() strings.
            if (clusterHealthStatus == ClusterHealthStatus.GREEN) {
                healthBean.greenorredstatus = 0;
                healthBean.greenoryellowstatus = 0;
            } else if (clusterHealthStatus == ClusterHealthStatus.YELLOW) {
                healthBean.greenoryellowstatus = 1;
                healthBean.greenorredstatus = 0;
            } else if (clusterHealthStatus == ClusterHealthStatus.RED) {
                healthBean.greenorredstatus = 1;
                healthBean.greenoryellowstatus = 0;
            }
            if (config.isNodeMismatchWithDiscoveryEnabled()) {
                // Check if there is node mismatch between discovery and ES
                healthBean.nodematch = (clusterHealthResponse.getNumberOfNodes() == instanceManager.getAllInstances().size()) ? 0 : 1;
            } else {
                healthBean.nodematch = (clusterHealthResponse.getNumberOfNodes() == config.getDesiredNumberOfNodesInCluster()) ? 0 : 1;
            }
            if (config.isEurekaHealthCheckEnabled()) {
                healthBean.eurekanodematch = (clusterHealthResponse.getNumberOfNodes() == discoveryClient.getApplication(config.getAppName()).getInstances().size()) ? 0 : 1;
            }
        } catch (Exception e) {
            resetHealthStats(healthBean);
            logger.warn("Failed to load cluster health status", e);
        }
        healthReporter.healthBean.set(healthBean);
    }

    /** Servo bean exposing the most recent health snapshot as gauges. */
    public class Elasticsearch_HealthReporter {
        private final AtomicReference<HealthBean> healthBean;

        public Elasticsearch_HealthReporter() {
            healthBean = new AtomicReference<HealthBean>(new HealthBean());
        }

        @Monitor(name = "es_healthstatus_greenorred", type = DataSourceType.GAUGE)
        public int getEsHealthstatusGreenorred() {
            return healthBean.get().greenorredstatus;
        }

        @Monitor(name = "es_healthstatus_greenoryellow", type = DataSourceType.GAUGE)
        public int getEsHealthstatusGreenoryellow() {
            return healthBean.get().greenoryellowstatus;
        }

        @Monitor(name = "es_nodematchstatus", type = DataSourceType.GAUGE)
        public int getEsNodematchstatus() {
            return healthBean.get().nodematch;
        }

        @Monitor(name = "es_eurekanodematchstatus", type = DataSourceType.GAUGE)
        public int getEsEurekanodematchstatus() {
            return healthBean.get().eurekanodematch;
        }
    }

    /** Gauge values; -1 means "unknown / not collected". */
    private static class HealthBean {
        private int greenorredstatus = -1;
        private int greenoryellowstatus = -1;
        private int nodematch = -1;
        private int eurekanodematch = -1;
    }

    public static TaskTimer getTimer(String name) {
        // Run once per minute.
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }

    private void resetHealthStats(HealthBean healthBean) {
        healthBean.greenorredstatus = -1;
        healthBean.greenoryellowstatus = -1;
        healthBean.nodematch = -1;
        healthBean.eurekanodematch = -1;
    }
}
| 2,705 |
435 | <reponame>amaajemyfren/data<gh_stars>100-1000
{
"copyright_text": "Standard YouTube License",
"description": "www.pydata.org\n\nPyData is an educational program of NumFOCUS, a 501(c)3 non-profit organization in the United States. PyData provides a forum for the international community of users and developers of data analysis tools to share ideas and learn from each other. The global PyData network promotes discussion of best practices, new approaches, and emerging technologies for data management, processing, analytics, and visualization. PyData communities approach data science using many languages, including (but not limited to) Python, Julia, and R.\n\nPyData conferences aim to be accessible and community-driven, with novice to advanced level presentations. PyData tutorials and talks bring attendees the latest project features along with cutting-edge use cases.",
"duration": 2357,
"language": "eng",
"recorded": "2017-06-30",
"related_urls": [
{
"label": "schedule",
"url": "https://pydata.org/berlin2017/schedule/"
},
{
"label": "PEP 8",
"url": "https://www.python.org/dev/peps/pep-0008/"
},
{
"label": "Google Python Style Guide",
"url": "https://google.github.io/styleguide/pyguide.html"
},
{
"label": "pytest-ipynb",
"url": "https://pypi.python.org/pypi/pytest-ipynb/"
}
],
"speakers": [
        "Volodymyr <NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/2QLgf2YLlus/maxresdefault.jpg",
"title": "Clean Code in Jupyter notebooks, using Python",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=2QLgf2YLlus"
}
]
}
| 596 |
2,118 | // Copyright (c) 2006-2018 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
#define CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
#include <intrin.h>
// Make sure the MSVC barrier intrinsics are emitted inline rather than as calls.
#pragma intrinsic(_ReadWriteBarrier)
#pragma intrinsic(_ReadBarrier)
#pragma intrinsic(_WriteBarrier)
// Compiler-level memory barriers for MSVC. Per the MSVC documentation these
// intrinsics only restrict compiler reordering of memory accesses across the
// barrier; they do NOT emit CPU fence instructions.
#define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier()
#define CDS_COMPILER_R_BARRIER _ReadBarrier()
#define CDS_COMPILER_W_BARRIER _WriteBarrier()
#endif // #ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
| 261 |
457 | package io.agora.vlive.protocol.model.request;
/**
 * Request payload for obtaining an OSS (object storage) upload policy.
 */
public class OssPolicyRequest extends Request {
    // NOTE(review): "AVATOR" is a misspelling of "avatar", but the constant
    // name is part of the public API, so it is kept for compatibility.
    public static final int OSS_TYPE_AVATOR = 1;
    /**
     * @param token the caller's authentication token, sent with the request
     */
    public OssPolicyRequest(String token) {
        this.token = token;
    }
    // Authentication token forwarded to the server.
    public String token;
    // Kind of asset the policy is requested for; defaults to avatar uploads.
    public int type = OSS_TYPE_AVATOR;
}
| 103 |
14,668 | <reponame>zealoussnow/chromium
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_LAYOUT_SHIFT_REGION_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_LAYOUT_SHIFT_REGION_H_
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "ui/gfx/geometry/rect.h"
namespace blink {
// Represents a per-frame layout shift region for LayoutShiftTracker.
//
// This class uses a sweep line algorithm to compute the area in O(n log n) time
// where n is the number of rects recorded by AddRect. For complex layout shift
// regions, this is more efficient than using blink::Region, which is worst-case
// O(n^2) from the repeated calls to Region::Unite.
//
// The high-level approach is described here:
// http://jeffe.cs.illinois.edu/open/klee.html
//
// The sweep line moves from left to right. (TODO: compare performance against a
// top-to-bottom sweep.)
//
// The sweep line's current intersection with the layout shift region ("active
// length") is tracked by a segment tree, similar to what is described at:
// https://en.wikipedia.org/wiki/Segment_tree
//
// There are some subtleties to the segment tree, which are described by the
// comments in the implementation.
class CORE_EXPORT LayoutShiftRegion {
  DISALLOW_NEW();
 public:
  // Records |rect| for the current frame; empty rects are ignored since they
  // cannot contribute any area.
  void AddRect(const gfx::Rect& rect) {
    if (!rect.IsEmpty())
      rects_.push_back(rect);
  }
  // Returns all rects recorded since the last Reset().
  const Vector<gfx::Rect>& GetRects() const { return rects_; }
  // True when no rect has been recorded since the last Reset().
  bool IsEmpty() const { return rects_.IsEmpty(); }
  // Clears all recorded rects (called between frames).
  void Reset() { rects_.clear(); }
  // Area of the union of the recorded rects, computed with the sweep-line
  // algorithm described in the class comment.
  uint64_t Area() const;
 private:
  // Raw rects exactly as recorded; overlap is resolved in Area(), not here.
  Vector<gfx::Rect> rects_;
};
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_LAYOUT_SHIFT_REGION_H_
| 655 |
317 | // Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/log_memory.proto
package org.tensorflow.framework;
// NOTE(review): this interface is generated by protoc from
// tensorflow/core/framework/log_memory.proto. Do not edit it by hand —
// regenerate it from the .proto file instead.
public interface MemoryLogTensorAllocationOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.MemoryLogTensorAllocation)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * Process-unique step id.
   * </pre>
   *
   * <code>optional int64 step_id = 1;</code>
   */
  long getStepId();
  /**
   * <pre>
   * Name of the kernel making the allocation as set in GraphDef,
   * e.g., "affine2/weights/Assign".
   * </pre>
   *
   * <code>optional string kernel_name = 2;</code>
   */
  java.lang.String getKernelName();
  /**
   * <pre>
   * Name of the kernel making the allocation as set in GraphDef,
   * e.g., "affine2/weights/Assign".
   * </pre>
   *
   * <code>optional string kernel_name = 2;</code>
   */
  com.google.protobuf.ByteString
      getKernelNameBytes();
  /**
   * <pre>
   * Allocated tensor details.
   * </pre>
   *
   * <code>optional .tensorflow.TensorDescription tensor = 3;</code>
   */
  boolean hasTensor();
  /**
   * <pre>
   * Allocated tensor details.
   * </pre>
   *
   * <code>optional .tensorflow.TensorDescription tensor = 3;</code>
   */
  org.tensorflow.framework.TensorDescription getTensor();
  /**
   * <pre>
   * Allocated tensor details.
   * </pre>
   *
   * <code>optional .tensorflow.TensorDescription tensor = 3;</code>
   */
  org.tensorflow.framework.TensorDescriptionOrBuilder getTensorOrBuilder();
}
| 580 |
322 | <filename>eagle-jpm/eagle-jpm-spark-history/src/main/java/org/apache/eagle/jpm/spark/history/SparkHistoryJobAppConfig.java
/*
*
* * Licensed to the Apache Software Foundation (ASF) under one or more
* * contributor license agreements. See the NOTICE file distributed with
* * this work for additional information regarding copyright ownership.
* * The ASF licenses this file to You under the Apache License, Version 2.0
* * (the "License"); you may not use this file except in compliance with
* * the License. You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.apache.eagle.jpm.spark.history;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import org.apache.eagle.service.client.impl.EagleServiceBaseClient;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
/**
 * Typed view over the Spark history job application's Typesafe {@link Config}.
 * Each nested config section (zookeeper, data source, eagle service, storm
 * topology) is parsed by a dedicated helper so the mapping from config keys to
 * fields is easy to audit.
 */
public class SparkHistoryJobAppConfig implements Serializable {
    static final String SPARK_HISTORY_JOB_FETCH_SPOUT_NAME = "sparkHistoryJobFetchSpout";
    static final String SPARK_HISTORY_JOB_PARSE_BOLT_NAME = "sparkHistoryJobParseBolt";
    static final String DEFAULT_SPARK_JOB_HISTORY_ZOOKEEPER_ROOT = "/apps/spark/history";

    public ZKStateConfig zkStateConfig;
    public JobHistoryEndpointConfig jobHistoryConfig;
    public EagleInfo eagleInfo;
    public StormConfig stormConfig;

    private Config config;

    public Config getConfig() {
        return config;
    }

    private SparkHistoryJobAppConfig(Config config) {
        this.zkStateConfig = new ZKStateConfig();
        this.jobHistoryConfig = new JobHistoryEndpointConfig();
        this.jobHistoryConfig.hdfs = new HashMap<>();
        this.eagleInfo = new EagleInfo();
        this.stormConfig = new StormConfig();
        init(config);
    }

    public static SparkHistoryJobAppConfig newInstance(Config config) {
        return new SparkHistoryJobAppConfig(config);
    }

    private void init(Config config) {
        this.config = config;
        initZkStateConfig(config);
        initJobHistoryConfig(config);
        initEagleInfo(config);
        initStormConfig(config);
    }

    /** Reads the zookeeper.* section; zkRoot falls back to the default path. */
    private void initZkStateConfig(Config config) {
        zkStateConfig.zkQuorum = config.getString("zookeeper.zkQuorum");
        zkStateConfig.zkRetryInterval = config.getInt("zookeeper.zkRetryInterval");
        zkStateConfig.zkRetryTimes = config.getInt("zookeeper.zkRetryTimes");
        zkStateConfig.zkSessionTimeoutMs = config.getInt("zookeeper.zkSessionTimeoutMs");
        zkStateConfig.zkRoot = DEFAULT_SPARK_JOB_HISTORY_ZOOKEEPER_ROOT;
        if (config.hasPath("zookeeper.zkRoot")) {
            zkStateConfig.zkRoot = config.getString("zookeeper.zkRoot");
        }
    }

    /** Reads the dataSourceConfig.* section (resource managers + HDFS). */
    private void initJobHistoryConfig(Config config) {
        // rm.url may list several resource managers separated by commas.
        jobHistoryConfig.rms = config.getString("dataSourceConfig.rm.url").split(",\\s*");
        jobHistoryConfig.baseDir = config.getString("dataSourceConfig.hdfs.baseDir");
        // Copy every hdfs.* entry verbatim into the string map.
        for (Map.Entry<String, ConfigValue> entry : config.getConfig("dataSourceConfig.hdfs").entrySet()) {
            jobHistoryConfig.hdfs.put(entry.getKey(), entry.getValue().unwrapped().toString());
        }
    }

    /** Reads the service.* section describing the Eagle service endpoint. */
    private void initEagleInfo(Config config) {
        eagleInfo.host = config.getString("service.host");
        eagleInfo.port = config.getInt("service.port");
        eagleInfo.username = config.getString("service.username");
        eagleInfo.password = config.getString("service.password");
        // Default read timeout: 2 seconds.
        eagleInfo.timeout = 2;
        if (config.hasPath("service.readTimeOutSeconds")) {
            eagleInfo.timeout = config.getInt("service.readTimeOutSeconds");
        }
        eagleInfo.basePath = EagleServiceBaseClient.DEFAULT_BASE_PATH;
        if (config.hasPath("service.basePath")) {
            eagleInfo.basePath = config.getString("service.basePath");
        }
        // Default flush batch size: 500 entities.
        eagleInfo.flushLimit = 500;
        if (config.hasPath("service.flushLimit")) {
            eagleInfo.flushLimit = config.getInt("service.flushLimit");
        }
    }

    /** Reads siteId plus the topology.* parallelism/interval settings. */
    private void initStormConfig(Config config) {
        stormConfig.siteId = config.getString("siteId");
        stormConfig.spoutCrawlInterval = config.getInt("topology.spoutCrawlInterval");
        stormConfig.numOfSpoutExecutors = config.getInt("topology.numOfSpoutExecutors");
        stormConfig.numOfSpoutTasks = config.getInt("topology.numOfSpoutTasks");
        stormConfig.numOfParserBoltExecutors = config.getInt("topology.numOfParseBoltExecutors");
        stormConfig.numOfParserBoltTasks = config.getInt("topology.numOfParserBoltTasks");
        stormConfig.requestLimit = "";
        if (config.hasPath("topology.requestLimit")) {
            stormConfig.requestLimit = config.getString("topology.requestLimit");
        }
    }

    public static class ZKStateConfig implements Serializable {
        public String zkQuorum;
        public String zkRoot;
        public int zkSessionTimeoutMs;
        public int zkRetryTimes;
        public int zkRetryInterval;
    }

    public static class JobHistoryEndpointConfig implements Serializable {
        public String[] rms;
        public String baseDir;
        public Map<String, String> hdfs;
    }

    public static class StormConfig implements Serializable {
        public String siteId;
        public int spoutCrawlInterval;
        public String requestLimit;
        public int numOfSpoutExecutors;
        public int numOfSpoutTasks;
        public int numOfParserBoltExecutors;
        public int numOfParserBoltTasks;
    }

    public static class EagleInfo implements Serializable {
        public String host;
        public int port;
        public String username;
        public String password;
        public String basePath;
        public int timeout;
        public int flushLimit;
    }
}
| 2,221 |
7,956 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © 2018- Spyder Kernels Contributors
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the console kernel.
"""
# Standard library imports
import os
# Test imports
import pytest
# Local imports
from spyder_kernels.utils.test_utils import get_kernel
from spyder_kernels.comms.frontendcomm import FrontendComm
from spyder.plugins.ipythonconsole.comms.kernelcomm import KernelComm
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def kernel(request):
    """Console kernel fixture"""
    # Get kernel instance
    kernel = get_kernel()
    # Mirror the namespace-browser settings Spyder's frontend would normally
    # send, so variable-explorer code paths behave as they do in production.
    kernel.namespace_view_settings = {
        'check_all': False,
        'exclude_private': True,
        'exclude_uppercase': True,
        'exclude_capitalized': False,
        'exclude_unsupported': False,
        'exclude_callables_and_modules': True,
        'excluded_names': [
            'nan',
            'inf',
            'infty',
            'little_endian',
            'colorbar_doc',
            'typecodes',
            '__builtins__',
            '__main__',
            '__doc__',
            'NaN',
            'Inf',
            'Infinity',
            'sctypes',
            'rcParams',
            'rcParamsDefault',
            'sctypeNA',
            'typeNA',
            'False_',
            'True_'
        ],
        'minmax': False}
    # Teardown
    def reset_kernel():
        # Wipe the kernel namespace so state does not leak between tests.
        kernel.do_execute('reset -f', True)
    request.addfinalizer(reset_kernel)
    return kernel
class dummyComm():
    """In-memory stand-in for a Jupyter comm.

    Two instances are wired together through ``other``; anything sent on one
    side is delivered synchronously to the callbacks registered on the peer.
    """
    def __init__(self):
        self.other = None
        self.message_callback = None
        self.close_callback = None
        self.comm_id = 1

    def on_msg(self, callback):
        """Register the callback invoked when the peer sends a message."""
        self.message_callback = callback

    def on_close(self, callback):
        """Register the callback invoked when the peer closes the comm."""
        self.close_callback = callback

    def send(self, msg_dict, buffers=None):
        """Deliver a message dict (plus optional buffers) to the peer."""
        content = {'data': msg_dict, 'comm_id': self.comm_id}
        self.other.message_callback({'buffers': buffers, 'content': content})

    def close(self):
        """Notify the peer that this comm is closing."""
        payload = {'content': {'comm_id': self.comm_id}}
        self.other.close_callback(payload)

    def _send_msg(self, *args, **kwargs):
        """No-op: the real comm's low-level send is not needed here."""
        pass
@pytest.fixture
def comms(kernel):
    """Get the comms"""
    # Wire two fake comm endpoints back to back so that messages sent on one
    # side are delivered synchronously to the other.
    commA = dummyComm()
    commB = dummyComm()
    commA.other = commB
    commB.other = commA
    frontend_comm = FrontendComm(kernel)
    kernel_comm = KernelComm()
    def dummy_set_comm_port(port):
        """There is no port to set."""
        pass
    # The kernel side normally reports a TCP comm port; stub that call out.
    kernel_comm.register_call_handler('_set_comm_port', dummy_set_comm_port)
    class DummyKernelClient():
        # Minimal stand-in exposing the channel attributes KernelComm reads.
        comm_channel = 0
        shell_channel = 0
    kernel_comm.kernel_client = DummyKernelClient()
    kernel_comm._register_comm(commA)
    # Bypass the target system as this is not what is being tested
    frontend_comm._comm_open(commB,
                             {'content': {'data': {'pickle_protocol': 2}}})
    return (kernel_comm, frontend_comm)
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.skipif(os.name == 'nt', reason="Hangs on Windows")
def test_comm_base(comms):
    """Test basic message exchange."""
    commsend, commrecv = comms
    assert commsend.is_open()
    assert commrecv.is_open()
    received_messages = []
    def handler(msg_dict, buffer):
        # Record every delivered message so the assertions below can inspect it.
        received_messages.append((msg_dict, buffer))
    # Register callback
    commrecv._register_message_handler('test_message', handler)
    # Send a message
    commsend._send_message('test_message', content='content', data='data')
    assert len(received_messages) == 1
    assert received_messages[0][0]['spyder_msg_type'] == 'test_message'
    assert received_messages[0][0]['content'] == 'content'
    assert received_messages[0][1] == 'data'
    # Send another message
    commsend._send_message('test_message', content='content', data='data')
    assert len(received_messages) == 2
    # Unregister callback
    commrecv._register_message_handler('test_message', None)
    # Send another message: it is now unhandled, so the count must not change
    commsend._send_message('test_message', content='content', data='data')
    assert len(received_messages) == 2
@pytest.mark.skipif(os.name == 'nt', reason="Hangs on Windows")
def test_request(comms):
    """Test if the requests are being replied to."""
    kernel_comm, frontend_comm = comms
    def handler(a, b):
        # Echo handler: concatenates the positional and keyword arguments.
        return a + b
    kernel_comm.register_call_handler('test_request', handler)
    # A blocking remote call must round-trip through the comm pair and
    # return the handler's result.
    res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')
    assert res == 'ab'
# Allow running this test module directly, outside of a pytest invocation.
if __name__ == "__main__":
    pytest.main()
| 1,941 |
359 | from learntools.core import *
class YearPlot(ThoughtExperiment):
    # TODO: *could* make this a checked coding problem, checking the
    # value of c. I'm not sure that really helps though? I think it'll be
    # clear to the user by visual inspection of their results whether or not
    # they've succeeded. (And checking c could lead to false -ves)
    _var = 'c'  # name of the variable the learner is asked to define
    show_solution_on_correct = False
    # TODO: Some commentary on specific patterns of dist. of age?
    _solution = (
"""`c = df.year`
The distribution of year of release does seem to follow some distinct gradients, but the pattern is not global.
Using a colormap that passes through several hues (such as 'brg', or 'cubehelix') can make it easier to identify regions associated with specific eras:
```python
pts = ax.scatter(df.x, df.y, c=c, cmap='cubehelix')
```
Using a qualitative colormap (such as 'Set2') exaggerates this effect even further.
Simple sequential colormaps that use only one or two colors (e.g. 'Oranges', 'YlGn') are more effective at showing overall patterns from old to new.
""")
class MeanRatingPlot(ThoughtExperiment):
    # Hint points the learner at the dataframe column used as the color value.
    _hint = 'Our dataframe `df` has a column called "mean_rating" which will be useful here.'
    show_solution_on_correct = False
    # Note on relationship between this trend and the year trend we saw previously?
    # Was trend of lots of very old movies in top-right just a manifestation
    # of our tendency to put highly-rated movies at the right?
    _solution = (
"""
```python
fig, ax = plt.subplots(figsize=FS)
c = df.mean_rating
pts = ax.scatter(df.x, df.y, c=c)
cbar = fig.colorbar(pts)
```
Unlike with year of release, there seems to be a clear global pattern here: average rating tends to increase moving from left to right.
""")
class NRatingsPlot(ThoughtExperiment):
    # The sample solution uses a log color normalization for the rating counts.
    # TODO: Mention alternatives like PowerNorm?
    _solution = CS(
    """fig, ax = plt.subplots(figsize=FS)
c = df.n_ratings
pts = ax.scatter(df.x, df.y, c=c, norm=mpl.colors.LogNorm())
cbar = fig.colorbar(pts)
""")
# Register the exercise classes with learntools' checking machinery; in the
# notebook they are exposed as part1, part2, part3 (see var_format).
qvars = bind_exercises(globals(), [
    YearPlot,
    MeanRatingPlot,
    NRatingsPlot,
    ],
    var_format='part{n}',
)
__all__ = list(qvars)
| 739 |
1,687 | <reponame>agnes-yang/LeetCode-Solutions-in-Good-Style
import java.util.Arrays;
public class Solution3 {
    /**
     * "Minimum Increment to Make Array Unique": returns the minimum number of
     * +1 increments needed so that all values in A become distinct.
     *
     * Uses a union-find over the value range where each contiguous run of
     * occupied values forms one set whose representative is the run's largest
     * value, so a duplicate can be placed at (representative + 1) directly.
     *
     * @param A input values (constraints: length and values up to 40000)
     * @return minimal total number of increments
     */
    public int minIncrementForUnique(int[] A) {
        int len = A.length;
        if (len == 0) {
            return 0;
        }
        UnionFind unionFind = new UnionFind();
        int res = 0;
        for (int num : A) {
            if (unionFind.contains(num)) {
                // num is already occupied: the root of its run is the largest
                // occupied value, so num must be bumped to root + 1, costing
                // (root + 1 - num) increments.
                int root = unionFind.find(num);
                int add = root + 1;
                res += (add - num);
                unionFind.init(add);
            } else {
                unionFind.init(num);
            }
        }
        return res;
    }

    private class UnionFind {
        /**
         * Representative-based union-find: each slot points at its parent;
         * -1 marks a value that has not been inserted yet.
         */
        private int[] parent;

        public UnionFind() {
            // Worst case: 40000 elements all equal to 40000 fill the
            // consecutive slots 40000..79999, and init(79999) probes its right
            // neighbour parent[80000]. The original size of 79999 was too
            // small for that probe; 80001 covers every reachable index.
            this.parent = new int[80001];
            // -1 == "value not present in the union-find yet".
            Arrays.fill(parent, -1);
        }

        /**
         * Inserts x and merges it with both neighbours (if occupied) so that
         * each contiguous run of occupied values forms a single set.
         */
        public void init(int x) {
            parent[x] = x;
            if (x > 0 && parent[x - 1] != -1) {
                union(x, x - 1);
            }
            if (parent[x + 1] != -1) {
                union(x, x + 1);
            }
        }

        public boolean contains(int x) {
            return parent[x] != -1;
        }

        /**
         * Returns the representative of x's set; for this problem that is the
         * largest occupied value of the run containing x.
         */
        public int find(int x) {
            while (x != parent[x]) {
                // Path compression: point x at its grandparent while walking up.
                parent[x] = parent[parent[x]];
                x = parent[x];
            }
            return x;
        }

        public void union(int x, int y) {
            int rootX = find(x);
            int rootY = find(y);
            // The smaller root must point at the larger one so that find()
            // always yields the largest occupied value of a run.
            if (rootX < rootY) {
                parent[rootX] = rootY;
            }
            if (rootY < rootX) {
                parent[rootY] = rootX;
            }
        }
    }
}
2,816 | import duckdb
import pytest
try:
import pyarrow as pa
can_run = True
except:
can_run = False
class TestArrowFetchChunk(object):
    # NOTE(review): the constant 1024 below matches the chunk (vector) size the
    # queries are delivered in, so a 3000-row table arrives as 1024 + 1024 + 952.
    def test_fetch_arrow_chunk(self, duckdb_cursor):
        # Skip silently when pyarrow is unavailable.
        if not can_run:
            return
        duckdb_cursor.execute("CREATE table t as select range a from range(3000);")
        query = duckdb_cursor.execute("SELECT a FROM t")
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        assert(len(cur_chunk) == 1024)
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        assert(len(cur_chunk) == 1024)
        # Last chunk carries the 3000 - 2048 = 952 remaining rows.
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        assert(len(cur_chunk) == 952)
    def test_arrow_empty(self,duckdb_cursor):
        if not can_run:
            return
        duckdb_cursor.execute("CREATE table t as select range a from range(3000);")
        query = duckdb_cursor.execute("SELECT a FROM t")
        # Drain the full result set (3 chunks of <= 1024 rows).
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        #Should be empty by now
        # NOTE(review): exceptions are only printed, not asserted — this part
        # documents behavior on an exhausted result rather than checking it.
        try:
            cur_chunk = query.fetch_arrow_chunk(return_table=True)
            print(cur_chunk)
        except Exception as err:
            print(err)
        #Should be empty by now
        try:
            cur_chunk = query.fetch_arrow_chunk(return_table=True)
            print(cur_chunk)
        except Exception as err:
            print(err)
    def test_fetch_arrow_chunk_parameter(self, duckdb_cursor):
        if not can_run:
            return
        duckdb_cursor.execute("CREATE table t as select range a from range(10000);")
        query = duckdb_cursor.execute("SELECT a FROM t")
        # Return 2 vectors
        cur_chunk = query.fetch_arrow_chunk(2,True)
        assert(len(cur_chunk) == 2048)
        # Return Default 1 vector
        cur_chunk = query.fetch_arrow_chunk(return_table=True)
        assert(len(cur_chunk) == 1024)
        # Return 3 vectors
        cur_chunk = query.fetch_arrow_chunk(3,True)
        assert(len(cur_chunk) == 3072)
        # Return 0 vectors
        cur_chunk = query.fetch_arrow_chunk(0,True)
        assert(len(cur_chunk) == 0)
        # Return 1 vector
        cur_chunk = query.fetch_arrow_chunk(1,True)
        assert(len(cur_chunk) == 1024)
        # Return more vectors than we have remaining (10000 - 7168 = 2832 rows)
        cur_chunk = query.fetch_arrow_chunk(100,True)
        assert(len(cur_chunk) == 2832)
        # This should return 0
        cur_chunk = query.fetch_arrow_chunk(100,True)
        assert(len(cur_chunk) == 0)
    def test_fetch_arrow_chunk_negative_parameter(self, duckdb_cursor):
        if not can_run:
            return
        duckdb_cursor.execute("CREATE table t as select range a from range(100);")
        query = duckdb_cursor.execute("SELECT a FROM t")
        # Return -1 vector should not work
        with pytest.raises(Exception):
            cur_chunk = query.fetch_arrow_chunk(-1,True)
    def test_fetch_record_batches(self,duckdb_cursor):
        if not can_run:
            return
        duckdb_cursor.execute("CREATE table t as select range a from range(3000);")
        query = duckdb_cursor.execute("SELECT a FROM t")
        # Without return_table a list of record batches is returned instead.
        record_batch_list = query.fetch_arrow_chunk()
        assert(len(record_batch_list) == 1)
        assert(len(record_batch_list[0]) == 1024)
        record_batch_list = query.fetch_arrow_chunk(2)
        assert(len(record_batch_list) == 2)
        assert(len(record_batch_list[0]) == 1024)
        assert(len(record_batch_list[1]) == 952)
1,747 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import sys
import tempfile
import xml.etree.ElementTree as ET
from os.path import join
from PIL import Image, ImageChops, ImageDraw
from . import common
class VerifyError(Exception):
    """Raised by Recorder.verify() when a stitched screenshot does not match
    its recorded reference image."""
    pass
class Recorder:
    """Stitches screenshot tiles from an input directory into full images and
    either records them as the reference set or verifies them against it.

    Args:
        input: directory containing the per-tile screenshots and metadata.json.
        output: directory holding the stitched reference images.
        failure_output: optional directory where diff/expected/actual images
            are written when verification fails.
    """

    def __init__(self, input, output, failure_output):
        self._input = input
        self._output = output
        # Remember the real reference directory; verify() temporarily points
        # self._output at a scratch directory.
        self._realoutput = output
        self._failure_output = failure_output

    def _get_image_size(self, file_name):
        # Returns (width, height) without keeping the file handle open.
        with Image.open(file_name) as im:
            return im.size

    def _copy(self, name, w, h):
        """Stitch the w x h grid of tiles for `name` into <output>/<name>.png."""
        # Interior tiles are assumed to have the size of the top-left tile;
        # edge tiles may be smaller, which only affects the canvas size below.
        tilewidth, tileheight = self._get_image_size(
            join(self._input, common.get_image_file_name(name, 0, 0))
        )
        canvaswidth = 0
        for i in range(w):
            input_file = common.get_image_file_name(name, i, 0)
            canvaswidth += self._get_image_size(join(self._input, input_file))[0]
        canvasheight = 0
        for j in range(h):
            input_file = common.get_image_file_name(name, 0, j)
            canvasheight += self._get_image_size(join(self._input, input_file))[1]
        im = Image.new("RGBA", (canvaswidth, canvasheight))
        try:
            for i in range(w):
                for j in range(h):
                    input_file = common.get_image_file_name(name, i, j)
                    # The with-statement closes the tile; the explicit
                    # close() the original also did was redundant.
                    with Image.open(join(self._input, input_file)) as input_image:
                        im.paste(input_image, (i * tilewidth, j * tileheight))
            im.save(join(self._output, name + ".png"))
        finally:
            im.close()

    def _get_metadata_json(self):
        # Fix: close the metadata file instead of leaking the handle.
        with open(join(self._input, "metadata.json"), "r") as file:
            return json.load(file)

    def _record(self):
        # Stitch every screenshot listed in metadata.json.
        metadata = self._get_metadata_json()
        for screenshot in metadata:
            self._copy(
                screenshot["name"],
                int(screenshot["tileWidth"]),
                int(screenshot["tileHeight"]),
            )

    def _clean(self):
        # Start from an empty output directory.
        if os.path.exists(self._output):
            shutil.rmtree(self._output)
        os.makedirs(self._output)

    def _is_image_same(self, file1, file2, failure_file):
        """Return True if the two images are pixel-identical.

        When they differ and failure_file is given, save a copy of file2 with
        the differing bounding box outlined in red.
        """
        with Image.open(file1) as im1, Image.open(file2) as im2:
            diff_image = ImageChops.difference(im1, im2)
            try:
                diff = diff_image.getbbox()
                if diff is None:
                    return True
                if failure_file:
                    draw = ImageDraw.Draw(im2)
                    draw.rectangle(list(diff), outline=(255, 0, 0))
                    im2.save(failure_file)
                return False
            finally:
                diff_image.close()

    def record(self):
        """Regenerate the reference images from the current input tiles."""
        self._clean()
        self._record()

    def verify(self):
        """Stitch the input tiles into a temp dir and compare against the references.

        Raises:
            VerifyError: if any stitched image differs from its reference.
        """
        self._output = tempfile.mkdtemp()
        self._record()
        screenshots = self._get_metadata_json()
        failures = []
        for screenshot in screenshots:
            name = screenshot["name"] + ".png"
            actual = join(self._output, name)
            expected = join(self._realoutput, name)
            if self._failure_output:
                diff_name = screenshot["name"] + "_diff.png"
                diff = join(self._failure_output, diff_name)
                if not self._is_image_same(expected, actual, diff):
                    # Keep the expected/actual pair next to the diff for triage.
                    expected_name = screenshot["name"] + "_expected.png"
                    actual_name = screenshot["name"] + "_actual.png"
                    shutil.copy(actual, join(self._failure_output, actual_name))
                    shutil.copy(expected, join(self._failure_output, expected_name))
                    failures.append((expected, actual))
            else:
                if not self._is_image_same(expected, actual, None):
                    raise VerifyError("Image %s is not same as %s" % (expected, actual))
        if failures:
            reason = ""
            for expected, actual in failures:
                reason = reason + "\nImage %s is not same as %s" % (expected, actual)
            raise VerifyError(reason)
        # NOTE: the temp directory is deliberately left behind when a
        # VerifyError is raised so the stitched images can be inspected.
        shutil.rmtree(self._output)
| 2,230 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-hc32-g7rr-m838",
"modified": "2022-05-01T01:58:36Z",
"published": "2022-05-01T01:58:36Z",
"aliases": [
"CVE-2005-1422"
],
"details": "Raysoft/Raybase Video Cam Server 1.0.0 beta allows remote attackers to conduct administrator operations and cause a denial of service (server or camera shutdown) via a direct request to admin.html.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2005-1422"
},
{
"type": "WEB",
"url": "http://securitytracker.com/id?1013860"
},
{
"type": "WEB",
"url": "http://www.autistici.org/fdonato/advisory/VideoCamServer1.0.0-adv.txt"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 394 |
9,724 | /*
Copyright (c) 2003-2006 <NAME> / <NAME> http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_VECTOR3_H
#define BT_VECTOR3_H
#include "btScalar.h"
#include "btMinMax.h"
#ifdef BT_USE_DOUBLE_PRECISION
#define btVector3Data btVector3DoubleData
#define btVector3DataName "btVector3DoubleData"
#else
#define btVector3Data btVector3FloatData
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION
/**@brief btVector3 can be used to represent 3D points and vectors.
* It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user
* Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers
*/
ATTRIBUTE_ALIGNED16(class) btVector3
{
public:

#if defined (__SPU__) && defined (__CELLOS_LV2__)
	// SPU builds keep the components in a plain array aliased as a vec_float4.
	btScalar m_floats[4];
public:
	SIMD_FORCE_INLINE const vec_float4& get128() const
	{
		return *((const vec_float4*)&m_floats[0]);
	}
public:
#else //__CELLOS_LV2__ __SPU__
#ifdef BT_USE_SSE // _WIN32
	// SSE builds overlay the scalar array with a 128-bit SIMD value.
	union {
		__m128 mVec128;
		btScalar m_floats[4];
	};
	SIMD_FORCE_INLINE __m128 get128() const
	{
		return mVec128;
	}
	SIMD_FORCE_INLINE void set128(__m128 v128)
	{
		mVec128 = v128;
	}
#else
	btScalar m_floats[4];
#endif
#endif //__CELLOS_LV2__ __SPU__

	public:

	/**@brief No initialization constructor */
	SIMD_FORCE_INLINE btVector3() {}

	/**@brief Constructor from scalars
	 * @param x X value
	 * @param y Y value
	 * @param z Z value
	 */
	SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z)
	{
		m_floats[0] = x;
		m_floats[1] = y;
		m_floats[2] = z;
		// The padding component is cleared so equality comparisons stay well defined.
		m_floats[3] = btScalar(0.);
	}

	/**@brief Add a vector to this one
	 * @param v The vector to add to this one */
	SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
	{
		m_floats[0] += v.m_floats[0]; m_floats[1] += v.m_floats[1];m_floats[2] += v.m_floats[2];
		return *this;
	}

	/**@brief Subtract a vector from this one
	 * @param v The vector to subtract */
	SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
	{
		m_floats[0] -= v.m_floats[0]; m_floats[1] -= v.m_floats[1];m_floats[2] -= v.m_floats[2];
		return *this;
	}

	/**@brief Scale the vector
	 * @param s Scale factor */
	SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
	{
		m_floats[0] *= s; m_floats[1] *= s;m_floats[2] *= s;
		return *this;
	}

	/**@brief Inversely scale the vector
	 * @param s Scale factor to divide by */
	SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
	{
		btFullAssert(s != btScalar(0.0));
		// One division plus three multiplications instead of three divisions.
		return *this *= btScalar(1.0) / s;
	}

	/**@brief Return the dot product (x, y, z only; w is ignored)
	 * @param v The other vector in the dot product */
	SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
	{
		return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] +m_floats[2] * v.m_floats[2];
	}

	/**@brief Return the length of the vector squared */
	SIMD_FORCE_INLINE btScalar length2() const
	{
		return dot(*this);
	}

	/**@brief Return the length of the vector */
	SIMD_FORCE_INLINE btScalar length() const
	{
		return btSqrt(length2());
	}

	/**@brief Return the distance squared between the ends of this and another vector
	 * This is semantically treating the vector like a point */
	SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const;

	/**@brief Return the distance between the ends of this and another vector
	 * This is semantically treating the vector like a point */
	SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;

	/**@brief Normalize without dividing by zero: a (near-)zero vector becomes (1,0,0).
	 * Scaling by the largest absolute component first improves numerical robustness. */
	SIMD_FORCE_INLINE btVector3& safeNormalize()
	{
		btVector3 absVec = this->absolute();
		int maxIndex = absVec.maxAxis();
		if (absVec[maxIndex]>0)
		{
			*this /= absVec[maxIndex];
			return *this /= length();
		}
		setValue(1,0,0);
		return *this;
	}

	/**@brief Normalize this vector
	 * x^2 + y^2 + z^2 = 1 */
	SIMD_FORCE_INLINE btVector3& normalize()
	{
		return *this /= length();
	}

	/**@brief Return a normalized version of this vector */
	SIMD_FORCE_INLINE btVector3 normalized() const;

	/**@brief Return a rotated version of this vector
	 * @param wAxis The axis to rotate about (must be unit length)
	 * @param angle The angle to rotate by */
	SIMD_FORCE_INLINE btVector3 rotate( const btVector3& wAxis, const btScalar angle ) const;

	/**@brief Return the angle between this and another vector
	 * @param v The other vector */
	SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
	{
		btScalar s = btSqrt(length2() * v.length2());
		btFullAssert(s != btScalar(0.0));
		return btAcos(dot(v) / s);
	}

	/**@brief Return a vector with the absolute values of each element */
	SIMD_FORCE_INLINE btVector3 absolute() const
	{
		return btVector3(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]));
	}

	/**@brief Return the cross product between this and another vector
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
	{
		return btVector3(
			m_floats[1] * v.m_floats[2] -m_floats[2] * v.m_floats[1],
			m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
			m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
	}

	/**@brief Scalar triple product this . (v1 x v2) */
	SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
	{
		return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) +
			m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) +
			m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
	}

	/**@brief Return the axis with the smallest value
	 * Note return values are 0,1,2 for x, y, or z */
	SIMD_FORCE_INLINE int minAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[0] <m_floats[2] ? 0 : 2) : (m_floats[1] <m_floats[2] ? 1 : 2);
	}

	/**@brief Return the axis with the largest value
	 * Note return values are 0,1,2 for x, y, or z */
	SIMD_FORCE_INLINE int maxAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[1] <m_floats[2] ? 2 : 1) : (m_floats[0] <m_floats[2] ? 2 : 0);
	}

	/**@brief Axis with the smallest absolute value (most orthogonal axis). */
	SIMD_FORCE_INLINE int furthestAxis() const
	{
		return absolute().minAxis();
	}

	/**@brief Axis with the largest absolute value (most aligned axis). */
	SIMD_FORCE_INLINE int closestAxis() const
	{
		return absolute().maxAxis();
	}

	/**@brief Set this vector to the linear interpolation v0 + rt * (v1 - v0). */
	SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt)
	{
		btScalar s = btScalar(1.0) - rt;
		m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0];
		m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1];
		m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2];
		//don't do the unused w component
		//		m_co[3] = s * v0[3] + rt * v1[3];
	}

	/**@brief Return the linear interpolation between this and another vector
	 * @param v The other vector
	 * @param t The ratio of this to v (t = 0 => return this, t=1 => return other) */
	SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const
	{
		return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
			m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
			m_floats[2] + (v.m_floats[2] -m_floats[2]) * t);
	}

	/**@brief Elementwise multiply this vector by the other
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v)
	{
		m_floats[0] *= v.m_floats[0]; m_floats[1] *= v.m_floats[1];m_floats[2] *= v.m_floats[2];
		return *this;
	}

	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; }
	/**@brief Set the x value */
	SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x;};
	/**@brief Set the y value */
	SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y;};
	/**@brief Set the z value */
	SIMD_FORCE_INLINE void setZ(btScalar z) {m_floats[2] = z;};
	/**@brief Set the w value */
	SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w;};
	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; }
	/**@brief Return the w value */
	SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; }

	//SIMD_FORCE_INLINE btScalar&       operator[](int i)       { return (&m_floats[0])[i];	}
	//SIMD_FORCE_INLINE const btScalar& operator[](int i) const { return (&m_floats[0])[i]; }
	///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons.
	SIMD_FORCE_INLINE operator btScalar *() { return &m_floats[0]; }
	SIMD_FORCE_INLINE operator const btScalar *() const { return &m_floats[0]; }

	// Compares all four components, including w.
	SIMD_FORCE_INLINE bool operator==(const btVector3& other) const
	{
		return ((m_floats[3]==other.m_floats[3]) && (m_floats[2]==other.m_floats[2]) && (m_floats[1]==other.m_floats[1]) && (m_floats[0]==other.m_floats[0]));
	}

	SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const
	{
		return !(*this == other);
	}

	/**@brief Set each element to the max of the current values and the values of another btVector3
	 * @param other The other btVector3 to compare with
	 */
	SIMD_FORCE_INLINE void setMax(const btVector3& other)
	{
		btSetMax(m_floats[0], other.m_floats[0]);
		btSetMax(m_floats[1], other.m_floats[1]);
		btSetMax(m_floats[2], other.m_floats[2]);
		btSetMax(m_floats[3], other.w());
	}

	/**@brief Set each element to the min of the current values and the values of another btVector3
	 * @param other The other btVector3 to compare with
	 */
	SIMD_FORCE_INLINE void setMin(const btVector3& other)
	{
		btSetMin(m_floats[0], other.m_floats[0]);
		btSetMin(m_floats[1], other.m_floats[1]);
		btSetMin(m_floats[2], other.m_floats[2]);
		btSetMin(m_floats[3], other.w());
	}

	/**@brief Set x, y and z; w is reset to zero. */
	SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z)
	{
		m_floats[0]=x;
		m_floats[1]=y;
		m_floats[2]=z;
		m_floats[3] = btScalar(0.);
	}

	/**@brief Write the rows of the skew-symmetric cross-product matrix of this
	 * vector into v0, v1, v2 (so that M * u == this x u). */
	void getSkewSymmetricMatrix(btVector3* v0,btVector3* v1,btVector3* v2) const
	{
		v0->setValue(0.   ,-z() ,y());
		v1->setValue(z()  ,0.   ,-x());
		v2->setValue(-y() ,x()  ,0.);
	}

	void setZero()
	{
		setValue(btScalar(0.),btScalar(0.),btScalar(0.));
	}

	// Exact comparison against zero; w is not inspected.
	SIMD_FORCE_INLINE bool isZero() const
	{
		return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0);
	}

	// True when the squared length is below SIMD_EPSILON.
	SIMD_FORCE_INLINE bool fuzzyZero() const
	{
		return length2() < SIMD_EPSILON;
	}

	SIMD_FORCE_INLINE void serialize(struct btVector3Data& dataOut) const;

	SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn);

	SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData& dataOut) const;

	SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn);

	SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData& dataOut) const;

	SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn);

};
/**@brief Return the component-wise sum of two vectors (point semantics). */
SIMD_FORCE_INLINE btVector3
operator+(const btVector3& a, const btVector3& b)
{
	return btVector3(a.m_floats[0] + b.m_floats[0],
					 a.m_floats[1] + b.m_floats[1],
					 a.m_floats[2] + b.m_floats[2]);
}
/**@brief Return the element-wise (Hadamard) product of two vectors. */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& a, const btVector3& b)
{
	return btVector3(a.m_floats[0] * b.m_floats[0],
					 a.m_floats[1] * b.m_floats[1],
					 a.m_floats[2] * b.m_floats[2]);
}
/**@brief Return the component-wise difference of two vectors. */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& a, const btVector3& b)
{
	return btVector3(a.m_floats[0] - b.m_floats[0],
					 a.m_floats[1] - b.m_floats[1],
					 a.m_floats[2] - b.m_floats[2]);
}
/**@brief Return the negation of the vector. */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& x)
{
	return btVector3(-x.m_floats[0], -x.m_floats[1], -x.m_floats[2]);
}
/**@brief Return the vector uniformly scaled by s. */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& x, const btScalar& s)
{
	return btVector3(x.m_floats[0] * s,
					 x.m_floats[1] * s,
					 x.m_floats[2] * s);
}
/**@brief Return the vector uniformly scaled by s (scalar on the left). */
SIMD_FORCE_INLINE btVector3
operator*(const btScalar& s, const btVector3& x)
{
	// Delegate to the vector-on-the-left overload.
	return x * s;
}
/**@brief Return the vector divided by the scalar s. */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& x, const btScalar& s)
{
	btFullAssert(s != btScalar(0.0));
	// Multiply by the reciprocal: one division instead of three.
	const btScalar inv = btScalar(1.0) / s;
	return x * inv;
}
/**@brief Return the component-wise quotient of two vectors. */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& a, const btVector3& b)
{
	return btVector3(a.m_floats[0] / b.m_floats[0],
					 a.m_floats[1] / b.m_floats[1],
					 a.m_floats[2] / b.m_floats[2]);
}
/**@brief Free-function form of the dot product. */
SIMD_FORCE_INLINE btScalar
btDot(const btVector3& a, const btVector3& b)
{
	return a.dot(b);
}
/**@brief Free-function form of the squared distance between two points. */
SIMD_FORCE_INLINE btScalar
btDistance2(const btVector3& a, const btVector3& b)
{
	return a.distance2(b);
}
/**@brief Free-function form of the distance between two points. */
SIMD_FORCE_INLINE btScalar
btDistance(const btVector3& a, const btVector3& b)
{
	return a.distance(b);
}
/**@brief Free-function form of the angle between two vectors. */
SIMD_FORCE_INLINE btScalar
btAngle(const btVector3& a, const btVector3& b)
{
	return a.angle(b);
}
/**@brief Free-function form of the cross product. */
SIMD_FORCE_INLINE btVector3
btCross(const btVector3& a, const btVector3& b)
{
	return a.cross(b);
}
/**@brief Scalar triple product v1 . (v2 x v3). */
SIMD_FORCE_INLINE btScalar
btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
{
	return v1.triple(v2, v3);
}
/**@brief Return the linear interpolation between two vectors
 * @param a The starting vector
 * @param b The ending vector
 * @param t Interpolation ratio (t = 0 => a, t = 1 => b) */
SIMD_FORCE_INLINE btVector3
lerp(const btVector3& a, const btVector3& b, const btScalar& t)
{
	return a.lerp(b, t);
}
SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
{
	// Squared length of the separation vector.
	const btVector3 separation = v - *this;
	return separation.length2();
}
SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
{
	// Euclidean distance: square root of the squared separation.
	return btSqrt(distance2(v));
}
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
{
	// Scale a fresh vector by the reciprocal length; the result's w is zero,
	// matching operator/(btVector3, btScalar).
	const btScalar len = length();
	btFullAssert(len != btScalar(0.0));
	const btScalar inv = btScalar(1.0) / len;
	return btVector3(m_floats[0] * inv, m_floats[1] * inv, m_floats[2] * inv);
}
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& axis, const btScalar angle) const
{
	// axis must be a unit length vector (Rodrigues' rotation formula).
	const btVector3 parallel = axis * axis.dot(*this);  // component along the axis
	const btVector3 perp = *this - parallel;            // component in the rotation plane
	const btVector3 binormal = axis.cross(*this);       // perpendicular to both
	return parallel + perp * btCos(angle) + binormal * btSin(angle);
}
class btVector4 : public btVector3
{
public:

	/**@brief No initialization constructor */
	SIMD_FORCE_INLINE btVector4() {}

	/**@brief Constructor from four scalars; unlike btVector3, w is caller supplied. */
	SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z,const btScalar& w)
		: btVector3(x,y,z)
	{
		m_floats[3] = w;
	}

	/**@brief Return a vector with the absolute values of all four components. */
	SIMD_FORCE_INLINE btVector4 absolute4() const
	{
		return btVector4(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]),
			btFabs(m_floats[3]));
	}

	/**@brief Return the w value. */
	btScalar	getW() const { return m_floats[3];}

	/**@brief Return the index (0..3) of the component with the largest value. */
	SIMD_FORCE_INLINE int maxAxis4() const
	{
		int maxIndex = -1;
		btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
		if (m_floats[0] > maxVal)
		{
			maxIndex = 0;
			maxVal = m_floats[0];
		}
		if (m_floats[1] > maxVal)
		{
			maxIndex = 1;
			maxVal = m_floats[1];
		}
		if (m_floats[2] > maxVal)
		{
			maxIndex = 2;
			maxVal =m_floats[2];
		}
		if (m_floats[3] > maxVal)
		{
			maxIndex = 3;
			maxVal = m_floats[3];
		}
		return maxIndex;
	}

	/**@brief Return the index (0..3) of the component with the smallest value. */
	SIMD_FORCE_INLINE int minAxis4() const
	{
		int minIndex = -1;
		btScalar minVal = btScalar(BT_LARGE_FLOAT);
		if (m_floats[0] < minVal)
		{
			minIndex = 0;
			minVal = m_floats[0];
		}
		if (m_floats[1] < minVal)
		{
			minIndex = 1;
			minVal = m_floats[1];
		}
		if (m_floats[2] < minVal)
		{
			minIndex = 2;
			minVal =m_floats[2];
		}
		if (m_floats[3] < minVal)
		{
			minIndex = 3;
			minVal = m_floats[3];
		}
		return minIndex;
	}

	/**@brief Return the index of the component with the largest absolute value. */
	SIMD_FORCE_INLINE int closestAxis4() const
	{
		return absolute4().maxAxis4();
	}

	/* Legacy accessor kept for reference:
	void getValue(btScalar *m) const
	{
		m[0] = m_floats[0];
		m[1] = m_floats[1];
		m[2] =m_floats[2];
	}
	*/

	/**@brief Set all four values
	 * @param x Value of x
	 * @param y Value of y
	 * @param z Value of z
	 * @param w Value of w
	 */
	SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z,const btScalar& w)
	{
		m_floats[0]=x;
		m_floats[1]=y;
		m_floats[2]=z;
		m_floats[3]=w;
	}

};
///btSwapScalarEndian swaps the byte order of a single btScalar, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
{
	// Reverse all sizeof(btScalar) bytes: 4 in float builds, 8 in double
	// builds. This single loop replaces the per-precision #ifdef blocks.
	const unsigned char* src = (const unsigned char*)&sourceVal;
	unsigned char* dest = (unsigned char*)&destVal;
	for (unsigned int i = 0; i < sizeof(btScalar); i++)
		dest[i] = src[sizeof(btScalar) - 1 - i];
}
///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
{
	// Swap all four components, including the (normally unused) w slot.
	for (int c = 0; c < 4; ++c)
		btSwapScalarEndian(sourceVec[c], destVec[c]);
}
///btUnSwapVector3Endian swaps vector endianness in place, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
{
	// Swap into a scratch vector first, then copy back over the input.
	btVector3 scratch;
	for (int c = 0; c < 4; ++c)
		btSwapScalarEndian(vector[c], scratch[c]);
	vector = scratch;
}
/**@brief Build two vectors p and q spanning the plane with normal n, so that
 * n, p, q form a (right-handed) orthogonal basis.
 * NOTE(review): the scaling by btRecipSqrt suggests n is assumed to be unit
 * length for p and q to come out normalized — confirm at call sites.
 * The branch picks the construction plane away from n's dominant axis to
 * avoid near-zero denominators. */
template <class T>
SIMD_FORCE_INLINE void btPlaneSpace1 (const T& n, T& p, T& q)
{
  if (btFabs(n[2]) > SIMDSQRT12) {
    // choose p in y-z plane
    btScalar a = n[1]*n[1] + n[2]*n[2];
    btScalar k = btRecipSqrt (a);
    p[0] = 0;
    p[1] = -n[2]*k;
    p[2] = n[1]*k;
    // set q = n x p
    q[0] = a*k;
    q[1] = -n[0]*p[2];
    q[2] = n[0]*p[1];
  }
  else {
    // choose p in x-y plane
    btScalar a = n[0]*n[0] + n[1]*n[1];
    btScalar k = btRecipSqrt (a);
    p[0] = -n[1]*k;
    p[1] = n[0]*k;
    p[2] = 0;
    // set q = n x p
    q[0] = -n[2]*p[1];
    q[1] = n[2]*p[0];
    q[2] = a*k;
  }
}
// Plain-old-data serialization layout for btVector3 in single precision.
struct btVector3FloatData
{
	float	m_floats[4];
};
// Plain-old-data serialization layout for btVector3 in double precision.
struct btVector3DoubleData
{
	double	m_floats[4];
};
SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const
{
	// Narrow every component (including w) to float; a memcpy could also work
	// in float builds, but the loop stays correct for both precisions.
	for (int c = 0; c < 4; c++)
		dataOut.m_floats[c] = float(m_floats[c]);
}
SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
{
	// Widen the stored floats back into btScalar components, including w.
	for (int c = 0; c < 4; c++)
		m_floats[c] = btScalar(dataIn.m_floats[c]);
}
SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const
{
	// Widen every component (including w) to double for the on-disk layout.
	for (int c = 0; c < 4; c++)
		dataOut.m_floats[c] = double(m_floats[c]);
}
SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
{
	// Convert the stored doubles into btScalar components, including w.
	for (int c = 0; c < 4; c++)
		m_floats[c] = btScalar(dataIn.m_floats[c]);
}
SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const
{
	// btVector3Data matches btScalar's precision, so this is a direct copy.
	for (int c = 0; c < 4; c++)
		dataOut.m_floats[c] = m_floats[c];
}
SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn)
{
	// Direct copy back; btVector3Data matches btScalar's precision.
	for (int c = 0; c < 4; c++)
		m_floats[c] = dataIn.m_floats[c];
}
#endif //BT_VECTOR3_H
| 9,404 |
418 | <filename>fhirclient/models/examplescenario_tests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import examplescenario
from .fhirdate import FHIRDate
class ExampleScenarioTests(unittest.TestCase):
    """Generated round-trip tests for the ExampleScenario FHIR resource.

    Each test parses a fixture JSON file, checks its fields, serializes it
    back to JSON, re-parses the result, and checks the fields again.
    """

    def instantiate_from(self, filename):
        # Loads `filename` from FHIR_UNITTEST_DATADIR (or the cwd) and parses
        # it into an ExampleScenario model instance.
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("ExampleScenario", js["resourceType"])
        return examplescenario.ExampleScenario(js)

    def testExampleScenario1(self):
        # Parse -> check -> serialize -> re-parse -> check again.
        inst = self.instantiate_from("examplescenario-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ExampleScenario instance")
        self.implExampleScenario1(inst)

        js = inst.as_json()
        self.assertEqual("ExampleScenario", js["resourceType"])
        inst2 = examplescenario.ExampleScenario(js)
        self.implExampleScenario1(inst2)

    def implExampleScenario1(self, inst):
        # Field-by-field assertions against the known fixture content.
        self.assertEqual(inst.actor[0].actorId, "Nurse")
        self.assertEqual(inst.actor[0].description, "The Nurse")
        self.assertEqual(inst.actor[0].name, "Nurse")
        self.assertEqual(inst.actor[0].type, "person")
        self.assertEqual(inst.actor[1].actorId, "MAP")
        self.assertEqual(inst.actor[1].description, "The entity that receives the Administration Requests to show the nurse to perform them")
        self.assertEqual(inst.actor[1].name, "<NAME>")
        self.assertEqual(inst.actor[1].type, "entity")
        self.assertEqual(inst.actor[2].actorId, "OP")
        self.assertEqual(inst.actor[2].description, "The Medication Administration Order Placer")
        self.assertEqual(inst.actor[2].name, "<NAME>")
        self.assertEqual(inst.actor[2].type, "entity")
        self.assertEqual(inst.actor[3].actorId, "MAC")
        self.assertEqual(inst.actor[3].description, "The entity that receives the Medication Administration reports")
        self.assertEqual(inst.actor[3].name, "MAR / EHR")
        self.assertEqual(inst.actor[3].type, "entity")
        self.assertEqual(inst.id, "example")
        # Instances: the prescription, the per-intake requests, and the reports.
        self.assertEqual(inst.instance[0].description, "The initial prescription which describes \"medication X, 3 times per day\" - the exact scheduling is not in the initial prescription (it is left for the care teams to decide on the schedule).")
        self.assertEqual(inst.instance[0].name, "Initial Prescription")
        self.assertEqual(inst.instance[0].resourceId, "iherx001")
        self.assertEqual(inst.instance[1].description, "The administration request for day 1, morning")
        self.assertEqual(inst.instance[1].name, "Request for day 1, morning")
        self.assertEqual(inst.instance[1].resourceId, "iherx001.001")
        self.assertEqual(inst.instance[2].description, "The administration request for day 1, lunch")
        self.assertEqual(inst.instance[2].name, "Request for day 1, lunch")
        self.assertEqual(inst.instance[2].resourceId, "iherx001.002")
        self.assertEqual(inst.instance[3].description, "The administration request for day 1, evening")
        self.assertEqual(inst.instance[3].name, "Request for day 1, evening")
        self.assertEqual(inst.instance[3].resourceId, "iherx001.003")
        self.assertEqual(inst.instance[4].description, "The administration request for day 2, morning")
        self.assertEqual(inst.instance[4].name, "Request for day 2, morning")
        self.assertEqual(inst.instance[4].resourceId, "iherx001.004")
        self.assertEqual(inst.instance[5].description, "The administration request for day 2, lunch")
        self.assertEqual(inst.instance[5].name, "Request for day 2, lunch")
        self.assertEqual(inst.instance[5].resourceId, "iherx001.005")
        self.assertEqual(inst.instance[6].description, "The administration request for day 2, evening")
        self.assertEqual(inst.instance[6].name, "Request for day 2, evening")
        self.assertEqual(inst.instance[6].resourceId, "iherx001.006")
        self.assertEqual(inst.instance[7].description, "Administration report for day 1, morning: Taken")
        self.assertEqual(inst.instance[7].name, "Morning meds - taken")
        self.assertEqual(inst.instance[7].resourceId, "iheadm001a")
        self.assertEqual(inst.instance[8].description, "Administration report for day 1, morning: NOT Taken")
        self.assertEqual(inst.instance[8].name, "Morning meds - not taken")
        self.assertEqual(inst.instance[8].resourceId, "iheadm001b")
        self.assertEqual(inst.instance[9].containedInstance[0].resourceId, "iherx001.001")
        self.assertEqual(inst.instance[9].containedInstance[1].resourceId, "iherx001.002")
        self.assertEqual(inst.instance[9].containedInstance[2].resourceId, "iherx001.003")
        self.assertEqual(inst.instance[9].containedInstance[3].resourceId, "iherx001.004")
        self.assertEqual(inst.instance[9].containedInstance[4].resourceId, "iherx001.005")
        self.assertEqual(inst.instance[9].containedInstance[5].resourceId, "iherx001.006")
        self.assertEqual(inst.instance[9].description, "All the medication Requests for Day 1")
        self.assertEqual(inst.instance[9].name, "Bundle of Medication Requests")
        self.assertEqual(inst.instance[9].resourceId, "iherx001bundle")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        # Process: the medication administration workflow steps.
        self.assertEqual(inst.process[0].postConditions, "Medication administration Reports are submitted, EHR is updated.")
        self.assertEqual(inst.process[0].preConditions, "Medication administration requests are in the EHR / MAR, scheduled for each individual intake.")
        self.assertEqual(inst.process[0].step[0].operation.initiator, "Nurse")
        self.assertEqual(inst.process[0].step[0].operation.name, "1. Get today's schedule")
        self.assertEqual(inst.process[0].step[0].operation.number, "1")
        self.assertEqual(inst.process[0].step[0].operation.receiver, "MAP")
        self.assertEqual(inst.process[0].step[1].process[0].description, "Query for medication administration orders,\\n- For today's shifts\\n- For today's patients")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.initiator, "MAP")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.name, "2.Query for medication administration orders,\\n- For today's shifts\\n- For today's patients")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.number, "2")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.receiver, "OP")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.request.resourceId, "iherxqry")
        self.assertEqual(inst.process[0].step[1].process[0].step[0].operation.response.resourceId, "iherx001bundle")
        self.assertEqual(inst.process[0].step[1].process[0].title, "P1. Query Administration Requests")
        self.assertTrue(inst.process[0].step[2].pause)
        self.assertEqual(inst.process[0].step[3].operation.initiator, "MAP")
        self.assertEqual(inst.process[0].step[3].operation.name, "Notify (alert)")
        self.assertEqual(inst.process[0].step[3].operation.number, "4")
        self.assertEqual(inst.process[0].step[3].operation.receiver, "Nurse")
        self.assertEqual(inst.process[0].step[4].operation.initiator, "Nurse")
        self.assertEqual(inst.process[0].step[4].operation.name, "Read orders")
        self.assertEqual(inst.process[0].step[4].operation.number, "5")
        self.assertEqual(inst.process[0].step[4].operation.receiver, "MAP")
        self.assertTrue(inst.process[0].step[5].pause)
        self.assertEqual(inst.process[0].step[6].operation.initiator, "Nurse")
        self.assertEqual(inst.process[0].step[6].operation.name, "Ask if patient took meds")
        self.assertEqual(inst.process[0].step[6].operation.number, "5")
        self.assertEqual(inst.process[0].step[6].operation.receiver, "Nurse")
        # Alternatives: taken / not taken / unknown.
        self.assertEqual(inst.process[0].step[7].alternative[0].description, "Invoke if patient took medications")
        self.assertEqual(inst.process[0].step[7].alternative[0].step[0].process[0].step[0].operation.initiator, "Nurse")
        self.assertTrue(inst.process[0].step[7].alternative[0].step[0].process[0].step[0].operation.initiatorActive)
        self.assertEqual(inst.process[0].step[7].alternative[0].step[0].process[0].step[0].operation.name, "Register Meds taken")
        self.assertEqual(inst.process[0].step[7].alternative[0].step[0].process[0].step[0].operation.number, "1a")
        self.assertEqual(inst.process[0].step[7].alternative[0].step[0].process[0].step[0].operation.receiver, "MAP")
        self.assertEqual(inst.process[0].step[7].alternative[0].step[0].process[0].title, "Register Meds taken")
        self.assertEqual(inst.process[0].step[7].alternative[0].title, "Patient took drugs")
        self.assertEqual(inst.process[0].step[7].alternative[1].description, "No, patient did not take drugs")
        self.assertEqual(inst.process[0].step[7].alternative[1].step[0].process[0].step[0].operation.initiator, "Nurse")
        self.assertTrue(inst.process[0].step[7].alternative[1].step[0].process[0].step[0].operation.initiatorActive)
        self.assertEqual(inst.process[0].step[7].alternative[1].step[0].process[0].step[0].operation.name, "Register Meds NOT taken")
        self.assertEqual(inst.process[0].step[7].alternative[1].step[0].process[0].step[0].operation.number, "1b")
        self.assertEqual(inst.process[0].step[7].alternative[1].step[0].process[0].step[0].operation.receiver, "MAP")
        self.assertEqual(inst.process[0].step[7].alternative[1].step[0].process[0].title, "Register Meds NOT taken")
        self.assertEqual(inst.process[0].step[7].alternative[1].title, "No drugs")
        self.assertEqual(inst.process[0].step[7].alternative[2].description, "Unknown whether patient took medications or not")
        self.assertTrue(inst.process[0].step[7].alternative[2].step[0].pause)
        self.assertEqual(inst.process[0].step[7].alternative[2].title, "Not clear")
        self.assertTrue(inst.process[0].step[8].pause)
        self.assertEqual(inst.process[0].step[9].operation.initiator, "Nurse")
        self.assertEqual(inst.process[0].step[9].operation.name, "Administer drug")
        self.assertEqual(inst.process[0].step[9].operation.number, "6")
        self.assertEqual(inst.process[0].step[9].operation.receiver, "Nurse")
        self.assertEqual(inst.process[0].title, "Mobile Medication Administration")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
| 4,319 |
3,269 | # Time: O(m * n)
# Space: O(m * n)
class Solution(object):
    def highestPeak(self, isWater):
        """
        :type isWater: List[List[int]]
        :rtype: List[List[int]]

        Multi-source BFS (LeetCode 1765): water cells get height 0, every
        land cell gets 1 + distance to the nearest water cell. The input
        grid is re-encoded and mutated in place, then returned.
        """
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        q = []
        for r, row in enumerate(isWater):
            for c, cell in enumerate(row):
                # Re-encode: water (1) -> 0 (its final height),
                # land (0) -> -1 (marks "not visited yet").
                row[c] -= 1
                if not cell:
                    continue
                q.append((r, c))
        while q:
            new_q = []
            for r, c in q:
                for dr, dc in directions:
                    nr, nc = r+dr, c+dc
                    if not (0 <= nr < len(isWater) and
                            0 <= nc < len(isWater[0]) and
                            isWater[nr][nc] == -1):
                        continue
                    isWater[nr][nc] = isWater[r][c]+1
                    # Fix: collect the next BFS level in new_q. The original
                    # appended to q while iterating it (which worked only by
                    # accident) and left new_q dead.
                    new_q.append((nr, nc))
            q = new_q
        return isWater
# Time: O(m * n)
# Space: O(m * n)
class Solution2(object):
    def highestPeak(self, isWater):
        """
        :type isWater: List[List[int]]
        :rtype: List[List[int]]

        Same multi-source BFS as Solution, but keeps the input untouched and
        builds a separate heights matrix (-1 = unvisited, 0 = water).
        """
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        # range() instead of the Python-2-only xrange(): identical behavior
        # here and compatible with both interpreters.
        q, heights = [], [[-1]*len(isWater[0]) for _ in range(len(isWater))]
        for r, row in enumerate(isWater):
            for c, cell in enumerate(row):
                if not cell:
                    continue
                heights[r][c] = 0
                q.append((r, c))
        while q:
            new_q = []
            for r, c in q:
                for dr, dc in directions:
                    nr, nc = r+dr, c+dc
                    if not (0 <= nr < len(isWater) and
                            0 <= nc < len(isWater[0]) and
                            heights[nr][nc] == -1):
                        continue
                    heights[nr][nc] = heights[r][c]+1
                    # Fix: append to new_q (the next level); the original
                    # appended to q while iterating it, leaving new_q dead.
                    new_q.append((nr, nc))
            q = new_q
        return heights
| 1,232 |
1,474 | <reponame>billzhonggz/Transfer-Learning-Library<filename>common/utils/analysis/tsne.py
"""
@author: <NAME>
@contact: <EMAIL>
"""
import torch
import matplotlib
matplotlib.use('Agg')
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
def visualize(source_feature: torch.Tensor, target_feature: torch.Tensor,
              filename: str, source_color='r', target_color='b'):
    """
    Visualize features from different domains using t-SNE.

    Args:
        source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
        target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
        filename (str): the file name to save t-SNE
        source_color (str): the color of the source features. Default: 'r'
        target_color (str): the color of the target features. Default: 'b'
    """
    src = source_feature.numpy()
    tgt = target_feature.numpy()
    combined = np.concatenate([src, tgt], axis=0)

    # Project the stacked features down to 2-D with t-SNE (fixed seed for
    # reproducible plots).
    embedded = TSNE(n_components=2, random_state=33).fit_transform(combined)

    # Domain labels: 1 for source points, 0 for target points.
    labels = np.concatenate((np.ones(len(src)), np.zeros(len(tgt))))

    # Render a borderless scatter plot and write it to disk.
    fig, ax = plt.subplots(figsize=(10, 10))
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    plt.scatter(embedded[:, 0], embedded[:, 1], c=labels,
                cmap=col.ListedColormap([target_color, source_color]), s=20)
    plt.xticks([])
    plt.yticks([])
    plt.savefig(filename)
| 671 |
4,054 | /*
* Copyright LWJGL. All rights reserved.
* License terms: https://www.lwjgl.org/license
* MACHINE GENERATED FILE, DO NOT EDIT
*/
package org.lwjgl.opengl;
/**
* Native bindings to the <a target="_blank" href="https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_422_pixels.txt">EXT_422_pixels</a> extension.
*
* <p>This extension provides support for converting 422 pixels in host memory to 444 pixels as part of the pixel storage operation.</p>
*
* <p>The pixel unpack storage operation treats a 422 pixel as a 2 element format where the first element is C (chrominance) and the second element is L
* (luminance). Luminance is present on all pixels; a full chrominance value requires two pixels.</p>
*
* <p>The pixel pack storage operation converts RGB to a 422 pixel defined as a 2 element format where the first element stored is C (chrominance) and the
* second element stored is L (luminance). Luminance is present on all pixels; a full chrominance value requires two pixels.</p>
*
* <p>Both averaging and non-averaging is supported for green and blue assignments for pack and unpack operations.</p>
*/
public final class EXT422Pixels {

    /**
     * Accepted by the {@code format} parameter of DrawPixels, ReadPixels, TexImage1D, TexImage2D, GetTexImage, TexImage3D, TexSubImage1D, TexSubImage2D,
     * TexSubImage3D, GetHistogram, GetMinmax, ConvolutionFilter1D, ConvolutionFilter2D, ConvolutionFilter3D, GetConvolutionFilter, SeparableFilter2D,
     * SeparableFilter3D, GetSeparableFilter, ColorTable, and GetColorTable.
     */
    public static final int
        GL_422_EXT             = 0x80CC,  // C on even pixels, L on all pixels
        GL_422_REV_EXT         = 0x80CD,  // reversed element order
        GL_422_AVERAGE_EXT     = 0x80CE,  // averaging variant for green/blue assignment
        GL_422_REV_AVERAGE_EXT = 0x80CF;  // reversed order, averaging variant

    /** Utility holder for extension constants; never instantiated. */
    private EXT422Pixels() {}

}
468 | # CheckMentioned.py
# Find all the symbols in scintilla/include/Scintilla.h and check if they
# are mentioned in scintilla/doc/ScintillaDoc.html.
import string
srcRoot = "../.."
incFileName = srcRoot + "/scintilla/include/Scintilla.h"
docFileName = srcRoot + "/scintilla/doc/ScintillaDoc.html"
# Characters that may appear inside an identifier: letters, digits and '_'.
# string.ascii_letters replaces the Python-2-only, locale-dependent
# string.letters (its default value was ascii_letters anyway).
identCharacters = "_" + string.ascii_letters + string.digits
# Convert all punctuation characters except '_' into spaces.
def depunctuate(s):
    """Replace every non-identifier character in s with a space.

    Characters in the module-level identCharacters (letters, digits, '_')
    are kept; everything else becomes ' ', so the result can be split()
    into candidate identifiers.
    """
    # Build the result with join instead of repeated string concatenation,
    # which was O(n^2) on long lines.
    return "".join(ch if ch in identCharacters else " " for ch in s)
# Collect every #define'd identifier from the header; value 0 = "not yet
# seen in the documentation".
symbols = {}
with open(incFileName, "rt") as incFile:
    for line in incFile:
        if line.startswith("#define"):
            identifier = line.split()[1]
            symbols[identifier] = 0
# Mark every symbol that appears anywhere in the documentation.
with open(docFileName, "rt") as docFile:
    for line in docFile:
        for word in depunctuate(line).split():
            # Dict membership test instead of `in symbols.keys()`
            # (O(1) vs an O(n) list scan under Python 2).
            if word in symbols:
                symbols[word] = 1
# Report undocumented symbols in sorted order. print(x) and sorted() work
# identically under Python 2 and 3, unlike the original print statement
# and list.sort() on keys().
for identifier in sorted(symbols):
    if not symbols[identifier]:
        print(identifier)
| 370 |
903 | /*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.build.transforms;
import java.util.Collections;
import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import software.amazon.smithy.build.TransformContext;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.shapes.Shape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.model.traits.TraitDefinition;
import software.amazon.smithy.model.transform.ModelTransformer;
import software.amazon.smithy.utils.Pair;
/**
* {@code excludeTraits} removes trait definitions and traits from
* shapes when a trait name matches any of the values given in
* {@code traits}.
*
* <p>Arguments that end with "#" exclude the traits of an entire
* namespace. Trait shape IDs that are relative are assumed to be
* part of the {@code smithy.api} prelude namespace.
*/
public final class ExcludeTraits extends BackwardCompatHelper<ExcludeTraits.Config> {

    private static final Logger LOGGER = Logger.getLogger(ExcludeTraits.class.getName());

    /**
     * {@code excludeTraits} configuration settings.
     */
    public static final class Config {
        private Set<String> traits = Collections.emptySet();

        /**
         * Gets the list of trait shape IDs/namespaces to exclude.
         *
         * @return shape IDs to exclude.
         */
        public Set<String> getTraits() {
            return traits;
        }

        /**
         * Sets the list of trait shape IDs/namespaces to exclude.
         *
         * <p>Relative shape IDs are considered traits in the prelude
         * namespace, {@code smithy.api}. Strings ending in "#" are
         * used to exclude traits from an entire namespace.
         *
         * @param traits Traits to exclude.
         */
        public void setTraits(Set<String> traits) {
            this.traits = traits;
        }
    }

    @Override
    public Class<Config> getConfigType() {
        return Config.class;
    }

    @Override
    public String getName() {
        return "excludeTraits";
    }

    @Override
    String getBackwardCompatibleNameMapping() {
        return "traits";
    }

    @Override
    public Model transformWithConfig(TransformContext context, Config config) {
        // Split the configured values into explicit trait IDs and whole
        // namespaces (entries ending in "#").
        Pair<Set<ShapeId>, Set<String>> parsed = TraitRemovalUtils.parseTraits(config.getTraits());
        Set<ShapeId> excludedIds = parsed.getLeft();
        Set<String> excludedNamespaces = parsed.getRight();
        LOGGER.info(() -> "Excluding traits by ID " + excludedIds + " and namespaces " + excludedNamespaces);

        Model model = context.getModel();
        ModelTransformer transformer = context.getTransformer();

        // Find every trait-definition shape that matches an excluded ID or
        // namespace, then drop those shapes from the model.
        Set<Shape> traitShapesToRemove = model.getShapesWithTrait(TraitDefinition.class).stream()
                .filter(shape -> TraitRemovalUtils.matchesTraitDefinition(shape, excludedIds, excludedNamespaces))
                .collect(Collectors.toSet());

        if (!traitShapesToRemove.isEmpty()) {
            LOGGER.info(() -> "Excluding traits: " + traitShapesToRemove);
        }

        return transformer.removeShapes(model, traitShapesToRemove);
    }
}
| 1,311 |
348 | {"nom":"Nelling","circ":"5ème circonscription","dpt":"Moselle","inscrits":224,"abs":127,"votants":97,"blancs":4,"nuls":0,"exp":93,"res":[{"nuance":"LR","nom":"<NAME>","voix":54},{"nuance":"REM","nom":"<NAME>","voix":39}]} | 89 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.mediaservices;
import com.azure.core.util.Context;
import com.azure.resourcemanager.mediaservices.models.Job;
import com.azure.resourcemanager.mediaservices.models.JobInput;
import com.azure.resourcemanager.mediaservices.models.JobOutput;
import com.azure.resourcemanager.mediaservices.models.Priority;
import java.util.Arrays;
/** Samples for Jobs Update. */
/** Samples for Jobs Update. */
public final class JobsUpdateSamples {
    /**
     * Sample code: Update a Job.
     *
     * @param mediaServicesManager Entry point to MediaServicesManager. This Swagger was generated by the API Framework.
     */
    public static void updateAJob(com.azure.resourcemanager.mediaservices.MediaServicesManager mediaServicesManager) {
        // Look up the existing job to obtain an updatable resource handle.
        Job job = mediaServicesManager
            .jobs()
            .getWithResponse("contosoresources", "contosomedia", "exampleTransform", "job1", Context.NONE)
            .getValue();
        // Apply the updated description, input/output definitions and priority.
        job.update()
            .withDescription("Example job to illustrate update.")
            .withInput(new JobInput())
            .withOutputs(Arrays.asList(new JobOutput()))
            .withPriority(Priority.HIGH)
            .apply();
    }
}
| 511 |
479 | <reponame>jeremyvdw/aurora
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.aurora.benchmark;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.util.Modules;
import org.apache.aurora.common.stats.StatsProvider;
import org.apache.aurora.common.util.Clock;
import org.apache.aurora.common.util.testing.FakeClock;
import org.apache.aurora.scheduler.base.Query;
import org.apache.aurora.scheduler.storage.Storage;
import org.apache.aurora.scheduler.storage.TaskStore;
import org.apache.aurora.scheduler.storage.entities.IJobKey;
import org.apache.aurora.scheduler.storage.entities.IScheduledTask;
import org.apache.aurora.scheduler.storage.mem.MemStorageModule;
import org.apache.aurora.scheduler.testing.FakeStatsProvider;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
public class TaskStoreBenchmarks {

    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.SECONDS)
    @Warmup(iterations = 1, time = 10, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 5, time = 10, timeUnit = TimeUnit.SECONDS)
    @Fork(1)
    @State(Scope.Thread)
    public abstract static class AbstractFetchTasksBenchmark {
        protected Storage storage;
        protected IJobKey job;

        public abstract void setUp();

        @Param({"10000", "50000", "100000"})
        protected int numTasks;

        /**
         * Creates an in-memory {@link Storage} with fake stats and clock
         * bindings. Extracted here because both concrete benchmarks
         * previously duplicated this wiring verbatim.
         */
        protected static Storage createStorage() {
            return Guice.createInjector(
                Modules.combine(
                    new MemStorageModule(),
                    new AbstractModule() {
                        @Override
                        protected void configure() {
                            bind(StatsProvider.class).toInstance(new FakeStatsProvider());
                            bind(Clock.class).toInstance(new FakeClock());
                        }
                    }))
                .getInstance(Storage.class);
        }

        /** Saves {@code size} generated tasks and records their job key for scoped queries. */
        protected void createTasks(int size) {
            storage.write((Storage.MutateWork.NoResult.Quiet) storeProvider -> {
                TaskStore.Mutable taskStore = storeProvider.getUnsafeTaskStore();
                Set<IScheduledTask> tasks = new Tasks.Builder().build(size);
                job = tasks.stream().findFirst().get().getAssignedTask().getTask().getJob();
                taskStore.saveTasks(tasks);
            });
        }

        /** Clears the task store so each iteration starts from an empty state. */
        protected void deleteTasks() {
            storage.write((Storage.MutateWork.NoResult.Quiet) storeProvider -> {
                TaskStore.Mutable taskStore = storeProvider.getUnsafeTaskStore();
                taskStore.deleteAllTasks();
            });
        }
    }

    /** Benchmarks an unscoped fetch of every task in the store. */
    public static class FetchAll extends AbstractFetchTasksBenchmark {
        @Setup(Level.Trial)
        @Override
        public void setUp() {
            storage = createStorage();
        }

        @Setup(Level.Iteration)
        public void setUpIteration() {
            createTasks(numTasks);
        }

        @TearDown(Level.Iteration)
        public void tearDownIteration() {
            deleteTasks();
        }

        @Benchmark
        public int run() {
            return storage.read(store -> store.getTaskStore().fetchTasks(Query.unscoped())).size();
        }
    }

    /** Benchmarks a job/instance-scoped fetch that can use the store's index. */
    public static class IndexedFetchAndFilter extends AbstractFetchTasksBenchmark {
        @Setup(Level.Trial)
        @Override
        public void setUp() {
            storage = createStorage();
        }

        @Setup(Level.Iteration)
        public void setUpIteration() {
            createTasks(numTasks);
        }

        @TearDown(Level.Iteration)
        public void tearDownIteration() {
            deleteTasks();
        }

        @Benchmark
        public int run() {
            return storage.read(
                store -> store.getTaskStore().fetchTasks(Query.instanceScoped(job, 0))).size();
        }
    }
}
| 1,917 |
1,205 | <filename>nn.py<gh_stars>1000+
import theano
import theano.tensor as T
from utils import shared
class HiddenLayer(object):
    """
    Hidden layer with or without bias.
    Input: tensor of dimension (dims*, input_dim)
    Output: tensor of dimension (dims*, output_dim)
    """
    def __init__(self, input_dim, output_dim, bias=True, activation='sigmoid',
                 name='hidden_layer'):
        """
        input_dim: size of the last input dimension
        output_dim: size of the produced last dimension
        bias: whether a bias vector is added after the linear transform
        activation: None, 'tanh', 'sigmoid' or 'softmax'
        name: prefix used when naming the shared parameters
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Fix: keep the on/off flag in its own attribute. The original stored
        # it in self.bias and then overwrote self.bias with the shared bias
        # parameter below, so bias=False could never actually disable it.
        self.use_bias = bias
        self.name = name
        if activation is None:
            self.activation = None
        elif activation == 'tanh':
            self.activation = T.tanh
        elif activation == 'sigmoid':
            self.activation = T.nnet.sigmoid
        elif activation == 'softmax':
            self.activation = T.nnet.softmax
        else:
            # Fix: the original format string had no %s placeholder, which
            # raised a TypeError instead of the intended message.
            raise Exception("Unknown activation function: %s" % activation)
        # Initialize weights and bias
        self.weights = shared((input_dim, output_dim), name + '__weights')
        self.bias = shared((output_dim,), name + '__bias')
        # Define parameters
        if self.use_bias:
            self.params = [self.weights, self.bias]
        else:
            self.params = [self.weights]

    def link(self, input):
        """
        The input has to be a tensor with the right
        most dimension equal to input_dim.
        """
        self.input = input
        self.linear_output = T.dot(self.input, self.weights)
        if self.use_bias:
            self.linear_output = self.linear_output + self.bias
        if self.activation is None:
            self.output = self.linear_output
        else:
            self.output = self.activation(self.linear_output)
        return self.output
class EmbeddingLayer(object):
    """
    Lookup-table layer mapping integer indices to dense embedding vectors.
    Input: tensor of dimension (dim*) with values in range(0, input_dim)
    Output: tensor of dimension (dim*, output_dim)
    """
    def __init__(self, input_dim, output_dim, name='embedding_layer'):
        """
        Typically, input_dim is the vocabulary size and output_dim the
        embedding dimension.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name
        # Randomly initialized embedding matrix of shape (input_dim, output_dim).
        self.embeddings = shared((input_dim, output_dim),
                                 self.name + '__embeddings')
        # The embedding matrix is the layer's only trainable parameter.
        self.params = [self.embeddings]

    def link(self, input):
        """
        Look up the embedding rows for the given indices.
        Input: tensor of shape (dim*)
        Output: tensor of shape (dim*, output_dim)
        """
        self.input = input
        self.output = self.embeddings[self.input]
        return self.output
class DropoutLayer(object):
    """
    Dropout layer: zeroes each input unit independently with probability p.
    """
    def __init__(self, p=0.5, name='dropout_layer'):
        """
        p has to be between 0 and 1 (1 excluded). p is the probability of
        dropping out a unit, so p == 0 makes this an identity layer.
        """
        assert 0. <= p < 1.
        self.p = p
        # Fixed seed so dropout masks are reproducible across runs.
        self.rng = T.shared_randomstreams.RandomStreams(seed=123456)
        self.name = name

    def link(self, input):
        """
        Apply a Bernoulli keep-mask to the input (identity when p == 0).
        """
        if self.p > 0:
            keep_mask = self.rng.binomial(n=1, p=1-self.p, size=input.shape,
                                          dtype=theano.config.floatX)
            self.output = input * keep_mask
        else:
            self.output = input
        return self.output
class LSTM(object):
    """
    Long short-term memory (LSTM). Can be used with or without batches.
    Without batches:
        Input: matrix of dimension (sequence_length, input_dim)
        Output: vector of dimension (output_dim)
    With batches:
        Input: tensor3 of dimension (batch_size, sequence_length, input_dim)
        Output: matrix of dimension (batch_size, output_dim)

    This is a coupled-gate variant: the forget gate is (1 - input_gate),
    so the dedicated forget-gate parameters stay commented out.
    """
    def __init__(self, input_dim, hidden_dim, with_batch=True, name='LSTM'):
        """
        Initialize neural network.

        input_dim: dimensionality of each input vector
        hidden_dim: dimensionality of the hidden/cell state
        with_batch: whether link() receives a batch of sequences
        name: prefix used to name the shared parameters
        """
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.with_batch = with_batch
        self.name = name
        # Input gate weights (w_ci is a peephole connection from the cell).
        self.w_xi = shared((input_dim, hidden_dim), name + '__w_xi')
        self.w_hi = shared((hidden_dim, hidden_dim), name + '__w_hi')
        self.w_ci = shared((hidden_dim, hidden_dim), name + '__w_ci')
        # Forget gate weights -- unused: the recurrence couples the forget
        # gate to the input gate as (1 - i_t), see recurrence() in link().
        # self.w_xf = shared((input_dim, hidden_dim), name + '__w_xf')
        # self.w_hf = shared((hidden_dim, hidden_dim), name + '__w_hf')
        # self.w_cf = shared((hidden_dim, hidden_dim), name + '__w_cf')
        # Output gate weights (w_co peeks at the freshly computed cell state).
        self.w_xo = shared((input_dim, hidden_dim), name + '__w_xo')
        self.w_ho = shared((hidden_dim, hidden_dim), name + '__w_ho')
        self.w_co = shared((hidden_dim, hidden_dim), name + '__w_co')
        # Cell weights
        self.w_xc = shared((input_dim, hidden_dim), name + '__w_xc')
        self.w_hc = shared((hidden_dim, hidden_dim), name + '__w_hc')
        # Initialize the bias vectors, c_0 and h_0 to zero vectors
        self.b_i = shared((hidden_dim,), name + '__b_i')
        # self.b_f = shared((hidden_dim,), name + '__b_f')
        self.b_c = shared((hidden_dim,), name + '__b_c')
        self.b_o = shared((hidden_dim,), name + '__b_o')
        self.c_0 = shared((hidden_dim,), name + '__c_0')
        self.h_0 = shared((hidden_dim,), name + '__h_0')
        # Define parameters
        self.params = [self.w_xi, self.w_hi, self.w_ci,
                       # self.w_xf, self.w_hf, self.w_cf,
                       self.w_xo, self.w_ho, self.w_co,
                       self.w_xc, self.w_hc,
                       self.b_i, self.b_c, self.b_o, # self.b_f,
                       self.c_0, self.h_0]
    def link(self, input):
        """
        Propagate the input through the network and return the last hidden
        vector. The whole sequence is also accessible via self.h, but
        where self.h of shape (sequence_length, batch_size, output_dim)
        """
        def recurrence(x_t, c_tm1, h_tm1):
            # Input gate, with a peephole connection on the previous cell.
            i_t = T.nnet.sigmoid(T.dot(x_t, self.w_xi) +
                                 T.dot(h_tm1, self.w_hi) +
                                 T.dot(c_tm1, self.w_ci) +
                                 self.b_i)
            # f_t = T.nnet.sigmoid(T.dot(x_t, self.w_xf) +
            #                      T.dot(h_tm1, self.w_hf) +
            #                      T.dot(c_tm1, self.w_cf) +
            #                      self.b_f)
            # Coupled forget gate: (1 - i_t) replaces a separate f_t.
            c_t = ((1 - i_t) * c_tm1 + i_t * T.tanh(T.dot(x_t, self.w_xc) +
                   T.dot(h_tm1, self.w_hc) + self.b_c))
            # Output gate, peeking at the *new* cell state c_t.
            o_t = T.nnet.sigmoid(T.dot(x_t, self.w_xo) +
                                 T.dot(h_tm1, self.w_ho) +
                                 T.dot(c_t, self.w_co) +
                                 self.b_o)
            h_t = o_t * T.tanh(c_t)
            return [c_t, h_t]
        # If we use batches, we have to permute the first and second dimension
        # (scan iterates over the leading axis, which must be time).
        if self.with_batch:
            self.input = input.dimshuffle(1, 0, 2)
            outputs_info = [T.alloc(x, self.input.shape[1], self.hidden_dim)
                            for x in [self.c_0, self.h_0]]
        else:
            self.input = input
            outputs_info = [self.c_0, self.h_0]
        [_, h], _ = theano.scan(
            fn=recurrence,
            sequences=self.input,
            outputs_info=outputs_info,
            n_steps=self.input.shape[0]
        )
        self.h = h
        self.output = h[-1]
        return self.output
def log_sum_exp(x, axis=None):
    """
    Numerically stable log(sum(exp(x))) along the given axis
    (i.e. summing probabilities that live in log-space).
    """
    shifted_max = x.max(axis=axis, keepdims=True)
    reduced_max = x.max(axis=axis)
    return reduced_max + T.log(T.exp(x - shifted_max).sum(axis=axis))
def forward(observations, transitions, viterbi=False,
            return_alpha=False, return_best_sequence=False):
    """
    CRF forward algorithm / Viterbi decoding.

    Takes as input:
        - observations, sequence of shape (n_steps, n_classes)
        - transitions, sequence of shape (n_classes, n_classes)
    Probabilities must be given in the log space.
    Compute alpha, matrix of size (n_steps, n_classes), such that
    alpha[i, j] represents one of these 2 values:
        - the probability that the real path at node i ends in j
        - the maximum probability of a path finishing in j at node i (Viterbi)
    Returns one of these 2 values:
        - alpha
        - the final probability, which can be:
            - the sum of the probabilities of all paths
            - the probability of the best path (Viterbi)
    """
    # Decoding the best sequence only makes sense in Viterbi mode without
    # also requesting the raw alpha matrix.
    assert not return_best_sequence or (viterbi and not return_alpha)
    def recurrence(obs, previous, transitions):
        # Broadcast so that entry (i, j) = previous[i] + obs[j] + transitions[i, j].
        previous = previous.dimshuffle(0, 'x')
        obs = obs.dimshuffle('x', 0)
        if viterbi:
            scores = previous + obs + transitions
            out = scores.max(axis=0)
            if return_best_sequence:
                # out2 holds the backpointers (best predecessor per class).
                out2 = scores.argmax(axis=0)
                return out, out2
            else:
                return out
        else:
            # Forward algorithm: log-sum over all predecessor classes.
            return log_sum_exp(previous + obs + transitions, axis=0)
    initial = observations[0]
    alpha, _ = theano.scan(
        fn=recurrence,
        outputs_info=(initial, None) if return_best_sequence else initial,
        sequences=[observations[1:]],
        non_sequences=transitions
    )
    if return_alpha:
        return alpha
    elif return_best_sequence:
        # Walk the backpointers (alpha[1]) in reverse from the best final
        # class to recover the highest-scoring path.
        sequence, _ = theano.scan(
            fn=lambda beta_i, previous: beta_i[previous],
            outputs_info=T.cast(T.argmax(alpha[0][-1]), 'int32'),
            sequences=T.cast(alpha[1][::-1], 'int32')
        )
        sequence = T.concatenate([sequence[::-1], [T.argmax(alpha[0][-1])]])
        return sequence
    else:
        if viterbi:
            return alpha[-1].max(axis=0)
        else:
            return log_sum_exp(alpha[-1], axis=0)
| 4,976 |
1,172 | <filename>ChasingTrainFramework_GeneralOneClassDetection/logging_GOCD.py
# -*- coding:utf-8 -*-
import logging
import os
import sys
'''
logging module
'''
def init_logging(log_file_path=None, log_file_mode='w', log_overwrite_flag=False, log_level=logging.INFO):
    """Configure the root logger.

    A console (stream) handler is always attached. If log_file_path is
    given, a file handler is attached as well; an already-existing log
    file is only reused when log_overwrite_flag is True, otherwise the
    process exits.

    :param log_file_path: target log file, or None for console-only logging
    :param log_file_mode: mode passed to FileHandler ('w' truncates, 'a' appends)
    :param log_overwrite_flag: allow handling an already-existing log file
    :param log_level: root logger level (default logging.INFO)
    """
    # basically, the basic log offers console output
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s[%(levelname)s]: %(message)s')
    console_handler.setFormatter(formatter)
    logging.getLogger().setLevel(log_level)
    logging.getLogger().addHandler(console_handler)
    if not log_file_path:
        print('No log file is specified. The log information is only displayed in console.')
        return
    # check that the log_file is already existed or not
    if not os.path.exists(log_file_path):
        location_dir = os.path.dirname(log_file_path)
        # Fix: guard against an empty dirname (bare file names such as
        # 'run.log'); os.makedirs('') raises an OSError.
        if location_dir and not os.path.exists(location_dir):
            os.makedirs(location_dir)
        file_handler = logging.FileHandler(filename=log_file_path, mode=log_file_mode)
        file_handler.setFormatter(formatter)
        logging.getLogger().addHandler(file_handler)
    elif log_overwrite_flag:
        print('The file [%s] is existed. And it is to be handled according to the arg [file_mode](the default is \'w\').' % log_file_path)
        file_handler = logging.FileHandler(filename=log_file_path, mode=log_file_mode)
        file_handler.setFormatter(formatter)
        logging.getLogger().addHandler(file_handler)
    else:
        # Fix: the original print lacked the '% log_file_path' argument, so
        # the '[%s]' placeholder was printed literally.
        print('The file [%s] is existed. The [overwrite_flag] is False, please change the log file name.' % log_file_path)
        sys.exit(0)
def temp_test():
    """Manual smoke test: route DEBUG-level logging to ./test.log."""
    target_path = './test.log'
    open_mode = 'w'
    init_logging(log_file_path=target_path, log_file_mode=open_mode,
                 log_overwrite_flag=True, log_level=logging.DEBUG)
temp_test()
logging.info('test info')
| 777 |
5,250 | <reponame>jiandiao/flowable-engine
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.spring.test.jpa;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.HashMap;
import java.util.Map;
import org.flowable.engine.repository.Deployment;
import org.flowable.engine.runtime.ProcessInstance;
import org.flowable.spring.impl.test.SpringFlowableTestCase;
import org.flowable.task.api.Task;
import org.junit.jupiter.api.Test;
import org.springframework.test.context.ContextConfiguration;
/**
* @author <NAME>
*/
@ContextConfiguration(locations = "JPASpringTest-context.xml")
public class JpaTest extends SpringFlowableTestCase {

    @Test
    public void testJpaVariableHappyPath() {
        before();
        Map<String, Object> variables = new HashMap<>();
        variables.put("customerName", "<NAME>");
        variables.put("amount", 15000L);
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("LoanRequestProcess", variables);

        // Variable should be present containing the loanRequest created by the
        // spring bean
        Object value = runtimeService.getVariable(processInstance.getId(), "loanRequest");
        assertThat(value).isInstanceOf(LoanRequest.class);
        LoanRequest request = (LoanRequest) value;
        assertThat(request.getCustomerName()).isEqualTo("<NAME>");
        assertThat(request.getAmount().longValue()).isEqualTo(15000L);
        assertThat(request.isApproved()).isFalse();

        // We will approve the request, which will update the entity
        variables = new HashMap<>();
        variables.put("approvedByManager", Boolean.TRUE);
        Task task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
        assertThat(task).isNotNull();
        taskService.complete(task.getId(), variables);

        // If approved, the processInstance should be finished, gateway based
        // on loanRequest.approved value
        assertThat(runtimeService.createProcessInstanceQuery().processInstanceId(processInstance.getId()).count()).isZero();

        // Cleanup
        deleteDeployments();
    }

    @Test
    public void testJpaVariableDisapprovalPath() {
        before();
        Map<String, Object> variables = new HashMap<>();
        variables.put("customerName", "<NAME>");
        variables.put("amount", 50000);
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("LoanRequestProcess", variables);

        // Variable should be present containing the loanRequest created by the
        // spring bean
        Object value = runtimeService.getVariable(processInstance.getId(), "loanRequest");
        assertThat(value).isInstanceOf(LoanRequest.class);
        LoanRequest request = (LoanRequest) value;
        assertThat(request.getCustomerName()).isEqualTo("<NAME>");
        assertThat(request.getAmount().longValue()).isEqualTo(50000L);
        assertThat(request.isApproved()).isFalse();

        // We will disapprove the request, which will update the entity
        variables = new HashMap<>();
        variables.put("approvedByManager", Boolean.FALSE);
        Task task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
        assertThat(task).isNotNull();
        taskService.complete(task.getId(), variables);

        // Fix: the original discarded this call's return value and re-asserted
        // on the stale "request" reference; re-fetch before asserting.
        value = runtimeService.getVariable(processInstance.getId(), "loanRequest");
        request = (LoanRequest) value;
        assertThat(request.isApproved()).isFalse();

        // If disapproved, an extra task will be available instead of the
        // process ending
        task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
        assertThat(task).isNotNull();
        assertThat(task.getName()).isEqualTo("Send rejection letter");

        // Cleanup
        deleteDeployments();
    }

    /** Deploys the process definitions used by both tests. */
    protected void before() {
        String[] defs = { "org/flowable/spring/test/jpa/JPASpringTest.bpmn20.xml" };
        for (String pd : defs) {
            repositoryService.createDeployment().addClasspathResource(pd).deploy();
        }
    }

    @Override
    protected void deleteDeployments() {
        for (Deployment deployment : repositoryService.createDeploymentQuery().list()) {
            repositoryService.deleteDeployment(deployment.getId(), true);
        }
    }
}
| 1,677 |
335 | <reponame>OotinnyoO1/N64Wasm
#ifndef GLN64_H
#define GLN64_H
#ifdef __cplusplus
extern "C" {
#endif
#include "m64p_config.h"
#include "stdio.h"
/* Uncomment to enable debug output in the plugin. */
//#define DEBUG
/* Plugin identification reported to the Mupen64Plus core. */
#define PLUGIN_NAME "gles2n64"
#define PLUGIN_VERSION 0x000005
#define PLUGIN_API_VERSION 0x020200
/* Alias the generic callback symbol to this plugin's implementation. */
#define renderCallback gln64RenderCallback
/* Frame-render callback pointer. NOTE(review): exact invocation timing is
   defined by the core/frontend, not visible in this header — confirm. */
extern void (*renderCallback)();
#ifdef __cplusplus
}
#endif
#endif
| 168 |
492 | #!/usr/bin/env python
"""SNMP library for Python
SNMP v1/v2c/v3 engine and Standard Applications suite written in pure-Python.
Supports Manager/Agent/Proxy roles, Manager/Agent-side MIBs, asynchronous
operation and multiple network transports.
"""
import os
import re
import sys
# PyPI (Trove) classifiers advertised for this package; split into a list
# near the bottom of this script before being passed to setup().
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: Information Technology
Intended Audience :: System Administrators
Intended Audience :: Telecommunications Industry
License :: OSI Approved :: BSD License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Topic :: Communications
Topic :: System :: Monitoring
Topic :: System :: Networking :: Monitoring
Topic :: Software Development :: Libraries :: Python Modules
"""
def howto_install_setuptools():
    """Print instructions for manually bootstrapping setuptools."""
    print("""\
Error: You need setuptools Python package!
It's very easy to install it, just type:
    wget https://bootstrap.pypa.io/ez_setup.py
    python ez_setup.py
Then you could make eggs from this package.
""")
# (major, minor) of the running interpreter; also reused below when
# resolving requirements that carry python_version environment markers.
py_version = sys.version_info[:2]
if py_version < (2, 6):
    print("ERROR: this package requires Python 2.6 or later!")
    sys.exit(1)
# Read the raw requirements and resolve PEP 508 python_version environment
# markers by hand (old setuptools versions cannot parse them). Use a
# context manager so the file handle is closed deterministically instead of
# being leaked to the garbage collector.
with open('requirements.txt') as requirements_file:
    requires = [ln.strip() for ln in requirements_file.readlines()]
resolved_requires = []
for requirement in requires:
    match = re.match(
        r'(.*?)\s*;\s*python_version\s*([<>=!~]+)\s*\'(.*?)\'', requirement)
    if not match:
        # No environment marker - keep the requirement as-is.
        resolved_requires.append(requirement)
        continue
    package, condition, expected_py = match.groups()
    expected_py = tuple([int(x) for x in expected_py.split('.')])
    # Keep the package only if the marker matches the running interpreter.
    if py_version == expected_py and condition in ('<=', '==', '>='):
        resolved_requires.append(package)
    elif py_version < expected_py and condition in ('<=', '<'):
        resolved_requires.append(package)
    elif py_version > expected_py and condition in ('>=', '>'):
        resolved_requires.append(package)
try:
    import setuptools
    setup, Command = setuptools.setup, setuptools.Command
    observed_version = [int(x) for x in setuptools.__version__.split('.')[:3]]
    required_version = [36, 2, 0]
    # NOTE(etingof): require fresh setuptools to build proper wheels
    # See also: https://hynek.me/articles/conditional-python-dependencies/
    if ('bdist_wheel' in sys.argv and
            observed_version < required_version):
        print("ERROR: your wheels won't come out round with setuptools %s! "
              "Upgrade to %s and try again." % (
                  '.'.join(str(x) for x in observed_version),
                  '.'.join(str(x) for x in required_version)))
        sys.exit(1)
    # NOTE(etingof): older setuptools fail at parsing python_version
    # markers, so fall back to the manually resolved requirement list.
    if observed_version < required_version:
        requires = resolved_requires
    params = {
        'install_requires': requires,
        'zip_safe': True
    }
except ImportError:
    # No setuptools at all: wheels/eggs are impossible; plain distutils
    # installs still work with the pre-resolved requirements.
    if 'bdist_wheel' in sys.argv or 'bdist_egg' in sys.argv:
        howto_install_setuptools()
        sys.exit(1)
    from distutils.core import setup
    params = {}
    if py_version > (2, 4):
        # distutils expects the "name(version-spec)" requirement syntax.
        params['requires'] = [
            re.sub(r'(.*?)([<>=!~]+)(.*)', r'\g<1>\g<2>(\g<3>)', r) for r in resolved_requires
        ]
doclines = [x.strip() for x in (__doc__ or '').split('\n')]
params.update({
'name': 'pysnmp',
'version': open(os.path.join('pysnmp', '__init__.py')).read().split('\'')[1],
'description': doclines[0],
'long_description': '\n'.join(doclines[1:]),
'maintainer': '<NAME> <<EMAIL>>',
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'https://github.com/etingof/pysnmp',
'classifiers': [x for x in classifiers.split('\n') if x],
'platforms': ['any'],
'license': 'BSD-2-Clause',
'packages': ['pysnmp',
'pysnmp.smi',
'pysnmp.smi.mibs',
'pysnmp.smi.mibs.instances',
'pysnmp.carrier',
'pysnmp.carrier.asyncore',
'pysnmp.carrier.asyncore.dgram',
'pysnmp.carrier.twisted',
'pysnmp.carrier.twisted.dgram',
'pysnmp.carrier.asyncio',
'pysnmp.carrier.asyncio.dgram',
'pysnmp.entity',
'pysnmp.entity.rfc3413',
'pysnmp.hlapi',
'pysnmp.hlapi.v1arch',
'pysnmp.hlapi.v1arch.asyncio',
'pysnmp.hlapi.v1arch.asyncore',
'pysnmp.hlapi.v1arch.asyncore.sync',
'pysnmp.hlapi.v3arch',
'pysnmp.hlapi.v3arch.asyncio',
'pysnmp.hlapi.v3arch.asyncore',
'pysnmp.hlapi.v3arch.asyncore.sync',
'pysnmp.hlapi.v3arch.twisted',
'pysnmp.proto',
'pysnmp.proto.mpmod',
'pysnmp.proto.secmod',
'pysnmp.proto.secmod.rfc3414',
'pysnmp.proto.secmod.rfc3414.auth',
'pysnmp.proto.secmod.rfc3414.priv',
'pysnmp.proto.secmod.rfc3826',
'pysnmp.proto.secmod.rfc3826.priv',
'pysnmp.proto.secmod.rfc7860',
'pysnmp.proto.secmod.rfc7860.auth',
'pysnmp.proto.secmod.eso',
'pysnmp.proto.secmod.eso.priv',
'pysnmp.proto.acmod',
'pysnmp.proto.proxy',
'pysnmp.proto.api']
})
setup(**params)
| 2,697 |
1,286 | //////////////////////////////////////////////////////////////////////////////
// UDTRefs bit maps
#include "pdbimpl.h"
#include "dbiimpl.h"
#include <stdio.h>
// Construct a UDT reference tracker covering 'cti_' non-primitive type
// indices.  When 'fGrowRefSets_' is true, fNoteRef() may extend the
// tracked range on demand.
UDTRefs::UDTRefs(unsigned int cti_, bool fGrowRefSets_) :
    cti(cti_), ucur(0), fGrowRefSets(fGrowRefSets_)
{
}
// Nothing to release explicitly; members clean up in their own destructors.
UDTRefs::~UDTRefs()
{
}
// Map a non-primitive type index onto its zero-based slot in the ref sets.
unsigned int UDTRefs::normalize(TI ti)
{
    assert(!CV_IS_PRIMITIVE(ti));
    const unsigned int uSlot = ti - CV_FIRST_NONPRIM;
    assert(uSlot <= cti);
    return uSlot;
}
// Inverse of normalize(): recover the type index from a slot number.
TI UDTRefs::denormalize(unsigned int u)
{
    const TI tiResult = u + CV_FIRST_NONPRIM;
    return tiResult;
}
// Record that type index 'ti' has been referenced.  If growing is enabled
// and 'ti' lies beyond the current range, widen the range first so that
// normalize()'s bounds assertion holds.
BOOL UDTRefs::fNoteRef(TI ti)
{
    const unsigned int uSlot = ti - CV_FIRST_NONPRIM;
    if (fGrowRefSets && uSlot > cti) {
        cti = uSlot;
    }
    return isetRefs.add(normalize(ti));
}
// Hand out (via *pti) the next referenced-but-not-yet-processed type index,
// scanning circularly starting from the position after the last one
// returned, and mark it processed.  When everything referenced has been
// processed (or nothing is tracked), *pti is set to tiNil.
// NOTE(review): normalize() allows slots 0..cti inclusive, but the scan
// below wraps modulo cti, so slot 'cti' itself appears unreachable here --
// confirm whether that is intentional.
BOOL UDTRefs::tiNext(TI *pti)
{
    if (cti) {
        unsigned int u = ucur;
        do {
            if (isetRefs.contains(u) && !isetProcessed.contains(u)) {
                *pti = denormalize(u);
                ucur = (u + 1) % cti;   // resume after this slot next call
                return isetProcessed.add(u);
            }
            u = (u + 1) % cti;
        } while (u != ucur);            // stop after one full lap
    }
    *pti = tiNil;                       // nothing left to process
    return TRUE;
}
| 574 |
60,067 | #pragma once
#include <torch/arg.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/types.h>
namespace torch {
namespace nn {
/// Options for the `AdaptiveLogSoftmaxWithLoss` module.
///
/// Example:
/// ```
/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}).div_value(2.).head_bias(true));
/// ```
struct TORCH_API AdaptiveLogSoftmaxWithLossOptions {
  /* implicit */ AdaptiveLogSoftmaxWithLossOptions(int64_t in_features, int64_t n_classes, std::vector<int64_t> cutoffs);
  /// Number of features in the input tensor
  TORCH_ARG(int64_t, in_features);
  /// Number of classes in the dataset
  TORCH_ARG(int64_t, n_classes);
  /// Cutoffs used to assign targets to their buckets
  /// NOTE(review): presumably the cutoffs must be unique, sorted ascending
  /// and less than n_classes (as in the Python nn.AdaptiveLogSoftmaxWithLoss)
  /// -- confirm against the module implementation that validates them.
  TORCH_ARG(std::vector<int64_t>, cutoffs);
  /// value used as an exponent to compute sizes of the clusters. Default: 4.0
  TORCH_ARG(double, div_value) = 4.;
  /// If ``true``, adds a bias term to the 'head' of
  /// the adaptive softmax. Default: false
  TORCH_ARG(bool, head_bias) = false;
};
} // namespace nn
} // namespace torch
| 381 |
358 | <gh_stars>100-1000
#pragma once
#include <QCheckBox>
#include <QSpinBox>
#include <agz/editor/renderer/export/export_renderer.h>
#include <agz/editor/ui/utility/real_slider.h>
#include <agz/editor/ui/utility/vec_input.h>
AGZ_EDITOR_BEGIN
// Export settings widget for the PSSMLT path tracer (primary-sample-space
// Metropolis light transport on top of path tracing).  to_config() packs
// the widget state into a tracer config group; save_asset()/load_asset()
// (de)serialize the state in editor asset files.
class ExportRendererPSSMLTPT : public ExportRendererWidget
{
public:

    explicit ExportRendererPSSMLTPT(QWidget *parent = nullptr);

    RC<tracer::ConfigGroup> to_config() const override;

    void save_asset(AssetSaver &saver) const override;

    void load_asset(AssetLoader &loader) override;

private:

    QSpinBox *worker_count_ = nullptr;          // number of rendering workers
    QSlider *min_depth_ = nullptr;              // minimum path depth
    QSlider *max_depth_ = nullptr;              // maximum path depth
    QSlider *cont_prob_ = nullptr;              // Russian-roulette continuation probability
    QCheckBox *use_mis_ = nullptr;              // multiple importance sampling toggle
    QSpinBox *startup_sample_count_ = nullptr;  // presumably bootstrap samples seeding the chains -- confirm
    QSpinBox *mut_per_pixel_ = nullptr;         // mutations per pixel
    RealInput *sigma_ = nullptr;                // small-step mutation size
    RealSlider *large_step_prob_ = nullptr;     // probability of a large-step mutation
    QSpinBox *chain_count_ = nullptr;           // number of Markov chains
};
AGZ_EDITOR_END
| 395 |
805 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright 2018 <NAME>
import argparse
import os
import numpy as np
import sys
import re
parser = argparse.ArgumentParser(description="""Removes dev/test set lines
from the LOB corpus. Reads the
corpus from stdin, and writes it to stdout.""")
parser.add_argument('dev_text', type=str,
help='dev transcription location.')
parser.add_argument('test_text', type=str,
help='test transcription location.')
args = parser.parse_args()
def remove_punctuations(transcript):
    """Strip digits and punctuation characters from a transcript string.

    Keeps the original contract of returning a *list* of single characters
    (callers re-join the result with ``''.join``).

    :param transcript: input string.
    :return: list of retained characters, in their original order.
    """
    # Exactly the characters rejected by the original if/continue chain.
    excluded = set('+~?#=-!,.)(\':;"')
    return [char for char in transcript
            if not char.isdigit() and char not in excluded]
def remove_special_words(words):
    """Filter the corpus markup tokens ``'<SIC>'`` and ``'#'`` out of a
    word list.

    :param words: iterable of word tokens.
    :return: list of words with the special tokens removed.
    """
    return [word for word in words if word not in ('<SIC>', '#')]
# process and add dev/eval transcript in a list
# remove special words, punctuations, spaces between words
# lowercase the characters
def read_utterances(text_file_path):
with open(text_file_path, 'rt') as in_file:
for line in in_file:
words = line.strip().split()
words_wo_sw = remove_special_words(words)
transcript = ''.join(words_wo_sw[1:])
transcript = transcript.lower()
trans_wo_punct = remove_punctuations(transcript)
transcript = ''.join(trans_wo_punct)
utterance_dict[words_wo_sw[0]] = transcript
### main ###
# read utterances and add it to utterance_dict
utterance_dict = dict()
read_utterances(args.dev_text)
read_utterances(args.test_text)

# Read the corpus from stdin, keeping three parallel per-line views:
#   original_corpus_text        -- the raw line (printed back out if kept)
#   corpus_text_lowercase_wo_sc -- lowercased, special words/punct. removed
#   corpus_text_wo_sc           -- same normalization, case preserved
# NOTE(review): corpus_text_wo_sc is built but never consulted afterwards;
# it looks like dead code -- confirm before removing.
corpus_text_lowercase_wo_sc = list()
corpus_text_wo_sc = list()
original_corpus_text = list()
for line in sys.stdin:
    original_corpus_text.append(line)
    words = line.strip().split()
    words_wo_sw = remove_special_words(words)
    transcript = ''.join(words_wo_sw)
    transcript = transcript.lower()
    trans_wo_punct = remove_punctuations(transcript)
    transcript = ''.join(trans_wo_punct)
    corpus_text_lowercase_wo_sc.append(transcript)
    transcript = ''.join(words_wo_sw)
    trans_wo_punct = remove_punctuations(transcript)
    transcript = ''.join(trans_wo_punct)
    corpus_text_wo_sc.append(transcript)

# For every dev/test utterance, search each window of three consecutive
# normalized corpus lines; any window containing the utterance is dropped
# from the output.  Utterances never found anywhere are reported on stderr.
row_to_keep = [True for i in range(len(original_corpus_text))]
remaining_utterances = dict()
for line_id, line_to_find in utterance_dict.items():
    found_line = False
    for i in range(1, (len(corpus_text_lowercase_wo_sc) - 2)):
        # Combine 3 consecutive lines of the corpus into a single line
        prev_words = corpus_text_lowercase_wo_sc[i - 1].strip()
        curr_words = corpus_text_lowercase_wo_sc[i].strip()
        next_words = corpus_text_lowercase_wo_sc[i + 1].strip()
        new_line = prev_words + curr_words + next_words
        transcript = ''.join(new_line)  # NOTE(review): joining a str is a no-op
        if line_to_find in transcript:
            found_line = True
            row_to_keep[i-1] = False
            row_to_keep[i] = False
            row_to_keep[i+1] = False
    if not found_line:
        remaining_utterances[line_id] = line_to_find

# Emit the surviving corpus lines on stdout, then a summary on stderr.
for i in range(len(original_corpus_text)):
    transcript = original_corpus_text[i].strip()
    if row_to_keep[i]:
        print(transcript)

print('Sentences not removed from LOB: {}'.format(remaining_utterances), file=sys.stderr)
print('Total test+dev sentences: {}'.format(len(utterance_dict)), file=sys.stderr)
print('Number of sentences not removed from LOB: {}'. format(len(remaining_utterances)), file=sys.stderr)
print('LOB lines: Before: {} After: {}'.format(len(original_corpus_text),
                                               row_to_keep.count(True)), file=sys.stderr)
7,794 | <reponame>neilp78/dps
#ifndef SASS_VALUES_H
#define SASS_VALUES_H
#include "ast.hpp"
namespace Sass {

  // Convert an AST expression node into a C-API Sass_Value union.
  union Sass_Value* ast_node_to_sass_value (const Expression_Ptr val);

  // Convert a C-API Sass_Value back into an AST value node.
  Value_Ptr sass_value_to_ast_node (const union Sass_Value* val);

}
#endif
| 105 |
1,006 | <filename>boards/arm/lpc17xx_40xx/pnev5180b/src/pnev5180b.h
/****************************************************************************
* boards/arm/lpc17xx_40xx/pnev5180b/src/pnev5180b.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __BOARDS_ARM_LPC17XX_40XX_PNEV5180B_SRC_PNEV5180B_H
#define __BOARDS_ARM_LPC17XX_40XX_PNEV5180B_SRC_PNEV5180B_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/compiler.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* NXP PNEV5180B Pin Usage
****************************************************************************/
/* Pin Description On Board Connector
* -------------------------------- ---------------- -------------
* P0.2/TXD0/AD0.7 TX J201
* P0.3/RXD0/AD0.6 RX
* P0.22/RTS1/TD1 LD200 ORANGE LED
* P0.15/TXD1/SCK0/SCK PN5180-SCK
* P0.16/RXD1/SSEL0/SSEL PN5180-SSEL PN5180
* P0.17/CTS1/MISO0/MISO PN5180-MISO
* P0.18/DCD1/M0SI0/MOSI PN5180-MOSI
* P0.19/DSR1/SDA1 EEPROM (Not Assembled)
* P0.20/DTR1/SCL1 EEPROM
* P0.21/RI1/RD1 PN5180-AUX2 PN5180
* P0.29/USB_D+ USB-D+ USB
* P0.30/USB_D- USB-D-
* P2.0/PWM1.1/TXD1 LD201 RED LED
* P2.5/PWM1.6/DTR1/TRACEDATA0 PN5180-nPN_RST
* P2.9/USB_CONNECT/RXD2 USB_CONNECT USB
* P2.11/nEINT1/I2STX_CLK PN5180-BUSY PN5180
* P2.12/nEINT2/I2STX_WS PN5180-IRQ
* P3.25/MAT0.0/PWM1.2 LD203 GREEN LED
* P3.26/STCLK/MAT0.1/PWM1.3 LD202 BLUE LED
*/
/* On-board status LEDs (see the pin table above).  Each pin is configured
 * as a GPIO output with an initial level of one (GPIO_VALUE_ONE);
 * presumably the LEDs are wired active-low so they start out off --
 * confirm against the board schematic.
 */
#define PNEV5180B_LED_BLUE (GPIO_OUTPUT | GPIO_VALUE_ONE | GPIO_PORT3 | GPIO_PIN26)
#define PNEV5180B_LED_GREEN (GPIO_OUTPUT | GPIO_VALUE_ONE | GPIO_PORT3 | GPIO_PIN25)
#define PNEV5180B_LED_ORANGE (GPIO_OUTPUT | GPIO_VALUE_ONE | GPIO_PORT0 | GPIO_PIN22)
#define PNEV5180B_LED_RED (GPIO_OUTPUT | GPIO_VALUE_ONE | GPIO_PORT2 | GPIO_PIN0)
#ifndef __ASSEMBLY__
/****************************************************************************
* Public Functions Definitions
****************************************************************************/
/****************************************************************************
* Name: pnev5180b_bringup
*
* Description:
* Perform architecture-specific initialization
*
* CONFIG_BOARD_LATE_INITIALIZE=y :
* Called from board_late_initialize().
*
* CONFIG_BOARD_LATE_INITIALIZE=n && CONFIG_BOARDCTL=y :
* Called from the NSH library
*
****************************************************************************/
int pnev5180b_bringup(void);
/****************************************************************************
* Name: pnev5180b_autoled_initialize
*
* Description:
* Called early in power-up initialization to initialize the LED hardware.
*
****************************************************************************/
void pnev5180b_autoled_initialize(void);
/****************************************************************************
* Name: pnev5180b_spidev_initialize
*
* Description:
* Called to configure SPI chip select GPIO pins for the NXP PNEV5180B
* board.
*
****************************************************************************/
void weak_function pnev5180b_spidev_initialize(void);
#endif /* __ASSEMBLY__ */
#endif /* __BOARDS_ARM_LPC17XX_40XX_PNEV5180B_SRC_PNEV5180B_H */
| 1,834 |
886 | package com.immomo.rhizobia.rhizobia_J;
import com.immomo.rhizobia.rhizobia_J.sqli.DB2Sanitiser;
import org.junit.Test;
public class DB2SanitiserTest {
@Test
public void DB2Sanitise() {
//确认是连接的是哪种数据库 6ms
DB2Sanitiser db2Tool = DB2Sanitiser.getInstance();
//对sql语句进行特殊字符转义
String id = "1' or '1'='1' #";
long t1 = System.nanoTime();
String idEncode = db2Tool.DB2Sanitise(id);
long t2 = System.nanoTime();
System.out.println("without column: ");
System.out.println(t2 - t1);
String query = "SELECT NAME FROM users WHERE id = '" + idEncode + "'";
System.out.println(query);
query = "SELECT NAME FROM users WHERE id = '" + id + "'";
System.out.println(query);
String name = "name";
String nameEncode = db2Tool.DB2Sanitise(name);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user_name";
nameEncode = db2Tool.DB2Sanitise(name);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user-name";
nameEncode = db2Tool.DB2Sanitise(name);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user$name";
nameEncode = db2Tool.DB2Sanitise(name);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
System.out.println("\nwith column sign: ");
name = "name";
nameEncode = db2Tool.DB2Sanitise(name, true);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user_name";
nameEncode = db2Tool.DB2Sanitise(name, true);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user-name";
nameEncode = db2Tool.DB2Sanitise(name, true);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "user$name";
nameEncode = db2Tool.DB2Sanitise(name, true);
query = "SELECT NAME FROM users order by " + nameEncode;
System.out.println(query);
query = "SELECT NAME FROM users order by " + name;
System.out.println(query);
name = "1%0A%0Dand%0A%0D1=1";
nameEncode = db2Tool.DB2Sanitise( name, true);
System.out.println(nameEncode);
}
} | 1,417 |
3,631 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.maven.integration.embedder;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.maven.DefaultMaven;
import org.apache.maven.Maven;
import org.apache.maven.artifact.InvalidRepositoryException;
import org.apache.maven.artifact.repository.ArtifactRepository;
import org.apache.maven.execution.DefaultMavenExecutionRequest;
import org.apache.maven.execution.DefaultMavenExecutionResult;
import org.apache.maven.execution.MavenExecutionRequest;
import org.apache.maven.execution.MavenExecutionRequestPopulationException;
import org.apache.maven.execution.MavenExecutionRequestPopulator;
import org.apache.maven.execution.MavenExecutionResult;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.model.Profile;
import org.apache.maven.model.building.ModelSource;
import org.apache.maven.plugin.LegacySupport;
import org.apache.maven.project.MavenProject;
import org.apache.maven.project.ProjectBuilder;
import org.apache.maven.project.ProjectBuildingException;
import org.apache.maven.project.ProjectBuildingRequest;
import org.apache.maven.project.ProjectBuildingResult;
import org.apache.maven.repository.RepositorySystem;
import org.apache.maven.settings.Settings;
import org.apache.maven.settings.building.DefaultSettingsBuildingRequest;
import org.apache.maven.settings.building.FileSettingsSource;
import org.apache.maven.settings.building.SettingsBuilder;
import org.apache.maven.settings.building.SettingsBuildingException;
import org.apache.maven.settings.building.SettingsBuildingRequest;
import org.apache.maven.settings.building.SettingsSource;
import org.codehaus.plexus.PlexusContainer;
import org.codehaus.plexus.component.repository.exception.ComponentLookupException;
import org.codehaus.plexus.logging.Logger;
import org.codehaus.plexus.util.Os;
import org.eclipse.aether.RepositorySystemSession;
import org.eclipse.aether.artifact.Artifact;
import org.eclipse.aether.resolution.ArtifactDescriptorException;
import org.kie.maven.integration.MavenRepository;
import org.kie.maven.integration.MavenRepositoryConfiguration;
import org.kie.util.maven.support.ReleaseIdImpl;
import org.slf4j.LoggerFactory;
import static org.kie.maven.integration.IoUtils.copyInTempFile;
/**
 * Thin wrapper around an embedded Maven runtime.  It translates a
 * {@link MavenRequest} into a {@link MavenExecutionRequest}, creates a
 * {@link MavenSession} backed by a Plexus container (obtained through
 * {@link ComponentProvider}), and exposes helpers to read/build Maven
 * projects and to execute Maven goals in-process.
 */
public class MavenEmbedder {

    private static final org.slf4j.Logger log = LoggerFactory.getLogger( MavenEmbedder.class );

    // Default global settings.xml, resolved under ${maven.home} (falling
    // back to the current working directory when maven.home is unset).
    public static final File DEFAULT_GLOBAL_SETTINGS_FILE =
            new File( System.getProperty( "maven.home", System.getProperty( "user.dir", "" ) ), "conf/settings.xml" );

    private final MavenRequest mavenRequest;
    private final ComponentProvider componentProvider;

    // Populated once by init(); shared by all subsequent operations.
    private MavenExecutionRequest mavenExecutionRequest;
    private MavenSession mavenSession;

    /** Builds an embedder using the current thread's context class loader. */
    public MavenEmbedder( MavenRequest mavenRequest ) throws MavenEmbedderException {
        this( Thread.currentThread().getContextClassLoader(), null, mavenRequest );
    }

    /** Builds an embedder with an explicit Maven/parent class loader pair. */
    public MavenEmbedder( ClassLoader mavenClassLoader,
                          ClassLoader parent,
                          MavenRequest mavenRequest ) throws MavenEmbedderException {
        this( mavenRequest, MavenEmbedderUtils.buildComponentProvider( mavenClassLoader, parent, mavenRequest ) );
    }

    protected MavenEmbedder( MavenRequest mavenRequest,
                             ComponentProvider componentProvider ) throws MavenEmbedderException {
        this.mavenRequest = mavenRequest;
        this.componentProvider = componentProvider;

        init();
    }

    /**
     * Creates the execution request and the {@link MavenSession}, and makes
     * the session visible to Maven plugins via {@link LegacySupport}.
     */
    void init() throws MavenEmbedderException {
        try {
            this.mavenExecutionRequest = this.buildMavenExecutionRequest( mavenRequest );

            RepositorySystemSession rss = ( (DefaultMaven) componentProvider.lookup( Maven.class ) ).newRepositorySession( mavenExecutionRequest );

            mavenSession = new MavenSession( componentProvider.getPlexusContainer(), rss, mavenExecutionRequest, new DefaultMavenExecutionResult() );

            componentProvider.lookup( LegacySupport.class ).setSession( mavenSession );
        } catch ( MavenEmbedderException e ) {
            log.error( "Unable to build MavenEmbedder", e );
            throw e;
        } catch ( ComponentLookupException e ) {
            log.error( "Unable to build MavenEmbedder", e );
            throw new MavenEmbedderException( e.getMessage(), e );
        }
    }

    /**
     * Translates a {@link MavenRequest} into a fully-populated
     * {@link MavenExecutionRequest}: settings files, local/remote
     * repositories, proxies, system/user properties (including env vars as
     * {@code env.*}), profiles, logging and goals.
     */
    protected MavenExecutionRequest buildMavenExecutionRequest( MavenRequest mavenRequest )
            throws MavenEmbedderException, ComponentLookupException {
        MavenExecutionRequest mavenExecutionRequest = new DefaultMavenExecutionRequest();

        if ( mavenRequest.getGlobalSettingsFile() != null ) {
            mavenExecutionRequest.setGlobalSettingsFile( new File( mavenRequest.getGlobalSettingsFile() ) );
        }

        SettingsSource userSettings = mavenRequest.getUserSettingsSource();
        if ( userSettings != null ) {
            if ( userSettings instanceof FileSettingsSource ) {
                mavenExecutionRequest.setUserSettingsFile( ( (FileSettingsSource) userSettings ).getSettingsFile() );
            } else {
                // Non-file settings source: spill it to a temp file, since
                // the execution request only accepts a File.
                try {
                    mavenExecutionRequest.setUserSettingsFile( copyInTempFile( userSettings.getInputStream(), "xml" ) );
                } catch ( IOException ioe ) {
                    log.warn( "Unable to use maven settings defined in " + userSettings, ioe );
                }
            }
        }

        try {
            componentProvider.lookup( MavenExecutionRequestPopulator.class ).populateFromSettings( mavenExecutionRequest, getSettings() );
            componentProvider.lookup( MavenExecutionRequestPopulator.class ).populateDefaults( mavenExecutionRequest );
        } catch ( MavenExecutionRequestPopulationException e ) {
            throw new MavenEmbedderException( e.getMessage(), e );
        }

        ArtifactRepository localRepository = getLocalRepository();
        mavenExecutionRequest.setLocalRepository( localRepository );
        mavenExecutionRequest.setLocalRepositoryPath( localRepository.getBasedir() );
        mavenExecutionRequest.setOffline( mavenRequest.isOffline() );

        mavenExecutionRequest.setUpdateSnapshots( mavenRequest.isUpdateSnapshots() );

        // TODO check null and create a console one ?
        mavenExecutionRequest.setTransferListener( mavenRequest.getTransferListener() );

        mavenExecutionRequest.setCacheNotFound( mavenRequest.isCacheNotFound() );
        mavenExecutionRequest.setCacheTransferError( true );

        mavenExecutionRequest.setUserProperties( mavenRequest.getUserProperties() );
        mavenExecutionRequest.getSystemProperties().putAll( System.getProperties() );
        if ( mavenRequest.getSystemProperties() != null ) {
            mavenExecutionRequest.getSystemProperties().putAll( mavenRequest.getSystemProperties() );
        }
        mavenExecutionRequest.getSystemProperties().putAll( getEnvVars() );

        // CLI-style profile activation: each requested profile is both
        // registered and activated.
        if ( mavenRequest.getProfiles() != null && !mavenRequest.getProfiles().isEmpty() ) {
            for ( String id : mavenRequest.getProfiles() ) {
                Profile p = new Profile();
                p.setId( id );
                p.setSource( "cli" );
                mavenExecutionRequest.addProfile( p );
                mavenExecutionRequest.addActiveProfile( id );
            }
        }

        MavenRepositoryConfiguration mavenRepoConf = getMavenRepositoryConfiguration();

        //DROOLS-899: Copy repositories defined in settings to execution request
        for ( ArtifactRepository artifactRepository : mavenRepoConf.getArtifactRepositoriesForRequest() ) {
            mavenExecutionRequest.addRemoteRepository( artifactRepository );
        }

        mavenExecutionRequest.setProxies( mavenRepoConf.getProxies() );

        mavenExecutionRequest.setLoggingLevel( mavenRequest.getLoggingLevel() );

        componentProvider.lookup( Logger.class ).setThreshold( mavenRequest.getLoggingLevel() );

        mavenExecutionRequest.setExecutionListener( mavenRequest.getExecutionListener() )
                .setInteractiveMode( mavenRequest.isInteractive() )
                .setGlobalChecksumPolicy( mavenRequest.getGlobalChecksumPolicy() )
                .setGoals( mavenRequest.getGoals() );

        if ( mavenRequest.getPom() != null ) {
            mavenExecutionRequest.setPom( new File( mavenRequest.getPom() ) );
        }

        if ( mavenRequest.getWorkspaceReader() != null ) {
            mavenExecutionRequest.setWorkspaceReader( mavenRequest.getWorkspaceReader() );
        }

        if (mavenRequest.getBaseDirectory() != null) {
            mavenExecutionRequest.setBaseDirectory(new File(mavenRequest.getBaseDirectory()));
        }

        return mavenExecutionRequest;
    }

    /** Extension point; defaults to the globally-configured repositories. */
    protected MavenRepositoryConfiguration getMavenRepositoryConfiguration() {
        return MavenSettings.getMavenRepositoryConfiguration();
    }

    /**
     * Environment variables exposed as {@code env.NAME} properties.  On
     * Windows the variable names are upper-cased (case-insensitive env).
     */
    private Properties getEnvVars() {
        Properties envVars = new Properties();
        boolean caseSensitive = !Os.isFamily( Os.FAMILY_WINDOWS );
        for ( Entry<String, String> entry : System.getenv().entrySet() ) {
            String key = "env." + ( caseSensitive ? entry.getKey() : entry.getKey().toUpperCase( Locale.ENGLISH ) );
            envVars.setProperty( key, entry.getValue() );
        }
        return envVars;
    }

    /**
     * Computes the effective Maven settings: request-supplied files first,
     * falling back to the default global settings and the user settings
     * registered in {@link MavenSettings}.
     */
    public Settings getSettings() throws MavenEmbedderException, ComponentLookupException {
        SettingsBuildingRequest settingsBuildingRequest = new DefaultSettingsBuildingRequest();
        if ( this.mavenRequest.getGlobalSettingsFile() != null ) {
            settingsBuildingRequest.setGlobalSettingsFile( new File( this.mavenRequest.getGlobalSettingsFile() ) );
        } else {
            settingsBuildingRequest.setGlobalSettingsFile( DEFAULT_GLOBAL_SETTINGS_FILE );
        }
        if ( this.mavenRequest.getUserSettingsSource() != null ) {
            settingsBuildingRequest.setUserSettingsSource( this.mavenRequest.getUserSettingsSource() );
        } else {
            SettingsSource userSettingsSource = MavenSettings.getUserSettingsSource();
            if ( userSettingsSource != null ) {
                settingsBuildingRequest.setUserSettingsSource( userSettingsSource );
            }
        }

        settingsBuildingRequest.setUserProperties( this.mavenRequest.getUserProperties() );
        settingsBuildingRequest.getSystemProperties().putAll( System.getProperties() );
        settingsBuildingRequest.getSystemProperties().putAll( this.mavenRequest.getSystemProperties() );
        settingsBuildingRequest.getSystemProperties().putAll( getEnvVars() );

        try {
            return componentProvider.lookup( SettingsBuilder.class ).build( settingsBuildingRequest ).getEffectiveSettings();
        } catch ( SettingsBuildingException e ) {
            throw new MavenEmbedderException( e.getMessage(), e );
        }
    }

    /** Local repository derived from {@link #getLocalRepositoryPath()}. */
    public ArtifactRepository getLocalRepository() throws ComponentLookupException {
        try {
            String localRepositoryPath = getLocalRepositoryPath();
            if ( localRepositoryPath != null ) {
                return componentProvider.lookup( RepositorySystem.class ).createLocalRepository( new File( localRepositoryPath ) );
            }
            return componentProvider.lookup( RepositorySystem.class ).createLocalRepository( RepositorySystem.defaultUserLocalRepository );
        } catch ( InvalidRepositoryException e ) {
            // never happened
            throw new IllegalStateException( e );
        }
    }

    /**
     * Local repository path resolution, lowest to highest precedence:
     * effective settings, then the request, then Maven's default.
     */
    public String getLocalRepositoryPath() {
        String path = null;

        try {
            Settings settings = getSettings();
            path = settings.getLocalRepository();
        } catch ( MavenEmbedderException e ) {
            // ignore
        } catch ( ComponentLookupException e ) {
            // ignore
        }

        if ( this.mavenRequest.getLocalRepositoryPath() != null ) {
            path = this.mavenRequest.getLocalRepositoryPath();
        }

        if ( path == null ) {
            path = RepositorySystem.defaultUserLocalRepository.getAbsolutePath();
        }
        return path;
    }

    // ----------------------------------------------------------------------
    // Project
    // ----------------------------------------------------------------------

    /**
     * Builds a {@link MavenProject} from an in-memory POM stream.  When a
     * dependency fails to resolve because of a stale descriptor, the
     * offending artifact is evicted from the local repository once and the
     * build is retried (BZ-1007894); any further resolution error is
     * rethrown.  The stream is always closed.
     */
    public MavenProject readProject( final InputStream mavenProjectStream ) throws ProjectBuildingException, MavenEmbedderException {
        ModelSource modelSource = new ModelSource() {
            @Override
            public InputStream getInputStream() {
                return mavenProjectStream;
            }

            @Override
            public String getLocation() {
                return "";
            }
        };

        ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
        try {
            org.eclipse.aether.artifact.Artifact lastArtifact = null;

            do {
                Thread.currentThread().setContextClassLoader(componentProvider.getSystemClassLoader());
                ProjectBuilder projectBuilder = componentProvider.lookup(ProjectBuilder.class);
                // BZ-1007894: Check if added dependencies are resolvable.
                ProjectBuildingResult result = projectBuilder.build(modelSource, getProjectBuildingRequest());
                if (result != null && result.getDependencyResolutionResult() != null && !result.getDependencyResolutionResult().getCollectionErrors().isEmpty()) {
                    // A dependency resolution error has been produced. It can contains some error. Throw the first one to the client, so the user will fix every one sequentially.
                    final Exception depedencyResolutionException = result.getDependencyResolutionResult().getCollectionErrors().get(0);
                    if (depedencyResolutionException instanceof ArtifactDescriptorException) {
                        final org.eclipse.aether.artifact.Artifact artifact = ((ArtifactDescriptorException) depedencyResolutionException).getResult().getArtifact();
                        // Retry only once per artifact: lastArtifact guards
                        // against looping forever on the same failure.
                        if (!artifact.equals(lastArtifact)) {
                            tryRemoveLocalArtifact(artifact);
                            lastArtifact = artifact;
                            continue;
                        }
                    }
                    if (depedencyResolutionException != null) {
                        throw new MavenEmbedderException(depedencyResolutionException.getMessage(), depedencyResolutionException);
                    }
                }
                return (result == null || result.getProject() == null ) ? null : result.getProject();
            } while (true);
        } catch ( ComponentLookupException e ) {
            throw new MavenEmbedderException( e.getMessage(), e );
        } finally {
            Thread.currentThread().setContextClassLoader( originalCl );
            try {
                mavenProjectStream.close();
            } catch ( IOException e ) {
            }
        }
    }

    /** Evicts an artifact from the local Maven repository (best effort). */
    void tryRemoveLocalArtifact(Artifact artifact) {
        MavenRepository.getMavenRepository().removeLocalArtifact(new ReleaseIdImpl(artifact.getGroupId(), artifact.getArtifactId(), artifact.getVersion()));
    }

    /** Builds a single (non-recursive) project from a POM file. */
    public MavenProject readProject( File mavenProject ) throws ProjectBuildingException, MavenEmbedderException {
        List<MavenProject> projects = readProjects( mavenProject, false );
        return projects == null || projects.isEmpty() ? null : projects.get( 0 );
    }

    /** Builds the project(s) rooted at a POM file, optionally recursing into modules. */
    public List<MavenProject> readProjects( File mavenProject,
                                            boolean recursive ) throws ProjectBuildingException, MavenEmbedderException {
        ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
            List<ProjectBuildingResult> results = buildProjects( mavenProject, recursive );
            List<MavenProject> projects = new ArrayList<MavenProject>( results.size() );
            for ( ProjectBuildingResult result : results ) {
                projects.add( result.getProject() );
            }
            return projects;
        } finally {
            Thread.currentThread().setContextClassLoader( originalCl );
        }
    }

    /** Raw project-building results for a POM file (see {@link #readProjects}). */
    public List<ProjectBuildingResult> buildProjects( File mavenProject,
                                                      boolean recursive ) throws ProjectBuildingException, MavenEmbedderException {
        ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
            ProjectBuilder projectBuilder = componentProvider.lookup( ProjectBuilder.class );
            return projectBuilder.build( Collections.singletonList( mavenProject ), recursive, getProjectBuildingRequest() );
        } catch ( ComponentLookupException e ) {
            throw new MavenEmbedderException( e.getMessage(), e );
        } finally {
            Thread.currentThread().setContextClassLoader( originalCl );
        }
    }

    // Project-building request derived from the execution request, with the
    // validation level and plugin/dependency-processing flags applied.
    ProjectBuildingRequest getProjectBuildingRequest() throws ComponentLookupException {
        ProjectBuildingRequest projectBuildingRequest = this.mavenExecutionRequest.getProjectBuildingRequest();
        projectBuildingRequest.setValidationLevel( this.mavenRequest.getValidationLevel() );
        RepositorySystemSession repositorySystemSession = componentProvider.getRepositorySystemSession( mavenExecutionRequest );
        projectBuildingRequest.setRepositorySession( repositorySystemSession );
        projectBuildingRequest.setProcessPlugins( this.mavenRequest.isProcessPlugins() );
        projectBuildingRequest.setResolveDependencies( this.mavenRequest.isResolveDependencies() );
        return projectBuildingRequest;
    }

    public MavenSession getMavenSession() {
        return mavenSession;
    }

    public MavenExecutionRequest getMavenExecutionRequest() {
        return mavenExecutionRequest;
    }

    /** Disposes the underlying Plexus container and all its components. */
    public void dispose() {
        PlexusContainer plexusContainer = componentProvider.getPlexusContainer();
        if ( plexusContainer != null ) {
            plexusContainer.dispose();
        }
    }

    /**
     * Executes the given request's goals in-process, restoring the caller's
     * context class loader afterwards.  Any non-embedder throwable is
     * wrapped in a {@link MavenEmbedderException}.
     */
    public MavenExecutionResult execute( final MavenRequest mavenRequest )
            throws MavenEmbedderException {
        final ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
            final Maven maven = componentProvider.lookup( Maven.class );
            return maven.execute( buildMavenExecutionRequest( mavenRequest ) );
        } catch ( final MavenEmbedderException e ) {
            log.error( "An MavenEmbedderException occurred during maven execution.", e );
            throw e;
        } catch ( final Throwable e ) {
            log.error( "An exception occurred during maven execution.", e );
            throw new MavenEmbedderException( e.getMessage(), e );
        } finally {
            Thread.currentThread().setContextClassLoader( originalCl );
        }
    }
}
| 7,443 |
1,681 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from hypertools.tools.cluster import cluster
from hypertools.plot.plot import plot
# Two well-separated Gaussian blobs (means 0 and 100, unit covariance) so a
# 2-cluster fit should recover exactly two labels.
cluster1 = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=100)
cluster2 = np.random.multivariate_normal(np.zeros(3)+100, np.eye(3), size=100)
data = np.vstack([cluster1, cluster2])
# Shared fixture: labels are computed once at import time for all tests below.
labels = cluster(data, n_clusters=2)
def test_cluster_n_clusters():
    # With two well-separated blobs, exactly two distinct labels should come back.
    distinct_labels = set(labels)
    assert len(distinct_labels) == 2
def test_cluster_returns_list():
    # cluster() must hand back a plain Python list, not an ndarray.
    label_type = type(labels)
    assert label_type is list
def test_cluster_hdbscan():
    # Exercise the HDBSCAN path only when the optional dependency is installed;
    # otherwise cluster() is expected to raise ImportError.
    try:
        from hdbscan import HDBSCAN  # noqa: F401 -- presence check only
        _has_hdbscan = True
    except ImportError:
        # Only the missing-package case should be treated as "not installed";
        # the original bare `except:` also hid real errors (e.g. a broken
        # install or even KeyboardInterrupt).
        _has_hdbscan = False
    if _has_hdbscan:
        hdbscan_labels = cluster(data, cluster='HDBSCAN')
        assert len(set(hdbscan_labels)) == 2
    else:
        with pytest.raises(ImportError):
            hdbscan_labels = cluster(data, cluster='HDBSCAN')
| 377 |
2,890 | package com.github.ltsopensource.kv.index;
import com.github.ltsopensource.core.factory.NamedThreadFactory;
import com.github.ltsopensource.core.logger.Logger;
import com.github.ltsopensource.core.logger.LoggerFactory;
import com.github.ltsopensource.kv.Entry;
import com.github.ltsopensource.kv.StoreConfig;
import com.github.ltsopensource.kv.cache.DataCache;
import com.github.ltsopensource.kv.data.DataBlockEngine;
import com.github.ltsopensource.kv.iterator.DBIterator;
import com.github.ltsopensource.kv.iterator.MemIteratorImpl;
import com.github.ltsopensource.kv.txlog.StoreTxLogPosition;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
* @author <NAME> (<EMAIL>) on 12/16/15.
*/
public class MemIndex<K, V> implements Index<K, V> {
    private static final Logger LOGGER = LoggerFactory.getLogger(MemIndex.class);
    // Tx-log position of the most recently applied index mutation.
    private StoreTxLogPosition lastTxLog;
    // Sorted in-memory index; ConcurrentSkipListMap keeps keys ordered.
    private ConcurrentMap<K, IndexItem<K>> indexMap;
    private StoreConfig storeConfig;
    private DataBlockEngine<K, V> dataBlockEngine;
    private DataCache<K, V> dataCache;
    // Value of currentChangeNum at the time of the last snapshot.
    private AtomicLong lastSnapshotChangeNum = new AtomicLong(0);
    // Total number of mutations (puts/removes) applied so far.
    private AtomicLong currentChangeNum = new AtomicLong(0);
    private IndexSnapshot<K, V> indexSnapshot;
    /**
     * Creates the in-memory index and starts a single-threaded daemon timer
     * that snapshots the index once enough changes have accumulated.
     */
    public MemIndex(final StoreConfig storeConfig, DataBlockEngine<K, V> dataBlockEngine, DataCache<K, V> dataCache) {
        this.indexMap = new ConcurrentSkipListMap<K, IndexItem<K>>();
        this.storeConfig = storeConfig;
        this.dataBlockEngine = dataBlockEngine;
        this.dataCache = dataCache;
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ltsdb-index-snapshot-check-service", true));
        executorService.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    // Snapshot once the number of changes since the last
                    // snapshot exceeds the configured threshold.
                    // NOTE(review): indexSnapshot is injected later via
                    // setIndexSnapshot(); if this timer fires first, the NPE is
                    // swallowed by the catch below -- confirm that is intended.
                    if (currentChangeNum.get() - lastSnapshotChangeNum.get() > storeConfig.getIndexSnapshotThreshold()) {
                        indexSnapshot.snapshot();
                    }
                } catch (Throwable t) {
                    LOGGER.error("SNAPSHOT Error", t);
                }
            }
        }, 3, 2, TimeUnit.SECONDS);
    }
    // Returns the index entry for key, or null if absent.
    public IndexItem<K> getIndexItem(K key) {
        return indexMap.get(key);
    }
    @Override
    public IndexItem<K> removeIndexItem(StoreTxLogPosition txLogResult, K key) {
        IndexItem<K> value = indexMap.remove(key);
        // Record the tx-log position of this mutation and bump the change counter.
        this.lastTxLog = txLogResult;
        currentChangeNum.incrementAndGet();
        return value;
    }
    @Override
    public void putIndexItem(StoreTxLogPosition txLogResult, K key, IndexItem<K> indexItem) {
        indexMap.put(key, indexItem);
        // Record the tx-log position of this mutation and bump the change counter.
        this.lastTxLog = txLogResult;
        currentChangeNum.incrementAndGet();
    }
    @Override
    public int size() {
        return indexMap.size();
    }
    @Override
    public boolean containsKey(K key) {
        return indexMap.containsKey(key);
    }
    @Override
    public DBIterator<Entry<K, V>> iterator() {
        // Iterates entries by resolving values through the block engine/cache.
        return new MemIteratorImpl<K, V>(this, dataBlockEngine, dataCache);
    }
    @Override
    public StoreTxLogPosition lastTxLog() {
        return lastTxLog;
    }
    void setLastTxLog(StoreTxLogPosition lastTxLog) {
        this.lastTxLog = lastTxLog;
    }
    public ConcurrentMap<K, IndexItem<K>> getIndexMap() {
        return indexMap;
    }
    // Package-private: used when restoring the index (e.g. from a snapshot).
    void setIndexMap(ConcurrentMap<K, IndexItem<K>> indexMap) {
        this.indexMap = indexMap;
    }
    public void setIndexSnapshot(IndexSnapshot<K, V> indexSnapshot) {
        this.indexSnapshot = indexSnapshot;
    }
}
| 1,523 |
335 | <gh_stars>100-1000
{
"word": "Plutonium",
"definitions": [
"A transuranic element with a fissile isotope of mass number 239 (plutonium 239) that can be produced from non-fissile uranium 238, as in a breeder reactor."
],
"parts-of-speech": "Noun"
}
| 103 |
1,179 | <reponame>fengjixuchui/hypervisor-2
/// @copyright
/// Copyright (C) 2020 Assured Information Security, Inc.
///
/// @copyright
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// @copyright
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// @copyright
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
#ifndef BASIC_ENTRIES_T_HPP
#define BASIC_ENTRIES_T_HPP
namespace lib
{
/// <!-- description -->
/// @brief Returns the result of get_entries(), providing all of the
/// entries seen during a translation.
///
///
/// <!-- template parameters -->
/// @tparam L3E_TYPE the type of level-3 table to use
/// @tparam L2E_TYPE the type of level-2 table to use
/// @tparam L1E_TYPE the type of level-1 table to use
/// @tparam L0E_TYPE the type of level-0 table to use
///
template<typename L3E_TYPE, typename L2E_TYPE, typename L1E_TYPE, typename L0E_TYPE>
struct basic_entries_t final
{
/// @brief a pointer to the resulting l3e_t
L3E_TYPE *l3e;
/// @brief a pointer to the resulting l2e_t
L2E_TYPE *l2e;
/// @brief a pointer to the resulting l1e_t
L1E_TYPE *l1e;
/// @brief a pointer to the resulting l0e_t
L0E_TYPE *l0e;
};
}
#endif
| 754 |
4,067 | import sys
from bintrees import RBTree
def count_duplicates(lines):
    """Count occurrences of each line in an iterable.

    Returns a pair (counts, max_count): counts is an RBTree mapping each
    distinct line to its number of occurrences, and max_count is the highest
    occurrence count seen (0 for empty input).
    """
    counts = RBTree()
    max_count = 0
    for line in lines:
        occurrences = counts[line] + 1 if line in counts else 1
        counts[line] = occurrences
        if occurrences > max_count:
            max_count = occurrences
    return counts, max_count
# Read every line from stdin; h maps line -> count, m is the max count.
h, m = count_duplicates(sys.stdin)
# print(len(h), m)
| 137 |
733 | <reponame>trisadmeslek/V-Sekai-Blender-tools<gh_stars>100-1000
from rx import Observable, AnonymousObservable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def take_last_buffer(self, count):
    """Returns an array with the specified number of contiguous elements
    from the end of an observable sequence.

    Example:
        res = source.take_last_buffer(5)

    Description:
    This operator accumulates a buffer with a length enough to store
    elements count elements. Upon completion of the source sequence, this
    buffer is drained on the result sequence. This causes the elements to be
    delayed.

    Keyword arguments:
    :param int count: Number of elements to take from the end of the source
        sequence.
    :returns: An observable sequence containing a single list with the specified
        number of elements from the end of the source sequence.
    :rtype: Observable
    """
    source = self

    def subscribe(observer):
        q = []

        def on_next(x):
            with self.lock:
                q.append(x)
                # Trim inside the lock: in the original code the pop happened
                # outside the critical section, so two racing on_next calls
                # could both append before either trimmed, corrupting the
                # bounded buffer.
                if len(q) > count:
                    q.pop(0)

        def on_completed():
            observer.on_next(q)
            observer.on_completed()

        return source.subscribe(on_next, observer.on_error, on_completed)
    return AnonymousObservable(subscribe)
| 498 |
14,668 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/public/common/media/watch_time_component.h"
#include "third_party/blink/public/common/common_export.h"
#include "third_party/blink/public/common/media/display_type.h"
namespace blink {
// Constructs a component tracking one dimension of playback state.
// |keys_to_finalize| lists every key this component may report to;
// |value_to_key_cb|, when provided, maps the current value to the single key
// to report instead; |get_media_time_cb| supplies the media time used to
// bound watch time; |recorder| receives the recorded values (raw pointer --
// assumed to outlive this component, TODO confirm with owner).
template <typename T>
WatchTimeComponent<T>::WatchTimeComponent(
    T initial_value,
    std::vector<media::WatchTimeKey> keys_to_finalize,
    ValueToKeyCB value_to_key_cb,
    GetMediaTimeCB get_media_time_cb,
    media::mojom::WatchTimeRecorder* recorder)
    : keys_to_finalize_(std::move(keys_to_finalize)),
      value_to_key_cb_(std::move(value_to_key_cb)),
      get_media_time_cb_(std::move(get_media_time_cb)),
      recorder_(recorder),
      current_value_(initial_value),
      pending_value_(initial_value) {}
// Defaulted; no explicit cleanup is required (|recorder_| is not owned).
template <typename T>
WatchTimeComponent<T>::~WatchTimeComponent() = default;
// Begins a new reporting interval at |start_timestamp|; any previously
// recorded last/end timestamps are discarded.
template <typename T>
void WatchTimeComponent<T>::OnReportingStarted(
    base::TimeDelta start_timestamp) {
  start_timestamp_ = start_timestamp;
  end_timestamp_ = last_timestamp_ = media::kNoTimestamp;
}
// Latches a pending value change. The swap to |pending_value_| happens in
// Finalize(); until then |end_timestamp_| marks the media time at which the
// value changed, so watch time before/after the change can be attributed
// separately.
template <typename T>
void WatchTimeComponent<T>::SetPendingValue(T new_value) {
  pending_value_ = new_value;
  if (current_value_ != new_value) {
    // Don't trample an existing finalize; the first takes precedence.
    //
    // Note: For components with trinary or higher state, which experience
    // multiple state changes during an existing finalize, this will drop all
    // watch time between the current and final state. E.g., state=0 {0ms} ->
    // state=1 {1ms} -> state=2 {2ms} will result in loss of state=1 watch time.
    if (end_timestamp_ != media::kNoTimestamp)
      return;
    end_timestamp_ = get_media_time_cb_.Run();
    return;
  }
  // Clear any pending finalize since we returned to the previous value before
  // the finalize could completed. I.e., assume this is a continuation.
  end_timestamp_ = media::kNoTimestamp;
}
// Immediately overwrites the current value without latching a finalize
// timestamp; contrast with SetPendingValue().
template <typename T>
void WatchTimeComponent<T>::SetCurrentValue(T new_value) {
  current_value_ = new_value;
}
// Records watch time elapsed from |start_timestamp_| up to
// |current_timestamp| (clamped to a pending finalize's |end_timestamp_|),
// attributing it either to every key in |keys_to_finalize_| or, when
// |value_to_key_cb_| is set, to the single key for |current_value_|.
template <typename T>
void WatchTimeComponent<T>::RecordWatchTime(base::TimeDelta current_timestamp) {
  DCHECK_NE(current_timestamp, media::kNoTimestamp);
  DCHECK_NE(current_timestamp, media::kInfiniteDuration);
  DCHECK_GE(current_timestamp, base::TimeDelta());
  // If we're finalizing, use the media time at time of finalization. We only
  // use the |end_timestamp_| if it's less than the current timestamp, otherwise
  // we may report more watch time than expected.
  if (NeedsFinalize() && end_timestamp_ < current_timestamp)
    current_timestamp = end_timestamp_;
  // Don't update watch time if media time hasn't changed since the last run;
  // this may occur if a seek is taking some time to complete or the playback
  // is stalled for some reason.
  if (last_timestamp_ == current_timestamp)
    return;
  last_timestamp_ = current_timestamp;
  const base::TimeDelta elapsed = last_timestamp_ - start_timestamp_;
  if (elapsed <= base::TimeDelta())
    return;
  // If no value to key callback has been provided, record |elapsed| to every
  // key in the |keys_to_finalize_| list.
  if (!value_to_key_cb_) {
    for (auto k : keys_to_finalize_)
      recorder_->RecordWatchTime(k, elapsed);
    return;
  }
  // A conversion callback has been specified, so only report elapsed to the
  // key provided by the callback.
  //
  // Record watch time using |current_value_| and not |pending_value_| since
  // that transition should not happen until Finalize().
  recorder_->RecordWatchTime(value_to_key_cb_.Run(current_value_), elapsed);
}
// Promotes |pending_value_| to current, restarts timing at the finalize
// point, and appends this component's keys to |keys_to_finalize|.
template <typename T>
void WatchTimeComponent<T>::Finalize(
    std::vector<media::WatchTimeKey>* keys_to_finalize) {
  DCHECK(NeedsFinalize());
  // Update |current_value_| and |start_timestamp_| to |end_timestamp_| since
  // that's when the |pending_value_| was set.
  current_value_ = pending_value_;
  start_timestamp_ = end_timestamp_;
  // Complete the finalize and indicate which keys need to be finalized.
  end_timestamp_ = media::kNoTimestamp;
  keys_to_finalize->insert(keys_to_finalize->end(), keys_to_finalize_.begin(),
                           keys_to_finalize_.end());
  DCHECK(!NeedsFinalize());
}
// A finalize is pending iff SetPendingValue() latched an |end_timestamp_|.
template <typename T>
bool WatchTimeComponent<T>::NeedsFinalize() const {
  return end_timestamp_ != media::kNoTimestamp;
}
// Required to avoid linking errors since we've split this file into a .cc + .h
// file set instead of putting the function definitions in the header file. Any
// new component type must be added here.
//
// Note: These must be the last line in this file, otherwise you will also see
// linking errors since the templates won't have been fully defined prior.
template class BLINK_COMMON_EXPORT WatchTimeComponent<bool>;
template class BLINK_COMMON_EXPORT WatchTimeComponent<DisplayType>;
} // namespace blink
| 1,636 |
679 | <filename>main/chart2/source/inc/NamedProperties.hxx<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef CHART_NAMEDPROPERTIES_HXX
#define CHART_NAMEDPROPERTIES_HXX
#include "NamedLineProperties.hxx"
#include "NamedFillProperties.hxx"
#include "PropertyHelper.hxx"
#include <com/sun/star/beans/Property.hpp>
#include <vector>
namespace chart
{
class NamedProperties
{
	/** this class combines the classes NamedFillAttributes and NamedLineAttributes
	thus you can handle all named properties with one call if you like
	*/
public:
	/// Appends all named fill and line properties to rOutProperties.
	static void AddPropertiesToVector(
		::std::vector< ::com::sun::star::beans::Property > & rOutProperties );
	//will return e.g. "FillGradientName" for nHandle == PROP_FILL_GRADIENT_NAME
	static ::rtl::OUString GetPropertyNameForHandle( sal_Int32 nHandle );
private:
	// not implemented - static-only utility class, no instances allowed
	NamedProperties();
};
} // namespace chart
// CHART_NAMEDPROPERTIES_HXX
#endif
| 538 |
1,056 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.debugger.jpda.truffle.ast.model;
import org.netbeans.api.debugger.jpda.JPDADebugger;
import org.netbeans.modules.debugger.jpda.truffle.access.TruffleStrataProvider;
import org.netbeans.modules.debugger.jpda.truffle.ast.TruffleNode;
import org.netbeans.modules.debugger.jpda.truffle.ast.view.ASTView;
import org.netbeans.spi.debugger.ContextProvider;
import org.netbeans.spi.debugger.DebuggerServiceRegistration;
import org.netbeans.spi.viewmodel.ModelListener;
import org.netbeans.spi.viewmodel.NodeModel;
import org.netbeans.spi.viewmodel.UnknownTypeException;
@DebuggerServiceRegistration(path="netbeans-JPDASession/"+TruffleStrataProvider.TRUFFLE_STRATUM+"/"+ASTView.AST_VIEW_NAME,
                            types={ NodeModel.class })
public class ASTNodeModel implements NodeModel {
    private final JPDADebugger debugger;
    public ASTNodeModel(ContextProvider lookupProvider) {
        debugger = lookupProvider.lookupFirst(null, JPDADebugger.class);
    }
    /**
     * Builds the label shown for an AST node: "(tags) SimpleClassName
     * [l1:c1-l2:c2]", wrapped in HTML bold markup when the node is current.
     */
    @Override
    public String getDisplayName(Object node) throws UnknownTypeException {
        if (node instanceof TruffleNode) {
            TruffleNode ast = (TruffleNode) node;
            String label = ast.getClassSimpleName();
            String tags = ast.getTags();
            if (!tags.isEmpty()) {
                label = '(' + tags + ") " + label;
            }
            // A negative start line means no source section is attached.
            int l1 = ast.getStartLine();
            if (l1 >= 0) {
                int c1 = ast.getStartColumn();
                int l2 = ast.getEndLine();
                int c2 = ast.getEndColumn();
                label += " ["+l1+":"+c1+"-"+l2+":"+c2+"]";
            }
            if (ast.isCurrent()) {
                label = "<html><b>" + label + "</b></html>";
            }
            return label;
        } else {
            throw new UnknownTypeException(node);
        }
    }
    // Same icon for every node in the AST view.
    @Override
    public String getIconBase(Object node) throws UnknownTypeException {
        return "org/netbeans/modules/debugger/resources/threadsView/RunningThread";
    }
    /** Tooltip: fully-qualified class name plus the node's description. */
    @Override
    public String getShortDescription(Object node) throws UnknownTypeException {
        if (node instanceof TruffleNode) {
            TruffleNode ast = (TruffleNode) node;
            return ast.getClassName() + " (" + ast.getDescription() +")";
        } else {
            throw new UnknownTypeException(node);
        }
    }
    // This model emits no change notifications, so listeners are ignored.
    @Override
    public void addModelListener(ModelListener l) {
    }
    @Override
    public void removeModelListener(ModelListener l) {
    }
}
| 1,324 |
465 | <filename>marmaray/src/main/java/com/uber/marmaray/utilities/KafkaUtil.java
/*
* Copyright (c) 2018 Uber Technologies, Inc.
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
package com.uber.marmaray.utilities;
import com.github.rholder.retry.AttemptTimeLimiters;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.uber.marmaray.common.configuration.KafkaConfiguration;
import com.uber.marmaray.common.exceptions.JobRuntimeException;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.hibernate.validator.constraints.NotEmpty;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * {@link KafkaUtil} provides utility methods for interacting with Kafka
 */
@Slf4j
public final class KafkaUtil {

    /** Upper bound, in seconds, for a single offset-fetch attempt. */
    public static final int FETCH_OFFSET_TIMEOUT_SEC = 60;
    /** Number of attempts before an offset fetch is abandoned. */
    public static final int FETCH_OFFSET_RETRY_CNT = 3;

    // Local topic partition cache to avoid additional topic partition lookups.
    // Declared volatile so the double-checked locking in getTopicPartitions()
    // publishes the fully constructed map safely across threads.
    public static volatile Map<String, List<PartitionInfo>> topicPartitions;

    private KafkaUtil() {
        throw new JobRuntimeException("This utility class should never be instantiated");
    }

    @VisibleForTesting
    public static int getFetchOffsetTimeoutSec() {
        return FETCH_OFFSET_TIMEOUT_SEC;
    }

    @VisibleForTesting
    public static int getFetchOffsetRetryCnt() {
        return FETCH_OFFSET_RETRY_CNT;
    }

    /**
     * Fetches the current consumer positions for the given topic-partitions,
     * retrying with a per-attempt time limit because Kafka fetch operations
     * can hang.
     *
     * @throws JobRuntimeException if any partition's offset cannot be fetched
     *         within the retry budget
     */
    @VisibleForTesting
    public static Map<TopicPartition, Long> getTopicPartitionOffsets(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
        final Map<TopicPartition, Long> partitionOffsets = new ConcurrentHashMap<>();
        try {
            log.info("requesting topicPartitions for {} - start", topicName);
            final AtomicInteger attemptNumber = new AtomicInteger(0);
            verifyTopicPartitions(kafkaConsumer, topicName, topicPartitions);
            final Callable<Void> fetchOffsetTask = () -> {
                log.info(
                    "requesting topicPartitions for {} - % success {}/{} - attemptNumber - {}",
                    topicName,
                    partitionOffsets.size(),
                    topicPartitions.size(),
                    attemptNumber.incrementAndGet()
                );
                topicPartitions.stream().forEach(
                    tp -> {
                        try {
                            // Skip partitions already resolved on a previous attempt.
                            if (!partitionOffsets.containsKey(tp)) {
                                partitionOffsets.put(tp, kafkaConsumer.position(tp));
                            }
                        } catch (Exception e) {
                            log.error("ERROR requesting topicPartitions for {} - % success {}/{}",
                                topicName, partitionOffsets.size(), topicPartitions.size(), e);
                            // Abort any blocking consumer operation so the retryer
                            // can take over instead of hanging.
                            kafkaConsumer.wakeup();
                            throw e;
                        }
                    }
                );
                return null;
            };
            // As the kafka fetch operations can hang we would like to add timeout with retry logic while fetching
            // offsets from broker.
            final Retryer<Void> retryer = RetryerBuilder.<Void>newBuilder()
                .withAttemptTimeLimiter(AttemptTimeLimiters.fixedTimeLimit(getFetchOffsetTimeoutSec(),
                    TimeUnit.SECONDS))
                .retryIfExceptionOfType(Exception.class)
                .withStopStrategy(StopStrategies.stopAfterAttempt(getFetchOffsetRetryCnt()))
                .build();
            retryer.call(fetchOffsetTask);
            log.info("requesting topicPartitions for {} - % success {}/{} - end", topicName,
                partitionOffsets.size(), topicPartitions.size());
            Preconditions.checkState(topicPartitions.size() == partitionOffsets.size(),
                "could not retrieve offsets for few partitions");
            return partitionOffsets;
        } catch (Exception e) {
            log.error("retrieving topic partition offsets timed out for {} - % success {}/{}", topicName,
                partitionOffsets.size(), topicPartitions.size());
            throw new JobRuntimeException("failed to fetch offsets - Timeout", e);
        }
    }

    /**
     * Helper method to verify that given kafka topic has all passed in topicPartitions.
     *
     * @throws JobRuntimeException if any requested partition does not exist on the topic
     */
    public static void verifyTopicPartitions(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
        final Set<Integer> partitions = new HashSet<>();
        topicPartitions.stream().forEach(
            tp -> {
                partitions.add(tp.partition());
            }
        );
        // Whatever remains after removing the topic's real partitions is invalid.
        getTopicPartitions(kafkaConsumer, topicName).stream().forEach(p -> partitions.remove(p.partition()));
        if (!partitions.isEmpty()) {
            throw new JobRuntimeException(String.format("invalid partitions :{} : topic : {}",
                partitions.toString(), topicName));
        }
    }

    /**
     * It fetches earliest offset ranges available for given topic-partitions.
     */
    public static Map<TopicPartition, Long> getEarliestLeaderOffsets(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
        kafkaConsumer.assign(topicPartitions);
        verifyTopicPartitions(kafkaConsumer, topicName, topicPartitions);
        final Map<TopicPartition, Long> earliestLeaderOffsets =
            kafkaConsumer.beginningOffsets(topicPartitions);
        log.info("topic-partition earliest offsets :{}", earliestLeaderOffsets);
        return earliestLeaderOffsets;
    }

    /**
     * It fetches latest offset ranges available for given topic-partitions.
     */
    public static Map<TopicPartition, Long> getLatestLeaderOffsets(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
        kafkaConsumer.assign(topicPartitions);
        verifyTopicPartitions(kafkaConsumer, topicName, topicPartitions);
        final Map<TopicPartition, Long> latestLeaderOffsets =
            kafkaConsumer.endOffsets(topicPartitions);
        log.info("topic-partition latest offsets :{}", latestLeaderOffsets);
        return latestLeaderOffsets;
    }

    @VisibleForTesting
    public static synchronized void resetTopicPartitionCache() {
        KafkaUtil.topicPartitions = null;
    }

    /**
     * It returns available {@link TopicPartition}s for given topic.
     */
    public static Set<TopicPartition> getTopicPartitions(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName) {
        try {
            Map<String, List<PartitionInfo>> topicPartitions = KafkaUtil.topicPartitions;
            if (topicPartitions == null) {
                synchronized (KafkaUtil.class) {
                    // Re-read the shared field inside the lock. The previous code
                    // re-checked the stale LOCAL variable (always null here), so
                    // every racing caller issued its own listTopics() request and
                    // overwrote the cache.
                    topicPartitions = KafkaUtil.topicPartitions;
                    if (topicPartitions == null) {
                        topicPartitions = kafkaConsumer.listTopics();
                        KafkaUtil.topicPartitions = new ConcurrentHashMap<>(topicPartitions);
                    }
                }
            }
            if (!topicPartitions.containsKey(topicName)) {
                throw new JobRuntimeException("topic is not found :" + topicName);
            }
            final List<PartitionInfo> partitions = topicPartitions.get(topicName);
            final Set<TopicPartition> topicPartitionSet = new HashSet<>();
            partitions.forEach(
                p -> {
                    topicPartitionSet.add(new TopicPartition(p.topic(), p.partition()));
                });
            log.info("topic-partitions:{}", partitions);
            return topicPartitionSet;
        } catch (KafkaException e) {
            log.error("error retrieving topic partitions:", e);
            throw new JobRuntimeException(e);
        }
    }

    /**
     * Helper method to get {@link KafkaConsumer}.
     */
    public static KafkaConsumer getKafkaConsumer(final Map<String, String> kafkaPrams) {
        return new KafkaConsumer(kafkaPrams);
    }

    /**
     * Converts string-valued Kafka config entries to ints/longs where they
     * parse as numbers (int when the value fits, long otherwise); all other
     * entries are passed through as strings.
     */
    public static Map<String, Object> getKafkaParams(@NonNull final KafkaConfiguration kafkaConf) {
        final Map<String, Object> newKafkaParams = new HashMap<>();
        kafkaConf.getKafkaParams().entrySet().stream().forEach(
            entry -> {
                final String val = entry.getValue();
                try {
                    final long longVal = Long.parseLong(val);
                    if (longVal >= Integer.MAX_VALUE || longVal <= Integer.MIN_VALUE) {
                        newKafkaParams.put(entry.getKey(), longVal);
                    } else {
                        newKafkaParams.put(entry.getKey(), (int) longVal);
                    }
                    return;
                } catch (NumberFormatException e) {
                    // ignore it.
                }
                // Add all remaining (key,value) pairs as Strings.
                newKafkaParams.put(entry.getKey(), entry.getValue());
            }
        );
        return newKafkaParams;
    }
}
| 4,493 |
377 | <filename>code/foundation/core/cvar.h
#pragma once
//------------------------------------------------------------------------------
/**
@file cvar.h
Contains API for creating, writing to, and reading from a Core::CVar
@struct Core::CVar
A console variable.
Always handle as a opaque object. Pass the CVar* to the various functions to
perform operations on the variable.
Prefix should reflect which subsystem it affects. Ex.
`r_` - Render subsystem
`ui_` - UI specific
`cl_` - General client/application
`sv_` - Server only (networking)
@copyright
(C) 2021 Individual contributors, see AUTHORS file
*/
//------------------------------------------------------------------------------
namespace Core
{
/// Forward declaration of a Core::CVar
struct CVar;
/// Denotes the type of a Core::CVar's stored value
enum CVarType
{
    CVar_Int,    ///< integer-valued variable (see CVarWriteInt/CVarReadInt)
    CVar_Float,  ///< float-valued variable (see CVarWriteFloat/CVarReadFloat)
    CVar_String  ///< string-valued variable (see CVarWriteString/CVarReadString)
};
/// Used to create a Core::CVar via CVarCreate()
struct CVarCreateInfo
{
    const char* name = nullptr;         ///< variable name, prefixed by subsystem (e.g. "r_", "cl_")
    const char* defaultValue = nullptr; ///< initial value as a string, parsed per `type` (see CVarParseWrite)
    CVarType type = CVar_Int;           ///< value type of the variable
    const char* description = nullptr;  ///< optional human-readable help text
};
/// Create or get a console variable
CVar* CVarCreate(CVarCreateInfo const&);
/// Create or get a console variable
CVar* CVarCreate(CVarType type, const char* name, const char* defaultValue, const char* description = nullptr);
/// Get a console variable
CVar* CVarGet(const char* name);
/// Parse value from c string and assign to cvar
void CVarParseWrite(CVar*, const char* value);
/// Write float value to cvar
void CVarWriteFloat(CVar*, float value);
/// Write int value to cvar
void CVarWriteInt(CVar*, int value);
/// Write string value to cvar
void CVarWriteString(CVar*, const char* value);
/// Read int value from cvar
int const CVarReadInt(CVar*);
/// Read float value from cvar
float const CVarReadFloat(CVar*);
/// Read string value from cvar
const char* CVarReadString(CVar*);
/// Check if a CVar has been modified
bool CVarModified(CVar*);
/// Set the modified status of a cvar
void CVarSetModified(CVar*, bool);
/// Get the type of a cvar
CVarType CVarGetType(CVar*);
/// Get the cvars name
const char* CVarGetName(CVar*);
/// Get the cvars description
const char* CVarGetDescription(CVar*);
/// Get the number of vars created
int CVarNum();
/// Get a pointer to the first cvar in the array
CVar* CVarsBegin();
/// Get a pointer to the address after the last valid cvar in the array
CVar* CVarsEnd();
/// increment the iterator
CVar* CVarNext(CVar*);
} // namespace Core
| 792 |
645 | // Copyright 2013 Viewfinder. All rights reserved.
// Author: <NAME>
package co.viewfinder;
import android.content.Context;
import android.graphics.Typeface;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.*;
import co.viewfinder.widgets.SpannableBuilder;
import junit.framework.Assert;
import java.util.LinkedList;
/**
* This adapter produces the views that form the individual parts of a conversation.
* This is created by ConvPageFragment and feeds the ListView contained within ConvPageFragment.
*/
public class ConvAdapter extends BaseAdapter {
private static final String TAG = "Viewfinder.ConvAdapter";
private static final int SHARED_IMAGE_SIZE = 120;
// In the future, we may consider making this dynamic to respond to different sized screens.
private static final int SHARED_IMAGES_PER_ROW = 4;
private ConvPageFragment mConvPageFragment;
private LayoutInflater mInflater;
private ViewData.ConvViewData mConvViewData = null;
private int mSharedImageDesiredSize;
private int mCommentImageDesiredSize;
  /**
   * @param convPageFragment fragment hosting the conversation ListView.
   * @param convViewData backing view data for the conversation's items.
   */
  public ConvAdapter(ConvPageFragment convPageFragment, ViewData.ConvViewData convViewData) {
    mConvPageFragment = convPageFragment;
    mConvViewData = convViewData;
    mInflater = (LayoutInflater) mConvPageFragment.getActivity().getSystemService(Context.LAYOUT_INFLATER_SERVICE);
    mCommentImageDesiredSize =
        mConvPageFragment.getResources().getDimensionPixelSize(R.dimen.convItem_commentImage);
    // For now, just assume square images:
    mSharedImageDesiredSize = Math.round(mConvPageFragment.getDisplayWidth() / (float)SHARED_IMAGES_PER_ROW);
  }
  // Straight delegation to the backing ConvViewData.
  @Override
  public int getCount() { return mConvViewData.getCount(); }
  @Override
  public Object getItem(int position) { return mConvViewData.getItem(position); }
  @Override
  public long getItemId(int position) { return mConvViewData.getItem(position).getId(); }
  // One view type per ConvItemType so ListView only recycles compatible views.
  @Override
  public int getViewTypeCount() { return ViewData.ConvViewData.ConvItemType.getCount(); }
  @Override
  public int getItemViewType(int position) { return mConvViewData.getItem(position).getItemType().ordinal(); }
  /**
   * Hook into the ListView's recycler so that we can cancel image fetches that obviously no longer matter.
   *
   * @param listView the conversation ListView whose recycled views should drop
   *        their pending image fetches.
   */
  public void setListView(ListView listView) {
    listView.setRecyclerListener(new AbsListView.RecyclerListener() {
      @Override
      public void onMovedToScrapHeap(View view) {
        // The view holder is stashed in the view's tag by ConvViewHolder's constructor.
        ConvViewHolder convViewHolder = (ConvViewHolder)view.getTag();
        convViewHolder.cancelImageViewFetches();
      }
    });
  }
  /**
   * Base ViewHolder that conversation item views can add their ImageView's to so that if they're recycled,
   * any outstanding image fetches can be canceled.
   */
  class ConvViewHolder {
    public View mViewForBackground;
    // Lazily created list of images whose fetches may need cancelling on recycle.
    public LinkedList<PhotoImageView> mImageViews;
    public ConvViewHolder(View view) {
      mViewForBackground = view.findViewById(R.id.convItem_container);
      // Stash the holder on the view so the recycler listener can find it.
      view.setTag(this);
    }
    public void setContainerBackground(int resource) {
      mViewForBackground.setBackgroundResource(resource);
    }
    public void addImageView(PhotoImageView photoImageView) {
      if (null == mImageViews) {
        mImageViews = new LinkedList<PhotoImageView>();
      }
      mImageViews.add(photoImageView);
    }
    // Cancels any in-flight fetch on every registered image view.
    public void cancelImageViewFetches() {
      if (null != mImageViews) {
        for (PhotoImageView photoImageView : mImageViews) {
          photoImageView.cancelFetchRequest();
        }
      }
    }
  }
/**
 * Dispatches to the per-item-type view builder and applies the position
 * dependent background afterwards.
 */
@Override
public View getView(int position, View convertView, ViewGroup parent) {
    ViewData.ConvViewData.ConvItemViewData itemData = mConvViewData.getItem(position);
    View itemView = null;
    switch (itemData.getItemType()) {
        case HEADER:
            itemView = getHeaderView((ViewData.ConvViewData.ConvHeaderViewData) itemData,
                                     convertView, parent);
            break;
        case STARTED:
        case ADD_FOLLOWERS:
            // Both of these render as a single line of formatted text.
            itemView = getFormattedTextView(itemData, convertView, parent);
            break;
        case COMMENT:
            itemView = getCommentView((ViewData.ConvViewData.ConvCommentViewData) itemData,
                                      convertView, parent);
            break;
        case SHARE_PHOTOS:
            itemView = getSharePhotosView((ViewData.ConvViewData.ConvSharePhotosViewData) itemData,
                                          convertView, parent);
            break;
        default:
            Assert.fail();
            break;
    }
    Assert.assertNotNull(itemView);
    setItemBackground(position, itemView, itemData);
    return itemView;
}
/**
 * Builds or recycles the conversation header row: title plus the
 * bolded, enumerated follower list.
 */
public View getHeaderView(ViewData.ConvViewData.ConvHeaderViewData viewData,
                          View convertView,
                          ViewGroup parent) {
    class ViewHolder extends ConvViewHolder {
        public ViewHolder(View view) { super(view); }
        public TextView title;
        public TextView followers;
    }
    final View view;
    final ViewHolder holder;
    if (convertView != null) {
        view = convertView;
        holder = (ViewHolder) view.getTag();
    } else {
        view = mInflater.inflate(R.layout.conv_item_header, parent, false);
        holder = new ViewHolder(view);
        holder.title = (TextView) view.findViewById(R.id.convItem_headerTitle);
        holder.followers = (TextView) view.findViewById(R.id.convItem_headerFollowers);
        holder.followers.setTypeface(null, Typeface.BOLD);
        setContainerMargins(view);
    }
    holder.title.setText(viewData.getTitle());
    holder.followers.setText(Utils.enumeratedStringFromStrings(viewData.getFollowers(),
                                                               false /* skipLast */));
    return view;
}
/**
 * Builds or recycles a comment row: commenter name, timestamp, comment text,
 * an optional commented-photo thumbnail, and the threading indicator.
 */
public View getCommentView(ViewData.ConvViewData.ConvCommentViewData viewData,
                           View convertView,
                           ViewGroup parent) {
    final View view;
    // Holder local to this view type; stored in the view's tag by the superclass.
    class ViewHolder extends ConvViewHolder {
        public ViewHolder(View view) { super(view); }
        public TextView commenter;
        public TextView commentTimestamp;
        public TextView comment;
        public FrameLayout threading;
        public PhotoImageView commentedImage;
    }
    final ViewHolder viewHolder;
    if (null == convertView) {
        view = mInflater.inflate(R.layout.conv_item_comment, parent, false);
        viewHolder = new ViewHolder(view);
        viewHolder.commenter = (TextView)view.findViewById(R.id.convItem_commenter);
        viewHolder.commentTimestamp = (TextView)view.findViewById(R.id.convItem_commentTimestamp);
        viewHolder.comment = (TextView)view.findViewById(R.id.convItem_comment);
        viewHolder.commentedImage = (PhotoImageView)view.findViewById(R.id.convItem_commentImage);
        viewHolder.threading = (FrameLayout)view.findViewById(R.id.convItem_threading);
        // Register the thumbnail so in-flight fetches are canceled on recycle.
        viewHolder.addImageView(viewHolder.commentedImage);
        viewHolder.commenter.setTextColor(mConvPageFragment.getResources().getColor(R.color.conv_text));
        viewHolder.commentTimestamp.setTextColor(mConvPageFragment.getResources().getColor(R.color.conv_textLight));
        viewHolder.comment.setTextColor(mConvPageFragment.getResources().getColor(R.color.conv_text));
        setContainerMargins(view);
    } else {
        view = convertView;
        viewHolder = (ViewHolder)view.getTag();
    }
    viewHolder.commenter.setText(viewData.getCommenter());
    String comment = viewData.getComment();
    // Format the timestamp once; it is either appended to the comment text
    // (combined rows) or shown in the dedicated timestamp view.
    // FIX: a redundant unconditional commentTimestamp.setText(...) was removed
    // here -- the view is either hidden (combined) or set below with the same
    // formatted value (non-combined).
    String formattedTime = Time.formatTime(viewData.getTimestamp());
    if (viewData.isCombined()) {
        // Combined rows hide the name/timestamp header entirely.
        viewHolder.commenter.setVisibility(View.GONE);
        viewHolder.commentTimestamp.setVisibility(View.GONE);
        if (viewData.isTimestampAppended()) {
            viewHolder.comment.setText((new SpannableBuilder(mConvPageFragment.getActivity()))
                .append(comment)
                .append(" ")
                .turnItalicOn()
                .setTextColor(R.color.conv_textLight)
                .append(formattedTime)
                .getSpannable());
            comment = null;
        }
    } else {
        viewHolder.commenter.setVisibility(View.VISIBLE);
        viewHolder.commentTimestamp.setVisibility(View.VISIBLE);
        viewHolder.commentTimestamp.setText(formattedTime);
    }
    if (null != comment) {
        viewHolder.comment.setText(comment);
    }
    final ViewData.PhotoViewData photoViewData = viewData.getCommentedPhoto();
    if (0 == photoViewData.getCount()) {
        viewHolder.commentedImage.setImageBitmap(null);
        viewHolder.commentedImage.setVisibility(View.GONE);
    } else {
        Assert.assertTrue("Comments with photo cannot be combined!", !viewData.isCombined());
        viewHolder.commentedImage.fetchBitmap(mCommentImageDesiredSize,
                                              mCommentImageDesiredSize,
                                              BitmapFetcher.DIMENSIONS_AT_LEAST,
                                              photoViewData.getItem(0),
                                              mConvPageFragment.getAppState());
        viewHolder.commentedImage.setVisibility(View.VISIBLE);
        viewHolder.commentedImage.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                mConvPageFragment.onClickPhoto(mConvViewData.getId(), photoViewData.getItem(0).getId());
            }
        });
    }
    // The threading drawable reflects the row's position within a comment group.
    if (viewData.isGroupStart()) {
        viewHolder.threading.setBackgroundResource(R.drawable.convo_thread_start);
    } else if (viewData.isGroupEnd()) {
        viewHolder.threading.setBackgroundResource(R.drawable.convo_thread_end);
    } else if (viewData.isGroupContinuation()) {
        viewHolder.threading.setBackgroundResource(R.drawable.convo_thread_point);
    } else if (viewData.isCombined()) {
        viewHolder.threading.setBackgroundResource(R.drawable.convo_thread_stroke);
    } else {
        viewHolder.threading.setBackground(null);
    }
    return view;
}
/**
 * Builds or recycles a "shared photos" row: sharer, timestamp, location, and a
 * grid of photo thumbnails backed by a nested BaseAdapter.
 */
public View getSharePhotosView(ViewData.ConvViewData.ConvSharePhotosViewData viewData,
                               View convertView,
                               ViewGroup parent) {
    View view;
    // Holder local to this view type; stored in the view's tag by the superclass.
    class ViewHolder extends ConvViewHolder {
        public ViewHolder(View view) { super(view); }
        public TextView photosSharerText;
        public TextView photosTimestampText;
        public TextView photosLocationText;
        public FullGridView photosView;
    }
    ViewHolder viewHolder;
    final ViewData.PhotoViewData photos = viewData.getPhotos();
    if (null == convertView) {
        view = mInflater.inflate(R.layout.conv_item_share_photos, parent, false);
        viewHolder = new ViewHolder(view);
        viewHolder.photosSharerText = (TextView)view.findViewById(R.id.convItem_sharePhotosSharer);
        viewHolder.photosTimestampText = (TextView)view.findViewById(R.id.convItem_sharePhotosTimestamp);
        viewHolder.photosLocationText = (TextView)view.findViewById(R.id.convItem_sharePhotosLocation);
        viewHolder.photosView = (FullGridView)view.findViewById(R.id.convItem_sharePhotosGrid);
        viewHolder.photosView.setColumnWidth(mSharedImageDesiredSize);
        // Hook into GridView in order to cancel fetches on recycled ImageView's.
        viewHolder.photosView.setRecyclerListener(new AbsListView.RecyclerListener() {
            @Override
            public void onMovedToScrapHeap(View view) {
                ((PhotoImageView) view).cancelFetchRequest();
            }
        });
        setContainerMargins(view);
    } else {
        view = convertView;
        viewHolder = (ViewHolder)view.getTag();
    }
    viewHolder.photosLocationText.setText(viewData.getLocation());
    viewHolder.photosSharerText.setText(viewData.getSharer());
    viewHolder.photosTimestampText.setText(Time.formatTime(viewData.getTimestamp()));
    // TODO(mike): Just a placeholder until a more sophisticated photo layout is implemented.
    viewHolder.photosView.setAdapter(new BaseAdapter() {
        @Override
        public int getCount() {
            return photos.getCount();
        }
        @Override
        public Object getItem(int position) {
            return photos.getItem(position);
        }
        @Override
        public long getItemId(int position) {
            return photos.getItem(position).getId();
        }
        @Override
        public View getView(final int position, View convertView, ViewGroup parent) {
            PhotoImageView photoImageView;
            if (null == convertView) {
                photoImageView = new PhotoImageView(mConvPageFragment.getActivity());
                photoImageView.setScaleType(PhotoImageView.ScaleType.CENTER_CROP);
                photoImageView.setBackgroundColor(mConvPageFragment.getResources().getColor(android.R.color.darker_gray));
                AbsListView.LayoutParams lp = new AbsListView.LayoutParams(mSharedImageDesiredSize, mSharedImageDesiredSize);
                photoImageView.setLayoutParams(lp);
            } else {
                photoImageView = (PhotoImageView) convertView;
                // This has been recycled, check that there are no pending fetches for it.
                photoImageView.assertNoPendingFetch();
            }
            final ViewData.PhotoViewData.PhotoItemViewData photoItem = photos.getItem(position);
            photoImageView.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    mConvPageFragment.onClickPhoto(mConvViewData.getId(), photoItem.getId());
                }
            });
            // NOTE(review): the fetch uses SHARED_IMAGE_SIZE (declared outside this
            // chunk) while the cell layout above uses mSharedImageDesiredSize --
            // confirm the mismatch between fetch size and cell size is intentional.
            photoImageView.fetchBitmap(SHARED_IMAGE_SIZE,
                                       SHARED_IMAGE_SIZE,
                                       BitmapFetcher.DIMENSIONS_AT_LEAST,
                                       photoItem,
                                       mConvPageFragment.getAppState());
            return photoImageView;
        }
    });
    return view;
}
// Handles any ConvItemViewData whose entire rendering is one formatted text line.
// TODO(Mike): properly format these with different color text using TextView.BufferType.SPANNABLE.
public View getFormattedTextView(ViewData.ConvViewData.ConvItemViewData viewData,
                                 View convertView,
                                 ViewGroup parent) {
    class ViewHolder extends ConvViewHolder {
        public ViewHolder(View view) { super(view); }
        public TextView formattedText;
    }
    final View view;
    final ViewHolder holder;
    if (convertView != null) {
        view = convertView;
        holder = (ViewHolder) view.getTag();
    } else {
        view = mInflater.inflate(R.layout.conv_item_formatted_text, parent, false);
        holder = new ViewHolder(view);
        holder.formattedText = (TextView) view.findViewById(R.id.convItem_formattedText);
        setContainerMargins(view);
    }
    // Dispatch on the concrete view-data subtype.
    if (viewData instanceof ViewData.ConvViewData.ConvStartedViewData) {
        formatStartedTextView((ViewData.ConvViewData.ConvStartedViewData) viewData,
                              holder.formattedText);
    } else if (viewData instanceof ViewData.ConvViewData.ConvAddFollowersViewData) {
        formatAddFollowersTextView((ViewData.ConvViewData.ConvAddFollowersViewData) viewData,
                                   holder.formattedText);
    } else {
        Assert.fail();
    }
    return view;
}
/**
 * Renders "<starter> started the conversation <relative time>" with a bold
 * name and lighter, italicized time text.
 */
private void formatStartedTextView(ViewData.ConvViewData.ConvStartedViewData viewData,
                                   TextView textView) {
    SpannableBuilder builder = new SpannableBuilder(mConvPageFragment.getActivity());
    builder.turnBoldOn()
           .append(viewData.getStartingFollower())
           .turnBoldOff()
           .setTextColor(R.color.conv_textLight)
           .append(" started the conversation ")
           .turnItalicOn()
           .append(Time.formatRelativeTime(viewData.getTimestamp(), Time.TimeFormat.TIME_FORMAT_MEDIUM));
    textView.setText(builder.getSpannable());
}
/**
 * Renders "<adder> added <followers...> and <last follower> <relative time>"
 * with bold names and lighter connective text.
 * NOTE(review): assumes viewData.getAddedFollowers() is non-empty; an empty
 * array would throw ArrayIndexOutOfBoundsException below -- confirm callers.
 */
private void formatAddFollowersTextView(ViewData.ConvViewData.ConvAddFollowersViewData viewData,
                                        TextView textView) {
    String[] followers = viewData.getAddedFollowers();
    SpannableBuilder sb = new SpannableBuilder(mConvPageFragment.getActivity());
    sb.turnBoldOn()
      .append(viewData.getAddingFollower())
      .turnBoldOff()
      .setTextColor(R.color.conv_textLight)
      .append(" added ")
      .setDefaultTextColor()
      .turnBoldOn();
    if (followers.length > 1) {
        // All but the last follower, enumerated, then " and " before the last one.
        sb.append(Utils.enumeratedStringFromStrings(followers, true /* skipLast */))
          .setTextColor(R.color.conv_textLight)
          .turnBoldOff()
          .append(" and ")
          .turnBoldOn()
          .setDefaultTextColor();
    }
    // FIX: removed a trailing getSpannable() call at the end of this chain whose
    // result was discarded; the real conversion happens in setText() below.
    sb.append(followers[followers.length - 1])
      .turnBoldOff()
      .append(" ")
      .turnItalicOn()
      .setTextColor(R.color.conv_textLight)
      .append(Time.formatRelativeTime(viewData.getTimestamp(), Time.TimeFormat.TIME_FORMAT_MEDIUM));
    textView.setText(sb.getSpannable());
}
/**
 * Applies the standard left/right conversation-item margins to the item's
 * container view.
 */
private void setContainerMargins(View view) {
    View itemContainer = view.findViewById(R.id.convItem_container);
    FrameLayout.LayoutParams lp = (FrameLayout.LayoutParams) itemContainer.getLayoutParams();
    // Look the dimension up once instead of twice; left and right are identical.
    int sideMargin = Math.round(mConvPageFragment.getResources().getDimension(R.dimen.convItem_leftRightMargin));
    lp.setMargins(sideMargin, 0, sideMargin, 0);
    itemContainer.setLayoutParams(lp);
}
/**
 * Chooses the background drawable for an item from its list position (first,
 * last, or middle) and the view data's alternating-background flag.
 */
private void setItemBackground(int position, View view, ViewData.ConvViewData.ConvItemViewData viewData) {
    // Selectors with custom states in drawable XML could express this too, but
    // the explicit code below is more straightforward.
    final int backgroundResource;
    if (position == 0) {
        backgroundResource = R.drawable.conv_item_first;
    } else if (position == mConvViewData.getCount() - 1) {
        // The view data picks one of two alternating backgrounds for this item.
        backgroundResource = viewData.useAlternateBackground()
                ? R.drawable.conv_item_last_alternate
                : R.drawable.conv_item_last;
    } else {
        // The view data picks one of two alternating backgrounds for this item.
        backgroundResource = viewData.useAlternateBackground()
                ? R.drawable.conv_item_middle_alternate
                : R.drawable.conv_item_middle;
    }
    ((ConvViewHolder) view.getTag()).setContainerBackground(backgroundResource);
}
}
| 7,893 |
839 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package cxf.client;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.ws.wsaddressing.W3CEndpointReference;
import cxf.common.Account;
import cxf.common.AccountCORBAService;
import cxf.common.Bank;
import cxf.common.BankCORBAService;
public final class Client {
    private static final Logger LOG =
        Logger.getLogger(Client.class.getPackage().getName());

    /** Utility class; not instantiable. */
    private Client() {
    }

    /**
     * Demonstrates the CORBA-backed bank service: creates accounts, resolves
     * Account proxies from their endpoint references, deposits and reads
     * balances, then removes the accounts it created.
     */
    public static void main(String[] args) throws Exception {
        LOG.log(Level.INFO, "Resolving the bank object");
        BankCORBAService service = new BankCORBAService();
        Bank port = service.getBankCORBAPort();

        // Test the method Bank.createAccount()
        System.out.println("Creating account called \"Account1\"");
        W3CEndpointReference epr1 = port.createAccount("Account1");
        Account account1 = getAccountFromEPR(epr1);
        // FIX: message previously mixed quote characters ('Account1").
        System.out.println("Depositing 100.00 into account \"Account1\"");
        account1.deposit(100.00f);
        System.out.println("Current balance of account \"Account1\" is " + account1.getBalance());
        System.out.println();

        /* Re-enable when we have a utility to manipulate the meta data stored
           within the EPR.
        // Test the method Bank.createEprAccount()
        System.out.println("Creating account called \"Account2\"");
        W3CEndpointReference epr2 = port.createEprAccount("Account2");
        Account account2 = getAccountFromEPR(epr2);
        System.out.println("Depositing 5.00 into account \"Account2\"");
        account2.deposit(5.00f);
        System.out.println("Current balance of account \"Account2\" is " + account2.getBalance());
        System.out.println();
        */

        // create two more accounts to use with the getAccount calls
        Account acc3 = getAccountFromEPR(port.createAccount("Account3"));
        acc3.deposit(200.00f);
        Account acc4 = getAccountFromEPR(port.createAccount("Account4"));
        acc4.deposit(400.00f);

        // Test the method Bank.getAccount()
        System.out.println("Retrieving account called \"Account3\"");
        W3CEndpointReference epr3 = port.getAccount("Account3");
        Account account3 = getAccountFromEPR(epr3);
        System.out.println("Current balance for \"Account3\" is " + account3.getBalance());
        System.out.println("Depositing 10.00 into account \"Account3\"");
        account3.deposit(10.00f);
        System.out.println("New balance for account \"Account3\" is " + account3.getBalance());
        System.out.println();

        /* Re-enable when we have a utility to manipulate the meta data stored
           within the EPR.
        // Test the method Bank.getEprAccount()
        System.out.println("Retrieving account called \"Account4\"");
        EndpointReferenceType epr4 = port.getEprAccount("Account4");
        Account account4 = getAccountFromEPR(epr4);
        System.out.println("Current balance for account \"Account4\" is " + account4.getBalance());
        System.out.println("Withdrawing 150.00 into account \"Account4\"");
        account4.deposit(-150.00f);
        System.out.println("New balance for account \"Account4\" is " + account4.getBalance());
        System.out.println();
        */

        port.removeAccount("Account1");
        port.removeAccount("Account3");
        port.removeAccount("Account4");
        // Exit explicitly; presumably the transport leaves non-daemon threads
        // running that would otherwise keep the JVM alive -- confirm.
        System.exit(0);
    }

    /** Resolves an Account proxy from its W3C endpoint reference. */
    private static Account getAccountFromEPR(W3CEndpointReference epr) {
        AccountCORBAService service = new AccountCORBAService();
        return service.getPort(epr, Account.class);
    }
}
| 1,562 |
595 | <gh_stars>100-1000
/*
* Copyright (C) <NAME>
* Copyright (C) NGINX, Inc.
*/
#ifndef _NJS_LVLHSH_H_INCLUDED_
#define _NJS_LVLHSH_H_INCLUDED_
typedef struct njs_lvlhsh_query_s njs_lvlhsh_query_t;
/* Key test callback: decides whether stored data matches lhq's key. */
typedef njs_int_t (*njs_lvlhsh_test_t)(njs_lvlhsh_query_t *lhq, void *data);
/* Allocator pair used for bucket storage; ctx is the caller's pool. */
typedef void *(*njs_lvlhsh_alloc_t)(void *ctx, size_t size);
typedef void (*njs_lvlhsh_free_t)(void *ctx, void *p, size_t size);
/* Bucket geometry is derived from the pointer width. */
#if (NJS_64BIT)
#define NJS_LVLHSH_DEFAULT_BUCKET_SIZE 128
#define NJS_LVLHSH_ENTRY_SIZE 3
/* 3 is shift of 64-bit pointer. */
#define NJS_LVLHSH_MEMALIGN_SHIFT (NJS_MAX_MEMALIGN_SHIFT - 3)
#else
#define NJS_LVLHSH_DEFAULT_BUCKET_SIZE 64
#define NJS_LVLHSH_ENTRY_SIZE 2
/* 2 is shift of 32-bit pointer. */
#define NJS_LVLHSH_MEMALIGN_SHIFT (NJS_MAX_MEMALIGN_SHIFT - 2)
#endif
/* Cap the alignment shift used when sizing the largest buckets. */
#if (NJS_LVLHSH_MEMALIGN_SHIFT < 10)
#define NJS_LVLHSH_MAX_MEMALIGN_SHIFT NJS_LVLHSH_MEMALIGN_SHIFT
#else
#define NJS_LVLHSH_MAX_MEMALIGN_SHIFT 10
#endif
/* Number of entry cells in a bucket, after reserving the trailing pointer. */
#define NJS_LVLHSH_BUCKET_END(bucket_size) \
    (((bucket_size) - sizeof(void *)) \
        / (NJS_LVLHSH_ENTRY_SIZE * sizeof(uint32_t)) \
        * NJS_LVLHSH_ENTRY_SIZE)
/* Expands to the three bucket-geometry initializers: end, size, mask. */
#define NJS_LVLHSH_BUCKET_SIZE(bucket_size) \
    NJS_LVLHSH_BUCKET_END(bucket_size), bucket_size, (bucket_size - 1)
/* Preset proto initializers: bucket geometry plus a per-level shift table. */
#define NJS_LVLHSH_DEFAULT \
    NJS_LVLHSH_BUCKET_SIZE(NJS_LVLHSH_DEFAULT_BUCKET_SIZE), \
    { 4, 4, 4, 4, 4, 4, 4, 0 }
#define NJS_LVLHSH_LARGE_SLAB \
    NJS_LVLHSH_BUCKET_SIZE(NJS_LVLHSH_DEFAULT_BUCKET_SIZE), \
    { 10, 4, 4, 4, 4, 4, 4, 0 }
#define NJS_LVLHSH_LARGE_MEMALIGN \
    NJS_LVLHSH_BUCKET_SIZE(NJS_LVLHSH_DEFAULT_BUCKET_SIZE), \
    { NJS_LVLHSH_MAX_MEMALIGN_SHIFT, 4, 4, 4, 4, 0, 0, 0 }
/* Hash "prototype": immutable bucket layout, shift table and callbacks. */
typedef struct {
    uint32_t bucket_end;
    uint32_t bucket_size;
    uint32_t bucket_mask;
    uint8_t shift[8];
    njs_lvlhsh_test_t test;
    njs_lvlhsh_alloc_t alloc;
    njs_lvlhsh_free_t free;
} njs_lvlhsh_proto_t;
/* The hash itself: a single root slot pointer. */
typedef struct {
    void *slot;
} njs_lvlhsh_t;
/* A find/insert/delete request; see the per-function comments below for
 * which fields each operation requires. */
struct njs_lvlhsh_query_s {
    uint32_t key_hash;
    njs_str_t key;
    uint8_t replace; /* 1 bit */
    void *value;
    const njs_lvlhsh_proto_t *proto;
    void *pool;
    /* Opaque data passed for the test function. */
    void *data;
};
#define njs_lvlhsh_is_empty(lh) \
    ((lh)->slot == NULL)
#define njs_lvlhsh_init(lh) \
    (lh)->slot = NULL
#define njs_lvlhsh_eq(lhl, lhr) \
    ((lhl)->slot == (lhr)->slot)
/*
 * njs_lvlhsh_find() finds a hash element. If the element has been
 * found then it is stored in the lhq->value and njs_lvlhsh_find()
 * returns NJS_OK. Otherwise NJS_DECLINED is returned.
 *
 * The required njs_lvlhsh_query_t fields: key_hash, key, proto.
 */
NJS_EXPORT njs_int_t njs_lvlhsh_find(const njs_lvlhsh_t *lh,
    njs_lvlhsh_query_t *lhq);
/*
 * njs_lvlhsh_insert() adds a hash element. If the element already
 * presents in lvlhsh and the lhq->replace flag is zero, then lhq->value
 * is updated with the old element and NJS_DECLINED is returned.
 * If the element already presents in lvlhsh and the lhq->replace flag
 * is non-zero, then the old element is replaced with the new element.
 * lhq->value is updated with the old element, and NJS_OK is returned.
 * If the element is not present in lvlhsh, then it is inserted and
 * NJS_OK is returned. The lhq->value is not changed.
 * On memory allocation failure NJS_ERROR is returned.
 *
 * The required njs_lvlhsh_query_t fields: key_hash, key, proto, replace, value.
 * The optional njs_lvlhsh_query_t fields: pool.
 */
NJS_EXPORT njs_int_t njs_lvlhsh_insert(njs_lvlhsh_t *lh,
    njs_lvlhsh_query_t *lhq);
/*
 * njs_lvlhsh_delete() deletes a hash element. If the element has been
 * found then it is removed from lvlhsh and is stored in the lhq->value,
 * and NJS_OK is returned. Otherwise NJS_DECLINED is returned.
 *
 * The required njs_lvlhsh_query_t fields: key_hash, key, proto.
 * The optional njs_lvlhsh_query_t fields: pool.
 */
NJS_EXPORT njs_int_t njs_lvlhsh_delete(njs_lvlhsh_t *lh,
    njs_lvlhsh_query_t *lhq);
/* Iteration state for njs_lvlhsh_each(); zero it (or use the init macro)
 * before the first call. */
typedef struct {
    const njs_lvlhsh_proto_t *proto;
    /*
     * Fields to store current bucket entry position. They cannot be
     * combined in a single bucket pointer with number of entries in low
     * bits, because entry positions are not aligned. A current level is
     * stored as key bit path from the root.
     */
    uint32_t *bucket;
    uint32_t current;
    uint32_t entry;
    uint32_t entries;
    uint32_t key_hash;
} njs_lvlhsh_each_t;
#define njs_lvlhsh_each_init(lhe, _proto) \
    do { \
        njs_memzero(lhe, sizeof(njs_lvlhsh_each_t)); \
        (lhe)->proto = _proto; \
    } while (0)
/* Returns the next element, or NULL when iteration is exhausted. */
NJS_EXPORT void *njs_lvlhsh_each(const njs_lvlhsh_t *lh,
    njs_lvlhsh_each_t *lhe);
#endif /* _NJS_LVLHSH_H_INCLUDED_ */
| 3,158 |
3,489 | <filename>src/crf_trainer_averaged_perceptron.c
#include "crf_trainer_averaged_perceptron.h"
/*
 * Frees the trainer and everything it owns: the nested per-feature weight
 * hashes, the transition weight hash, update counters, the cached
 * per-sequence feature arrays, and the base trainer. Tolerates NULL and
 * partially-constructed trainers (every member is checked before freeing).
 */
void crf_averaged_perceptron_trainer_destroy(crf_averaged_perceptron_trainer_t *self) {
    if (self == NULL) return;
    uint32_t feature_id;
    khash_t(class_weights) *weights;
    if (self->weights != NULL) {
        /* Values of the outer hash are hashes themselves; free the inner ones first. */
        kh_foreach(self->weights, feature_id, weights, {
            if (weights != NULL) {
                kh_destroy(class_weights, weights);
            }
        })
        kh_destroy(feature_class_weights, self->weights);
    }
    khash_t(prev_tag_class_weights) *prev_tag_weights;
    if (self->prev_tag_weights != NULL) {
        kh_foreach(self->prev_tag_weights, feature_id, prev_tag_weights, {
            if (prev_tag_weights != NULL) {
                kh_destroy(prev_tag_class_weights, prev_tag_weights);
            }
        })
        kh_destroy(feature_prev_tag_class_weights, self->prev_tag_weights);
    }
    if (self->trans_weights != NULL) {
        kh_destroy(prev_tag_class_weights, self->trans_weights);
    }
    if (self->update_counts != NULL) {
        uint64_array_destroy(self->update_counts);
    }
    if (self->prev_tag_update_counts != NULL) {
        uint64_array_destroy(self->prev_tag_update_counts);
    }
    if (self->sequence_features != NULL) {
        cstring_array_destroy(self->sequence_features);
    }
    if (self->sequence_features_indptr != NULL) {
        uint32_array_destroy(self->sequence_features_indptr);
    }
    if (self->sequence_prev_tag_features != NULL) {
        cstring_array_destroy(self->sequence_prev_tag_features);
    }
    if (self->sequence_prev_tag_features_indptr != NULL) {
        uint32_array_destroy(self->sequence_prev_tag_features_indptr);
    }
    if (self->label_ids != NULL) {
        uint32_array_destroy(self->label_ids);
    }
    if (self->viterbi != NULL) {
        uint32_array_destroy(self->viterbi);
    }
    if (self->base_trainer != NULL) {
        crf_trainer_destroy(self->base_trainer);
    }
    free(self);
}
/*
 * Allocates a trainer for `num_classes` output labels. `min_updates` is the
 * threshold below which a feature is skipped at scoring time. On any
 * allocation failure the partially built trainer is torn down via
 * crf_averaged_perceptron_trainer_destroy() and NULL is returned.
 */
crf_averaged_perceptron_trainer_t *crf_averaged_perceptron_trainer_new(size_t num_classes, size_t min_updates) {
    /* calloc zeroes all members, so destroy() is safe at any failure point. */
    crf_averaged_perceptron_trainer_t *self = calloc(1, sizeof(crf_averaged_perceptron_trainer_t));
    if (self == NULL) return NULL;
    log_info("num_classes %zu\n", num_classes);
    self->num_updates = 0;
    self->num_errors = 0;
    self->iterations = 0;
    self->min_updates = min_updates;
    self->base_trainer = crf_trainer_new(num_classes);
    if (self->base_trainer == NULL) {
        goto exit_trainer_created;
    }
    self->weights = kh_init(feature_class_weights);
    if (self->weights == NULL) {
        goto exit_trainer_created;
    }
    self->prev_tag_weights = kh_init(feature_prev_tag_class_weights);
    if (self->prev_tag_weights == NULL) {
        goto exit_trainer_created;
    }
    self->trans_weights = kh_init(prev_tag_class_weights);
    if (self->trans_weights == NULL) {
        goto exit_trainer_created;
    }
    self->update_counts = uint64_array_new();
    if (self->update_counts == NULL) {
        goto exit_trainer_created;
    }
    self->prev_tag_update_counts = uint64_array_new();
    if (self->prev_tag_update_counts == NULL) {
        goto exit_trainer_created;
    }
    self->sequence_features = cstring_array_new();
    if (self->sequence_features == NULL) {
        goto exit_trainer_created;
    }
    self->sequence_features_indptr = uint32_array_new();
    if (self->sequence_features_indptr == NULL) {
        goto exit_trainer_created;
    }
    self->sequence_prev_tag_features = cstring_array_new();
    if (self->sequence_prev_tag_features == NULL) {
        goto exit_trainer_created;
    }
    self->sequence_prev_tag_features_indptr = uint32_array_new();
    if (self->sequence_prev_tag_features_indptr == NULL) {
        goto exit_trainer_created;
    }
    self->label_ids = uint32_array_new();
    if (self->label_ids == NULL) {
        goto exit_trainer_created;
    }
    self->viterbi = uint32_array_new();
    if (self->viterbi == NULL) {
        goto exit_trainer_created;
    }
    return self;
exit_trainer_created:
    /* Single cleanup path; destroy() handles whatever was allocated so far. */
    crf_averaged_perceptron_trainer_destroy(self);
    return NULL;
}
/*
 * Flattens a (previous tag, tag) bigram into a single index in
 * [0, num_classes^2), row-major by the previous tag.
 */
static inline uint32_t tag_bigram_class_id(crf_averaged_perceptron_trainer_t *self, tag_bigram_t tag_bigram) {
    size_t num_classes = self->base_trainer->num_classes;
    return tag_bigram.prev_class_id * num_classes + tag_bigram.class_id;
}
/*
 * Returns the per-class weight hash for feature_id, creating an empty one
 * when the feature is unseen and add_if_missing is set. Returns NULL when
 * the feature is absent (and creation is not requested) or insertion fails.
 */
khash_t(class_weights) *crf_averaged_perceptron_trainer_get_class_weights(crf_averaged_perceptron_trainer_t *self, uint32_t feature_id, bool add_if_missing) {
    khiter_t iter = kh_get(feature_class_weights, self->weights, feature_id);
    if (iter != kh_end(self->weights)) {
        return kh_value(self->weights, iter);
    }
    if (!add_if_missing) {
        return NULL;
    }
    khash_t(class_weights) *new_weights = kh_init(class_weights);
    int status;
    iter = kh_put(feature_class_weights, self->weights, feature_id, &status);
    if (status < 0) {
        /* Insertion failed; don't leak the freshly created inner hash. */
        kh_destroy(class_weights, new_weights);
        return NULL;
    }
    kh_value(self->weights, iter) = new_weights;
    return new_weights;
}
/*
 * Returns the (prev tag, tag) weight hash for feature_id, creating an empty
 * one when absent and add_if_missing is set. Returns NULL when absent (and
 * creation is not requested) or when insertion fails.
 */
khash_t(prev_tag_class_weights) *crf_averaged_perceptron_trainer_get_prev_tag_class_weights(crf_averaged_perceptron_trainer_t *self, uint32_t feature_id, bool add_if_missing) {
    khiter_t k;
    k = kh_get(feature_prev_tag_class_weights, self->prev_tag_weights, feature_id);
    if (k != kh_end(self->prev_tag_weights)) {
        return kh_value(self->prev_tag_weights, k);
    } else if (add_if_missing) {
        khash_t(prev_tag_class_weights) *weights = kh_init(prev_tag_class_weights);
        int ret;
        k = kh_put(feature_prev_tag_class_weights, self->prev_tag_weights, feature_id, &ret);
        if (ret < 0) {
            /* Insertion failed; don't leak the freshly created inner hash. */
            kh_destroy(prev_tag_class_weights, weights);
            return NULL;
        }
        kh_value(self->prev_tag_weights, k) = weights;
        return weights;
    }
    return NULL;
}
/*
 * Applies a perceptron update of `value` to class_id at update number `iter`
 * using the lazy-averaging trick: before changing the weight, the running
 * total is credited with the unchanged value for every update step since the
 * weight was last touched, so the averaged weight can be recovered later
 * without touching every weight on every update.
 */
static inline bool crf_averaged_perceptron_trainer_update_weight(khash_t(class_weights) *weights, uint64_t iter, uint32_t class_id, double value) {
    class_weight_t weight = NULL_WEIGHT;
    khiter_t k;
    k = kh_get(class_weights, weights, class_id);
    if (k != kh_end(weights)) {
        weight = kh_value(weights, k);
    }
    /* Credit the old value for the elapsed updates, then apply the delta. */
    weight.total += (iter - weight.last_updated) * weight.value;
    weight.last_updated = iter;
    weight.value += value;
    int ret;
    k = kh_put(class_weights, weights, class_id, &ret);
    if (ret < 0) return false;
    kh_value(weights, k) = weight;
    return true;
}
/*
 * Same lazy-averaged update as crf_averaged_perceptron_trainer_update_weight(),
 * but keyed by a (prev_class_id, class_id) pair packed into a 64-bit key via
 * tag_bigram_t's value field.
 */
static inline bool crf_averaged_perceptron_trainer_update_prev_tag_weight(khash_t(prev_tag_class_weights) *weights, uint64_t iter, uint32_t prev_class_id, uint32_t class_id, double value) {
    class_weight_t weight = NULL_WEIGHT;
    tag_bigram_t tag_bigram;
    tag_bigram.prev_class_id = prev_class_id;
    tag_bigram.class_id = class_id;
    uint64_t key = tag_bigram.value;
    khiter_t k;
    k = kh_get(prev_tag_class_weights, weights, key);
    if (k != kh_end(weights)) {
        weight = kh_value(weights, k);
    }
    /* Credit the old value for the elapsed updates, then apply the delta. */
    weight.total += (iter - weight.last_updated) * weight.value;
    weight.last_updated = iter;
    weight.value += value;
    int ret;
    k = kh_put(prev_tag_class_weights, weights, key, &ret);
    if (ret < 0) return false;
    kh_value(weights, k) = weight;
    return true;
}
/*
 * Standard perceptron error update for one state feature: penalize the
 * guessed class by `value` and reward the true class by the same amount.
 */
static inline bool crf_averaged_perceptron_trainer_update_feature(crf_averaged_perceptron_trainer_t *self, uint32_t feature_id, uint32_t guess, uint32_t truth, double value) {
    /* Create the per-class table on first update of this feature. */
    khash_t(class_weights) *weights = crf_averaged_perceptron_trainer_get_class_weights(self, feature_id, true);
    if (weights == NULL) {
        return false;
    }
    uint64_t updates = self->num_updates;
    return crf_averaged_perceptron_trainer_update_weight(weights, updates, guess, -1.0 * value)
        && crf_averaged_perceptron_trainer_update_weight(weights, updates, truth, value);
}
/*
 * Perceptron error update for one previous-tag feature: penalize the guessed
 * (prev, current) tag pair and reward the true pair by the same amount.
 */
static inline bool crf_averaged_perceptron_trainer_update_prev_tag_feature(crf_averaged_perceptron_trainer_t *self, uint32_t feature_id, uint32_t prev_guess, uint32_t prev_truth, uint32_t guess, uint32_t truth, double value) {
    bool add_if_missing = true;
    khash_t(prev_tag_class_weights) *weights = crf_averaged_perceptron_trainer_get_prev_tag_class_weights(self, feature_id, add_if_missing);
    if (weights == NULL) {
        return false;
    }
    uint64_t updates = self->num_updates;
    if (!crf_averaged_perceptron_trainer_update_prev_tag_weight(weights, updates, prev_guess, guess, -1.0 * value) ||
        !crf_averaged_perceptron_trainer_update_prev_tag_weight(weights, updates, prev_truth, truth, value)) {
        return false;
    }
    return true;
}
/*
 * Perceptron error update for the global tag-transition weights: penalize
 * the guessed (prev, current) tag pair and reward the true pair.
 * FIX: removed an unused local (`add_if_missing`) -- the shared transition
 * table is owned by the trainer and never created on demand here.
 */
static inline bool crf_averaged_perceptron_trainer_update_trans_feature(crf_averaged_perceptron_trainer_t *self, uint32_t prev_guess, uint32_t prev_truth, uint32_t guess, uint32_t truth, double value) {
    khash_t(prev_tag_class_weights) *weights = self->trans_weights;
    if (weights == NULL) {
        return false;
    }
    uint64_t updates = self->num_updates;
    if (!crf_averaged_perceptron_trainer_update_prev_tag_weight(weights, updates, prev_guess, guess, -1.0 * value) ||
        !crf_averaged_perceptron_trainer_update_prev_tag_weight(weights, updates, prev_truth, truth, value)) {
        return false;
    }
    return true;
}
/*
 * Appends one token's state feature strings to the flat per-sequence cache
 * and records the running end offset (CSR-style index pointer) so scoring
 * can iterate features token by token.
 * FIX: removed an unused local (`feature_id`).
 */
static inline bool crf_averaged_perceptron_trainer_cache_features(crf_averaged_perceptron_trainer_t *self, cstring_array *features) {
    size_t i;
    char *feature;
    cstring_array_foreach(features, i, feature, {
        cstring_array_add_string(self->sequence_features, feature);
    })
    /* End offset for this token = total strings cached so far. */
    size_t num_strings = cstring_array_num_strings(self->sequence_features);
    uint32_array_push(self->sequence_features_indptr, num_strings);
    return true;
}
/*
 * Appends one token's previous-tag feature strings to the flat per-sequence
 * cache and records the running end offset (CSR-style index pointer).
 * FIX: removed an unused local (`feature_id`).
 */
static inline bool crf_averaged_perceptron_trainer_cache_prev_tag_features(crf_averaged_perceptron_trainer_t *self, cstring_array *features) {
    size_t i;
    char *feature;
    cstring_array_foreach(features, i, feature, {
        cstring_array_add_string(self->sequence_prev_tag_features, feature);
    })
    /* End offset for this token = total strings cached so far. */
    size_t num_strings = cstring_array_num_strings(self->sequence_prev_tag_features);
    uint32_array_push(self->sequence_prev_tag_features_indptr, num_strings);
    return true;
}
/*
 * Accumulates state (emission) scores for every token of the cached sequence
 * into the CRF context. For each cached feature string the feature id is
 * looked up; features updated fewer than min_updates times are skipped, and
 * the current (non-averaged) per-class weights are added to that token's
 * score vector.
 */
static bool crf_averaged_perceptron_trainer_state_score(crf_averaged_perceptron_trainer_t *self) {
    if (self == NULL || self->base_trainer == NULL ||
        self->sequence_features == NULL || self->sequence_features_indptr == NULL) {
        return false;
    }
    crf_context_t *context = self->base_trainer->context;
    uint32_t class_id;
    class_weight_t weight;
    cstring_array *sequence_features = self->sequence_features;
    uint64_t *update_counts = self->update_counts->a;
    /* indptr has one extra trailing entry, so tokens = entries - 1. */
    size_t num_tokens = self->sequence_features_indptr->n - 1;
    uint32_t *indptr = self->sequence_features_indptr->a;
    for (size_t t = 0; t < num_tokens; t++) {
        /* Features of token t live in the half-open range [idx, next_start). */
        uint32_t idx = indptr[t];
        uint32_t next_start = indptr[t + 1];
        double *scores = state_score(context, t);
        for (uint32_t j = idx; j < next_start; j++) {
            char *feature = cstring_array_get_string(sequence_features, j);
            uint32_t feature_id;
            if (!crf_trainer_get_feature_id(self->base_trainer, feature, &feature_id)) {
                continue;
            }
            uint64_t update_count = update_counts[feature_id];
            /* Prune rarely-updated features from scoring. */
            bool keep_feature = update_count >= self->min_updates;
            if (keep_feature) {
                bool add_if_missing = false;
                khash_t(class_weights) *weights = crf_averaged_perceptron_trainer_get_class_weights(self, feature_id, add_if_missing);
                if (weights == NULL) {
                    continue;
                }
                kh_foreach(weights, class_id, weight, {
                    scores[class_id] += weight.value;
                })
            }
        }
    }
    return true;
}
/*
Adds previous-tag-conditioned (state-transition) scores into the CRF context.
For each token t, every cached previous-tag feature in
[indptr[t], indptr[t + 1]) is looked up; features with at least
self->min_updates updates contribute each of their tag-bigram weights to
the token's transition score vector (state_trans_score_all).
Returns false only when required members are NULL.

Fixes vs. previous revision:
 - The NULL guard now checks sequence_prev_tag_features_indptr, which is
   the array actually dereferenced below (it previously checked
   sequence_features_indptr, a copy/paste slip from state_score).
 - Removed dead locals (t, idx, length, add_if_missing) that shadowed the
   loop-scoped variables of the same names.
*/
static bool crf_averaged_perceptron_trainer_state_trans_score(crf_averaged_perceptron_trainer_t *self) {
    if (self == NULL || self->base_trainer == NULL ||
        self->sequence_prev_tag_features == NULL || self->sequence_prev_tag_features_indptr == NULL) {
        return false;
    }
    crf_context_t *context = self->base_trainer->context;
    class_weight_t weight;
    cstring_array *sequence_features = self->sequence_prev_tag_features;
    uint64_t *update_counts = self->prev_tag_update_counts->a;
    // CSR layout: token t owns features [indptr[t], indptr[t + 1])
    size_t num_tokens = self->sequence_prev_tag_features_indptr->n - 1;
    uint32_t *indptr = self->sequence_prev_tag_features_indptr->a;
    for (size_t t = 0; t < num_tokens; t++) {
        uint32_t idx = indptr[t];
        uint32_t next_start = indptr[t + 1];
        double *scores = state_trans_score_all(context, t);
        for (uint32_t j = idx; j < next_start; j++) {
            char *feature = cstring_array_get_string(sequence_features, j);
            uint32_t feature_id;
            // Unknown previous-tag features are skipped
            if (!crf_trainer_get_prev_tag_feature_id(self->base_trainer, feature, &feature_id)) {
                continue;
            }
            uint64_t update_count = update_counts[feature_id];
            // Rarely-updated features are excluded from scoring
            bool keep_feature = update_count >= self->min_updates;
            if (keep_feature) {
                bool add_if_missing = false;
                khash_t(prev_tag_class_weights) *prev_tag_weights = crf_averaged_perceptron_trainer_get_prev_tag_class_weights(self, feature_id, add_if_missing);
                if (prev_tag_weights == NULL) {
                    continue;
                }
                tag_bigram_t tag_bigram;
                uint64_t tag_bigram_key;
                // Each key encodes a (prev_tag, tag) bigram; map it to the
                // flat class index used by the scores vector
                kh_foreach(prev_tag_weights, tag_bigram_key, weight, {
                    tag_bigram.value = tag_bigram_key;
                    uint32_t class_id = tag_bigram_class_id(self, tag_bigram);
                    scores[class_id] += weight.value;
                })
            }
        }
    }
    return true;
}
// Adds the dense tag-to-tag transition weights into the CRF context's
// transition matrix (context->trans->values). Each key in
// self->trans_weights encodes a (prev_tag, tag) bigram, which
// tag_bigram_class_id maps to a flat index into the matrix.
// Returns false only when required members are NULL.
static bool crf_averaged_perceptron_trainer_trans_score(crf_averaged_perceptron_trainer_t *self) {
    if (self == NULL || self->base_trainer == NULL || self->trans_weights == NULL) return false;
    crf_context_t *context = self->base_trainer->context;
    khash_t(prev_tag_class_weights) *trans_weights = self->trans_weights;
    class_weight_t weight;
    tag_bigram_t tag_bigram;
    uint64_t tag_bigram_key;
    double *scores = context->trans->values;
    kh_foreach(trans_weights, tag_bigram_key, weight, {
        tag_bigram.value = tag_bigram_key;
        uint32_t class_id = tag_bigram_class_id(self, tag_bigram);
        scores[class_id] += weight.value;
    })
    return true;
}
/*
Runs one averaged-perceptron update for the current training sequence,
comparing the Viterbi predictions (self->viterbi) against the gold labels
(self->label_ids), with update magnitude `value`:
  1. State features: for every mispredicted token, update the class weights
     of each cached state feature for that token.
  2. Previous-tag (state-transition) features: for every token t > 0 where
     the current or previous prediction was wrong, update the
     previous-tag-conditioned feature weights.
  3. Dense transition weights: same error condition as (2), applied to the
     tag-to-tag transition weights.
Per-feature update counts are tracked so rarely-updated features can be
pruned at finalization. Returns false on NULL/inconsistent trainer state
or when any sub-update fails.

Fixes vs. previous revision: added a `self == NULL` guard (consistent with
the sibling scoring functions) and removed dead locals (`idx`, `length`,
`add_if_missing`, and an unused `prev_tag_update_counts` pointer).
*/
bool crf_averaged_perceptron_trainer_update(crf_averaged_perceptron_trainer_t *self, double value) {
    if (self == NULL || self->viterbi == NULL || self->label_ids == NULL || self->label_ids->n != self->viterbi->n ||
        self->sequence_features == NULL || self->sequence_features_indptr == NULL ||
        self->label_ids->n != self->sequence_features_indptr->n - 1 ||
        self->sequence_prev_tag_features == NULL || self->sequence_prev_tag_features_indptr == NULL ||
        self->label_ids->n != self->sequence_prev_tag_features_indptr->n - 1 ||
        self->update_counts == NULL || self->prev_tag_update_counts == NULL) {
        log_error("Something was NULL\n");
        return false;
    }
    uint32_t *viterbi = self->viterbi->a;
    uint32_t *labels = self->label_ids->a;
    uint32_t truth, guess;

    // --- 1. State (emission) features ---
    // CSR layout: token t owns features [indptr[t], indptr[t + 1])
    size_t num_tokens = self->sequence_features_indptr->n - 1;
    uint32_t *indptr = self->sequence_features_indptr->a;
    cstring_array *sequence_features = self->sequence_features;
    for (size_t t = 0; t < num_tokens; t++) {
        truth = labels[t];
        guess = viterbi[t];
        if (guess != truth) {
            uint32_t idx = indptr[t];
            uint32_t next_start = indptr[t + 1];
            for (uint32_t j = idx; j < next_start; j++) {
                char *feature = cstring_array_get_string(sequence_features, j);
                if (feature == NULL) {
                    log_error("feature NULL, j = %u, len = %zu\n", j, cstring_array_num_strings(sequence_features));
                    return false;
                }
                uint32_t feature_id;
                bool exists;
                if (!crf_trainer_hash_feature_to_id_exists(self->base_trainer, feature, &feature_id, &exists)) {
                    return false;
                }
                if (!crf_averaged_perceptron_trainer_update_feature(self, feature_id, guess, truth, value)) {
                    return false;
                }
                if (exists) {
                    self->update_counts->a[feature_id]++;
                } else {
                    // Newly minted feature id: start its update count at 1
                    uint64_array_push(self->update_counts, 1);
                }
            }
            // This is shared between the state and state-trans features, only increment once
            self->num_updates++;
            self->num_errors++;
        }
    }

    // --- 2. Previous-tag (state-transition) features ---
    // prev_truth/prev_guess are only read when t > 0, by which point the
    // previous iteration has assigned them.
    uint32_t prev_truth, prev_guess;
    sequence_features = self->sequence_prev_tag_features;
    num_tokens = self->sequence_prev_tag_features_indptr->n - 1;
    indptr = self->sequence_prev_tag_features_indptr->a;
    for (size_t t = 0; t < num_tokens; t++) {
        truth = labels[t];
        guess = viterbi[t];
        if (t > 0 && (guess != truth || prev_guess != prev_truth)) {
            uint32_t idx = indptr[t];
            uint32_t next_start = indptr[t + 1];
            for (uint32_t j = idx; j < next_start; j++) {
                char *feature = cstring_array_get_string(sequence_features, j);
                if (feature == NULL) {
                    log_error("feature NULL, j = %u, len = %zu\n", j, cstring_array_num_strings(sequence_features));
                    return false;
                }
                uint32_t feature_id;
                bool exists;
                if (!crf_trainer_hash_prev_tag_feature_to_id_exists(self->base_trainer, feature, &feature_id, &exists)) {
                    return false;
                }
                if (!crf_averaged_perceptron_trainer_update_prev_tag_feature(self, feature_id, prev_guess, prev_truth, guess, truth, value)) {
                    return false;
                }
                if (exists) {
                    self->prev_tag_update_counts->a[feature_id]++;
                } else {
                    uint64_array_push(self->prev_tag_update_counts, 1);
                }
            }
        }
        prev_truth = truth;
        prev_guess = guess;
    }

    // --- 3. Dense tag-to-tag transition weights ---
    size_t sequence_len = self->label_ids->n;
    for (uint32_t t = 0; t < sequence_len; t++) {
        truth = labels[t];
        guess = viterbi[t];
        if (t > 0 && (guess != truth || prev_guess != prev_truth)) {
            if (!crf_averaged_perceptron_trainer_update_trans_feature(self, prev_guess, prev_truth, guess, truth, value)) {
                return false;
            }
        }
        prev_truth = truth;
        prev_guess = guess;
    }
    return true;
}
/*
Trains on a single tokenized example:
  1. Clears and rebuilds the per-sequence feature caches (state and
     previous-tag features, CSR indptr arrays) and the gold label ids.
  2. Scores the sequence (state, state-transition and transition scores).
  3. Runs Viterbi decoding and, if any token is mispredicted, applies one
     perceptron update (crf_averaged_perceptron_trainer_update).
Returns true on success (including the trivial empty-sequence case) and
false on any error.

Fixes vs. previous revision: a NULL label now fails the example instead of
being logged and passed on to crf_trainer_get_class_id, and the unused
`viterbi_score` local was removed (the Viterbi call is kept for its effect
of filling the label array).
*/
bool crf_averaged_perceptron_trainer_train_example(crf_averaged_perceptron_trainer_t *self, void *tagger, void *tagger_context, cstring_array *features, cstring_array *prev_tag_features, tagger_feature_function feature_function, tokenized_string_t *tokenized, cstring_array *labels) {
    if (self == NULL || self->base_trainer == NULL) return false;
    size_t num_tokens = tokenized->tokens->n;
    if (cstring_array_num_strings(labels) != num_tokens) {
        return false;
    }
    if (num_tokens == 0) {
        return true;
    }
    // Reset the per-sequence caches; indptr arrays start with a leading 0
    // so token t's features are [indptr[t], indptr[t + 1])
    uint32_array_clear(self->sequence_features_indptr);
    uint32_array_push(self->sequence_features_indptr, 0);
    cstring_array_clear(self->sequence_features);
    uint32_array_clear(self->sequence_prev_tag_features_indptr);
    uint32_array_push(self->sequence_prev_tag_features_indptr, 0);
    cstring_array_clear(self->sequence_prev_tag_features);
    crf_context_t *crf_context = self->base_trainer->context;
    if (!uint32_array_resize(self->label_ids, num_tokens)) {
        log_error("Resizing label_ids failed\n");
        return false;
    }
    uint32_array_clear(self->label_ids);
    if (!crf_context_set_num_items(crf_context, num_tokens)) {
        return false;
    }
    crf_context_reset(crf_context, CRF_CONTEXT_RESET_ALL);
    bool add_if_missing = true;
    for (uint32_t i = 0; i < num_tokens; i++) {
        cstring_array_clear(features);
        cstring_array_clear(prev_tag_features);
        // feature_function populates `features`/`prev_tag_features` for token i
        if (!feature_function(tagger, tagger_context, tokenized, i)) {
            log_error("Could not add address parser features\n");
            return false;
        }
        char *label = cstring_array_get_string(labels, i);
        if (label == NULL) {
            log_error("label is NULL\n");
            return false;
        }
        uint32_t class_id;
        if (!crf_trainer_get_class_id(self->base_trainer, label, &class_id, add_if_missing)) {
            log_error("Get class id failed\n");
            return false;
        }
        uint32_array_push(self->label_ids, class_id);
        if (!crf_averaged_perceptron_trainer_cache_features(self, features) ||
            !crf_averaged_perceptron_trainer_cache_prev_tag_features(self, prev_tag_features)) {
            log_error("Caching features failed\n");
            return false;
        }
    }
    if (!crf_averaged_perceptron_trainer_state_score(self)) {
        log_error("Error in state score\n");
        return false;
    }
    if (!crf_averaged_perceptron_trainer_state_trans_score(self)) {
        log_error("Error in state_trans score\n");
        return false;
    }
    if (!crf_averaged_perceptron_trainer_trans_score(self)) {
        log_error("Error in trans score\n");
        return false;
    }
    if (!uint32_array_resize_fixed(self->viterbi, num_tokens)) {
        log_error("Error resizing Viterbi, num_tokens=%zu\n", num_tokens);
        return false;
    }
    uint32_t *viterbi = self->viterbi->a;
    // Decode the best label sequence; the score itself is not needed here
    (void)crf_context_viterbi(crf_context, viterbi);
    if (self->viterbi->n != num_tokens || self->label_ids->n != num_tokens) {
        log_error("self->viterbi->n=%zu, num_tokens=%zu, self->label_ids->n=%zu\n", self->viterbi->n, num_tokens, self->label_ids->n);
        return false;
    }
    uint32_t *true_labels = self->label_ids->a;
    for (uint32_t i = 0; i < num_tokens; i++) {
        uint32_t truth = true_labels[i];
        // Technically this is supposed to be updated all at once
        uint32_t guess = viterbi[i];
        if (guess != truth) {
            if (!crf_averaged_perceptron_trainer_update(self, 1.0)) {
                log_error("Error in crf_averaged_perceptron_trainer_update\n");
                return false;
            }
            break;
        }
    }
    return true;
}
/*
Finalizes training and converts the trainer into a compact crf_t model:
  1. Averages every perceptron weight over the total update count
     (weight.total += (updates - last_updated) * value; value = total/updates).
  2. Prunes features with fewer than min_updates updates, compacting the
     surviving feature ids into a contiguous range and updating/deleting
     entries in the string->id hashes accordingly.
  3. Builds the runtime structures: sparse matrices for state and
     state-transition weights, a dense matrix for tag transitions, and
     tries mapping feature strings to the new ids.
Destroys the trainer before returning the model; returns NULL on error.

NOTE(review): the malloc results (feature_keys, prev_tag_feature_keys, crf)
are not NULL-checked, and some early-return error paths appear not to free
the partially built matrices (e.g. averaged_weights in the first kh_foreach)
-- confirm before relying on error-path cleanup.
*/
crf_t *crf_averaged_perceptron_trainer_finalize(crf_averaged_perceptron_trainer_t *self) {
    if (self == NULL || self->base_trainer == NULL || self->base_trainer->num_classes == 0) {
        log_error("Something was NULL\n");
        return NULL;
    }
    uint32_t class_id;
    class_weight_t weight;
    khiter_t k;
    size_t num_features = kh_size(self->base_trainer->features);
    sparse_matrix_t *averaged_weights = sparse_matrix_new();
    if (averaged_weights == NULL) {
        log_error("Error creating averaged_weights\n");
        return NULL;
    }
    log_info("Finalizing trainer, num_features=%zu\n", num_features);
    // Invert the feature hash: feature_keys[feature_id] = feature string
    char **feature_keys = malloc(sizeof(char *) * num_features);
    uint32_t feature_id;
    const char *feature;
    kh_foreach(self->base_trainer->features, feature, feature_id, {
        if (feature_id >= num_features) {
            free(feature_keys);
            log_error("Error populating feature_keys, feature_id=%u, num_features=%zu\n", feature_id, num_features);
            return NULL;
        }
        feature_keys[feature_id] = (char *)feature;
    })
    khash_t(str_uint32) *features = self->base_trainer->features;
    khash_t(str_uint32) *prev_tag_features = self->base_trainer->prev_tag_features;
    uint64_t updates = self->num_updates;
    khash_t(class_weights) *weights;
    // next_feature_id assigns compacted ids to surviving features in order
    uint32_t next_feature_id = 0;
    uint64_t *update_counts = self->update_counts->a;
    log_info("Pruning weights with < min_updates = %" PRIu64 "\n", self->min_updates);
    for (feature_id = 0; feature_id < num_features; feature_id++) {
        k = kh_get(feature_class_weights, self->weights, feature_id);
        if (k == kh_end(self->weights)) {
            sparse_matrix_destroy(averaged_weights);
            free(feature_keys);
            log_error("Error in kh_get on self->weights, feature_id=%u, num_features=%zu\n", feature_id, num_features);
            return NULL;
        }
        weights = kh_value(self->weights, k);
        uint32_t class_id;
        uint64_t update_count = update_counts[feature_id];
        bool keep_feature = update_count >= self->min_updates;
        uint32_t new_feature_id = next_feature_id;
        if (keep_feature) {
            // Average each class weight and emit one sparse-matrix row
            // per surviving feature
            kh_foreach(weights, class_id, weight, {
                weight.total += (updates - weight.last_updated) * weight.value;
                double value = weight.total / updates;
                sparse_matrix_append(averaged_weights, class_id, value);
            })
            sparse_matrix_finalize_row(averaged_weights);
            next_feature_id++;
        }
        if (!keep_feature || new_feature_id != feature_id) {
            // Renumber (or delete) the feature in the string->id hash
            feature = feature_keys[feature_id];
            k = kh_get(str_uint32, features, feature);
            if (k != kh_end(features)) {
                if (keep_feature) {
                    kh_value(features, k) = new_feature_id;
                } else {
                    kh_del(str_uint32, features, k);
                }
            } else {
                log_error("Error in kh_get on features\n");
                crf_averaged_perceptron_trainer_destroy(self);
                free(feature_keys);
                return NULL;
            }
        }
    }
    free(feature_keys);
    num_features = kh_size(features);
    log_info("After pruning, num_features=%zu\n", num_features);
    // Same average-then-prune pass for previous-tag (state-transition) features
    sparse_matrix_t *averaged_state_trans_weights = sparse_matrix_new();
    if (averaged_state_trans_weights == NULL) {
        log_error("Error creating averaged_state_trans_weights\n");
        return NULL;
    }
    size_t num_prev_tag_features = kh_size(prev_tag_features);
    char **prev_tag_feature_keys = malloc(sizeof(char *) * num_prev_tag_features);
    kh_foreach(prev_tag_features, feature, feature_id, {
        if (feature_id >= num_prev_tag_features) {
            free(prev_tag_feature_keys);
            log_error("Error populating prev_tag_feature_keys\n");
            return NULL;
        }
        prev_tag_feature_keys[feature_id] = (char *)feature;
    })
    khash_t(prev_tag_class_weights) *prev_tag_weights;
    log_info("Pruning previous tag features, num_prev_tag_features=%zu\n", num_prev_tag_features);
    uint32_t next_prev_tag_feature_id = 0;
    uint64_t *prev_tag_update_counts = self->prev_tag_update_counts->a;
    tag_bigram_t tag_bigram;
    uint64_t tag_bigram_key;
    for (feature_id = 0; feature_id < num_prev_tag_features; feature_id++) {
        k = kh_get(feature_prev_tag_class_weights, self->prev_tag_weights, feature_id);
        if (k == kh_end(self->prev_tag_weights)) {
            sparse_matrix_destroy(averaged_state_trans_weights);
            free(prev_tag_feature_keys);
            log_error("Error in kh_get self->prev_tag_weights\n");
            return NULL;
        }
        prev_tag_weights = kh_value(self->prev_tag_weights, k);
        uint64_t update_count = prev_tag_update_counts[feature_id];
        bool keep_feature = update_count >= self->min_updates;
        uint32_t new_feature_id = next_prev_tag_feature_id;
        if (keep_feature) {
            // Keys encode (prev_tag, tag) bigrams; map them to flat class ids
            kh_foreach(prev_tag_weights, tag_bigram_key, weight, {
                tag_bigram.value = tag_bigram_key;
                weight.total += (updates - weight.last_updated) * weight.value;
                double value = weight.total / updates;
                class_id = tag_bigram_class_id(self, tag_bigram);
                sparse_matrix_append(averaged_state_trans_weights, class_id, value);
            })
            sparse_matrix_finalize_row(averaged_state_trans_weights);
            next_prev_tag_feature_id++;
        }
        if (!keep_feature || new_feature_id != feature_id) {
            feature = prev_tag_feature_keys[feature_id];
            k = kh_get(str_uint32, prev_tag_features, feature);
            if (k != kh_end(prev_tag_features)) {
                if (keep_feature) {
                    kh_value(prev_tag_features, k) = new_feature_id;
                } else {
                    kh_del(str_uint32, prev_tag_features, k);
                }
            } else {
                log_error("Error in kh_get on prev_tag_features\n");
                crf_averaged_perceptron_trainer_destroy(self);
                free(prev_tag_feature_keys);
                return NULL;
            }
        }
    }
    free(prev_tag_feature_keys);
    num_prev_tag_features = kh_size(prev_tag_features);
    log_info("After pruning, num_prev_tag_features=%zu\n", num_prev_tag_features);
    // Dense tag-to-tag transition weights (no pruning, just averaging)
    size_t num_classes = self->base_trainer->num_classes;
    double_matrix_t *averaged_trans_weights = double_matrix_new_zeros(num_classes, num_classes);
    if (averaged_trans_weights == NULL) {
        log_error("Error creating double matrix for transition weights\n");
        return NULL;
    }
    double *trans = averaged_trans_weights->values;
    kh_foreach(self->trans_weights, tag_bigram_key, weight, {
        tag_bigram.value = tag_bigram_key;
        weight.total += (updates - weight.last_updated) * weight.value;
        double value = weight.total / updates;
        class_id = tag_bigram_class_id(self, tag_bigram);
        trans[class_id] = value;
    })
    // Assemble the runtime model; class_strings ownership is transferred
    // from the trainer to the crf
    crf_t *crf = malloc(sizeof(crf_t));
    crf->num_classes = num_classes;
    crf->weights = averaged_weights;
    crf->state_trans_weights = averaged_state_trans_weights;
    crf->trans_weights = averaged_trans_weights;
    crf->classes = self->base_trainer->class_strings;
    self->base_trainer->class_strings = NULL;
    trie_t *state_features = trie_new_from_hash(features);
    if (state_features == NULL) {
        crf_averaged_perceptron_trainer_destroy(self);
        log_error("Error creating state_features\n");
        return NULL;
    }
    crf->state_features = state_features;
    trie_t *state_trans_features = trie_new_from_hash(prev_tag_features);
    if (state_trans_features == NULL) {
        crf_averaged_perceptron_trainer_destroy(self);
        log_error("Error creating state_trans_features\n");
        return NULL;
    }
    crf->state_trans_features = state_trans_features;
    crf->viterbi = uint32_array_new();
    crf->context = crf_context_new(CRF_CONTEXT_VITERBI | CRF_CONTEXT_MARGINALS, num_classes, CRF_CONTEXT_DEFAULT_NUM_ITEMS);
    crf_averaged_perceptron_trainer_destroy(self);
    return crf;
}
| 14,320 |
1,093 | <reponame>StandCN/spring-integration
/*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.integration.util;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.expression.BeanFactoryResolver;
import org.springframework.core.convert.ConversionService;
import org.springframework.core.log.LogAccessor;
import org.springframework.expression.EvaluationException;
import org.springframework.expression.Expression;
import org.springframework.expression.ExpressionParser;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.expression.spel.support.StandardEvaluationContext;
import org.springframework.integration.expression.ExpressionUtils;
import org.springframework.integration.support.DefaultMessageBuilderFactory;
import org.springframework.integration.support.MessageBuilderFactory;
import org.springframework.integration.support.utils.IntegrationUtils;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
/**
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
*
* @since 2.0
*/
public abstract class AbstractExpressionEvaluator implements BeanFactoryAware, InitializingBean {

	protected final LogAccessor logger = new LogAccessor(this.getClass()); // NOSONAR final

	protected static final ExpressionParser EXPRESSION_PARSER = new SpelExpressionParser();

	// Converts values during SpEL evaluation; delegates to the BeanFactory's
	// ConversionService when one is configured.
	private final BeanFactoryTypeConverter typeConverter = new BeanFactoryTypeConverter();

	// Lazily created by getEvaluationContext(); volatile for safe publication.
	// NOTE(review): the lazy init is check-then-act and not atomic, so two
	// threads could briefly create separate contexts -- confirm callers
	// initialize via afterPropertiesSet() before concurrent use.
	private volatile StandardEvaluationContext evaluationContext;

	private volatile BeanFactory beanFactory;

	private volatile MessageBuilderFactory messageBuilderFactory = new DefaultMessageBuilderFactory();

	/**
	 * Specify a BeanFactory in order to enable resolution via <code>@beanName</code> in the expression.
	 */
	@Override
	public void setBeanFactory(BeanFactory beanFactory) {
		this.beanFactory = beanFactory;
		this.typeConverter.setBeanFactory(beanFactory);
		// If the context was already created without a bean resolver
		// (e.g. getEvaluationContext(false) before the factory was set),
		// retrofit one now.
		if (this.evaluationContext != null && this.evaluationContext.getBeanResolver() == null) {
			this.evaluationContext.setBeanResolver(new BeanFactoryResolver(beanFactory));
		}
	}

	protected BeanFactory getBeanFactory() {
		return this.beanFactory;
	}

	/**
	 * Set the ConversionService used when converting evaluation results;
	 * a null argument is ignored.
	 * @param conversionService the conversion service to use.
	 */
	public void setConversionService(ConversionService conversionService) {
		if (conversionService != null) {
			this.typeConverter.setConversionService(conversionService);
		}
	}

	protected MessageBuilderFactory getMessageBuilderFactory() {
		return this.messageBuilderFactory;
	}

	/**
	 * Eagerly initialize the evaluation context and, when a BeanFactory is
	 * available, resolve the MessageBuilderFactory from it, then delegate to
	 * {@link #onInit()} for subclass initialization.
	 */
	@Override
	public final void afterPropertiesSet() {
		getEvaluationContext();
		if (this.beanFactory != null) {
			this.messageBuilderFactory = IntegrationUtils.getMessageBuilderFactory(this.beanFactory);
		}
		onInit();
	}

	protected StandardEvaluationContext getEvaluationContext() {
		return getEvaluationContext(true);
	}

	/**
	 * Obtain the evaluation context, lazily creating it on first access.
	 * @param beanFactoryRequired when false and no BeanFactory has been set, a
	 * plain context without bean resolution is created; otherwise the context
	 * is created against the configured BeanFactory.
	 * @return The evaluation context.
	 */
	protected final StandardEvaluationContext getEvaluationContext(boolean beanFactoryRequired) {
		if (this.evaluationContext == null) {
			if (this.beanFactory == null && !beanFactoryRequired) {
				this.evaluationContext = ExpressionUtils.createStandardEvaluationContext();
			}
			else {
				this.evaluationContext = ExpressionUtils.createStandardEvaluationContext(this.beanFactory);
			}
			this.evaluationContext.setTypeConverter(this.typeConverter);
			if (this.beanFactory != null) {
				ConversionService conversionService = IntegrationUtils.getConversionService(this.beanFactory);
				if (conversionService != null) {
					this.typeConverter.setConversionService(conversionService);
				}
			}
		}
		return this.evaluationContext;
	}

	/**
	 * Evaluate the expression against the message, wrapping any failure in a
	 * messaging exception that carries the failed message.
	 * @param expression the expression to evaluate.
	 * @param message the message used as the evaluation root object.
	 * @param expectedType the expected result type, or null.
	 * @param <T> the expected result type.
	 * @return the evaluation result, possibly null.
	 */
	@Nullable
	protected <T> T evaluateExpression(Expression expression, Message<?> message, @Nullable Class<T> expectedType) {
		try {
			return evaluateExpression(expression, (Object) message, expectedType);
		}
		catch (Exception ex) {
			this.logger.debug(ex, "SpEL Expression evaluation failed with Exception.");
			Throwable cause = null;
			// For SpEL evaluation failures, surface the underlying cause
			// rather than the EvaluationException wrapper.
			if (ex instanceof EvaluationException) { // NOSONAR
				cause = ex.getCause();
			}
			throw IntegrationUtils.wrapInHandlingExceptionIfNecessary(message,
					() -> "Expression evaluation failed: " + expression.getExpressionString(),
					cause == null ? ex : cause);
		}
	}

	@Nullable
	protected Object evaluateExpression(String expression, Object input) {
		return evaluateExpression(expression, input, null);
	}

	/**
	 * Parse the expression string and evaluate it against the input object.
	 */
	@Nullable
	protected <T> T evaluateExpression(String expression, Object input, @Nullable Class<T> expectedType) {
		return EXPRESSION_PARSER.parseExpression(expression)
				.getValue(getEvaluationContext(), input, expectedType);
	}

	@Nullable
	protected Object evaluateExpression(Expression expression, Object input) {
		return evaluateExpression(expression, input, null);
	}

	@Nullable
	protected <T> T evaluateExpression(Expression expression, @Nullable Class<T> expectedType) {
		return expression.getValue(getEvaluationContext(), expectedType);
	}

	@Nullable
	protected Object evaluateExpression(Expression expression) {
		return expression.getValue(getEvaluationContext());
	}

	@Nullable
	protected <T> T evaluateExpression(Expression expression, Object input, @Nullable Class<T> expectedType) {
		return expression.getValue(getEvaluationContext(), input, expectedType);
	}

	/**
	 * Subclass initialization hook invoked from {@link #afterPropertiesSet()};
	 * the default implementation does nothing.
	 */
	protected void onInit() {
	}

}
| 1,798 |
5,169 | <filename>Specs/8/5/2/JJBadge/4.1/JJBadge.podspec.json
{
"name": "JJBadge",
"version": "4.1",
"summary": "Simple JJBadge.",
  "description": "JJBadge is an awesome view for making counters easier",
"homepage": "https://github.com/only-icesoul/ios-jjbadge",
"license": {
"type": "Apache License 2.0",
"file": "LICENSE"
},
"authors": {
"only-icesoul": "<EMAIL>"
},
"source": {
"git": "https://github.com/only-icesoul/ios-jjbadge.git",
"tag": "4.1"
},
"platforms": {
"ios": "12.0"
},
"source_files": "src/**/*.swift",
"swift_versions": "5.0",
"swift_version": "5.0"
}
| 277 |
3,705 | <filename>chainer/types.py
import numbers
import typing as tp # NOQA
import typing_extensions as tpe # NOQA
try:
from typing import TYPE_CHECKING # NOQA
except ImportError:
# typing.TYPE_CHECKING doesn't exist before Python 3.5.2
TYPE_CHECKING = False
# import chainer modules only for type checkers to avoid circular import
if TYPE_CHECKING:
from types import ModuleType # NOQA
import numpy # NOQA
from chainer import backend # NOQA
from chainer.backends import cuda, intel64 # NOQA
from chainer import initializer # NOQA
import chainerx # NOQA
# Concrete shape of an ndarray: a tuple of dimension sizes.
Shape = tp.Tuple[int, ...]

# User-facing shape specifier: a single int or a sequence of ints.
ShapeSpec = tp.Union[int, tp.Sequence[int]]  # Sequence includes Tuple[int, ...] # NOQA

# Anything accepted as a dtype specifier.
DTypeSpec = tp.Union[tp.Any]  # TODO(okapies): encode numpy.dtype

NdArray = tp.Union[
    'numpy.ndarray',
    'cuda.ndarray',
    # 'intel64.mdarray',
    # TODO(okapies): mdarray is partially incompatible with other ndarrays
    'chainerx.ndarray',
]
"""The ndarray types supported in :func:`chainer.get_array_types`
"""

# An array module (numpy-compatible namespace).
Xp = tp.Union[tp.Any]  # TODO(okapies): encode numpy/cupy/ideep/chainerx
class AbstractInitializer(tpe.Protocol):
    """Protocol class for Initializer.

    It can be either an :class:`chainer.Initializer` or a callable object
    that takes an ndarray and fills it in place.

    This is only for PEP 544 compliant static type checkers.
    """

    # Optional dtype the initializer produces; None means "unspecified".
    dtype = None  # type: tp.Optional[DTypeSpec]

    def __call__(self, array: NdArray) -> None:
        """Initialize the given array in place."""
        pass
ScalarValue = tp.Union[
    'numpy.generic',
    bytes,
    str,
    memoryview,
    numbers.Number,
]
"""The scalar types supported in :func:`numpy.isscalar`.
"""

# Anything accepted as an initializer: an initializer object/callable,
# a scalar (broadcast as a fill value), or a concrete ndarray.
InitializerSpec = tp.Union[AbstractInitializer, ScalarValue, 'numpy.ndarray']

DeviceSpec = tp.Union[
    'backend.Device',
    'chainerx.Device',
    'cuda.Device',
    str,
    tp.Tuple[str, int],
    'ModuleType',  # numpy and intel64 module
    tp.Tuple['ModuleType', int],  # cupy module and device ID
]
"""The device specifier types supported in :func:`chainer.get_device`
"""
# TODO(okapies): Use Xp instead of ModuleType

CudaDeviceSpec = tp.Union['cuda.Device', int, 'numpy.integer']  # NOQA
"""
This type only for the deprecated :func:`chainer.cuda.get_device` API.
Use :class:`~chainer.types.DeviceSpec` instead.
"""
| 872 |
634 | <gh_stars>100-1000
/*
* Copyright 2013-2019 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package consulo.ui.web.internal;
import consulo.awt.TargetAWTFacade;
import consulo.ui.Rectangle2D;
import consulo.ui.Size;
import consulo.ui.color.ColorValue;
import consulo.ui.color.RGBColor;
import consulo.ui.image.Image;
import consulo.ui.image.ImageKey;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.swing.*;
import java.awt.*;
/**
* @author VISTALL
* @since 2019-02-21
*/
/**
 * Stub implementation of {@link TargetAWTFacade} for environments where most
 * AWT conversions do not apply. Color and image/icon conversions are
 * functional; geometry and font conversions throw
 * {@link UnsupportedOperationException}, and component/window conversions
 * return null.
 *
 * @author VISTALL
 * @since 2019-02-21
 */
public class TargetAWTFacadeStub implements TargetAWTFacade {
  /**
   * Non-painting {@link Icon} adapter around a UI {@link Image}: reports the
   * image's size but intentionally paints nothing.
   */
  private static class IconWrapper implements Icon {
    private final Image myImage;

    private IconWrapper(Image image) {
      myImage = image;
    }

    @Override
    public void paintIcon(Component c, Graphics g, int x, int y) {
      // Intentionally a no-op in this stub.
    }

    @Override
    public int getIconWidth() {
      return myImage.getWidth();
    }

    @Override
    public int getIconHeight() {
      return myImage.getHeight();
    }
  }

  @Nonnull
  @Override
  public Dimension to(@Nonnull Size size) {
    throw new UnsupportedOperationException();
  }

  @Nonnull
  @Override
  public Color to(@Nonnull RGBColor color) {
    // RGBColor alpha is a fraction in [0, 1] (see from(Color) below, which
    // divides by 255f). Scale it to AWT's 0-255 range before truncating.
    // BUG FIX: the previous code cast to int *before* multiplying by 255
    // ((int) color.getAlpha() * 255), which truncated every alpha < 1.0 to
    // fully transparent.
    int alpha = Math.round(color.getAlpha() * 255);
    return new java.awt.Color(color.getRed(), color.getGreen(), color.getBlue(), alpha);
  }

  @Override
  public Color to(@Nullable ColorValue colorValue) {
    return colorValue == null ? null : to(colorValue.toRGB());
  }

  @Override
  public Rectangle to(@Nullable Rectangle2D rectangle2D) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Component to(@Nullable consulo.ui.Component component) {
    return null;
  }

  @Override
  public consulo.ui.Component from(@Nullable Component component) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Window to(@Nullable consulo.ui.Window component) {
    return null;
  }

  @Override
  public consulo.ui.Window from(@Nullable Window component) {
    return null;
  }

  @Override
  public Rectangle2D from(@Nullable Rectangle rectangle) {
    throw new UnsupportedOperationException();
  }

  @Override
  public RGBColor from(@Nullable Color color) {
    if (color == null) {
      return null;
    }
    // AWT alpha is 0-255; RGBColor stores it as a fraction in [0, 1]
    return new RGBColor(color.getRed(), color.getGreen(), color.getBlue(), color.getAlpha() / 255f);
  }

  @Override
  public Icon to(@Nullable Image image) {
    if (image == null) {
      return null;
    }
    // Some Image implementations are already Icons; avoid double-wrapping
    if (image instanceof Icon) {
      return (Icon)image;
    }
    return new IconWrapper(image);
  }

  @Override
  public Image from(@Nullable Icon icon) {
    if (icon == null) {
      return null;
    }
    // Unwrap our own adapter first, then fall back to direct casts
    if (icon instanceof IconWrapper) {
      return ((IconWrapper)icon).myImage;
    }
    if (icon instanceof Image) {
      return (Image)icon;
    }
    return null;
  }

  @Nonnull
  @Override
  public Font to(@Nonnull consulo.ui.font.Font font) {
    throw new UnsupportedOperationException();
  }

  @Override
  public java.awt.Image toImage(@Nonnull ImageKey key) {
    throw new UnsupportedOperationException();
  }
}
| 1,216 |
10,182 | <gh_stars>1000+
/*
* Copyright (c) 2009-2017 Panxiaobo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.d2j.node.insn;
import com.googlecode.d2j.MethodHandle;
import com.googlecode.d2j.Proto;
import com.googlecode.d2j.reader.Op;
import com.googlecode.d2j.visitors.DexCodeVisitor;
/**
 * IR node for a dynamically-resolved method invocation statement
 * (invoke-custom style): the call site is described by a name, a prototype,
 * a bootstrap {@link MethodHandle} and its static arguments.
 */
public class MethodCustomStmtNode extends AbstractMethodStmtNode {
    /** Name of the dynamic call site. */
    public final String name;
    /** Prototype (method signature) of the call site. */
    public final Proto proto;
    /** Bootstrap method handle used to resolve the call site. */
    public final MethodHandle bsm;
    /** Static arguments passed to the bootstrap method. */
    public final Object[] bsmArgs;

    public MethodCustomStmtNode(Op op, int[] args, String name, Proto proto, MethodHandle bsm, Object[] bsmArgs) {
        super(op, args);
        this.proto = proto;
        this.name = name;
        this.bsm = bsm;
        this.bsmArgs = bsmArgs;
    }

    /** Replays this statement into the given code visitor. */
    @Override
    public void accept(DexCodeVisitor cv) {
        cv.visitMethodStmt(op, args, name, proto, bsm, bsmArgs);
    }

    @Override
    public Proto getProto() {
        return proto;
    }
}
| 515 |
6,989 | <reponame>erwinvanthiel/numpy
#ifndef _NPY_ARRAY_SHAPE_H_
#define _NPY_ARRAY_SHAPE_H_

/* Internal (non-exported) declarations for ndarray shape helpers. */

/*
 * Builds a string representation of the shape given in 'vals'.
 * A negative value in 'vals' gets interpreted as newaxis.
 */
NPY_NO_EXPORT PyObject *
build_shape_string(npy_intp n, npy_intp const *vals);

/*
 * Creates a sorted stride perm matching the KEEPORDER behavior
 * of the NpyIter object. Because this operates based on multiple
 * input strides, the 'stride' member of the npy_stride_sort_item
 * would be useless and we simply argsort a list of indices instead.
 *
 * The caller should have already validated that 'ndim' matches for
 * every array in the arrays list.
 *
 * The permutation is written to 'out_strideperm' (presumably 'ndim'
 * entries -- confirm against the implementation).
 */
NPY_NO_EXPORT void
PyArray_CreateMultiSortedStridePerm(int narrays, PyArrayObject **arrays,
                                    int ndim, int *out_strideperm);

/*
 * Just like PyArray_Squeeze, but allows the caller to select
 * a subset of the size-one dimensions to squeeze out.
 */
NPY_NO_EXPORT PyObject *
PyArray_SqueezeSelected(PyArrayObject *self, npy_bool *axis_flags);

#endif
| 359 |
3,909 | <reponame>m-novikov/websockets
from __future__ import annotations
import logging
from typing import List, NewType, Optional, Tuple, Union
__all__ = [
"Data",
"LoggerLike",
"Origin",
"Subprotocol",
"ExtensionName",
"ExtensionParameter",
]
# Public types used in the signature of public APIs
Data = Union[str, bytes]
"""Types supported in a WebSocket message:
:class:`str` for a Text_ frame, :class:`bytes` for a Binary_.
.. _Text: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
.. _Binary : https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
"""
LoggerLike = Union[logging.Logger, logging.LoggerAdapter]
"""Types accepted where a :class:`~logging.Logger` is expected."""
Origin = NewType("Origin", str)
"""Value of a ``Origin`` header."""
Subprotocol = NewType("Subprotocol", str)
"""Subprotocol in a ``Sec-WebSocket-Protocol`` header."""
ExtensionName = NewType("ExtensionName", str)
"""Name of a WebSocket extension."""
ExtensionParameter = Tuple[str, Optional[str]]
"""Parameter of a WebSocket extension."""
# Private types
ExtensionHeader = Tuple[ExtensionName, List[ExtensionParameter]]
"""Extension in a ``Sec-WebSocket-Extensions`` header."""
ConnectionOption = NewType("ConnectionOption", str)
"""Connection option in a ``Connection`` header."""
UpgradeProtocol = NewType("UpgradeProtocol", str)
"""Upgrade protocol in an ``Upgrade`` header."""
| 463 |
2,072 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from pyhocon import ConfigFactory, ConfigTree
from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_executions_extractor import (
ModeDashboardExecutionsExtractor,
)
from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_utils import ModeDashboardUtils
from databuilder.extractor.restapi.rest_api_extractor import STATIC_RECORD_DICT
from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery
from databuilder.transformer.dict_to_model import MODEL_CLASS, DictToModel
from databuilder.transformer.timestamp_string_to_epoch import FIELD_NAME, TimestampStringToEpoch
LOGGER = logging.getLogger(__name__)
class ModeDashboardLastModifiedTimestampExtractor(ModeDashboardExecutionsExtractor):
    """
    An extractor that extracts a Mode dashboard's last modified timestamp.

    It reuses the record iteration of ModeDashboardExecutionsExtractor and only
    customizes the static record fields, the transformer configuration, and the
    REST API query that feeds them.
    """

    def __init__(self) -> None:
        # Zero-argument super() — the file already targets Python 3 (f-strings).
        super().__init__()

    def init(self, conf: ConfigTree) -> None:
        """
        Initialize with defaults layered under the user-supplied config:
        tag every record with product=mode, map the raw dict onto the
        DashboardLastModifiedTimestamp model, and convert the
        'last_modified_timestamp' string field into an epoch value.
        """
        conf = conf.with_fallback(
            ConfigFactory.from_dict({
                STATIC_RECORD_DICT: {'product': 'mode'},
                f'{DictToModel().get_scope()}.{MODEL_CLASS}':
                    'databuilder.models.dashboard.dashboard_last_modified.DashboardLastModifiedTimestamp',
                f'{TimestampStringToEpoch().get_scope()}.{FIELD_NAME}':
                    'last_modified_timestamp',
            })
        )
        super().init(conf)

    def get_scope(self) -> str:
        # Config scope under which this extractor's settings are looked up.
        return 'extractor.mode_dashboard_last_modified_timestamp_execution'

    def _build_restapi_query(self) -> ModePaginatedRestApiQuery:
        """
        Build the REST API query that fetches each Mode dashboard's last
        modified timestamp.

        :return: A ModePaginatedRestApiQuery yielding dashboard_id,
            dashboard_group_id and last_modified_timestamp per report.
        """
        seed_query = ModeDashboardUtils.get_seed_query(conf=self._conf)
        params = ModeDashboardUtils.get_auth_params(conf=self._conf, discover_auth=True)

        # Reports discovery API:
        # https://mode.com/developer/discovery-api/analytics/reports/
        url = 'https://app.mode.com/batch/{organization}/reports'
        json_path = 'reports[*].[token, space_token, edited_at]'
        field_names = ['dashboard_id', 'dashboard_group_id', 'last_modified_timestamp']
        max_record_size = 1000
        pagination_json_path = 'reports[*]'
        return ModePaginatedRestApiQuery(query_to_join=seed_query, url=url, params=params,
                                         json_path=json_path, field_names=field_names,
                                         skip_no_result=True, max_record_size=max_record_size,
                                         pagination_json_path=pagination_json_path)
| 1,257 |
#ifndef CondFormats_L1TObjects_GlobalStableParameters_h
#define CondFormats_L1TObjects_GlobalStableParameters_h
/**
* \class GlobalStableParameters
*
*
* Description: L1 GT stable parameters.
*
* Implementation:
* <TODO: enter implementation details>
*
* \author: <NAME> - HEPHY Vienna
*
* $Date$
* $Revision$
*
*/
// system include files
#include <vector>
#include <ostream>
// user include files
// base class
// forward declarations
// class declaration
// Plain-data holder for the L1 Global Trigger stable parameters.
// Getters are inline; the setters and print() are defined out of line
// (presumably in the matching .cc — confirm in CondFormats sources).
class GlobalStableParameters {
public:
  // constructor
  GlobalStableParameters();

  // destructor — virtual although no other member is virtual;
  // NOTE(review): kept as-is since changing it would alter the class ABI.
  virtual ~GlobalStableParameters();

public:
  /// get / set the number of physics trigger algorithms
  inline unsigned int gtNumberPhysTriggers() const { return m_numberPhysTriggers; }
  void setGtNumberPhysTriggers(const unsigned int&);

  /// get / set the additional number of physics trigger algorithms
  inline unsigned int gtNumberPhysTriggersExtended() const { return m_numberPhysTriggersExtended; }
  void setGtNumberPhysTriggersExtended(const unsigned int&);

  /// get / set the number of technical triggers
  inline unsigned int gtNumberTechnicalTriggers() const { return m_numberTechnicalTriggers; }
  void setGtNumberTechnicalTriggers(const unsigned int&);

  /// get / set the number of L1 muons received by GT
  inline unsigned int gtNumberL1Mu() const { return m_numberL1Mu; }
  void setGtNumberL1Mu(const unsigned int&);

  /// get / set the number of L1 e/gamma objects received by GT
  inline unsigned int gtNumberL1NoIsoEG() const { return m_numberL1NoIsoEG; }
  void setGtNumberL1NoIsoEG(const unsigned int&);

  /// get / set the number of L1 isolated e/gamma objects received by GT
  inline unsigned int gtNumberL1IsoEG() const { return m_numberL1IsoEG; }
  void setGtNumberL1IsoEG(const unsigned int&);

  /// get / set the number of L1 central jets received by GT
  inline unsigned int gtNumberL1CenJet() const { return m_numberL1CenJet; }
  void setGtNumberL1CenJet(const unsigned int&);

  /// get / set the number of L1 forward jets received by GT
  inline unsigned int gtNumberL1ForJet() const { return m_numberL1ForJet; }
  void setGtNumberL1ForJet(const unsigned int&);

  /// get / set the number of L1 tau jets received by GT
  inline unsigned int gtNumberL1TauJet() const { return m_numberL1TauJet; }
  void setGtNumberL1TauJet(const unsigned int&);

  /// get / set the number of L1 jet counts received by GT
  inline unsigned int gtNumberL1JetCounts() const { return m_numberL1JetCounts; }
  void setGtNumberL1JetCounts(const unsigned int&);

  /// hardware stuff

  /// get / set the number of condition chips in GTL
  inline unsigned int gtNumberConditionChips() const { return m_numberConditionChips; }
  void setGtNumberConditionChips(const unsigned int&);

  /// get / set the number of pins on the GTL condition chips
  inline unsigned int gtPinsOnConditionChip() const { return m_pinsOnConditionChip; }
  void setGtPinsOnConditionChip(const unsigned int&);

  /// get / set the correspondence "condition chip - GTL algorithm word"
  /// in the hardware; returned by const reference (caller must not outlive this object)
  inline const std::vector<int>& gtOrderConditionChip() const { return m_orderConditionChip; }
  void setGtOrderConditionChip(const std::vector<int>&);

  /// get / set the number of PSB boards in GT
  inline int gtNumberPsbBoards() const { return m_numberPsbBoards; }
  void setGtNumberPsbBoards(const int&);

  /// get / set the number of bits for eta of calorimeter objects
  inline unsigned int gtIfCaloEtaNumberBits() const { return m_ifCaloEtaNumberBits; }
  void setGtIfCaloEtaNumberBits(const unsigned int&);

  /// get / set the number of bits for eta of muon objects
  inline unsigned int gtIfMuEtaNumberBits() const { return m_ifMuEtaNumberBits; }
  void setGtIfMuEtaNumberBits(const unsigned int&);

  /// get / set WordLength
  inline int gtWordLength() const { return m_wordLength; }
  void setGtWordLength(const int&);

  /// get / set one UnitLength
  inline int gtUnitLength() const { return m_unitLength; }
  void setGtUnitLength(const int&);

  /// print all the L1 GT stable parameters
  void print(std::ostream&) const;

private:
  /// trigger decision

  /// number of physics trigger algorithms
  unsigned int m_numberPhysTriggers;

  /// additional number of physics trigger algorithms
  unsigned int m_numberPhysTriggersExtended;

  /// number of technical triggers
  unsigned int m_numberTechnicalTriggers;

  /// trigger objects

  /// muons
  unsigned int m_numberL1Mu;

  /// e/gamma and isolated e/gamma objects
  unsigned int m_numberL1NoIsoEG;
  unsigned int m_numberL1IsoEG;

  /// central, forward and tau jets
  unsigned int m_numberL1CenJet;
  unsigned int m_numberL1ForJet;
  unsigned int m_numberL1TauJet;

  /// jet counts
  unsigned int m_numberL1JetCounts;

private:
  /// hardware

  /// number of condition chips
  unsigned int m_numberConditionChips;

  /// number of pins on the GTL condition chips
  unsigned int m_pinsOnConditionChip;

  /// correspondence "condition chip - GTL algorithm word" in the hardware
  /// chip 2: 0 - 95; chip 1: 96 - 128 (191)
  std::vector<int> m_orderConditionChip;

  /// number of PSB boards in GT
  int m_numberPsbBoards;

  /// number of bits for eta of calorimeter objects
  unsigned int m_ifCaloEtaNumberBits;

  /// number of bits for eta of muon objects
  unsigned int m_ifMuEtaNumberBits;

private:
  /// GT DAQ record organized in words of WordLength bits
  int m_wordLength;

  /// one unit in the word is UnitLength bits
  int m_unitLength;
};
#endif /*CondFormats_L1TObjects_GlobalStableParameters_h*/
| 1,750 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.