max_stars_count (int64, 301-224k) | text (string, lengths 6-1.05M) | token_count (int64, 3-727k) |
---|---|---|
466 | <gh_stars>100-1000
from pycocotools.coco import COCO
from track_tools.colormap import colormap
import cv2
import os
annFile='mix/annotations/train.json'
coco=COCO(annFile)
cats = coco.loadCats(coco.getCatIds())
nms=[cat['name'] for cat in cats]
print('categories: \n{}\n'.format(' '.join(nms)))
dirs = 'track_tools/shows'
if not os.path.exists(dirs):
os.makedirs(dirs)
max_img = 10000
color_list = colormap()
show_imgs = list(range(1,50)) + list(range(1+max_img,50+max_img))
for i in show_imgs:
# for i in range(1+10000,500+10000):
imgIds = coco.getImgIds(imgIds = [i])
img = coco.loadImgs(imgIds)[0]
annIds = coco.getAnnIds(imgIds=img['id'])
anns = coco.loadAnns(annIds)
image = cv2.imread('mix/'+img['file_name'])
flag = False
for ann in anns:
flag = True
bbox = ann['bbox']
category_id = int(ann['category_id'])
bbox[2] = bbox[2] + bbox[0]
bbox[3] = bbox[3] + bbox[1]
cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color_list[category_id%79].tolist(), thickness=2)
# cv2.putText(image, "{}".format(coco.cats[category_id]['name']), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color_list[category_id%79].tolist(), 2)
if flag:
cv2.imwrite(dirs + '/out{:0>6d}.png'.format(i), image) | 652 |
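The drawing loop above rewrites each COCO-style [x, y, width, height] box into corner coordinates in place before calling cv2.rectangle. A minimal standalone sketch of that conversion in plain Python (no pycocotools needed; the sample box is made up):

def xywh_to_xyxy(bbox):
    # COCO stores [x_min, y_min, width, height]; cv2.rectangle wants two corner points.
    x, y, w, h = bbox
    return (int(x), int(y)), (int(x + w), int(y + h))

print(xywh_to_xyxy([10.5, 20.0, 30.0, 40.0]))  # ((10, 20), (40, 60))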
493 | #include <cxxtest/TestSuite.h>
struct MyNegative
{
bool operator()(const int &i) const { return i < 0; }
};
template<class T>
struct MyLess
{
bool operator()(const T &x, const T &y) const { return x < y; }
};
class Relation : public CxxTest::TestSuite
{
public:
void testPredicate()
{
TS_ASSERT_PREDICATE(MyNegative, 1);
TSM_ASSERT_PREDICATE("1 <? 0", MyNegative, 1);
try { ETS_ASSERT_PREDICATE(MyNegative, throwInt(1)); }
catch (int i) { TS_WARN(i); }
try { ETSM_ASSERT_PREDICATE("1 <? 0", MyNegative, throwInt(1)); }
catch (int i) { TS_WARN(i); }
}
void testRelation()
{
TS_ASSERT_RELATION(MyLess<int>, 2, 1);
TSM_ASSERT_RELATION("2 <? 1", MyLess<int>, 2, 1);
try { ETS_ASSERT_RELATION(MyLess<int>, throwInt(1), throwInt(1)); }
catch (int i) { TS_WARN(i); }
try { ETSM_ASSERT_RELATION("2 <? 1", MyLess<int>, throwInt(1), throwInt(1)); }
catch (int i) { TS_WARN(i); }
}
int throwInt(int i)
{
throw i;
}
};
| 516 |
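MyNegative and MyLess above are the unary predicate and binary relation handed to TS_ASSERT_PREDICATE and TS_ASSERT_RELATION. A rough Python analogue of those functors, using callable classes (illustrative only, not part of CxxTest):

class MyNegative:
    """Unary predicate: true for negative integers."""
    def __call__(self, i):
        return i < 0

class MyLess:
    """Binary relation: true when x orders strictly before y."""
    def __call__(self, x, y):
        return x < y

assert MyNegative()(-1) and not MyNegative()(1)
assert MyLess()(1, 2) and not MyLess()(2, 1)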
1,199 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A linear layer for output projection.
This is based on code in tf.contrib.seq2seq.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.labs.exemplar_decoding.models.common import dimension_value
import tensorflow.compat.v1 as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
__all__ = [
"Linear",
"HyperDense",
]
class Linear(object):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of weight variable.
weights: (optional) a specified tensor.
dtype: data type for variables.
build_bias: boolean, whether to build a bias variable.
bias_initializer: starting value to initialize the bias
(default is all zeros).
weight_initializer: starting value to initialize the weight.
Raises:
ValueError: if inputs_shape is wrong.
"""
def __init__(self,
args,
output_size,
build_bias,
weights=None,
weight_initializer=None,
bias_initializer=None):
self._build_bias = build_bias
if args is None or (tf.contrib.framework.nest.is_sequence(args) and
not args):
raise ValueError("`args` must be specified")
if not tf.contrib.framework.nest.is_sequence(args):
args = [args]
self._is_sequence = False
else:
self._is_sequence = True
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
if weights is None:
self._weights = tf.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=weight_initializer)
else:
self._weights = weights
if build_bias:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = tf.constant_initializer(0.0, dtype=dtype)
self._biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
def __call__(self, args):
if not self._is_sequence:
args = [args]
if len(args) == 1:
res = tf.matmul(args[0], self._weights)
else:
# Explicitly creating a one for a minor performance improvement.
one = tf.constant(1, dtype=tf.int32)
res = tf.matmul(tf.concat(args, one), self._weights)
if self._build_bias:
res = tf.nn.bias_add(res, self._biases)
return res
class HyperDense(tf.keras.layers.Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
mem_input,
hps,
use_beam=False,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if "input_shape" not in kwargs and "input_dim" in kwargs:
kwargs["input_shape"] = (kwargs.pop("input_dim"),)
super(HyperDense, self).__init__(
activity_regularizer=tf.keras.regularizers.get(activity_regularizer),
**kwargs)
self.units = int(units)
self.activation = tf.keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self.bias_constraint = tf.keras.constraints.get(bias_constraint)
self._mem_input = mem_input
self.supports_masking = True
self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)
self._can_use_graph_functions = True
self._decoder_dim = hps.decoder_dim
self._rank = hps.rank
self._tau = hps.tau
self._sigma_norm = hps.sigma_norm
self._beam_width = hps.beam_width
self._use_beam = use_beam
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if dimension_value(input_shape[-1]) is None:
raise ValueError("The last dimension of the inputs to `Dense` "
"should be defined. Found `None`.")
last_dim = dimension_value(input_shape[-1])
self.input_spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: last_dim})
self._c = tf.get_variable(
"c", [self._decoder_dim, self._rank],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
sigma = tf.matmul(self._mem_input, self._c)
if self._sigma_norm > 0.:
sigma = tf.nn.l2_normalize(sigma, axis=1) * self._sigma_norm
elif self._sigma_norm == -1.:
sigma = tf.nn.softmax(sigma / self._tau, axis=1)
sigma_diag = tf.matrix_diag(sigma)
self._u = tf.get_variable(
"u", [last_dim, self._rank],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
self._v = tf.get_variable(
"v", [self._rank, self.units],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
self.kernel = tf.einsum("ij,ajk,kl->ail", self._u, sigma_diag, self._v)
if self._use_beam and self._beam_width:
self.kernel = tf.contrib.seq2seq.tile_batch(
self.kernel, multiplier=self._beam_width)
if self.use_bias:
self._b = self.add_weight(
"b",
shape=[self.units, self._rank],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
self.bias = tf.einsum("ij,aj->ai", self._b, sigma)
if self._use_beam and self._beam_width:
self.bias = tf.contrib.seq2seq.tile_batch(
self.bias, multiplier=self._beam_width)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs)
rank = tf.rank(inputs)
if rank > 2:
outputs = tf.einsum("aki,aij->akj", inputs, self.kernel)
# Reshape the output back to the original ndim of the input.
if not tf.executing_eagerly():
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
assert False
# outputs = tf.matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if dimension_value(input_shape[-1]) is None:
raise ValueError(
"The innermost dimension of input_shape must be defined, but saw: %s"
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
"units":
self.units,
"activation":
tf.keras.activations.serialize(self.activation),
"use_bias":
self.use_bias,
"kernel_initializer":
tf.keras.initializers.serialize(self.kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self.bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self.kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self.bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self.activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self.kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self.bias_constraint)
}
base_config = super(HyperDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| 4,912 |
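HyperDense.build above composes a per-example kernel as U · diag(sigma) · V, with sigma derived from mem_input through the learned matrix c, and call() applies it with a batched einsum. A minimal NumPy sketch of that shape arithmetic; the dimension sizes are made up and the sigma_norm > 0 branch is shown with sigma_norm = 1:

import numpy as np

batch, last_dim, rank, units, decoder_dim = 2, 5, 3, 4, 6   # made-up sizes
rng = np.random.default_rng(0)
mem_input = rng.standard_normal((batch, decoder_dim))
c = rng.standard_normal((decoder_dim, rank))   # plays the role of self._c
u = rng.standard_normal((last_dim, rank))      # self._u
v = rng.standard_normal((rank, units))         # self._v

sigma = mem_input @ c                                            # [batch, rank]
sigma = sigma / np.linalg.norm(sigma, axis=1, keepdims=True)     # sigma_norm > 0 branch, sigma_norm = 1
sigma_diag = sigma[:, :, None] * np.eye(rank)                    # batch of diagonal matrices
kernel = np.einsum("ij,ajk,kl->ail", u, sigma_diag, v)           # one kernel per example
assert kernel.shape == (batch, last_dim, units)

inputs = rng.standard_normal((batch, 7, last_dim))               # [batch, time, last_dim]
outputs = np.einsum("aki,aij->akj", inputs, kernel)              # the rank > 2 path in call()
assert outputs.shape == (batch, 7, units)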
479 | <reponame>balag91/gerrit
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.metrics;
import com.google.gerrit.extensions.registration.RegistrationHandle;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/** Exports no metrics, useful for running batch programs. */
public class DisabledMetricMaker extends MetricMaker {
@Override
public Counter0 newCounter(String name, Description desc) {
return new Counter0() {
@Override
public void incrementBy(long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1> Counter1<F1> newCounter(String name, Description desc, Field<F1> field1) {
return new Counter1<F1>() {
@Override
public void incrementBy(F1 field1, long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2> Counter2<F1, F2> newCounter(
String name, Description desc, Field<F1> field1, Field<F2> field2) {
return new Counter2<F1, F2>() {
@Override
public void incrementBy(F1 field1, F2 field2, long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2, F3> Counter3<F1, F2, F3> newCounter(
String name, Description desc, Field<F1> field1, Field<F2> field2, Field<F3> field3) {
return new Counter3<F1, F2, F3>() {
@Override
public void incrementBy(F1 field1, F2 field2, F3 field3, long value) {}
@Override
public void remove() {}
};
}
@Override
public Timer0 newTimer(String name, Description desc) {
return new Timer0() {
@Override
public void record(long value, TimeUnit unit) {}
@Override
public void remove() {}
};
}
@Override
public <F1> Timer1<F1> newTimer(String name, Description desc, Field<F1> field1) {
return new Timer1<F1>() {
@Override
public void record(F1 field1, long value, TimeUnit unit) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2> Timer2<F1, F2> newTimer(
String name, Description desc, Field<F1> field1, Field<F2> field2) {
return new Timer2<F1, F2>() {
@Override
public void record(F1 field1, F2 field2, long value, TimeUnit unit) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2, F3> Timer3<F1, F2, F3> newTimer(
String name, Description desc, Field<F1> field1, Field<F2> field2, Field<F3> field3) {
return new Timer3<F1, F2, F3>() {
@Override
public void record(F1 field1, F2 field2, F3 field3, long value, TimeUnit unit) {}
@Override
public void remove() {}
};
}
@Override
public Histogram0 newHistogram(String name, Description desc) {
return new Histogram0() {
@Override
public void record(long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1> Histogram1<F1> newHistogram(String name, Description desc, Field<F1> field1) {
return new Histogram1<F1>() {
@Override
public void record(F1 field1, long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2> Histogram2<F1, F2> newHistogram(
String name, Description desc, Field<F1> field1, Field<F2> field2) {
return new Histogram2<F1, F2>() {
@Override
public void record(F1 field1, F2 field2, long value) {}
@Override
public void remove() {}
};
}
@Override
public <F1, F2, F3> Histogram3<F1, F2, F3> newHistogram(
String name, Description desc, Field<F1> field1, Field<F2> field2, Field<F3> field3) {
return new Histogram3<F1, F2, F3>() {
@Override
public void record(F1 field1, F2 field2, F3 field3, long value) {}
@Override
public void remove() {}
};
}
@Override
public <V> CallbackMetric0<V> newCallbackMetric(
String name, Class<V> valueClass, Description desc) {
return new CallbackMetric0<V>() {
@Override
public void set(V value) {}
@Override
public void remove() {}
};
}
@Override
public <F1, V> CallbackMetric1<F1, V> newCallbackMetric(
String name, Class<V> valueClass, Description desc, Field<F1> field1) {
return new CallbackMetric1<F1, V>() {
@Override
public void set(F1 field1, V value) {}
@Override
public void forceCreate(F1 field1) {}
@Override
public void remove() {}
};
}
@Override
public RegistrationHandle newTrigger(Set<CallbackMetric<?>> metrics, Runnable trigger) {
return new RegistrationHandle() {
@Override
public void remove() {}
};
}
}
| 2,005 |
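DisabledMetricMaker above is a null-object factory: every metric it hands out accepts updates and discards them, so batch programs can keep their normal instrumentation calls. A compact Python sketch of the same pattern (names are illustrative, not part of the Gerrit API):

class NoopCounter:
    """Counter that accepts updates and discards them."""
    def increment_by(self, value, *fields):
        pass
    def remove(self):
        pass

class DisabledMetricMaker:
    """Factory returning metrics that do nothing, so callers never branch on 'metrics on?'."""
    def new_counter(self, name, description, *fields):
        return NoopCounter()

# Instrumented code stays identical whether metrics are enabled or disabled.
DisabledMetricMaker().new_counter("jobs/started", "number of batch jobs").increment_by(1)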
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.web.core;
import java.io.IOException;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.netbeans.modules.web.api.webmodule.WebFrameworks;
import org.netbeans.modules.web.api.webmodule.WebModule;
import org.netbeans.modules.web.core.jsploader.JspCompileUtil;
import org.netbeans.modules.web.spi.webmodule.WebFrameworkProvider;
import org.netbeans.modules.web.spi.webmodule.RequestParametersQueryImplementation;
import org.openide.filesystems.FileObject;
/** Static methods for execution parameters.
*
* @author <NAME>
*/
@org.openide.util.lookup.ServiceProvider(service=org.netbeans.modules.web.spi.webmodule.RequestParametersQueryImplementation.class)
public class WebExecSupport implements RequestParametersQueryImplementation {
private static final Logger LOG = Logger.getLogger(WebExecSupport.class.getName());
public static final String EA_REQPARAMS = "NetBeansAttrReqParams"; // NOI18N
/* Sets execution query string for the associated entry.
* @param qStr the query string
* @exception IOException if arguments cannot be set
*/
public static void setQueryString(FileObject fo, String qStr) throws IOException {
fo.setAttribute (EA_REQPARAMS, qStr);
}
/* Getter for query string associated with given file.
* @return the query string or empty string if no query string is associated
*/
public static String getQueryString(FileObject fo) {
try {
String qStr = (String)fo.getAttribute (EA_REQPARAMS);
if (qStr != null) {
if ((qStr.length() > 0) && (!qStr.startsWith("?"))) // NOI18N
qStr = "?" + qStr; // NOI18N
return qStr;
}
} catch (Exception ex) {
LOG.log(Level.FINE, "error", ex);
}
return ""; // NOI18N
}
/** Returns a web execution URL for a file
* @param f file to run
* @return part of URL string corresponding to file and parameters to use for execution. May be null if it can not be determined.
*/
public String getFileAndParameters(FileObject f) {
List <WebFrameworkProvider> frameworks = WebFrameworks.getFrameworks();
String url = null;
WebModule wm = WebModule.getWebModule(f);
if (wm != null && frameworks.size() > 0){
for ( WebFrameworkProvider frameworkProvider : frameworks) {
if (frameworkProvider.isInWebModule(wm)){
url = frameworkProvider.getServletPath(f);
if (url != null)
break;
}
}
}
if (url == null && wm != null) {
FileObject docBase = wm.getDocumentBase();
if (docBase != null)
url = JspCompileUtil.findRelativeContextPath(docBase, f);
}
if (url != null) {
url = url + getQueryString(f);
url = url.replace(" ", "%20");
}
return url;
}
}
| 1,433 |
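getQueryString above returns an empty string when no attribute is stored and otherwise guarantees a leading '?'. A tiny Python sketch of that normalization, with the FileObject attribute lookup abstracted away:

def normalize_query_string(raw):
    """Return '' for a missing attribute, otherwise guarantee a leading '?'."""
    if not raw:
        return ""
    return raw if raw.startswith("?") else "?" + raw

assert normalize_query_string(None) == ""
assert normalize_query_string("a=1&b=2") == "?a=1&b=2"
assert normalize_query_string("?a=1") == "?a=1"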
3,459 | #ifdef PPU_CPP
//INIDISP
void PPU::mmio_w2100(uint8 value) {
if(regs.display_disabled == true && cpu.vcounter() == (!overscan() ? 225 : 240)) {
regs.oam_addr = regs.oam_baseaddr << 1;
regs.oam_firstsprite = (regs.oam_priority == false) ? 0 : (regs.oam_addr >> 2) & 127;
}
regs.display_disabled = !!(value & 0x80);
regs.display_brightness = value & 15;
}
//OBSEL
void PPU::mmio_w2101(uint8 value) {
regs.oam_basesize = (value >> 5) & 7;
regs.oam_nameselect = (value >> 3) & 3;
regs.oam_tdaddr = (value & 3) << 14;
}
//OAMADDL
void PPU::mmio_w2102(uint8 data) {
regs.oam_baseaddr = (regs.oam_baseaddr & ~0xff) | (data << 0);
regs.oam_baseaddr &= 0x01ff;
regs.oam_addr = regs.oam_baseaddr << 1;
regs.oam_firstsprite = (regs.oam_priority == false) ? 0 : (regs.oam_addr >> 2) & 127;
}
//OAMADDH
void PPU::mmio_w2103(uint8 data) {
regs.oam_priority = !!(data & 0x80);
regs.oam_baseaddr = (regs.oam_baseaddr & 0xff) | (data << 8);
regs.oam_baseaddr &= 0x01ff;
regs.oam_addr = regs.oam_baseaddr << 1;
regs.oam_firstsprite = (regs.oam_priority == false) ? 0 : (regs.oam_addr >> 2) & 127;
}
//OAMDATA
void PPU::mmio_w2104(uint8 data) {
if(regs.oam_addr & 0x0200) {
oam_mmio_write(regs.oam_addr, data);
} else if((regs.oam_addr & 1) == 0) {
regs.oam_latchdata = data;
} else {
oam_mmio_write((regs.oam_addr & ~1) + 0, regs.oam_latchdata);
oam_mmio_write((regs.oam_addr & ~1) + 1, data);
}
regs.oam_addr++;
regs.oam_addr &= 0x03ff;
regs.oam_firstsprite = (regs.oam_priority == false) ? 0 : (regs.oam_addr >> 2) & 127;
}
//BGMODE
void PPU::mmio_w2105(uint8 value) {
regs.bg_tilesize[BG4] = !!(value & 0x80);
regs.bg_tilesize[BG3] = !!(value & 0x40);
regs.bg_tilesize[BG2] = !!(value & 0x20);
regs.bg_tilesize[BG1] = !!(value & 0x10);
regs.bg3_priority = !!(value & 0x08);
regs.bg_mode = (value & 7);
}
//MOSAIC
void PPU::mmio_w2106(uint8 value) {
regs.mosaic_size = (value >> 4) & 15;
regs.mosaic_enabled[BG4] = !!(value & 0x08);
regs.mosaic_enabled[BG3] = !!(value & 0x04);
regs.mosaic_enabled[BG2] = !!(value & 0x02);
regs.mosaic_enabled[BG1] = !!(value & 0x01);
}
//BG1SC
void PPU::mmio_w2107(uint8 value) {
regs.bg_scaddr[BG1] = (value & 0x7c) << 9;
regs.bg_scsize[BG1] = value & 3;
}
//BG2SC
void PPU::mmio_w2108(uint8 value) {
regs.bg_scaddr[BG2] = (value & 0x7c) << 9;
regs.bg_scsize[BG2] = value & 3;
}
//BG3SC
void PPU::mmio_w2109(uint8 value) {
regs.bg_scaddr[BG3] = (value & 0x7c) << 9;
regs.bg_scsize[BG3] = value & 3;
}
//BG4SC
void PPU::mmio_w210a(uint8 value) {
regs.bg_scaddr[BG4] = (value & 0x7c) << 9;
regs.bg_scsize[BG4] = value & 3;
}
//BG12NBA
void PPU::mmio_w210b(uint8 value) {
regs.bg_tdaddr[BG1] = (value & 0x07) << 13;
regs.bg_tdaddr[BG2] = (value & 0x70) << 9;
}
//BG34NBA
void PPU::mmio_w210c(uint8 value) {
regs.bg_tdaddr[BG3] = (value & 0x07) << 13;
regs.bg_tdaddr[BG4] = (value & 0x70) << 9;
}
//BG1HOFS
void PPU::mmio_w210d(uint8 value) {
regs.m7_hofs = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
regs.bg_hofs[BG1] = (value << 8) | (regs.bg_ofslatch & ~7) | ((regs.bg_hofs[BG1] >> 8) & 7);
regs.bg_ofslatch = value;
}
//BG1VOFS
void PPU::mmio_w210e(uint8 value) {
regs.m7_vofs = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
regs.bg_vofs[BG1] = (value << 8) | (regs.bg_ofslatch);
regs.bg_ofslatch = value;
}
//BG2HOFS
void PPU::mmio_w210f(uint8 value) {
regs.bg_hofs[BG2] = (value << 8) | (regs.bg_ofslatch & ~7) | ((regs.bg_hofs[BG2] >> 8) & 7);
regs.bg_ofslatch = value;
}
//BG2VOFS
void PPU::mmio_w2110(uint8 value) {
regs.bg_vofs[BG2] = (value << 8) | (regs.bg_ofslatch);
regs.bg_ofslatch = value;
}
//BG3HOFS
void PPU::mmio_w2111(uint8 value) {
regs.bg_hofs[BG3] = (value << 8) | (regs.bg_ofslatch & ~7) | ((regs.bg_hofs[BG3] >> 8) & 7);
regs.bg_ofslatch = value;
}
//BG3VOFS
void PPU::mmio_w2112(uint8 value) {
regs.bg_vofs[BG3] = (value << 8) | (regs.bg_ofslatch);
regs.bg_ofslatch = value;
}
//BG4HOFS
void PPU::mmio_w2113(uint8 value) {
regs.bg_hofs[BG4] = (value << 8) | (regs.bg_ofslatch & ~7) | ((regs.bg_hofs[BG4] >> 8) & 7);
regs.bg_ofslatch = value;
}
//BG4VOFS
void PPU::mmio_w2114(uint8 value) {
regs.bg_vofs[BG4] = (value << 8) | (regs.bg_ofslatch);
regs.bg_ofslatch = value;
}
//VMAIN
void PPU::mmio_w2115(uint8 value) {
regs.vram_incmode = !!(value & 0x80);
regs.vram_mapping = (value >> 2) & 3;
switch(value & 3) {
case 0: regs.vram_incsize = 1; break;
case 1: regs.vram_incsize = 32; break;
case 2: regs.vram_incsize = 128; break;
case 3: regs.vram_incsize = 128; break;
}
}
//VMADDL
void PPU::mmio_w2116(uint8 value) {
regs.vram_addr = (regs.vram_addr & 0xff00) | value;
uint16 addr = get_vram_address();
regs.vram_readbuffer = vram_mmio_read(addr + 0);
regs.vram_readbuffer |= vram_mmio_read(addr + 1) << 8;
}
//VMADDH
void PPU::mmio_w2117(uint8 value) {
regs.vram_addr = (value << 8) | (regs.vram_addr & 0x00ff);
uint16 addr = get_vram_address();
regs.vram_readbuffer = vram_mmio_read(addr + 0);
regs.vram_readbuffer |= vram_mmio_read(addr + 1) << 8;
}
//VMDATAL
void PPU::mmio_w2118(uint8 value) {
uint16 addr = get_vram_address();
vram_mmio_write(addr, value);
bg_tiledata_state[TILE_2BIT][(addr >> 4)] = 1;
bg_tiledata_state[TILE_4BIT][(addr >> 5)] = 1;
bg_tiledata_state[TILE_8BIT][(addr >> 6)] = 1;
if(regs.vram_incmode == 0) {
regs.vram_addr += regs.vram_incsize;
}
}
//VMDATAH
void PPU::mmio_w2119(uint8 value) {
uint16 addr = get_vram_address() + 1;
vram_mmio_write(addr, value);
bg_tiledata_state[TILE_2BIT][(addr >> 4)] = 1;
bg_tiledata_state[TILE_4BIT][(addr >> 5)] = 1;
bg_tiledata_state[TILE_8BIT][(addr >> 6)] = 1;
if(regs.vram_incmode == 1) {
regs.vram_addr += regs.vram_incsize;
}
}
//M7SEL
void PPU::mmio_w211a(uint8 value) {
regs.mode7_repeat = (value >> 6) & 3;
regs.mode7_vflip = !!(value & 0x02);
regs.mode7_hflip = !!(value & 0x01);
}
//M7A
void PPU::mmio_w211b(uint8 value) {
regs.m7a = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//M7B
void PPU::mmio_w211c(uint8 value) {
regs.m7b = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//M7C
void PPU::mmio_w211d(uint8 value) {
regs.m7c = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//M7D
void PPU::mmio_w211e(uint8 value) {
regs.m7d = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//M7X
void PPU::mmio_w211f(uint8 value) {
regs.m7x = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//M7Y
void PPU::mmio_w2120(uint8 value) {
regs.m7y = (value << 8) | regs.m7_latch;
regs.m7_latch = value;
}
//CGADD
void PPU::mmio_w2121(uint8 value) {
regs.cgram_addr = value << 1;
}
//CGDATA
//note: CGRAM palette data format is 15-bits
//(0,bbbbb,ggggg,rrrrr). Highest bit is ignored,
//as evidenced by $213b CGRAM data reads.
//
//anomie indicates writes to CGDATA work the same
//as writes to OAMDATA's low table. need to verify
//this on hardware.
void PPU::mmio_w2122(uint8 value) {
if(!(regs.cgram_addr & 1)) {
regs.cgram_latchdata = value;
} else {
cgram_mmio_write((regs.cgram_addr & 0x01fe), regs.cgram_latchdata);
cgram_mmio_write((regs.cgram_addr & 0x01fe) + 1, value & 0x7f);
}
regs.cgram_addr++;
regs.cgram_addr &= 0x01ff;
}
//W12SEL
void PPU::mmio_w2123(uint8 value) {
regs.window2_enabled[BG2] = !!(value & 0x80);
regs.window2_invert [BG2] = !!(value & 0x40);
regs.window1_enabled[BG2] = !!(value & 0x20);
regs.window1_invert [BG2] = !!(value & 0x10);
regs.window2_enabled[BG1] = !!(value & 0x08);
regs.window2_invert [BG1] = !!(value & 0x04);
regs.window1_enabled[BG1] = !!(value & 0x02);
regs.window1_invert [BG1] = !!(value & 0x01);
}
//W34SEL
void PPU::mmio_w2124(uint8 value) {
regs.window2_enabled[BG4] = !!(value & 0x80);
regs.window2_invert [BG4] = !!(value & 0x40);
regs.window1_enabled[BG4] = !!(value & 0x20);
regs.window1_invert [BG4] = !!(value & 0x10);
regs.window2_enabled[BG3] = !!(value & 0x08);
regs.window2_invert [BG3] = !!(value & 0x04);
regs.window1_enabled[BG3] = !!(value & 0x02);
regs.window1_invert [BG3] = !!(value & 0x01);
}
//WOBJSEL
void PPU::mmio_w2125(uint8 value) {
regs.window2_enabled[COL] = !!(value & 0x80);
regs.window2_invert [COL] = !!(value & 0x40);
regs.window1_enabled[COL] = !!(value & 0x20);
regs.window1_invert [COL] = !!(value & 0x10);
regs.window2_enabled[OAM] = !!(value & 0x08);
regs.window2_invert [OAM] = !!(value & 0x04);
regs.window1_enabled[OAM] = !!(value & 0x02);
regs.window1_invert [OAM] = !!(value & 0x01);
}
//WH0
void PPU::mmio_w2126(uint8 value) {
regs.window1_left = value;
}
//WH1
void PPU::mmio_w2127(uint8 value) {
regs.window1_right = value;
}
//WH2
void PPU::mmio_w2128(uint8 value) {
regs.window2_left = value;
}
//WH3
void PPU::mmio_w2129(uint8 value) {
regs.window2_right = value;
}
//WBGLOG
void PPU::mmio_w212a(uint8 value) {
regs.window_mask[BG4] = (value >> 6) & 3;
regs.window_mask[BG3] = (value >> 4) & 3;
regs.window_mask[BG2] = (value >> 2) & 3;
regs.window_mask[BG1] = (value ) & 3;
}
//WOBJLOG
void PPU::mmio_w212b(uint8 value) {
regs.window_mask[COL] = (value >> 2) & 3;
regs.window_mask[OAM] = (value ) & 3;
}
//TM
void PPU::mmio_w212c(uint8 value) {
regs.bg_enabled[OAM] = !!(value & 0x10);
regs.bg_enabled[BG4] = !!(value & 0x08);
regs.bg_enabled[BG3] = !!(value & 0x04);
regs.bg_enabled[BG2] = !!(value & 0x02);
regs.bg_enabled[BG1] = !!(value & 0x01);
}
//TS
void PPU::mmio_w212d(uint8 value) {
regs.bgsub_enabled[OAM] = !!(value & 0x10);
regs.bgsub_enabled[BG4] = !!(value & 0x08);
regs.bgsub_enabled[BG3] = !!(value & 0x04);
regs.bgsub_enabled[BG2] = !!(value & 0x02);
regs.bgsub_enabled[BG1] = !!(value & 0x01);
}
//TMW
void PPU::mmio_w212e(uint8 value) {
regs.window_enabled[OAM] = !!(value & 0x10);
regs.window_enabled[BG4] = !!(value & 0x08);
regs.window_enabled[BG3] = !!(value & 0x04);
regs.window_enabled[BG2] = !!(value & 0x02);
regs.window_enabled[BG1] = !!(value & 0x01);
}
//TSW
void PPU::mmio_w212f(uint8 value) {
regs.sub_window_enabled[OAM] = !!(value & 0x10);
regs.sub_window_enabled[BG4] = !!(value & 0x08);
regs.sub_window_enabled[BG3] = !!(value & 0x04);
regs.sub_window_enabled[BG2] = !!(value & 0x02);
regs.sub_window_enabled[BG1] = !!(value & 0x01);
}
//CGWSEL
void PPU::mmio_w2130(uint8 value) {
regs.color_mask = (value >> 6) & 3;
regs.colorsub_mask = (value >> 4) & 3;
regs.addsub_mode = !!(value & 0x02);
regs.direct_color = !!(value & 0x01);
}
//CGADDSUB
void PPU::mmio_w2131(uint8 value) {
regs.color_mode = !!(value & 0x80);
regs.color_halve = !!(value & 0x40);
regs.color_enabled[BACK] = !!(value & 0x20);
regs.color_enabled[OAM] = !!(value & 0x10);
regs.color_enabled[BG4] = !!(value & 0x08);
regs.color_enabled[BG3] = !!(value & 0x04);
regs.color_enabled[BG2] = !!(value & 0x02);
regs.color_enabled[BG1] = !!(value & 0x01);
}
//COLDATA
void PPU::mmio_w2132(uint8 value) {
if(value & 0x80) regs.color_b = value & 0x1f;
if(value & 0x40) regs.color_g = value & 0x1f;
if(value & 0x20) regs.color_r = value & 0x1f;
regs.color_rgb = (regs.color_r)
| (regs.color_g << 5)
| (regs.color_b << 10);
}
//SETINI
void PPU::mmio_w2133(uint8 value) {
regs.mode7_extbg = !!(value & 0x40);
regs.pseudo_hires = !!(value & 0x08);
regs.overscan = !!(value & 0x04);
regs.oam_interlace = !!(value & 0x02);
regs.interlace = !!(value & 0x01);
display.overscan = regs.overscan;
sprite_list_valid = false;
}
//MPYL
uint8 PPU::mmio_r2134() {
uint32 r;
r = ((int16)regs.m7a * (int8)(regs.m7b >> 8));
regs.ppu1_mdr = r;
return regs.ppu1_mdr;
}
//MPYM
uint8 PPU::mmio_r2135() {
uint32 r;
r = ((int16)regs.m7a * (int8)(regs.m7b >> 8));
regs.ppu1_mdr = r >> 8;
return regs.ppu1_mdr;
}
//MPYH
uint8 PPU::mmio_r2136() {
uint32 r;
r = ((int16)regs.m7a * (int8)(regs.m7b >> 8));
regs.ppu1_mdr = r >> 16;
return regs.ppu1_mdr;
}
//SLHV
uint8 PPU::mmio_r2137() {
if(cpu.pio() & 0x80) {
latch_counters();
}
return cpu.regs.mdr;
}
//OAMDATAREAD
uint8 PPU::mmio_r2138() {
regs.ppu1_mdr = oam_mmio_read(regs.oam_addr);
regs.oam_addr++;
regs.oam_addr &= 0x03ff;
regs.oam_firstsprite = (regs.oam_priority == false) ? 0 : (regs.oam_addr >> 2) & 127;
return regs.ppu1_mdr;
}
//VMDATALREAD
uint8 PPU::mmio_r2139() {
uint16 addr = get_vram_address();
regs.ppu1_mdr = regs.vram_readbuffer;
if(regs.vram_incmode == 0) {
addr &= 0xfffe;
regs.vram_readbuffer = vram_mmio_read(addr + 0);
regs.vram_readbuffer |= vram_mmio_read(addr + 1) << 8;
regs.vram_addr += regs.vram_incsize;
}
return regs.ppu1_mdr;
}
//VMDATAHREAD
uint8 PPU::mmio_r213a() {
uint16 addr = get_vram_address() + 1;
regs.ppu1_mdr = regs.vram_readbuffer >> 8;
if(regs.vram_incmode == 1) {
addr &= 0xfffe;
regs.vram_readbuffer = vram_mmio_read(addr + 0);
regs.vram_readbuffer |= vram_mmio_read(addr + 1) << 8;
regs.vram_addr += regs.vram_incsize;
}
return regs.ppu1_mdr;
}
//CGDATAREAD
//note: CGRAM palette data is 15-bits (0,bbbbb,ggggg,rrrrr)
//therefore, the high byte read from each color does not
//update bit 7 of the PPU2 MDR.
uint8 PPU::mmio_r213b() {
if(!(regs.cgram_addr & 1)) {
regs.ppu2_mdr = cgram_mmio_read(regs.cgram_addr) & 0xff;
} else {
regs.ppu2_mdr &= 0x80;
regs.ppu2_mdr |= cgram_mmio_read(regs.cgram_addr) & 0x7f;
}
regs.cgram_addr++;
regs.cgram_addr &= 0x01ff;
return regs.ppu2_mdr;
}
//OPHCT
uint8 PPU::mmio_r213c() {
if(!regs.latch_hcounter) {
regs.ppu2_mdr = regs.hcounter & 0xff;
} else {
regs.ppu2_mdr &= 0xfe;
regs.ppu2_mdr |= (regs.hcounter >> 8) & 1;
}
regs.latch_hcounter ^= 1;
return regs.ppu2_mdr;
}
//OPVCT
uint8 PPU::mmio_r213d() {
if(!regs.latch_vcounter) {
regs.ppu2_mdr = regs.vcounter & 0xff;
} else {
regs.ppu2_mdr &= 0xfe;
regs.ppu2_mdr |= (regs.vcounter >> 8) & 1;
}
regs.latch_vcounter ^= 1;
return regs.ppu2_mdr;
}
//STAT77
uint8 PPU::mmio_r213e() {
uint8 r = 0x00;
r |= (regs.time_over) ? 0x80 : 0x00;
r |= (regs.range_over) ? 0x40 : 0x00;
r |= (regs.ppu1_mdr & 0x10);
r |= (ppu1_version & 0x0f);
regs.ppu1_mdr = r;
return regs.ppu1_mdr;
}
//STAT78
uint8 PPU::mmio_r213f() {
uint8 r = 0x00;
regs.latch_hcounter = 0;
regs.latch_vcounter = 0;
r |= cpu.field() << 7;
if(!(cpu.pio() & 0x80)) {
r |= 0x40;
} else if(regs.counters_latched == true) {
r |= 0x40;
regs.counters_latched = false;
}
r |= (regs.ppu2_mdr & 0x20);
r |= (region << 4); //0 = NTSC, 1 = PAL
r |= (ppu2_version & 0x0f);
regs.ppu2_mdr = r;
return regs.ppu2_mdr;
}
uint8 PPU::mmio_read(unsigned addr) {
scheduler.sync_cpuppu();
switch(addr & 0xffff) {
case 0x2104:
case 0x2105:
case 0x2106:
case 0x2108:
case 0x2109:
case 0x210a:
case 0x2114:
case 0x2115:
case 0x2116:
case 0x2118:
case 0x2119:
case 0x211a:
case 0x2124:
case 0x2125:
case 0x2126:
case 0x2128:
case 0x2129:
case 0x212a: return regs.ppu1_mdr;
case 0x2134: return mmio_r2134(); //MPYL
case 0x2135: return mmio_r2135(); //MPYM
case 0x2136: return mmio_r2136(); //MPYH
case 0x2137: return mmio_r2137(); //SLHV
case 0x2138: return mmio_r2138(); //OAMDATAREAD
case 0x2139: return mmio_r2139(); //VMDATALREAD
case 0x213a: return mmio_r213a(); //VMDATAHREAD
case 0x213b: return mmio_r213b(); //CGDATAREAD
case 0x213c: return mmio_r213c(); //OPHCT
case 0x213d: return mmio_r213d(); //OPVCT
case 0x213e: return mmio_r213e(); //STAT77
case 0x213f: return mmio_r213f(); //STAT78
}
return cpu.regs.mdr;
}
void PPU::mmio_write(unsigned addr, uint8 data) {
scheduler.sync_cpuppu();
switch(addr & 0xffff) {
case 0x2100: return mmio_w2100(data); //INIDISP
case 0x2101: return mmio_w2101(data); //OBSEL
case 0x2102: return mmio_w2102(data); //OAMADDL
case 0x2103: return mmio_w2103(data); //OAMADDH
case 0x2104: return mmio_w2104(data); //OAMDATA
case 0x2105: return mmio_w2105(data); //BGMODE
case 0x2106: return mmio_w2106(data); //MOSAIC
case 0x2107: return mmio_w2107(data); //BG1SC
case 0x2108: return mmio_w2108(data); //BG2SC
case 0x2109: return mmio_w2109(data); //BG3SC
case 0x210a: return mmio_w210a(data); //BG4SC
case 0x210b: return mmio_w210b(data); //BG12NBA
case 0x210c: return mmio_w210c(data); //BG34NBA
case 0x210d: return mmio_w210d(data); //BG1HOFS
case 0x210e: return mmio_w210e(data); //BG1VOFS
case 0x210f: return mmio_w210f(data); //BG2HOFS
case 0x2110: return mmio_w2110(data); //BG2VOFS
case 0x2111: return mmio_w2111(data); //BG3HOFS
case 0x2112: return mmio_w2112(data); //BG3VOFS
case 0x2113: return mmio_w2113(data); //BG4HOFS
case 0x2114: return mmio_w2114(data); //BG4VOFS
case 0x2115: return mmio_w2115(data); //VMAIN
case 0x2116: return mmio_w2116(data); //VMADDL
case 0x2117: return mmio_w2117(data); //VMADDH
case 0x2118: return mmio_w2118(data); //VMDATAL
case 0x2119: return mmio_w2119(data); //VMDATAH
case 0x211a: return mmio_w211a(data); //M7SEL
case 0x211b: return mmio_w211b(data); //M7A
case 0x211c: return mmio_w211c(data); //M7B
case 0x211d: return mmio_w211d(data); //M7C
case 0x211e: return mmio_w211e(data); //M7D
case 0x211f: return mmio_w211f(data); //M7X
case 0x2120: return mmio_w2120(data); //M7Y
case 0x2121: return mmio_w2121(data); //CGADD
case 0x2122: return mmio_w2122(data); //CGDATA
case 0x2123: return mmio_w2123(data); //W12SEL
case 0x2124: return mmio_w2124(data); //W34SEL
case 0x2125: return mmio_w2125(data); //WOBJSEL
case 0x2126: return mmio_w2126(data); //WH0
case 0x2127: return mmio_w2127(data); //WH1
case 0x2128: return mmio_w2128(data); //WH2
case 0x2129: return mmio_w2129(data); //WH3
case 0x212a: return mmio_w212a(data); //WBGLOG
case 0x212b: return mmio_w212b(data); //WOBJLOG
case 0x212c: return mmio_w212c(data); //TM
case 0x212d: return mmio_w212d(data); //TS
case 0x212e: return mmio_w212e(data); //TMW
case 0x212f: return mmio_w212f(data); //TSW
case 0x2130: return mmio_w2130(data); //CGWSEL
case 0x2131: return mmio_w2131(data); //CGADDSUB
case 0x2132: return mmio_w2132(data); //COLDATA
case 0x2133: return mmio_w2133(data); //SETINI
}
}
#endif
| 9,434 |
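Several Mode 7 registers above ($211b-$2120) assemble a 16-bit value from two consecutive 8-bit writes through regs.m7_latch, and $2134-$2136 expose the signed 16x8 product of m7a and the high byte of m7b one byte at a time. A small Python sketch of both mechanisms, with register state reduced to a dict and no timing model:

def write_m7a(regs, value):
    # Two consecutive 8-bit writes assemble one 16-bit register: the new byte becomes
    # the high half, the previously latched byte becomes the low half.
    regs["m7a"] = ((value << 8) | regs["m7_latch"]) & 0xFFFF
    regs["m7_latch"] = value

def to_signed(value, bits):
    return value - (1 << bits) if value & (1 << (bits - 1)) else value

def read_mpy(regs):
    # MPYL/MPYM/MPYH expose the product of m7a (signed 16-bit) and the high
    # byte of m7b (signed 8-bit), one byte per read.
    product = (to_signed(regs["m7a"], 16) * to_signed(regs["m7b"] >> 8, 8)) & 0xFFFFFF
    return product & 0xFF, (product >> 8) & 0xFF, (product >> 16) & 0xFF

regs = {"m7_latch": 0, "m7a": 0, "m7b": 0xFE00}  # high byte of m7b is -2
write_m7a(regs, 0x34)                            # first write: low byte
write_m7a(regs, 0x12)                            # second write: m7a becomes 0x1234
print(read_mpy(regs))                            # bytes of 0x1234 * -2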
528 | <filename>package.json
{
"name": "com.looooong.srp.vxgi",
"version": "0.1.0",
"displayName": "Voxel-based Global Illumination",
"unity": "2019.1",
"dependencies": {
"com.unity.postprocessing": "2.1.7"
},
"keywords": [
"graphics",
"global illumination",
"lighting",
"SRP",
"scriptable render pipeline"
],
"author": {
"name": "<NAME>",
"email": "<EMAIL>"
}
} | 178 |
416 | <reponame>ljz663/tencentcloud-sdk-java<gh_stars>100-1000
/*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.bizlive.v20190313.models;
import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;
public class CreateSessionRequest extends AbstractModel{
/**
* Client session information, obtained from the JSSDK request
*/
@SerializedName("ClientSession")
@Expose
private String ClientSession;
/**
* Game ID
*/
@SerializedName("GameId")
@Expose
private String GameId;
/**
* Game user ID
*/
@SerializedName("UserId")
@Expose
private String UserId;
/**
* Game parameters
*/
@SerializedName("GameParas")
@Expose
private String GameParas;
/**
* Game region
*/
@SerializedName("GameRegion")
@Expose
private String GameRegion;
/**
* Background image URL
*/
@SerializedName("ImageUrl")
@Expose
private String ImageUrl;
/**
* Resolution
*/
@SerializedName("Resolution")
@Expose
private String Resolution;
/**
* Get client session information, obtained from the JSSDK request
* @return ClientSession Client session information, obtained from the JSSDK request
*/
public String getClientSession() {
return this.ClientSession;
}
/**
* Set client session information, obtained from the JSSDK request
* @param ClientSession Client session information, obtained from the JSSDK request
*/
public void setClientSession(String ClientSession) {
this.ClientSession = ClientSession;
}
/**
* Get game ID
* @return GameId Game ID
*/
public String getGameId() {
return this.GameId;
}
/**
* Set game ID
* @param GameId Game ID
*/
public void setGameId(String GameId) {
this.GameId = GameId;
}
/**
* Get game user ID
* @return UserId Game user ID
*/
public String getUserId() {
return this.UserId;
}
/**
* Set game user ID
* @param UserId Game user ID
*/
public void setUserId(String UserId) {
this.UserId = UserId;
}
/**
* Get game parameters
* @return GameParas Game parameters
*/
public String getGameParas() {
return this.GameParas;
}
/**
* Set game parameters
* @param GameParas Game parameters
*/
public void setGameParas(String GameParas) {
this.GameParas = GameParas;
}
/**
* Get game region
* @return GameRegion Game region
*/
public String getGameRegion() {
return this.GameRegion;
}
/**
* Set game region
* @param GameRegion Game region
*/
public void setGameRegion(String GameRegion) {
this.GameRegion = GameRegion;
}
/**
* Get background image URL
* @return ImageUrl Background image URL
*/
public String getImageUrl() {
return this.ImageUrl;
}
/**
* Set background image URL
* @param ImageUrl Background image URL
*/
public void setImageUrl(String ImageUrl) {
this.ImageUrl = ImageUrl;
}
/**
* Get resolution
* @return Resolution Resolution
*/
public String getResolution() {
return this.Resolution;
}
/**
* Set resolution
* @param Resolution Resolution
*/
public void setResolution(String Resolution) {
this.Resolution = Resolution;
}
public CreateSessionRequest() {
}
/**
* NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
* and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
*/
public CreateSessionRequest(CreateSessionRequest source) {
if (source.ClientSession != null) {
this.ClientSession = new String(source.ClientSession);
}
if (source.GameId != null) {
this.GameId = new String(source.GameId);
}
if (source.UserId != null) {
this.UserId = new String(source.UserId);
}
if (source.GameParas != null) {
this.GameParas = new String(source.GameParas);
}
if (source.GameRegion != null) {
this.GameRegion = new String(source.GameRegion);
}
if (source.ImageUrl != null) {
this.ImageUrl = new String(source.ImageUrl);
}
if (source.Resolution != null) {
this.Resolution = new String(source.Resolution);
}
}
/**
* Internal implementation, normal users should not use it.
*/
public void toMap(HashMap<String, String> map, String prefix) {
this.setParamSimple(map, prefix + "ClientSession", this.ClientSession);
this.setParamSimple(map, prefix + "GameId", this.GameId);
this.setParamSimple(map, prefix + "UserId", this.UserId);
this.setParamSimple(map, prefix + "GameParas", this.GameParas);
this.setParamSimple(map, prefix + "GameRegion", this.GameRegion);
this.setParamSimple(map, prefix + "ImageUrl", this.ImageUrl);
this.setParamSimple(map, prefix + "Resolution", this.Resolution);
}
}
| 2,599 |
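toMap above flattens every field into a prefixed key/value pair for the request. A rough Python sketch of that flattening (the null-skipping behavior of setParamSimple is an assumption here):

def to_map(fields, prefix=""):
    """Flatten non-None fields into flat request parameters, e.g. {'GameId': 'g1'}."""
    return {prefix + key: value for key, value in fields.items() if value is not None}

params = to_map({"ClientSession": "abc", "GameId": "g1", "UserId": None})
print(params)  # {'ClientSession': 'abc', 'GameId': 'g1'}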
310 | <filename>concepts/basics/links.json
[
{
"url": "https://ruby-doc.org/core-2.7.0/Integer.html",
"description": "integers-docs"
}
]
| 64 |
320 | <reponame>yvesjores/AndroidSensorsProgramming
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the Lesser GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jjil.algorithm;
/**
* This is an implementation of stereo matching using dynamic programming.
* Copyright 2008 by <NAME>
* @author webb
*/
public class StereoDynProg {
private int rnLeft[], rnRight[];
public StereoDynProg(int[] rnLeft, int[] rnRight) {
this.rnLeft = rnLeft;
this.rnRight = rnRight;
}
public int[] doMatch() {
// first build an array matching every column of rnLeft with every
// column of rnRight
int[][] rnCost = new int[this.rnLeft.length][this.rnRight.length];
for (int i=0; i<this.rnLeft.length; i++) {
for (int j=0; j<this.rnRight.length; j++) {
rnCost[i][j] = Math.abs(this.rnLeft[i] - this.rnRight[j]);
}
}
// calculate minimal cost at each node
int[][] rnMin = new int[this.rnLeft.length][this.rnRight.length];
// initialize first row and column
for (int i=0; i<rnLeft.length; i++) {
rnMin[i][0] = rnCost[i][0];
}
for (int j=0; j<rnRight.length; j++) {
rnMin[0][j] = rnCost[0][j];
}
// calculate interior of the array
for (int i=1; i<this.rnLeft.length; i++) {
for (int j=1; j<this.rnRight.length; j++) {
rnMin[i][j] = rnCost[i][j] +
Math.min(
Math.min(
rnMin[i-1][j-1], rnMin[i-1][j]),
rnMin[i][j-1]);
}
}
// backtrack from terminal node to get match
int[] rnDepth = new int[rnRight.length];
int nMatchLeft = rnLeft.length-1;
int nMatchRight = rnRight.length - 1;
while (nMatchRight>=0) {
rnDepth[nMatchRight] = nMatchLeft;
// calculate the cost we should match
int nCost = rnMin[nMatchLeft][nMatchRight] -
rnCost[nMatchLeft][nMatchRight];
// check if next node is to the left, up, or up and to the left
if (nMatchRight>0 && nCost == rnMin[nMatchLeft][nMatchRight-1]) {
// nMatchLeft stays the same
nMatchRight--;
} else if (nMatchLeft>0 && nCost == rnMin[nMatchLeft-1][nMatchRight]) {
// nMatchRight stays the same
nMatchLeft--;
} else {
// nMatchRight > 0, nMatchLeft > 0
// nCost == rnMin[nMatchLeft-1][nMatchRight-1]
nMatchRight--;
nMatchLeft--;
}
}
return rnDepth;
}
}
| 1,589 |
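A Python rendering of the same recurrence on a toy scanline pair may help check the cost table: each cell adds the local matching cost |left[i] - right[j]| to the cheapest of the diagonal, upper, and left predecessors (the input values are made up):

def stereo_dp_cost(left, right):
    """Fill the DP table: best[i][j] = |left[i] - right[j]| plus the minimum of the
    diagonal, upper, and left predecessor cells, with the first row and column
    seeded with the raw cost, as in doMatch above."""
    n, m = len(left), len(right)
    cost = [[abs(left[i] - right[j]) for j in range(m)] for i in range(n)]
    best = [[0] * m for _ in range(n)]
    for i in range(n):
        best[i][0] = cost[i][0]
    for j in range(m):
        best[0][j] = cost[0][j]
    for i in range(1, n):
        for j in range(1, m):
            best[i][j] = cost[i][j] + min(best[i - 1][j - 1], best[i - 1][j], best[i][j - 1])
    return best

table = stereo_dp_cost([10, 12, 15, 15], [11, 12, 16, 15])
print(table[-1][-1])  # cost of the cheapest monotone matching on this toy pair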
392 | /*
* Run-time type information.
* Type info can be obtained using the typeid(...) operator.
*/
#ifndef LANG__TYPES__TYPE_INFO_HH
#define LANG__TYPES__TYPE_INFO_HH
#include <typeinfo>
namespace lang {
namespace types {
/*
* Export std::type_info class.
*/
using std::type_info;
} /* namespace types */
} /* namespace lang */
#endif
| 117 |
988 | <gh_stars>100-1000
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_coarseGus_notM_phase5: C<!M>=A*B, coarse Gustavson, phase5
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, <NAME>, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// phase5: coarse Gustavson task, C<!M>=A*B
//--------------------------------------------------------------------------
// Since the mask is !M:
// Hf [i] < mark : M(i,j)=0, C(i,j) is not yet seen.
// Hf [i] == mark : M(i,j)=1, so C(i,j) is ignored.
// Hf [i] == mark+1 : M(i,j)=0, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_B_j ; // get B(:,j)
#ifndef GB_GENERIC
if (cjnz == cvlen) // C(:,j) is dense
{
// This is not used for the generic saxpy3.
GB_COMPUTE_DENSE_C_j ; // C(:,j) = A*B(:,j)
continue ;
}
#endif
GB_GET_M_j ; // get M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
// scatter M(:,j) into the Gustavson workspace
GB_SCATTER_M_j (pM_start, pM_end, mark) ;
if (16 * cjnz > cvlen)
{
//------------------------------------------------------------------
// C(:,j) is not very sparse
//------------------------------------------------------------------
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
GB_GET_B_kj_INDEX ; // get k of B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
GB_GET_A_ik_INDEX ; // get i of A(i,k)
int64_t hf = Hf [i] ;
if (hf < mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark1 ; // mark as seen
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_HX_WRITE (i, t) ; // Hx [i] = t
}
else if (hf == mark1)
{
// C(i,j) += A(i,k) * B(k,j)
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_UPDATE (i, t) ; // Hx [i] += t
}
}
}
GB_GATHER_ALL_C_j(mark1) ; // gather into C(:,j)
}
else
{
//------------------------------------------------------------------
// C(:,j) is very sparse
//------------------------------------------------------------------
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
GB_GET_B_kj_INDEX ; // get k of B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
GB_GET_A_ik_INDEX ; // get i of A(i,k)
int64_t hf = Hf [i] ;
if (hf < mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark1 ; // mark as seen
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_HX_WRITE (i, t) ; // Hx [i] = t
Ci [pC++] = i ; // create C(:,j) pattern
}
else if (hf == mark1)
{
// C(i,j) += A(i,k) * B(k,j)
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_UPDATE (i, t) ; // Hx [i] += t
}
}
}
GB_SORT_AND_GATHER_C_j ; // gather into C(:,j)
}
}
}
| 2,778 |
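The coarse Gustavson task above keeps three states in the Hf workspace (masked, unseen, accumulated) so the workspace never has to be cleared between columns; bumping mark by 2 per column invalidates stale entries. A small Python sketch of one column of C<!M>=A*B using the same trick; dense list workspaces stand in for Hf/Hx and the inputs are toy data:

def masked_column(A_cols, b_col, masked_rows, Hf, Hx, mark):
    """One sparse column of C<!M> = A*B with the three-state workspace:
    Hf[i] <  mark     -> row i not seen yet for this column
    Hf[i] == mark     -> M(i,j) = 1, so the entry is suppressed (!M)
    Hf[i] == mark + 1 -> row i already holds a partial sum in Hx[i]"""
    for i in masked_rows:                  # scatter M(:,j)
        Hf[i] = mark
    pattern = []
    for k, bkj in b_col.items():           # scan B(:,j)
        for i, aik in A_cols[k].items():   # scan A(:,k)
            if Hf[i] < mark:
                Hf[i] = mark + 1           # first contribution to C(i,j)
                Hx[i] = aik * bkj
                pattern.append(i)
            elif Hf[i] == mark + 1:
                Hx[i] += aik * bkj         # accumulate
    return {i: Hx[i] for i in sorted(pattern)}

A_cols = {0: {0: 1.0, 2: 2.0}, 1: {1: 3.0}}   # A by column: {col: {row: value}}
b_col = {0: 4.0, 1: 5.0}                      # B(:,j)
Hf, Hx = [0, 0, 0], [0.0, 0.0, 0.0]
print(masked_column(A_cols, b_col, masked_rows=[2], Hf=Hf, Hx=Hx, mark=2))
# {0: 4.0, 1: 15.0}; row 2 is dropped because M(2,j) = 1 and the mask is complemented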
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.MissingResourceException;
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.jar.Attributes;
import java.util.jar.Manifest;
import java.util.logging.Level;
import org.openide.modules.Dependency;
import org.openide.util.NbBundle;
/** Object representing one module, possibly installed.
* Responsible for opening of module JAR file; reading
* manifest; parsing basic information such as dependencies;
* and creating a classloader for use by the installer.
* Methods not defined in ModuleInfo must be called from within
* the module manager's read mutex as a rule.
* @author <NAME>
*/
final class FixedModule extends Module {
/** localized properties, only non-null if requested from disabled module */
private Properties localizedProps;
private final Manifest manifest;
/**
* Create a special-purpose "fixed" JAR which may nonetheless be marked eager or autoload.
* @since 2.7
*/
public FixedModule(ModuleManager mgr, Events ev, Manifest manifest, Object history, ClassLoader classloader, boolean autoload, boolean eager) throws InvalidException {
super(mgr, ev, history, classloader, autoload, eager);
this.manifest = manifest;
loadLocalizedPropsClasspath();
parseManifest();
}
public @Override Manifest getManifest() {
return manifest;
}
/** Get a localized attribute.
* First, if OpenIDE-Module-Localizing-Bundle was given, the specified
* bundle file (in all locale JARs as well as base JAR) is searched for
* a key of the specified name.
* Otherwise, the manifest's main attributes are searched for an attribute
* with the specified name, possibly with a locale suffix.
* If the attribute name contains a slash, and there is a manifest section
* named according to the part before the last slash, then this section's attributes
* are searched instead of the main attributes, and for the attribute listed
* after the slash. Currently this would only be useful for localized filesystem
* names. E.g. you may request the attribute org/foo/MyFileSystem.class/Display-Name.
* In the future certain attributes known to be dangerous could be
* explicitly suppressed from this list; should only be used for
* documented localizable attributes such as OpenIDE-Module-Name etc.
*/
public Object getLocalizedAttribute(String attr) {
String locb = getManifest().getMainAttributes().getValue("OpenIDE-Module-Localizing-Bundle"); // NOI18N
boolean usingLoader = false;
if (locb != null) {
if (classloader != null) {
if (locb.endsWith(".properties")) { // NOI18N
usingLoader = true;
String basename = locb.substring(0, locb.length() - 11).replace('/', '.');
try {
ResourceBundle bundle = NbBundle.getBundle(basename, Locale.getDefault(), classloader);
try {
return bundle.getString(attr);
} catch (MissingResourceException mre) {
// Fine, ignore.
}
} catch (MissingResourceException mre) {
Util.err.log(Level.WARNING, null, mre);
}
} else {
Util.err.warning("cannot efficiently load non-*.properties OpenIDE-Module-Localizing-Bundle: " + locb);
}
}
if (!usingLoader) {
if (localizedProps != null) {
String val = localizedProps.getProperty(attr);
if (val != null) {
return val;
}
}
}
}
// Try in the manifest now.
int idx = attr.lastIndexOf('/'); // NOI18N
if (idx == -1) {
// Simple main attribute.
return NbBundle.getLocalizedValue(getManifest().getMainAttributes(), new Attributes.Name(attr));
} else {
// Attribute of a manifest section.
String section = attr.substring(0, idx);
String realAttr = attr.substring(idx + 1);
Attributes attrs = getManifest().getAttributes(section);
if (attrs != null) {
return NbBundle.getLocalizedValue(attrs, new Attributes.Name(realAttr));
} else {
return null;
}
}
}
public boolean isFixed() {
return true;
}
/** Similar, but for fixed modules only.
* Should be very rarely used: only for classpath modules with a strangely
* named OpenIDE-Module-Localizing-Bundle (not *.properties).
*/
private void loadLocalizedPropsClasspath() throws InvalidException {
Attributes attr = manifest.getMainAttributes();
String locbundle = attr.getValue("OpenIDE-Module-Localizing-Bundle"); // NOI18N
if (locbundle != null) {
Util.err.fine("Localized props in " + locbundle + " for " + attr.getValue("OpenIDE-Module"));
try {
int idx = locbundle.lastIndexOf('.'); // NOI18N
String name, ext;
if (idx == -1) {
name = locbundle;
ext = ""; // NOI18N
} else {
name = locbundle.substring(0, idx);
ext = locbundle.substring(idx);
}
List<String> suffixes = new ArrayList<String>(10);
Iterator<String> it = NbBundle.getLocalizingSuffixes();
while (it.hasNext()) {
suffixes.add(it.next());
}
Collections.reverse(suffixes);
for (String suffix: suffixes) {
String resource = name + suffix + ext;
InputStream is = classloader.getResourceAsStream(resource);
if (is != null) {
Util.err.fine("Found " + resource);
if (localizedProps == null) {
localizedProps = new Properties();
}
localizedProps.load(is);
}
}
if (localizedProps == null) {
throw new IOException("Could not find localizing bundle: " + locbundle); // NOI18N
}
} catch (IOException ioe) {
throw (InvalidException) new InvalidException(ioe.toString()).initCause(ioe);
}
}
}
/** Get all JARs loaded by this module.
* Includes the module itself, any locale variants of the module,
* any extensions specified with Class-Path, any locale variants
* of those extensions.
* The list will be in classpath order (patches first).
* Currently the temp JAR is provided in the case of test modules, to prevent
* sporadic ZIP file exceptions when background threads (like Java parsing) tries
* to open libraries found in the library path.
* JARs already present in the classpath are <em>not</em> listed.
* @return a <code>List<File></code> of JARs
*/
public List<File> getAllJars() {
return Collections.emptyList();
}
/**
* This method can be overridden
* in subclasses in case they want to change the reloadable semantics
* of the fixed modules.
*
* @throws IllegalStateException as FixedModule cannot be reloaded
*/
public void setReloadable(boolean r) {
throw new IllegalStateException();
}
/** Reload this module. Access from ModuleManager.
* If an exception is thrown, the module is considered
* to be in an invalid state.
*
* @throws IllegalStateException as FixedModule cannot be reloaded
*/
public void reload() throws IOException {
throw new IOException("Fixed module cannot be reloaded!"); // NOI18N
}
// Access from ModuleManager:
/** Turn on the classloader. Passed a list of parent modules to use.
* The parents should already have had their classloaders initialized.
*/
protected void classLoaderUp(Set<Module> parents) throws IOException {
return; // no need
}
/** Turn off the classloader and release all resources. */
protected void classLoaderDown() {
return; // don't touch it
}
/** Should be called after turning off the classloader of one or more modules & GC'ing. */
protected void cleanup() {
return; // don't touch it
}
/** Notify the module that it is being deleted. */
protected void destroy() {
}
/** String representation for debugging. */
public @Override String toString() {
String s = "FixedModule:" + getCodeNameBase(); // NOI18N
if (!isValid()) s += "[invalid]"; // NOI18N
return s;
}
@Override
void refineDependencies(Set<Dependency> dependencies) {
}
}
| 4,096 |
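getLocalizedAttribute above resolves a key in a fixed order: the localizing bundle first, then a manifest section when the key contains a slash, then the main attributes. A simplified Python sketch of that fallback chain, with the bundle and manifest modeled as plain dicts:

def get_localized_attribute(attr, bundle, main_attrs, sections):
    """Lookup order: localizing bundle, then a manifest section for 'path/Attr',
    then the manifest main attributes."""
    if attr in bundle:
        return bundle[attr]
    if "/" in attr:
        section, real_attr = attr.rsplit("/", 1)
        return sections.get(section, {}).get(real_attr)
    return main_attrs.get(attr)

bundle = {"OpenIDE-Module-Name": "My Module"}
main_attrs = {"OpenIDE-Module": "org.example.mymodule"}
sections = {"org/foo/MyFileSystem.class": {"Display-Name": "My FS"}}
print(get_localized_attribute("OpenIDE-Module-Name", bundle, main_attrs, sections))
print(get_localized_attribute("org/foo/MyFileSystem.class/Display-Name", bundle, main_attrs, sections))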
5,279 | <reponame>hengfengli/beam<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.spark.structuredstreaming.translation.batch;
import static org.apache.beam.runners.spark.structuredstreaming.Constants.BEAM_SOURCE_OPTION;
import static org.apache.beam.runners.spark.structuredstreaming.Constants.DEFAULT_PARALLELISM;
import static org.apache.beam.runners.spark.structuredstreaming.Constants.PIPELINE_OPTIONS;
import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.apache.beam.runners.core.construction.SerializablePipelineOptions;
import org.apache.beam.runners.core.serialization.Base64Serializer;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.RowHelpers;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.SchemaHelpers;
import org.apache.beam.sdk.io.BoundedSource;
import org.apache.beam.sdk.io.BoundedSource.BoundedReader;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.sources.v2.ContinuousReadSupport;
import org.apache.spark.sql.sources.v2.DataSourceOptions;
import org.apache.spark.sql.sources.v2.DataSourceV2;
import org.apache.spark.sql.sources.v2.ReadSupport;
import org.apache.spark.sql.sources.v2.reader.DataSourceReader;
import org.apache.spark.sql.sources.v2.reader.InputPartition;
import org.apache.spark.sql.sources.v2.reader.InputPartitionReader;
import org.apache.spark.sql.types.StructType;
/**
 * This is a Spark structured streaming {@link DataSourceV2} implementation that wraps a
 * {@link BoundedSource}. As continuous streaming is tagged experimental in Spark (no aggregation
 * support and no exactly-once guarantee), this class does not implement {@link ContinuousReadSupport}.
*/
public class DatasetSourceBatch implements DataSourceV2, ReadSupport {
@Override
public DataSourceReader createReader(DataSourceOptions options) {
return new DatasetReader<>(options);
}
/** This class is mapped to Beam {@link BoundedSource}. */
private static class DatasetReader<T> implements DataSourceReader, Serializable {
private int numPartitions;
private BoundedSource<T> source;
private SerializablePipelineOptions serializablePipelineOptions;
@SuppressWarnings("unchecked")
private DatasetReader(DataSourceOptions options) {
if (!options.get(BEAM_SOURCE_OPTION).isPresent()) {
throw new RuntimeException("Beam source was not set in DataSource options");
}
this.source =
Base64Serializer.deserializeUnchecked(
options.get(BEAM_SOURCE_OPTION).get(), BoundedSource.class);
if (!options.get(DEFAULT_PARALLELISM).isPresent()) {
throw new RuntimeException("Spark default parallelism was not set in DataSource options");
}
this.numPartitions = Integer.parseInt(options.get(DEFAULT_PARALLELISM).get());
checkArgument(numPartitions > 0, "Number of partitions must be greater than zero.");
if (!options.get(PIPELINE_OPTIONS).isPresent()) {
throw new RuntimeException("Beam pipelineOptions were not set in DataSource options");
}
this.serializablePipelineOptions =
new SerializablePipelineOptions(options.get(PIPELINE_OPTIONS).get());
}
@Override
public StructType readSchema() {
// TODO: find a way to extend schema with a WindowedValue schema
return SchemaHelpers.binarySchema();
}
@Override
public List<InputPartition<InternalRow>> planInputPartitions() {
PipelineOptions options = serializablePipelineOptions.get();
List<InputPartition<InternalRow>> result = new ArrayList<>();
long desiredSizeBytes;
try {
desiredSizeBytes = source.getEstimatedSizeBytes(options) / numPartitions;
List<? extends BoundedSource<T>> splits = source.split(desiredSizeBytes, options);
for (BoundedSource<T> split : splits) {
result.add(
(InputPartition<InternalRow>)
() -> new DatasetPartitionReader<>(split, serializablePipelineOptions));
}
return result;
} catch (Exception e) {
throw new RuntimeException(
"Error in splitting BoundedSource " + source.getClass().getCanonicalName(), e);
}
}
}
/** This class can be mapped to Beam {@link BoundedReader}. */
private static class DatasetPartitionReader<T> implements InputPartitionReader<InternalRow> {
private boolean started;
private boolean closed;
private final BoundedSource<T> source;
private BoundedReader<T> reader;
DatasetPartitionReader(
BoundedSource<T> source, SerializablePipelineOptions serializablePipelineOptions) {
this.started = false;
this.closed = false;
this.source = source;
// reader is not serializable so lazy initialize it
try {
reader = source.createReader(serializablePipelineOptions.get().as(PipelineOptions.class));
} catch (IOException e) {
throw new RuntimeException("Error creating BoundedReader ", e);
}
}
@Override
public boolean next() throws IOException {
if (!started) {
started = true;
return reader.start();
} else {
return !closed && reader.advance();
}
}
@Override
public InternalRow get() {
WindowedValue<T> windowedValue =
WindowedValue.timestampedValueInGlobalWindow(
reader.getCurrent(), reader.getCurrentTimestamp());
return RowHelpers.storeWindowedValueInRow(windowedValue, source.getOutputCoder());
}
@Override
public void close() throws IOException {
closed = true;
reader.close();
}
}
}
| 2,253 |
5,169 | <reponame>Gantios/Specs
{
"name": "BsUItextViewPlaceholder",
"version": "0.1.0",
"summary": "BsUItextViewPlaceholder.",
"description": "a placeholder of UITextView",
"homepage": "http://gitlab.yonghui.cn/yhiosmodels/BsUItextViewPlaceholder",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"yangli": "80727655.yonghui.cn"
},
"source": {
"git": "http://gitlab.yonghui.cn/yhiosmodels/BsUItextViewPlaceholder.git",
"tag": "0.1.0"
},
"platforms": {
"ios": "8.0"
},
"source_files": "BsUItextViewPlaceholder/Classes/**/*"
}
| 255 |
402 | <reponame>HalleyYoung/musicautobot
def setup_musescore(musescore_path=None):
if not is_ipython(): return
import platform
from music21 import environment
from pathlib import Path
system = platform.system()
if system == 'Linux':
import os
os.environ['QT_QPA_PLATFORM']='offscreen' # https://musescore.org/en/node/29041
existing_path = environment.get('musicxmlPath')
if existing_path: return
if musescore_path is None:
if system == 'Darwin':
app_paths = list(Path('/Applications').glob('MuseScore *.app'))
if len(app_paths): musescore_path = app_paths[-1]/'Contents/MacOS/mscore'
elif system == 'Linux':
musescore_path = '/usr/bin/musescore'
if musescore_path is None or not Path(musescore_path).exists():
print('Warning: Could not find musescore installation. Please install musescore (see README) and/or update music21 environment paths')
    else:
environment.set('musicxmlPath', musescore_path)
environment.set('musescoreDirectPNGPath', musescore_path)
def is_ipython():
try: get_ipython
except: return False
return True
def is_colab():
try: import google.colab
except: return False
return True
def setup_fluidsynth():
from midi2audio import FluidSynth
from IPython.display import Audio
def play_wav(stream):
out_midi = stream.write('midi')
out_wav = str(Path(out_midi).with_suffix('.wav'))
FluidSynth("font.sf2").midi_to_audio(out_midi, out_wav)
return Audio(out_wav)
| 646 |
543 | /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package javax.swing.event;
import java.awt.event.*;
import java.awt.*;
import javax.swing.*;
/**
* An event reported to a child component that originated from an
* ancestor in the component hierarchy.
* <p>
* <strong>Warning:</strong>
* Serialized objects of this class will not be compatible with
* future Swing releases. The current serialization support is
* appropriate for short term storage or RMI between applications running
* the same version of Swing. As of 1.4, support for long term storage
* of all JavaBeans
* has been added to the <code>java.beans</code> package.
* Please see {@link java.beans.XMLEncoder}.
*
* @author <NAME>
*/
@SuppressWarnings("serial")
public class AncestorEvent extends AWTEvent {
/**
* An ancestor-component was added to the hierarchy of
* visible objects (made visible), and is currently being displayed.
*/
public static final int ANCESTOR_ADDED = 1;
/**
* An ancestor-component was removed from the hierarchy
* of visible objects (hidden) and is no longer being displayed.
*/
public static final int ANCESTOR_REMOVED = 2;
/** An ancestor-component changed its position on the screen. */
public static final int ANCESTOR_MOVED = 3;
Container ancestor;
Container ancestorParent;
/**
* Constructs an AncestorEvent object to identify a change
* in an ancestor-component's display-status.
*
* @param source the JComponent that originated the event
* (typically <code>this</code>)
* @param id an int specifying {@link #ANCESTOR_ADDED},
* {@link #ANCESTOR_REMOVED} or {@link #ANCESTOR_MOVED}
* @param ancestor a Container object specifying the ancestor-component
* whose display-status changed
* @param ancestorParent a Container object specifying the ancestor's parent
*/
public AncestorEvent(JComponent source, int id, Container ancestor, Container ancestorParent) {
super(source, id);
this.ancestor = ancestor;
this.ancestorParent = ancestorParent;
}
/**
* Returns the ancestor that the event actually occurred on.
*
* @return the {@code Container} object specifying the ancestor component
*/
public Container getAncestor() {
return ancestor;
}
/**
* Returns the parent of the ancestor the event actually occurred on.
* This is most interesting in an ANCESTOR_REMOVED event, as
* the ancestor may no longer be in the component hierarchy.
*
* @return the {@code Container} object specifying the ancestor's parent
*/
public Container getAncestorParent() {
return ancestorParent;
}
/**
* Returns the component that the listener was added to.
*
* @return the {@code JComponent} on which the event occurred
*/
public JComponent getComponent() {
return (JComponent)getSource();
}
}
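/*
 * Illustrative usage sketch (not part of this class): AncestorEvents are delivered to an
 * AncestorListener registered on a JComponent via addAncestorListener(). The JLabel and the
 * println bodies below are placeholder assumptions, not code from this file.
 *
 *   JLabel label = new JLabel("hello");
 *   label.addAncestorListener(new AncestorListener() {
 *       public void ancestorAdded(AncestorEvent e)   { System.out.println("shown under " + e.getAncestor()); }
 *       public void ancestorRemoved(AncestorEvent e) { System.out.println("hidden; old parent " + e.getAncestorParent()); }
 *       public void ancestorMoved(AncestorEvent e)   { System.out.println("moved: " + e.getComponent()); }
 *   });
 */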
| 1,337 |
14,668 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "traceimpl_dependent_scope.h"
namespace blink {
// Template instantiation.
template class Derived<int>;
template class DerivedMissingTrace<int>;
}
| 92 |
14,668 | <gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ios/chrome/browser/browser_state/test_chrome_browser_state_manager.h"
#include <memory>
#include "base/files/file_path.h"
#include "ios/chrome/browser/browser_state/test_chrome_browser_state.h"
#include "ios/web/public/test/web_test.h"
#include "testing/gtest/include/gtest/gtest.h"
using TestChromeBrowserStateManagerTest = web::WebTest;
// Tests that the list of loaded browser states is empty after invoking the
// constructor that accepts a user data directory path.
TEST_F(TestChromeBrowserStateManagerTest, ConstructWithUserDataDirPath) {
TestChromeBrowserStateManager browser_state_manager((base::FilePath()));
EXPECT_EQ(0U, browser_state_manager.GetLoadedBrowserStates().size());
}
// Tests that the list of loaded browser states has one element after invoking
// the constructor that accepts a browser state.
TEST_F(TestChromeBrowserStateManagerTest, ConstructWithBrowserState) {
TestChromeBrowserStateManager browser_state_manager(
TestChromeBrowserState::Builder().Build());
EXPECT_EQ(1U, browser_state_manager.GetLoadedBrowserStates().size());
}
| 374 |
7,482 | /*
* Copyright 2018 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "fsl_puf.h"
#include "fsl_clock.h"
#include "fsl_reset.h"
#include "fsl_common.h"
/* Component ID definition, used by tools. */
#ifndef FSL_COMPONENT_ID
#define FSL_COMPONENT_ID "platform.drivers.puf"
#endif
static void puf_wait_usec(volatile uint32_t usec, uint32_t coreClockFrequencyMHz)
{
while (usec > 0)
{
usec--;
/* number of MHz is directly number of core clocks to wait 1 usec. */
/* the while loop below is actually 4 clocks so divide by 4 for ~1 usec */
register uint32_t ticksCount = coreClockFrequencyMHz / 4u + 1u;
while (ticksCount--)
{
}
}
}
static status_t puf_waitForInit(PUF_Type *base)
{
status_t status = kStatus_Fail;
/* wait until status register reads non-zero. All zero is not valid. It should be BUSY or OK or ERROR */
while (0 == base->STAT)
{
}
/* wait if busy */
while ((base->STAT & PUF_STAT_BUSY_MASK) != 0)
{
}
/* return status */
if (base->STAT & (PUF_STAT_SUCCESS_MASK | PUF_STAT_ERROR_MASK))
{
status = kStatus_Success;
}
return status;
}
static void puf_powerOn(PUF_Type *base)
{
#if defined(FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL) && (FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL > 0)
/* RT6xxs */
base->PWRCTRL = 0x5u;
base->PWRCTRL = 0xDu;
base->PWRCTRL = 0x9u;
#else /* !FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL */
/* Niobe4 & Aruba FL */
base->PWRCTRL = PUF_PWRCTRL_RAMON_MASK;
while (0 == (PUF_PWRCTRL_RAMSTAT_MASK & base->PWRCTRL))
{
}
#endif /* FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL */
}
static status_t puf_powerCycle(PUF_Type *base, uint32_t dischargeTimeMsec, uint32_t coreClockFrequencyHz)
{
#if defined(FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL) && (FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL > 0)
/* RT6xxs */
uint32_t coreClockFrequencyMHz = coreClockFrequencyHz / 1000000u;
base->PWRCTRL = 0xDu; /* disable RAM CK */
/* enter ASPS mode */
base->PWRCTRL = 0xCu; /* SLEEP = 1 */
base->PWRCTRL = 0x8u; /* enable RAM CK */
base->PWRCTRL = 0xF8u; /* SLEEP=1, PSW*=1 */
/* Wait enough time to discharge fully */
puf_wait_usec(dischargeTimeMsec * 1000u, coreClockFrequencyHz / 1000000u);
/* write PWRCTRL=0x38. wait time > 1 us */
base->PWRCTRL = 0x38u; /* SLEEP=1. PSWSMALL*=0. PSWLARGE*=1. */
puf_wait_usec(1, coreClockFrequencyMHz);
/* write PWRCTRL=0x8. wait time > 1 us */
base->PWRCTRL = 0x08u; /* SLEEP=1. PSWSMALL*=0. PSWLARGE*=0 */
puf_wait_usec(1, coreClockFrequencyMHz);
base->PWRCTRL = 0xCu;
base->PWRCTRL = 0xDu;
base->PWRCTRL = 0x9u;
/* Generate INITN low pulse */
base->PWRCTRL = 0xDu;
base->PWRCTRL = 0x5u;
base->PWRCTRL = 0x1u;
#else
/* Niobe4 & Aruba FL */
base->PWRCTRL = 0x0u;
while (PUF_PWRCTRL_RAMSTAT_MASK & base->PWRCTRL)
{
}
/* Wait enough time to discharge fully */
puf_wait_usec(dischargeTimeMsec * 1000u, coreClockFrequencyHz / 1000000u);
#endif
/* Reset PUF and reenable power to PUF SRAM */
RESET_PeripheralReset(kPUF_RST_SHIFT_RSTn);
puf_powerOn(base);
return kStatus_Success;
}
/*!
* brief Initialize PUF
*
* This function enables power to PUF block and waits until the block initializes.
*
* param base PUF peripheral base address
* param dischargeTimeMsec time in ms to wait for PUF SRAM to fully discharge
* param coreClockFrequencyHz core clock frequency in Hz
* return Status of the init operation
*/
status_t PUF_Init(PUF_Type *base, uint32_t dischargeTimeMsec, uint32_t coreClockFrequencyHz)
{
status_t status = kStatus_Fail;
#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
CLOCK_EnableClock(kCLOCK_Puf);
#endif
/* Reset PUF */
RESET_PeripheralReset(kPUF_RST_SHIFT_RSTn);
/* Enable power to PUF SRAM */
puf_powerOn(base);
/* Wait for peripheral to become ready */
status = puf_waitForInit(base);
/* In case of error or enroll & start not allowed, do power-cycle */
if ((status != kStatus_Success) || ((PUF_ALLOW_ALLOWENROLL_MASK | PUF_ALLOW_ALLOWSTART_MASK) !=
(base->ALLOW & (PUF_ALLOW_ALLOWENROLL_MASK | PUF_ALLOW_ALLOWSTART_MASK))))
{
puf_powerCycle(base, dischargeTimeMsec, coreClockFrequencyHz);
status = puf_waitForInit(base);
}
return status;
}
/*!
 * brief Deinitialize PUF
*
* This function disables power to PUF SRAM and peripheral clock.
*
 * param base PUF peripheral base address
 * param dischargeTimeMsec time in ms to wait for PUF SRAM to fully discharge
 * param coreClockFrequencyHz core clock frequency in Hz
*/
void PUF_Deinit(PUF_Type *base, uint32_t dischargeTimeMsec, uint32_t coreClockFrequencyHz)
{
#if defined(FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL) && (FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL > 0)
/* RT6xxs */
base->PWRCTRL = 0xDu; /* disable RAM CK */
/* enter ASPS mode */
base->PWRCTRL = 0xCu; /* SLEEP = 1 */
base->PWRCTRL = 0x8u; /* enable RAM CK */
base->PWRCTRL = 0xF8u; /* SLEEP=1, PSW*=1 */
#else /* !FSL_FEATURE_PUF_PWR_HAS_MANUAL_SLEEP_CONTROL */
/* Niobe4 & Aruba FL */
base->PWRCTRL = 0x00u;
#endif
/* Wait enough time to discharge fully */
puf_wait_usec(dischargeTimeMsec * 1000u, coreClockFrequencyHz / 1000000u);
RESET_SetPeripheralReset(kPUF_RST_SHIFT_RSTn);
#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
CLOCK_DisableClock(kCLOCK_Puf);
#endif
}
/*!
* brief Enroll PUF
*
* This function derives a digital fingerprint, generates the corresponding Activation Code (AC)
* and returns it to be stored in an NVM or a file. This step needs to be
* performed only once for each device. This function may be permanently disallowed by a fuse.
*
* param base PUF peripheral base address
* param[out] activationCode Word aligned address of the resulting activation code.
* param activationCodeSize Size of the activationCode buffer in bytes. Shall be 1192 bytes.
* return Status of enroll operation.
*/
status_t PUF_Enroll(PUF_Type *base, uint8_t *activationCode, size_t activationCodeSize)
{
status_t status = kStatus_Fail;
uint32_t *activationCodeAligned = NULL;
register uint32_t temp32 = 0;
/* check that activation code buffer size is at least 1192 bytes */
if (activationCodeSize < PUF_ACTIVATION_CODE_SIZE)
{
return kStatus_InvalidArgument;
}
/* only work with aligned activationCode */
if (0x3u & (uintptr_t)activationCode)
{
return kStatus_InvalidArgument;
}
activationCodeAligned = (uint32_t *)(uintptr_t)activationCode;
/* check if ENROLL is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWENROLL_MASK))
{
return kStatus_Fail;
}
/* begin */
base->CTRL = PUF_CTRL_ENROLL_MASK;
/* check status */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* read out AC */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_CODEOUTAVAIL_MASK & base->STAT))
{
temp32 = base->CODEOUTPUT;
if (activationCodeSize >= sizeof(uint32_t))
{
*activationCodeAligned = temp32;
activationCodeAligned++;
activationCodeSize -= sizeof(uint32_t);
}
}
}
if ((base->STAT & PUF_STAT_SUCCESS_MASK) && (activationCodeSize == 0))
{
status = kStatus_Success;
}
return status;
}
/*!
* brief Start PUF
*
* The Activation Code generated during the Enroll operation is used to
* reconstruct the digital fingerprint. This needs to be done after every power-up
* and reset.
*
* param base PUF peripheral base address
* param activationCode Word aligned address of the input activation code.
* param activationCodeSize Size of the activationCode buffer in bytes. Shall be 1192 bytes.
* return Status of start operation.
*/
status_t PUF_Start(PUF_Type *base, const uint8_t *activationCode, size_t activationCodeSize)
{
status_t status = kStatus_Fail;
const uint32_t *activationCodeAligned = NULL;
register uint32_t temp32 = 0;
/* check that activation code size is at least 1192 bytes */
if (activationCodeSize < 1192)
{
return kStatus_InvalidArgument;
}
/* only work with aligned activationCode */
if (0x3u & (uintptr_t)activationCode)
{
return kStatus_InvalidArgument;
}
activationCodeAligned = (const uint32_t *)(uintptr_t)activationCode;
/* check if START is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWSTART_MASK))
{
return kStatus_Fail;
}
/* begin */
base->CTRL = PUF_CTRL_START_MASK;
/* check status */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* while busy send AC */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_CODEINREQ_MASK & base->STAT))
{
if (activationCodeSize >= sizeof(uint32_t))
{
temp32 = *activationCodeAligned;
activationCodeAligned++;
activationCodeSize -= sizeof(uint32_t);
}
base->CODEINPUT = temp32;
}
}
/* get status */
if (0 != (base->STAT & PUF_STAT_SUCCESS_MASK))
{
status = kStatus_Success;
}
return status;
}
/*!
* brief Set intrinsic key
*
* The digital fingerprint generated during the Enroll/Start
* operations is used to generate a Key Code (KC) that defines a unique intrinsic
* key. This KC is returned to be stored in an NVM or a file. This operation
* needs to be done only once for each intrinsic key.
* Each time a Set Intrinsic Key operation is executed a new unique key is
* generated.
*
* param base PUF peripheral base address
* param keyIndex PUF key index register
* param keySize Size of the intrinsic key to generate in bytes.
* param[out] keyCode Word aligned address of the resulting key code.
* param keyCodeSize Size of the keyCode buffer in bytes. Shall be PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(keySize).
* return Status of set intrinsic key operation.
*/
status_t PUF_SetIntrinsicKey(
PUF_Type *base, puf_key_index_register_t keyIndex, size_t keySize, uint8_t *keyCode, size_t keyCodeSize)
{
status_t status = kStatus_Fail;
uint32_t *keyCodeAligned = NULL;
register uint32_t temp32 = 0;
/* check if SET KEY is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWSETKEY_MASK))
{
return kStatus_Fail;
}
/* only work with aligned keyCode */
if (0x3u & (uintptr_t)keyCode)
{
return kStatus_InvalidArgument;
}
/* Check that keySize is in the correct range and that it is multiple of 8 */
if ((keySize < kPUF_KeySizeMin) || (keySize > kPUF_KeySizeMax) || (keySize & 0x7))
{
return kStatus_InvalidArgument;
}
/* check that keyCodeSize is correct for given keySize */
if (keyCodeSize < PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(keySize))
{
return kStatus_InvalidArgument;
}
if ((uint32_t)keyIndex > kPUF_KeyIndexMax)
{
return kStatus_InvalidArgument;
}
keyCodeAligned = (uint32_t *)(uintptr_t)keyCode;
/* program the key size and index */
base->KEYSIZE = keySize >> 3;
base->KEYINDEX = (uint32_t)keyIndex;
/* begin */
base->CTRL = PUF_CTRL_GENERATEKEY_MASK;
/* wait till command is accepted */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* while busy read KC */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_CODEOUTAVAIL_MASK & base->STAT))
{
temp32 = base->CODEOUTPUT;
if (keyCodeSize >= sizeof(uint32_t))
{
*keyCodeAligned = temp32;
keyCodeAligned++;
keyCodeSize -= sizeof(uint32_t);
}
}
}
/* get status */
if (0 != (base->STAT & PUF_STAT_SUCCESS_MASK))
{
status = kStatus_Success;
}
return status;
}
/*!
* brief Set user key
*
* The digital fingerprint generated during the Enroll/Start
* operations and a user key (UK) provided as input are used to
 * generate a Key Code (KC). This KC is returned to be stored
* in an NVM or a file. This operation needs to be done only once for each user key.
*
* param base PUF peripheral base address
* param keyIndex PUF key index register
* param userKey Word aligned address of input user key.
* param userKeySize Size of the input user key in bytes.
* param[out] keyCode Word aligned address of the resulting key code.
* param keyCodeSize Size of the keyCode buffer in bytes. Shall be PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(userKeySize).
* return Status of set user key operation.
*/
status_t PUF_SetUserKey(PUF_Type *base,
puf_key_index_register_t keyIndex,
const uint8_t *userKey,
size_t userKeySize,
uint8_t *keyCode,
size_t keyCodeSize)
{
status_t status = kStatus_Fail;
uint32_t *keyCodeAligned = NULL;
const uint32_t *userKeyAligned = NULL;
register uint32_t temp32 = 0;
/* check if SET KEY is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWSETKEY_MASK))
{
return kStatus_Fail;
}
/* only work with aligned keyCode */
if (0x3u & (uintptr_t)keyCode)
{
return kStatus_InvalidArgument;
}
/* Check that userKeySize is in the correct range and that it is multiple of 8 */
if ((userKeySize < kPUF_KeySizeMin) || (userKeySize > kPUF_KeySizeMax) || (userKeySize & 0x7))
{
return kStatus_InvalidArgument;
}
/* check that keyCodeSize is correct for given userKeySize */
if (keyCodeSize < PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(userKeySize))
{
return kStatus_InvalidArgument;
}
if ((uint32_t)keyIndex > kPUF_KeyIndexMax)
{
return kStatus_InvalidArgument;
}
keyCodeAligned = (uint32_t *)(uintptr_t)keyCode;
userKeyAligned = (const uint32_t *)(uintptr_t)userKey;
/* program the key size and index */
base->KEYSIZE = userKeySize >> 3; /* convert to 64-bit blocks */
base->KEYINDEX = (uint32_t)keyIndex;
/* begin */
base->CTRL = PUF_CTRL_SETKEY_MASK;
/* wait till command is accepted */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* while busy write UK and read KC */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_KEYINREQ_MASK & base->STAT))
{
if (userKeySize >= sizeof(uint32_t))
{
temp32 = *userKeyAligned;
userKeyAligned++;
userKeySize -= sizeof(uint32_t);
}
base->KEYINPUT = temp32;
}
if (0 != (PUF_STAT_CODEOUTAVAIL_MASK & base->STAT))
{
temp32 = base->CODEOUTPUT;
if (keyCodeSize >= sizeof(uint32_t))
{
*keyCodeAligned = temp32;
keyCodeAligned++;
keyCodeSize -= sizeof(uint32_t);
}
}
}
/* get status */
if (0 != (base->STAT & PUF_STAT_SUCCESS_MASK))
{
status = kStatus_Success;
}
return status;
}
static status_t puf_getHwKey(PUF_Type *base, const uint8_t *keyCode, size_t keyCodeSize)
{
status_t status = kStatus_Fail;
uint32_t *keyCodeAligned = NULL;
register uint32_t temp32 = 0;
keyCodeAligned = (uint32_t *)(uintptr_t)keyCode;
/* begin */
base->CTRL = PUF_CTRL_GETKEY_MASK;
/* wait till command is accepted */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* while busy send KC, key is reconstructed to HW bus */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_CODEINREQ_MASK & base->STAT))
{
if (keyCodeSize >= sizeof(uint32_t))
{
temp32 = *keyCodeAligned;
keyCodeAligned++;
keyCodeSize -= sizeof(uint32_t);
}
base->CODEINPUT = temp32;
}
}
/* get status */
if (0 != (base->STAT & PUF_STAT_SUCCESS_MASK))
{
status = kStatus_Success;
}
return status;
}
/*!
* brief Reconstruct hw bus key from a key code
*
* The digital fingerprint generated during the Start operation and the KC
* generated during a Set Key operation (Set intrinsic key or Set user key) are used to retrieve a stored key. This
* operation needs to be done every time a key is needed.
* This function accepts only Key Codes created for PUF index register kPUF_KeyIndex_00.
* Such a key is output directly to a dedicated hardware bus. The reconstructed key is not exposed to system memory.
*
* param base PUF peripheral base address
* param keyCode Word aligned address of the input key code.
* param keyCodeSize Size of the keyCode buffer in bytes. Shall be PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(keySize).
* param keySlot key slot to output on hw bus. Parameter is ignored on devices with less than two key slots.
 * param keyMask key masking value. Shall be random for each POR/reset. Value does not have to be cryptographically
* secure.
* return Status of get key operation.
*/
status_t PUF_GetHwKey(
PUF_Type *base, const uint8_t *keyCode, size_t keyCodeSize, puf_key_slot_t keySlot, uint32_t keyMask)
{
status_t status = kStatus_Fail;
uint32_t keyIndex;
/* check if GET KEY is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWGETKEY_MASK))
{
return kStatus_Fail;
}
/* only work with aligned keyCode */
if (0x3u & (uintptr_t)keyCode)
{
return kStatus_Fail;
}
/* check that keyCodeSize is at least PUF_MIN_KEY_CODE_SIZE */
if (keyCodeSize < PUF_MIN_KEY_CODE_SIZE)
{
return kStatus_InvalidArgument;
}
keyIndex = 0x0Fu & keyCode[1];
/* check the Key Code header byte 1. index must be zero for the hw key. */
if (kPUF_KeyIndex_00 != (puf_key_index_register_t)keyIndex)
{
return kStatus_Fail;
}
#if defined(FSL_FEATURE_PUF_HAS_KEYSLOTS) && (FSL_FEATURE_PUF_HAS_KEYSLOTS > 0)
volatile uint32_t *keyMask_reg = NULL;
uint32_t regVal = (2 << (2 * keySlot));
switch (keySlot)
{
case kPUF_KeySlot0:
keyMask_reg = &base->KEYMASK[0];
break;
case kPUF_KeySlot1:
keyMask_reg = &base->KEYMASK[1];
break;
#if (FSL_FEATURE_PUF_HAS_KEYSLOTS > 2)
case kPUF_KeySlot2:
keyMask_reg = &base->KEYMASK[2];
break;
case kPUF_KeySlot3:
keyMask_reg = &base->KEYMASK[3];
break;
#endif /* FSL_FEATURE_PUF_HAS_KEYSLOTS > 2 */
default:
status = kStatus_InvalidArgument;
break;
}
#endif /* FSL_FEATURE_PUF_HAS_KEYSLOTS */
if (status != kStatus_InvalidArgument)
{
#if defined(FSL_FEATURE_PUF_HAS_KEYSLOTS) && (FSL_FEATURE_PUF_HAS_KEYSLOTS > 0)
base->KEYRESET = regVal;
base->KEYENABLE = regVal;
*keyMask_reg = keyMask;
#endif /* FSL_FEATURE_PUF_HAS_KEYSLOTS */
status = puf_getHwKey(base, keyCode, keyCodeSize);
#if defined(FSL_FEATURE_PUF_HAS_SHIFT_STATUS) && (FSL_FEATURE_PUF_HAS_SHIFT_STATUS > 0)
size_t keyWords = 0;
if (status == kStatus_Success)
{
/* if the corresponding shift count does not match, return fail anyway */
keyWords = ((((size_t)keyCode[3]) * 2) - 1u) << (keySlot << 2);
if (keyWords != ((0x0Fu << (keySlot << 2)) & base->SHIFT_STATUS))
{
status = kStatus_Fail;
}
}
#endif /* FSL_FEATURE_PUF_HAS_SHIFT_STATUS */
}
return status;
}
/*!
* brief Checks if Get Key operation is allowed.
*
* This function returns true if get key operation is allowed.
*
* param base PUF peripheral base address
* return true if get key operation is allowed
*/
bool PUF_IsGetKeyAllowed(PUF_Type *base)
{
/* check if GET KEY is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWGETKEY_MASK))
{
return false;
}
return true;
}
/*!
* brief Reconstruct key from a key code
*
* The digital fingerprint generated during the Start operation and the KC
* generated during a Set Key operation (Set intrinsic key or Set user key) are used to retrieve a stored key. This
* operation needs to be done every time a key is needed.
* This function accepts only Key Codes created for PUF index registers kPUF_KeyIndex_01 to kPUF_KeyIndex_15.
*
* param base PUF peripheral base address
* param keyCode Word aligned address of the input key code.
* param keyCodeSize Size of the keyCode buffer in bytes. Shall be PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(keySize).
* param[out] key Word aligned address of output key.
* param keySize Size of the output key in bytes.
* return Status of get key operation.
*/
status_t PUF_GetKey(PUF_Type *base, const uint8_t *keyCode, size_t keyCodeSize, uint8_t *key, size_t keySize)
{
status_t status = kStatus_Fail;
uint32_t *keyCodeAligned = NULL;
uint32_t *keyAligned = NULL;
uint32_t keyIndex;
register uint32_t temp32 = 0;
/* check if GET KEY is allowed */
if (0x0u == (base->ALLOW & PUF_ALLOW_ALLOWGETKEY_MASK))
{
return kStatus_Fail;
}
/* only work with aligned keyCode */
if (0x3u & (uintptr_t)keyCode)
{
return kStatus_Fail;
}
/* only work with aligned key */
if (0x3u & (uintptr_t)key)
{
return kStatus_Fail;
}
/* check that keyCodeSize is correct for given keySize */
if (keyCodeSize < PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(keySize))
{
return kStatus_InvalidArgument;
}
keyIndex = 0x0Fu & keyCode[1];
/* check the Key Code header byte 1. index must be non-zero for the register key. */
if (kPUF_KeyIndex_00 == (puf_key_index_register_t)keyIndex)
{
return kStatus_Fail;
}
keyCodeAligned = (uint32_t *)(uintptr_t)keyCode;
keyAligned = (uint32_t *)(uintptr_t)key;
/* begin */
base->CTRL = PUF_CTRL_GETKEY_MASK;
/* wait till command is accepted */
while (0 == (base->STAT & (PUF_STAT_BUSY_MASK | PUF_STAT_ERROR_MASK)))
{
}
/* while busy send KC, read key */
while (0 != (base->STAT & PUF_STAT_BUSY_MASK))
{
if (0 != (PUF_STAT_CODEINREQ_MASK & base->STAT))
{
temp32 = 0;
if (keyCodeSize >= sizeof(uint32_t))
{
temp32 = *keyCodeAligned;
keyCodeAligned++;
keyCodeSize -= sizeof(uint32_t);
}
base->CODEINPUT = temp32;
}
if (0 != (PUF_STAT_KEYOUTAVAIL_MASK & base->STAT))
{
keyIndex = base->KEYOUTINDEX;
temp32 = base->KEYOUTPUT;
if (keySize >= sizeof(uint32_t))
{
*keyAligned = temp32;
keyAligned++;
keySize -= sizeof(uint32_t);
}
}
}
/* get status */
if ((keyIndex) && (0 != (base->STAT & PUF_STAT_SUCCESS_MASK)))
{
status = kStatus_Success;
}
return status;
}
/*!
* brief Zeroize PUF
*
* This function clears all PUF internal logic and puts the PUF to error state.
*
* param base PUF peripheral base address
* return Status of the zeroize operation.
*/
status_t PUF_Zeroize(PUF_Type *base)
{
status_t status = kStatus_Fail;
/* zeroize command is always allowed */
base->CTRL = PUF_CTRL_ZEROIZE_MASK;
/* check that command is accepted */
if ((0 != (base->STAT & PUF_STAT_ERROR_MASK)) && (0 == base->ALLOW))
{
status = kStatus_Success;
}
return status;
}
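/*
 * Illustrative usage sketch (not part of the driver): the sequence below shows how the
 * enroll/start/key-code APIs documented above fit together. The peripheral instance name (PUF),
 * the 400 ms discharge time, the 96 MHz core clock and the 32-byte key size are placeholder
 * assumptions; use board-specific values. Wrapped in #if 0 so it is never compiled.
 */
#if 0
static void puf_usage_sketch(void)
{
    status_t st;
    uint8_t ac[PUF_ACTIVATION_CODE_SIZE];               /* activation code to persist in NVM */
    uint8_t kc[PUF_GET_KEY_CODE_SIZE_FOR_KEY_SIZE(32)]; /* key code for a 32-byte key */
    uint8_t key[32];                                    /* reconstructed key */

    st = PUF_Init(PUF, 400U, 96000000U);                        /* power up and wait for ready */
    st = PUF_Enroll(PUF, ac, sizeof(ac));                       /* one-time: derive fingerprint, save AC */
    st = PUF_Start(PUF, ac, sizeof(ac));                        /* every boot: reconstruct from stored AC */
    st = PUF_SetIntrinsicKey(PUF, kPUF_KeyIndex_01, sizeof(key), kc, sizeof(kc)); /* wrap a new key */
    st = PUF_GetKey(PUF, kc, sizeof(kc), key, sizeof(key));     /* index > 0: key readable by software */
    (void)st;                                                   /* check each status in real code */
}
#endif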
| 10,518 |
561 | <filename>common/tests/thrift_client_pool_test.cpp
/// Copyright 2016 Pinterest Inc.
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
//
// @author bol (<EMAIL>)
//
#include <atomic>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <tuple>
#include <vector>
#include "gtest/gtest.h"
#include "common/tests/thrift/gen-cpp2/DummyService.h"
#include "common/thrift_client_pool.h"
#include "thrift/lib/cpp2/server/ThriftServer.h"
using apache::thrift::HandlerCallback;
using apache::thrift::ThriftServer;
using apache::thrift::transport::TTransportException;
using common::ThriftClientPool;
using folly::Future;
using folly::Try;
using folly::Unit;
using dummy_service::thrift::DummyServiceAsyncClient;
using dummy_service::thrift::DummyServiceErrorCode;
using dummy_service::thrift::DummyServiceException;
using dummy_service::thrift::DummyServiceSvIf;
using apache::thrift::RpcOptions;
using std::atomic;
using std::chrono::milliseconds;
using std::make_shared;
using std::make_tuple;
using std::make_unique;
using std::move;
using std::shared_ptr;
using std::thread;
using std::this_thread::sleep_for;
using std::string;
using std::tie;
using std::tuple;
using std::unique_ptr;
using std::vector;
static const char* gLocalIp = "127.0.0.1";
static const uint16_t gPort = 9090;
class DummyServiceTestHandler : public DummyServiceSvIf {
public:
explicit DummyServiceTestHandler(const uint32_t delayMs) :
delayMs_(delayMs), nPings_(0) {
}
void async_tm_ping(unique_ptr<HandlerCallback<void>> callback)
override {
++nPings_;
auto delayMs = delayMs_.load();
if (delayMs > 0) {
sleep_for(milliseconds(delayMs));
}
callback->done();
}
void async_tm_getSomething(
unique_ptr<HandlerCallback<int64_t>> callback,
int64_t input) override {
DummyServiceException e;
e.errorCode = DummyServiceErrorCode::DUMMY_ERROR;
e.message = "Intended exception";
callback.release()->exceptionInThread(e);
}
atomic<uint32_t> delayMs_;
atomic<uint32_t> nPings_;
};
tuple<shared_ptr<DummyServiceTestHandler>,
shared_ptr<ThriftServer>,
unique_ptr<thread>>
makeServer(uint16_t port, uint32_t delayMs) {
auto handler = make_shared<DummyServiceTestHandler>(delayMs);
auto server = make_shared<ThriftServer>();
server->setPort(port);
server->setInterface(handler);
auto t = make_unique<thread>([server, port] {
LOG(INFO) << "Start server on port " << port;
server->serve();
LOG(INFO) << "Exit server on port " << port;
});
return make_tuple(handler, server, move(t));
}
void testBasics(ThriftClientPool<DummyServiceAsyncClient>* pool) {
const std::atomic<bool>* is_good;
auto client = pool->getClient(gLocalIp, gPort, 0, &is_good);
EXPECT_TRUE(client != nullptr);
sleep_for(milliseconds(100));
EXPECT_FALSE(is_good->load());
// Server is not available
EXPECT_THROW(client->future_ping().get(), TTransportException);
EXPECT_THROW(client->future_ping().get(), TTransportException);
// re-get client, server is still not available
client = pool->getClient(gLocalIp, gPort, 0, &is_good);
sleep_for(milliseconds(100));
EXPECT_TRUE(client != nullptr);
EXPECT_THROW(client->future_ping().get(), TTransportException);
EXPECT_FALSE(is_good->load());
// start the server
shared_ptr<DummyServiceTestHandler> handler;
shared_ptr<ThriftServer> server;
unique_ptr<thread> thr;
tie(handler, server, thr) = makeServer(gPort, 0);
sleep(1);
// The old connection should not work
EXPECT_THROW(client->future_ping().get(), TTransportException);
EXPECT_FALSE(is_good->load());
EXPECT_EQ(handler->nPings_.load(), 0);
// create a new connection, and it should work now
client = pool->getClient(gLocalIp, gPort, 0, &is_good);
sleep_for(milliseconds(100));
EXPECT_TRUE(client != nullptr);
EXPECT_TRUE(is_good->load());
EXPECT_NO_THROW(client->future_ping().get());
EXPECT_EQ(handler->nPings_.load(), 1);
// create a new connection again, and it should work
client = pool->getClient(gLocalIp, gPort, 0, &is_good);
sleep_for(milliseconds(100));
EXPECT_TRUE(client != nullptr);
EXPECT_TRUE(is_good->load());
EXPECT_NO_THROW(client->future_ping().get());
EXPECT_EQ(handler->nPings_.load(), 2);
EXPECT_TRUE(is_good->load());
// test exception
EXPECT_THROW(client->future_getSomething(1).get(),
DummyServiceException);
// client is still good after an exception
EXPECT_NO_THROW(client->future_ping().get());
EXPECT_TRUE(is_good->load());
EXPECT_EQ(handler->nPings_.load(), 3);
// 200 ms delay
handler->delayMs_.store(200);
// no timeout
EXPECT_NO_THROW(client->future_ping().get());
// 100 ms timeout
{
RpcOptions options;
options.setTimeout(milliseconds(100));
EXPECT_THROW(client->future_ping(options).get(), TTransportException);
EXPECT_TRUE(is_good->load());
}
// client is good
EXPECT_NO_THROW(client->future_ping().get());
EXPECT_TRUE(is_good->load());
// 300 ms timeout
{
RpcOptions options;
options.setTimeout(milliseconds(300));
EXPECT_NO_THROW(client->future_ping(options).get());
EXPECT_TRUE(is_good->load());
EXPECT_EQ(handler->nPings_.load(), 7);
}
// stop the server
server->stop();
sleep(1);
// exception
{
RpcOptions options;
options.setTimeout(milliseconds(300));
EXPECT_FALSE(is_good->load());
EXPECT_THROW(client->future_ping(options).get(), TTransportException);
EXPECT_THROW(client->future_ping().get(), TTransportException);
EXPECT_FALSE(is_good->load());
EXPECT_EQ(handler->nPings_.load(), 7);
}
// create a new client, and it should not work
client = pool->getClient(gLocalIp, gPort, 0, &is_good);
sleep_for(milliseconds(100));
EXPECT_THROW(client->future_ping().get(), TTransportException);
EXPECT_FALSE(is_good->load());
thr->join();
}
TEST(ThriftClientTest, Basics) {
ThriftClientPool<DummyServiceAsyncClient> pool_default;
testBasics(&pool_default);
auto pool_default_shared_1 =
pool_default.shareIOThreads<DummyServiceAsyncClient>();
testBasics(pool_default_shared_1.get());
auto pool_default_shared_2 =
pool_default_shared_1->shareIOThreads<DummyServiceAsyncClient>();
testBasics(pool_default_shared_2.get());
ThriftClientPool<DummyServiceAsyncClient> pool_1(1);
testBasics(&pool_1);
ThriftClientPool<DummyServiceAsyncClient> pool_100(100);
testBasics(&pool_100);
}
void stressTest(uint32_t nThreads, uint32_t nCalls, uint32_t nBatchSz,
ThriftClientPool<DummyServiceAsyncClient>* pool) {
LOG(INFO) << nThreads << " nThreads; "
<< nCalls << " nCalls; "
<< nBatchSz << " nBatchSz";
vector<thread> threads(nThreads);
auto shared_pool = pool->shareIOThreads<DummyServiceAsyncClient>();
ThriftClientPool<DummyServiceAsyncClient>* selected_pool = nullptr;
for (uint32_t i = 0 ; i < nThreads; ++i) {
selected_pool = i % 2 == 1 ? pool : shared_pool.get();
threads[i] = thread([nCalls, nBatchSz, pool = selected_pool] () {
auto client = pool->getClient(gLocalIp, gPort);
EXPECT_TRUE(client != nullptr);
auto nRounds = nCalls / nBatchSz;
for (uint32_t j = 0; j < nRounds; ++j) {
vector<Future<Unit>> v;
for (uint32_t k = 0; k < nBatchSz; ++k) {
v.push_back(client->future_ping());
}
for (auto& f : v) {
EXPECT_NO_THROW(f.get());
}
}
});
}
for (auto& thr : threads) {
thr.join();
}
}
TEST(ThriftClientTest, Stress) {
shared_ptr<DummyServiceTestHandler> handler;
shared_ptr<ThriftServer> server;
unique_ptr<thread> thr;
tie(handler, server, thr) = makeServer(gPort, 0);
sleep(1);
ThriftClientPool<DummyServiceAsyncClient> pool_1(1);
stressTest(100, 1000, 1, &pool_1);
stressTest(100, 1000, 100, &pool_1);
stressTest(100, 1000, 500, &pool_1);
ThriftClientPool<DummyServiceAsyncClient> pool_100(100);
stressTest(100, 1000, 1, &pool_100);
stressTest(100, 1000, 100, &pool_100);
stressTest(100, 1000, 500, &pool_100);
EXPECT_EQ(handler->nPings_.load(), 100 * 1000 * 6);
server->stop();
thr->join();
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
FLAGS_channel_cleanup_min_interval_seconds = -1;
return RUN_ALL_TESTS();
}
| 3,327 |
757 | <reponame>leoriohope/RandWireNN
import torch
import torch.nn as nn
class SeparableConv2d(nn.Module):
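    """Depthwise separable convolution: a per-channel (depthwise, groups=in_channels)
    convolution followed by a 1x1 pointwise convolution."""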
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, bias=False):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
dilation, groups=in_channels, bias=bias)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
        return x
 | 319 |
426 | package com.richardradics.commons.widget.echeckbox;
import android.content.Context;
import android.graphics.Typeface;
import android.os.Build;
import android.util.TypedValue;
import android.view.Gravity;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.CompoundButton;
import com.richardradics.commons.BuildConfig;
import com.richardradics.commons.util.ResourceUtil;
import com.richardradics.commons.util.ViewUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Created by radicsrichard on 15. 04. 15..
*/
public class CheckboxAdapter extends ArrayAdapter<String> {
private static int CB_INSTANCES = 312;
List<String> values;
Map<String, Boolean> valueMap = new HashMap<String, Boolean>();
Map<String, EnhancedCheckbox> viewMap = new HashMap<String, EnhancedCheckbox>();
Context context;
Integer checkBoxButtonRes;
Integer itemHeight;
int mSelectColor;
int mUnSelectColor;
float mTextSize;
EnhancedCheckbox allSelectCheckbox;
boolean isAllSelected = false;
Typeface mTypeFace;
public Typeface getCustomFont() {
return mTypeFace;
}
public void setCustomFont(Typeface mTypeFace) {
this.mTypeFace = mTypeFace;
}
private OnSelectionListener mListener;
public OnSelectionListener getSelectionListener() {
return mListener;
}
public void setSelectionListener(OnSelectionListener mListener) {
this.mListener = mListener;
}
public void setTextSize(float textSize) {
this.mTextSize = textSize;
}
public int getSelectColor() {
return mSelectColor;
}
public void setSelectColor(int mSelectColor) {
this.mSelectColor = mSelectColor;
}
public int getUnSelectColor() {
return mUnSelectColor;
}
public void setUnSelectColor(int mUnSelectColor) {
this.mUnSelectColor = mUnSelectColor;
}
public CheckboxAdapter(Context context, int resource, List<String> objects) {
super(context, resource, objects);
this.context = context;
mSelectColor = context.getResources().getColor(android.R.color.black);
mUnSelectColor = context.getResources().getColor(android.R.color.darker_gray);
mTextSize = 13;
checkBoxButtonRes = ResourceUtil.getAndroidDrawableId("btn_check");
this.values = new ArrayList<String>();
this.values.addAll(objects);
valueMap.clear();
viewMap.clear();
for (String s : values) {
valueMap.put(s, false);
}
}
public void setItemHeight(int itemHeight) {
this.itemHeight = itemHeight;
}
public void setCheckBoxButtonRes(int buttonRes) {
this.checkBoxButtonRes = buttonRes;
}
public void setAllSelectCheckbox(EnhancedCheckbox enhancedCheckbox) {
this.allSelectCheckbox = enhancedCheckbox;
}
@Override
public long getItemId(int position) {
return position + 1;
}
@Override
public String getItem(int position) {
return values.get(position);
}
@Override
public int getPosition(String item) {
return values.indexOf(item);
}
@Override
public int getCount() {
return values.size();
}
@Override
public void add(String object) {
values.add(object);
}
@Override
public void addAll(Collection<? extends String> collection) {
values.addAll(collection);
for (String s : values) {
if (!valueMap.containsKey(s)) {
valueMap.put(s, false);
}
}
notifyDataSetChanged();
}
@Override
public void remove(String object) {
values.remove(object);
}
@Override
public void clear() {
values.clear();
}
public boolean isSelectedAll() {
if (valueMap.isEmpty()) {
return false;
}
boolean isselected = true;
for (Map.Entry<String, Boolean> e : valueMap.entrySet()) {
if (!e.getValue()) {
isselected = false;
}
}
return isselected;
}
public void checkAll() {
valueMap.clear();
for (String s : values) {
valueMap.put(s, true);
}
if (allSelectCheckbox != null) {
allSelectCheckbox.setCheckedProgrammatically(true);
}
isAllSelected = true;
notifyDataSetChanged();
}
public void unCheckAll() {
valueMap.clear();
for (String s : values) {
valueMap.put(s, false);
}
if (allSelectCheckbox != null) {
allSelectCheckbox.setCheckedProgrammatically(false);
}
isAllSelected = false;
notifyDataSetChanged();
}
public void setSelectedItems(List<String> items) {
for (String s : values) {
valueMap.put(s, false);
}
for (String s : items) {
valueMap.put(s, true);
}
notifyDataSetChanged();
}
public List<String> getSelectedItems() {
List<String> selected = new ArrayList<String>();
for (Map.Entry<String, Boolean> e : valueMap.entrySet()) {
if (e.getValue()) {
selected.add(e.getKey());
}
}
return selected;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
String current = values.get(position);
EnhancedCheckbox enhancedCheckbox;
if (!viewMap.containsKey(current)) {
enhancedCheckbox = new EnhancedCheckbox(context);
enhancedCheckbox.setId(CB_INSTANCES++);
if (itemHeight != null) {
enhancedCheckbox.setHeight(ViewUtils.convertToPix(context.getResources().getDisplayMetrics().density, itemHeight));
}
enhancedCheckbox.setText(current);
if (mTypeFace != null) {
enhancedCheckbox.setTypeface(mTypeFace);
}
enhancedCheckbox.setGravity(Gravity.START);
enhancedCheckbox.setTextSize(TypedValue.COMPLEX_UNIT_SP, mTextSize);
enhancedCheckbox.setTag(current);
if (checkBoxButtonRes != null) {
enhancedCheckbox.setButtonDrawable(context.getResources().getDrawable(checkBoxButtonRes));
// if (Build.VERSION.SDK_INT >= 17) {
final float scale = context.getResources().getDisplayMetrics().density;
enhancedCheckbox.setPadding(enhancedCheckbox.getCompoundPaddingLeft(), //+ (int) (10.0f * scale + 0.5f),
0,
enhancedCheckbox.getPaddingRight(),
enhancedCheckbox.getPaddingBottom());
// }
}
enhancedCheckbox.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
if (allSelectCheckbox != null) {
if (isAllSelected) {
isAllSelected = false;
allSelectCheckbox.setCheckedProgrammatically(false);
allSelectCheckbox.setTextColor(context.getResources().getColor(android.R.color.darker_gray));
}
}
String s = (String) buttonView.getTag();
valueMap.put(s, isChecked);
if (mListener != null) {
mListener.onSelectionChange(s, isChecked, false);
}
if (isChecked) {
if (allSelectCheckbox != null) {
if (isSelectedAll()) {
allSelectCheckbox.setTextColor(context.getResources().getColor(android.R.color.black));
allSelectCheckbox.setCheckedProgrammatically(true);
isAllSelected = true;
} else {
}
}
} else {
}
notifyDataSetChanged();
}
});
viewMap.put(current, enhancedCheckbox);
} else {
enhancedCheckbox = viewMap.get(current);
}
enhancedCheckbox.setCheckedProgrammatically(valueMap.get(current));
if (enhancedCheckbox.isChecked()) {
            //TODO bring in the colors properly somehow
enhancedCheckbox.setTextColor(context.getResources().getColor(android.R.color.black));
if (isSelectedAll()) {
allSelectCheckbox.setTextColor(context.getResources().getColor(android.R.color.black));
allSelectCheckbox.setChecked(true);
isAllSelected = true;
}
} else {
enhancedCheckbox.setTextColor(context.getResources().getColor(android.R.color.darker_gray));
}
enhancedCheckbox.invalidate();
enhancedCheckbox.refreshDrawableState();
return enhancedCheckbox;
}
}
| 4,270 |
1,127 | // Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ngraph_functions/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
using RollCPUTestParams = typename std::tuple<
InputShape, // Input shape
ov::element::Type, // Input precision
std::vector<int64_t>, // Shift
std::vector<int64_t>, // Axes
std::string>; // Device name
class RollLayerCPUTest : public testing::WithParamInterface<RollCPUTestParams>,
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(testing::TestParamInfo<RollCPUTestParams> obj) {
InputShape inputShape;
ov::element::Type inputPrecision;
std::vector<int64_t> shift;
std::vector<int64_t> axes;
std::string targetDevice;
std::tie(inputShape, inputPrecision, shift, axes, targetDevice) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::partialShape2str({inputShape.first}) << "_";
result << "TS=";
for (const auto& item : inputShape.second) {
result << CommonTestUtils::vec2str(item) << "_";
}
result << "Precision=" << inputPrecision.get_type_name() << "_";
result << "Shift=" << CommonTestUtils::vec2str(shift) << "_";
result << "Axes=" << CommonTestUtils::vec2str(axes) << "_";
result << "TargetDevice=" << targetDevice;
return result.str();
}
protected:
void SetUp() override {
InputShape inputShape;
ov::element::Type inputPrecision;
std::vector<int64_t> shift;
std::vector<int64_t> axes;
std::tie(inputShape, inputPrecision, shift, axes, targetDevice) = GetParam();
init_input_shapes({inputShape});
const auto paramsIn = ngraph::builder::makeDynamicParams(inputPrecision, inputDynamicShapes);
auto shiftNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0);
auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);
const auto paramsOut = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
const auto roll = std::dynamic_pointer_cast<ngraph::op::v7::Roll>(ngraph::builder::makeRoll(paramsOut[0], shiftNode, axesNode));
const ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(roll)};
function = std::make_shared<ngraph::Function>(results, paramsIn, "roll");
}
};
TEST_P(RollLayerCPUTest, CompareWithRefs) {
run();
}
namespace {
const std::vector<ov::element::Type> inputPrecisions = {
ov::element::i8,
ov::element::u8,
ov::element::i16,
ov::element::i32,
ov::element::f32,
ov::element::bf16
};
const std::vector<InputShape> data2DZeroShiftShapes = {{{}, {{17, 19}}}, {{-1, -1}, {{5, 17}, {10, 20}}}};
const std::vector<InputShape> data1DShapes = {{{}, {{12}}}, {{-1}, {{10}, {20}}}};
const std::vector<InputShape> data2DShapes = {{{}, {{100, 200}}}, {{{100, 500}, 450}, {{250, 450}, {120, 450}}}};
const std::vector<InputShape> data3DShapes = {{{}, {{2, 300, 320}}},
{{2, {100, 500}, -1}, {{2, 320, 420}, {2, 500, 200}}}};
const std::vector<InputShape> data4DNegativeAxesShapes = {{{}, {{3, 11, 6, 4}}},
{{-1, -1, {5, 6}, -1}, {{5, 10, 6, 15}, {10, 20, 5, 7}}}};
const std::vector<InputShape> data5DRepeatingAxesNegativeShiftShapes = {{{}, {{2, 7, 32, 32, 5}}},
{{2, -1, -1, -1, {2, 7}}, {{2, 5, 20, 17, 3}, {2, 10, 18, 40, 7}}}};
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_2DZeroShift, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data2DZeroShiftShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{0, 0}), // Shift
::testing::Values(std::vector<int64_t>{0, 1}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_1D, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data1DShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{5}), // Shift
::testing::Values(std::vector<int64_t>{0}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_2D, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data2DShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{50, 150}), // Shift
::testing::Values(std::vector<int64_t>{0, 1}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_3D, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data3DShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{160, 150}), // Shift
::testing::Values(std::vector<int64_t>{1, 2}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_4DNegativeAxes, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data4DNegativeAxesShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{7, 3}), // Shift
::testing::Values(std::vector<int64_t>{-3, -2}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_5DRepeatingAxesNegativeShift, RollLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(data5DRepeatingAxesNegativeShiftShapes),
::testing::ValuesIn(inputPrecisions),
::testing::Values(std::vector<int64_t>{4, -1, 7, 2, -5}), // Shift
::testing::Values(std::vector<int64_t>{-1, 0, 3, 3, 2}), // Axes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RollLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions
| 3,753 |
3,651 | package com.orientechnologies.orient.client.remote.message;
import com.orientechnologies.orient.client.binary.OBinaryRequestExecutor;
import com.orientechnologies.orient.client.remote.OBinaryRequest;
import com.orientechnologies.orient.client.remote.OBinaryResponse;
import com.orientechnologies.orient.client.remote.OStorageRemoteSession;
import com.orientechnologies.orient.core.serialization.serializer.record.ORecordSerializer;
import com.orientechnologies.orient.enterprise.channel.binary.OChannelBinaryProtocol;
import com.orientechnologies.orient.enterprise.channel.binary.OChannelDataInput;
import com.orientechnologies.orient.enterprise.channel.binary.OChannelDataOutput;
import java.io.IOException;
/** Created by tglman on 30/12/16. */
public class OFetchTransactionRequest implements OBinaryRequest<OFetchTransactionResponse> {
private int txId;
public OFetchTransactionRequest() {}
public OFetchTransactionRequest(int txId) {
this.txId = txId;
}
@Override
public void write(OChannelDataOutput network, OStorageRemoteSession session) throws IOException {
network.writeInt(txId);
}
@Override
public void read(OChannelDataInput channel, int protocolVersion, ORecordSerializer serializer)
throws IOException {
this.txId = channel.readInt();
}
@Override
public byte getCommand() {
return OChannelBinaryProtocol.REQUEST_TX_FETCH;
}
@Override
public OFetchTransactionResponse createResponse() {
return new OFetchTransactionResponse();
}
@Override
public OBinaryResponse execute(OBinaryRequestExecutor executor) {
return executor.executeFetchTransaction(this);
}
@Override
public String getDescription() {
return "Fetch Transaction";
}
public int getTxId() {
return txId;
}
}
| 529 |
816 | # coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Install source code for the Tapas paper."""
from distutils import spawn
import glob
import os
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
def find_protoc():
"""Find the Protocol Compiler."""
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
return os.environ["PROTOC"]
elif os.path.exists("../src/protoc"):
return "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
return "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
return "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
return "../vsprojects/Release/protoc.exe"
else:
return spawn.find_executable("protoc")
def needs_update(source, target):
"""Returns wheter target file is old or does not exist."""
if not os.path.exists(target):
return True
if not os.path.exists(source):
return False
return os.path.getmtime(source) > os.path.getmtime(target)
def fail(message):
"""Write message to stderr and finish."""
sys.stderr.write(message + "\n")
sys.exit(-1)
def generate_proto(protoc, source):
"""Invokes the Protocol Compiler to generate a _pb2.py."""
target = source.replace(".proto", "_pb2.py")
if needs_update(source, target):
print(f"Generating {target}...")
if not os.path.exists(source):
fail(f"Cannot find required file: {source}")
if protoc is None:
fail("protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.")
protoc_command = [protoc, "-I.", "--python_out=.", source]
if subprocess.call(protoc_command) != 0:
fail(f"Command fail: {' '.join(protoc_command)}")
def prepare():
"""Find all proto files and generate the pb2 ones."""
proto_file_patterns = ["./tapas/protos/*.proto"]
protoc = find_protoc()
for file_pattern in proto_file_patterns:
for proto_file in glob.glob(file_pattern, recursive=True):
generate_proto(protoc, proto_file)
def read(fname):
return open(
os.path.join(os.path.dirname(__file__), fname), encoding="utf-8").read()
prepare()
setup(
name="tapas-table-parsing",
version="0.0.1.dev",
packages=find_packages(),
description="Tapas: Table-based Question Answering.",
long_description_content_type="text/markdown",
long_description=read("README.md"),
author="Google Inc.",
url="https://github.com/google-research/tapas",
license="Apache 2.0",
install_requires=read("requirements.txt").strip().split("\n"))
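# Usage sketch (not part of the original file; the exact commands are
# assumptions for illustration). Installing the package runs prepare() above,
# which compiles every ./tapas/protos/*.proto into a _pb2.py module:
#
#   pip install -e .           # triggers prepare() and then setup()
#   python -c "import tapas"   # smoke test that the package resolves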
| 1,116 |
1,444 | <filename>Mage.Sets/src/mage/cards/b/BackFromTheBrink.java
package mage.cards.b;
import java.util.UUID;
import mage.abilities.Ability;
import mage.abilities.common.ActivateAsSorceryActivatedAbility;
import mage.abilities.costs.Cost;
import mage.abilities.costs.CostImpl;
import mage.abilities.effects.Effect;
import mage.abilities.effects.common.CreateTokenCopyTargetEffect;
import mage.cards.Card;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.Outcome;
import mage.constants.Zone;
import mage.filter.StaticFilters;
import mage.game.Game;
import mage.players.Player;
import mage.target.Target;
import mage.target.common.TargetCardInYourGraveyard;
import mage.target.targetpointer.FixedTarget;
/**
*
* @author BetaSteward
*/
public final class BackFromTheBrink extends CardImpl {
public BackFromTheBrink(UUID ownerId, CardSetInfo setInfo) {
super(ownerId, setInfo, new CardType[]{CardType.ENCHANTMENT}, "{4}{U}{U}");
// Exile a creature card from your graveyard and pay its mana cost: Create a token that's a copy of that card. Activate this ability only any time you could cast a sorcery.
Effect effect = new CreateTokenCopyTargetEffect();
effect.setText("create a token that's a copy of that card");
this.addAbility(new ActivateAsSorceryActivatedAbility(Zone.BATTLEFIELD, effect, new BackFromTheBrinkCost()));
}
private BackFromTheBrink(final BackFromTheBrink card) {
super(card);
}
@Override
public BackFromTheBrink copy() {
return new BackFromTheBrink(this);
}
}
class BackFromTheBrinkCost extends CostImpl {
public BackFromTheBrinkCost() {
Target target = new TargetCardInYourGraveyard(StaticFilters.FILTER_CARD_CREATURE_YOUR_GRAVEYARD);
target.setNotTarget(true);
this.addTarget(target);
this.text = "Exile a creature card from your graveyard and pay its mana cost";
}
public BackFromTheBrinkCost(final BackFromTheBrinkCost cost) {
super(cost);
}
@Override
public BackFromTheBrinkCost copy() {
return new BackFromTheBrinkCost(this);
}
@Override
public boolean canPay(Ability ability, Ability source, UUID controllerId, Game game) {
return targets.canChoose(source.getSourceId(), controllerId, game);
}
@Override
public boolean pay(Ability ability, Game game, Ability source, UUID controllerId, boolean noMana, Cost costToPay) {
if (targets.choose(Outcome.Exile, controllerId, source.getSourceId(), game)) {
Player controller = game.getPlayer(controllerId);
if (controller != null) {
Card card = controller.getGraveyard().get(targets.getFirstTarget(), game);
if (card != null && controller.moveCards(card, Zone.EXILED, ability, game)) {
ability.getEffects().get(0).setTargetPointer(new FixedTarget(card.getId(), game.getState().getZoneChangeCounter(card.getId())));
paid = card.getManaCost().pay(ability, game, source, controllerId, noMana);
}
}
}
return paid;
}
}
| 1,165 |
778 | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: <NAME>
//
//
// Project includes
#include "testing/testing.h"
#include "containers/model.h"
#include "includes/checks.h"
#include "utilities/divide_triangle_2d_3.h"
namespace Kratos
{
namespace Testing
{
KRATOS_TEST_CASE_IN_SUITE(DivideGeometryTriangle2D3Horizontal, KratosCoreFastSuite)
{
Model current_model;
// Generate a model part with the previous
ModelPart& base_model_part = current_model.CreateModelPart("Triangle");
base_model_part.AddNodalSolutionStepVariable(DISTANCE);
// Fill the model part geometry data
base_model_part.CreateNewNode(1, 0.0, 0.0, 0.0);
base_model_part.CreateNewNode(2, 1.0, 0.0, 0.0);
base_model_part.CreateNewNode(3, 0.0, 1.0, 0.0);
Properties::Pointer p_properties(new Properties(0));
base_model_part.CreateNewElement("Element2D3N", 1, {1, 2, 3}, p_properties);
// Set the DISTANCE field
base_model_part.Nodes()[1].FastGetSolutionStepValue(DISTANCE) = -1.0;
base_model_part.Nodes()[2].FastGetSolutionStepValue(DISTANCE) = -1.0;
base_model_part.Nodes()[3].FastGetSolutionStepValue(DISTANCE) = 1.0;
// Set the elemental distances vector
Geometry < Node < 3 > >& r_geometry = base_model_part.Elements()[1].GetGeometry();
array_1d<double, 3> distances_vector;
for (unsigned int i = 0; i < r_geometry.size(); ++i) {
distances_vector(i) = r_geometry[i].FastGetSolutionStepValue(DISTANCE);
}
base_model_part.Elements()[1].SetValue(ELEMENTAL_DISTANCES, distances_vector);
Vector& r_elemental_distances = base_model_part.Elements()[1].GetValue(ELEMENTAL_DISTANCES);
// Build the triangle splitting utility
DivideTriangle2D3 triangle_splitter(r_geometry, r_elemental_distances);
// Call the divide geometry method
triangle_splitter.GenerateDivision();
// Call the intersection generation method
triangle_splitter.GenerateIntersectionsSkin();
// Call the positive exterior faces generation method
std::vector < unsigned int > pos_ext_faces_parent_ids;
std::vector < DivideTriangle2D3::IndexedPointGeometryPointerType > pos_ext_faces;
triangle_splitter.GenerateExteriorFaces(
pos_ext_faces,
pos_ext_faces_parent_ids,
triangle_splitter.GetPositiveSubdivisions());
// Call the negative exterior faces generation method
std::vector < unsigned int > neg_ext_faces_parent_ids;
std::vector < DivideTriangle2D3::IndexedPointGeometryPointerType > neg_ext_faces;
triangle_splitter.GenerateExteriorFaces(
neg_ext_faces,
neg_ext_faces_parent_ids,
triangle_splitter.GetNegativeSubdivisions());
const double tolerance = 1e-10;
// Check general splitting values
KRATOS_CHECK(triangle_splitter.mIsSplit);
KRATOS_CHECK_EQUAL(triangle_splitter.mDivisionsNumber, 3);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdgesNumber, 2);
// Check split edges
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[0], 0);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[1], 1);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[2], 2);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[3], -1);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[4], 4);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[5], 5);
// Check subdivisions
const auto &r_positive_subdivision_0 = *(triangle_splitter.GetPositiveSubdivisions()[0]);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[1].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[2].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[2].Y(), 1.0, tolerance);
const auto &r_negative_subdivision_0 = *(triangle_splitter.GetNegativeSubdivisions()[0]);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[1].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[2].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[2].Y(), 0.5, tolerance);
const auto &r_negative_subdivision_1 = *(triangle_splitter.GetNegativeSubdivisions()[1]);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[2].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[2].Y(), 0.0, tolerance);
// Check interfaces
const auto &r_positive_interface_0 = *(triangle_splitter.GetPositiveInterfaces()[0]);
KRATOS_CHECK_NEAR(r_positive_interface_0[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[1].Y(), 0.5, tolerance);
const auto &r_negative_interface_0 = *(triangle_splitter.GetNegativeInterfaces()[0]);
KRATOS_CHECK_NEAR(r_negative_interface_0[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[1].Y(), 0.5, tolerance);
KRATOS_CHECK_EQUAL(triangle_splitter.GetPositiveInterfacesParentIds()[0], 0);
KRATOS_CHECK_EQUAL(triangle_splitter.GetNegativeInterfacesParentIds()[0], 0);
// Check exterior faces
KRATOS_CHECK_EQUAL(pos_ext_faces.size(), 2);
KRATOS_CHECK_EQUAL(neg_ext_faces.size(), 3);
KRATOS_CHECK_EQUAL(pos_ext_faces_parent_ids[0], 0);
KRATOS_CHECK_EQUAL(pos_ext_faces_parent_ids[1], 0);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[0], 0);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[1], 1);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[2], 1);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[1].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[0].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[1].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[0].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[1].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[1].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[1].Y(), 0.0, tolerance);
}
KRATOS_TEST_CASE_IN_SUITE(DivideGeometryTriangle2D3Vertical, KratosCoreFastSuite)
{
Model current_model;
// Generate a model part with the previous
ModelPart& base_model_part = current_model.CreateModelPart("Triangle");
base_model_part.AddNodalSolutionStepVariable(DISTANCE);
// Fill the model part geometry data
base_model_part.CreateNewNode(1, 0.0, 0.0, 0.0);
base_model_part.CreateNewNode(2, 1.0, 0.0, 0.0);
base_model_part.CreateNewNode(3, 0.0, 1.0, 0.0);
Properties::Pointer p_properties(new Properties(0));
base_model_part.CreateNewElement("Element2D3N", 1, {1, 2, 3}, p_properties);
// Set the DISTANCE field
base_model_part.Nodes()[1].FastGetSolutionStepValue(DISTANCE) = -1.0;
base_model_part.Nodes()[2].FastGetSolutionStepValue(DISTANCE) = 1.0;
base_model_part.Nodes()[3].FastGetSolutionStepValue(DISTANCE) = -1.0;
// Set the elemental distances vector
Geometry < Node < 3 > >& r_geometry = base_model_part.Elements()[1].GetGeometry();
array_1d<double, 3> distances_vector;
for (unsigned int i = 0; i < r_geometry.size(); ++i) {
distances_vector(i) = r_geometry[i].FastGetSolutionStepValue(DISTANCE);
}
base_model_part.Elements()[1].SetValue(ELEMENTAL_DISTANCES, distances_vector);
Vector& r_elemental_distances = base_model_part.Elements()[1].GetValue(ELEMENTAL_DISTANCES);
// Build the triangle splitting utility
DivideTriangle2D3 triangle_splitter(r_geometry, r_elemental_distances);
// Call the divide geometry method
triangle_splitter.GenerateDivision();
// Call the intersection generation method
triangle_splitter.GenerateIntersectionsSkin();
// Call the positive exterior faces generation method
std::vector < unsigned int > pos_ext_faces_parent_ids;
std::vector < DivideTriangle2D3::IndexedPointGeometryPointerType > pos_ext_faces;
triangle_splitter.GenerateExteriorFaces(
pos_ext_faces,
pos_ext_faces_parent_ids,
triangle_splitter.GetPositiveSubdivisions());
// Call the negative exterior faces generation method
std::vector < unsigned int > neg_ext_faces_parent_ids;
std::vector < DivideTriangle2D3::IndexedPointGeometryPointerType > neg_ext_faces;
triangle_splitter.GenerateExteriorFaces(
neg_ext_faces,
neg_ext_faces_parent_ids,
triangle_splitter.GetNegativeSubdivisions());
const double tolerance = 1e-10;
// Check general splitting values
KRATOS_CHECK(triangle_splitter.mIsSplit);
KRATOS_CHECK_EQUAL(triangle_splitter.mDivisionsNumber, 3);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdgesNumber, 2);
// Check split edges
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[0], 0);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[1], 1);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[2], 2);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[3], 3);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[4], 4);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdges[5], -1);
// Check subdivisions
const auto &r_positive_subdivision_0 = *(triangle_splitter.GetPositiveSubdivisions()[0]);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[2].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR(r_positive_subdivision_0[2].Y(), 0.0, tolerance);
const auto &r_negative_subdivision_0 = *(triangle_splitter.GetNegativeSubdivisions()[0]);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[1].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[2].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_0[2].Y(), 0.0, tolerance);
const auto &r_negative_subdivision_1 = *(triangle_splitter.GetNegativeSubdivisions()[1]);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[1].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[2].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_subdivision_1[2].Y(), 0.0, tolerance);
// Check interfaces
const auto &r_positive_interface_0 = *(triangle_splitter.GetPositiveInterfaces()[0]);
KRATOS_CHECK_NEAR(r_positive_interface_0[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_positive_interface_0[1].Y(), 0.0, tolerance);
const auto &r_negative_interface_0 = *(triangle_splitter.GetNegativeInterfaces()[0]);
KRATOS_CHECK_NEAR(r_negative_interface_0[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR(r_negative_interface_0[1].Y(), 0.5, tolerance);
KRATOS_CHECK_EQUAL(triangle_splitter.GetPositiveInterfacesParentIds()[0], 0);
KRATOS_CHECK_EQUAL(triangle_splitter.GetNegativeInterfacesParentIds()[0], 0);
// Check exterior faces
KRATOS_CHECK_EQUAL(pos_ext_faces.size(), 2);
KRATOS_CHECK_EQUAL(neg_ext_faces.size(), 3);
KRATOS_CHECK_EQUAL(pos_ext_faces_parent_ids[0], 0);
KRATOS_CHECK_EQUAL(pos_ext_faces_parent_ids[1], 0);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[0], 0);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[1], 1);
KRATOS_CHECK_EQUAL(neg_ext_faces_parent_ids[2], 1);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[0].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[0])[1].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[1].X(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*pos_ext_faces[1])[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[0].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[0].Y(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[0])[1].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[0].Y(), 1.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[1].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[1])[1].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[0].X(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[0].Y(), 0.0, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[1].X(), 0.5, tolerance);
KRATOS_CHECK_NEAR((*neg_ext_faces[2])[1].Y(), 0.0, tolerance);
}
KRATOS_TEST_CASE_IN_SUITE(DivideGeometryTriangle2D3NoDivision, KratosCoreFastSuite)
{
Model current_model;
// Generate a model part with the previous
ModelPart& base_model_part = current_model.CreateModelPart("Triangle");
base_model_part.AddNodalSolutionStepVariable(DISTANCE);
// Fill the model part geometry data
base_model_part.CreateNewNode(1, 0.0, 0.0, 0.0);
base_model_part.CreateNewNode(2, 1.0, 0.0, 0.0);
base_model_part.CreateNewNode(3, 0.0, 1.0, 0.0);
Properties::Pointer p_properties(new Properties(0));
base_model_part.CreateNewElement("Element2D3N", 1, {1, 2, 3}, p_properties);
// Set the DISTANCE field
base_model_part.Nodes()[1].FastGetSolutionStepValue(DISTANCE) = 1.0;
base_model_part.Nodes()[2].FastGetSolutionStepValue(DISTANCE) = 1.0;
base_model_part.Nodes()[3].FastGetSolutionStepValue(DISTANCE) = 1.0;
// Set the elemental distances vector
Geometry < Node < 3 > >& r_geometry = base_model_part.Elements()[1].GetGeometry();
array_1d<double, 3> distances_vector;
for (unsigned int i = 0; i < r_geometry.PointsNumber(); ++i) {
distances_vector(i) = r_geometry[i].FastGetSolutionStepValue(DISTANCE);
}
base_model_part.Elements()[1].SetValue(ELEMENTAL_DISTANCES, distances_vector);
Vector& r_elemental_distances = base_model_part.Elements()[1].GetValue(ELEMENTAL_DISTANCES);
// Build the triangle splitting utility
DivideTriangle2D3 triangle_splitter(r_geometry, r_elemental_distances);
// Call the divide geometry method
triangle_splitter.GenerateDivision();
// Check general splitting values
KRATOS_CHECK_IS_FALSE(triangle_splitter.mIsSplit);
KRATOS_CHECK_EQUAL(triangle_splitter.mDivisionsNumber, 1);
KRATOS_CHECK_EQUAL(triangle_splitter.mSplitEdgesNumber, 0);
}
}
} // namespace Kratos.
| 7,190 |
372 | /*
*
* (c) Copyright 1990 OPEN SOFTWARE FOUNDATION, INC.
* (c) Copyright 1990 HEWLETT-PACKARD COMPANY
* (c) Copyright 1990 DIGITAL EQUIPMENT CORPORATION
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this
* file for any purpose is hereby granted without fee, provided that
* the above copyright notices and this notice appears in all source
* code copies, and that none of the names of Open Software
* Foundation, Inc., Hewlett-Packard Company, or Digital Equipment
* Corporation be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Neither Open Software Foundation, Inc., Hewlett-
* Packard Company, nor Digital Equipment Corporation makes any
* representations about the suitability of this software for any
* purpose.
*
*/
/*
*/
#ifndef _RPCRAND_H
#define _RPCRAND_H
/*
**
** NAME:
**
** rpcrand.h
**
** FACILITY:
**
** Remote Procedure Call (RPC)
**
** ABSTRACT:
**
** Random number generator abstraction to isolate random number generation
** routines and allow alternate implementations to be substituted more
** easily.
**
** This package provides the following PRIVATE operations:
**
** void RPC_RANDOM_INIT(seed)
** unsigned32 RPC_RANDOM_GET(lower, upper)
**
**
*/
/*
* R P C _ R A N D O M _ I N I T
*
* Used for random number 'seed' routines or any other one time
* initialization required.
*/
#define RPC_RANDOM_INIT(seed) \
rpc__random_init(seed)
/*
* R P C _ R A N D O M _ G E T
*
* Get a random number in the range lower - upper (inclusive)
*/
#define RPC_RANDOM_GET(lower, upper) \
(((rpc__random_get(lower, upper)) % (upper - lower + 1)) + lower)
/*
* Prototype for the private 'c' routines used by the RPC_RANDOM macros.
*/
#include <dce/dce.h>
PRIVATE void rpc__random_init ( unsigned32 /*seed*/ );
PRIVATE unsigned32 rpc__random_get (
unsigned32 /*lower*/,
unsigned32 /*upper*/
);
#endif /* _RPCRAND_H */
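/*
 * Illustrative use of the macros above (not part of the original header).
 * The seed value and range are assumptions for this sketch only; real callers
 * seed once during runtime initialization and then draw values as needed.
 */
#if 0
static void example_random_usage (void)
{
    unsigned32 backoff;

    RPC_RANDOM_INIT (1234);             /* one-time seeding */
    backoff = RPC_RANDOM_GET (1, 10);   /* random value in the range 1..10 */
    (void) backoff;
}
#endif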
| 721 |
5,169 | {
"name": "CNSDK",
"version": "1.0.0",
"summary": "This is a CNSDK",
"description": "it is a test Demo this is a test Demo,and it is implemented by OC",
"homepage": "https://github.com/YuqianChen/CNSDK",
"license": "MIT",
"authors": {
"chenyuqian": "<EMAIL>"
},
"source": {
"git": "https://github.com/YuqianChen/CNSDK.git",
"tag": "1.0.0"
},
"requires_arc": true,
"platforms": {
"ios": "8.0"
},
"source_files": "CNSDK/**/*",
"resources": "Resources/HRSDKResource.bundle"
}
| 226 |
5,823 | <gh_stars>1000+
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#import <Cocoa/Cocoa.h>
/**
* An interface for a responder that can process a key event and decides whether
* to handle an event synchronously.
*
* To use this class, add it to a |FlutterKeyboardManager| with
* |addSecondaryResponder|.
*/
@protocol FlutterKeySecondaryResponder
/**
* Informs the receiver that the user has interacted with a key.
*
* The return value indicates whether it has handled the given event.
*
* Default implementation returns NO.
*/
@required
- (BOOL)handleKeyEvent:(nonnull NSEvent*)event;
@end
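// Minimal usage sketch (not part of the original header). Class and variable
// names below are assumptions for illustration; the only details taken from
// the documentation above are the protocol method and registration through
// FlutterKeyboardManager's addSecondaryResponder.
//
//   @interface MyKeyResponder : NSObject <FlutterKeySecondaryResponder>
//   @end
//
//   @implementation MyKeyResponder
//   - (BOOL)handleKeyEvent:(nonnull NSEvent*)event {
//     // Return YES only when this responder fully consumed the event.
//     return NO;
//   }
//   @end
//
//   // Registration, assuming a FlutterKeyboardManager* named keyboardManager:
//   [keyboardManager addSecondaryResponder:[[MyKeyResponder alloc] init]];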
| 205 |
4,036 | <filename>java/ql/test/query-tests/UselessNullCheck/A.java<gh_stars>1000+
public class A {
void f() {
Object o = new Object();
if (o == null) { } // Useless check
if (o != null) { } // Useless check
try {
new Object();
} catch(Exception e) {
if (e == null) { // Useless check
throw new Error();
}
}
}
void g(Object o) {
if (o instanceof A) {
A a = (A)o;
if (a != null) { // Useless check
throw new Error();
}
}
}
interface I {
A get();
}
I h() {
final A x = this;
return () -> {
if (x != null) { // Useless check
return x;
}
return new A();
};
}
Object f2(Object x) {
if (x == null) {
return this != null ? this : null; // Useless check
}
if (x != null) { // Useless check
return x;
}
return null;
}
private final Object finalObj = new Object();
public void ex12() {
finalObj.hashCode();
if (finalObj != null) { // Useless check
finalObj.hashCode();
}
}
}
| 474 |
2,453 | <filename>XVim2/XcodeHeader/IDEKit/IDEBatchFindSourceRuleEditorCriterion.h
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 30 2020 21:18:12).
//
// Copyright (C) 1997-2019 <NAME>.
//
#import <objc/NSObject.h>
@class NSString;
@interface IDEBatchFindSourceRuleEditorCriterion : NSObject
{
NSString *_identifier;
Class _fileSourceClass;
id _value;
}
+ (id)uniqueSeparatorItem;
- (void).cxx_destruct;
@property(readonly) id value; // @synthesize value=_value;
@property(readonly) Class fileSourceClass; // @synthesize fileSourceClass=_fileSourceClass;
@property(readonly) NSString *identifier; // @synthesize identifier=_identifier;
- (id)description;
- (BOOL)isEqual:(id)arg1;
- (BOOL)isEqualToBatchFindSourceRuleEditorCriterion:(id)arg1;
- (unsigned long long)hash;
- (id)copyWithZone:(struct _NSZone *)arg1;
- (BOOL)isSeparatorItem;
- (id)initWithIdentifier:(id)arg1 fileSourceClass:(Class)arg2;
- (id)initWithIdentifier:(id)arg1 value:(id)arg2 fileSourceClass:(Class)arg3;
@end
| 382 |
435 | {
"copyright_text": null,
"description": "",
"duration": 5683,
"language": "eng",
"recorded": "2013-07-05",
"related_urls": [
{
"label": "Conference schedule",
"url": "https://www.pycon.it/en/ep2013/"
}
],
"speakers": [
"<NAME>"
],
"tags": [
"pythonscripts",
"graphics"
],
"thumbnail_url": "https://i.ytimg.com/vi/4e6z3Ada00I/hqdefault.jpg",
"title": "An Intro to Blender Modeling and Scripting - Part 1",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=4e6z3Ada00I"
}
]
}
| 273 |
407 | package com.alibaba.tesla.appmanager.server.repository;
import com.alibaba.tesla.appmanager.server.repository.condition.AppPackageTagQueryCondition;
import com.alibaba.tesla.appmanager.server.repository.domain.AppPackageTagDO;
import java.util.List;
public interface AppPackageTagRepository {
int insert(AppPackageTagDO record);
List<AppPackageTagDO> query(List<Long> appPackageIdList, String tag);
List<AppPackageTagDO> query(List<Long> appPackageIdList);
int deleteByCondition(AppPackageTagQueryCondition condition);
} | 169 |
3,056 | <gh_stars>1000+
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lyra_wavegru.h"
#include <memory>
#include <string>
#include <tuple>
#if !defined(USE_FIXED16) && !defined(USE_BFLOAT16)
#include "exported_layers_test.h"
#endif // !defined(USE_FIXED16) && !defined(USE_BFLOAT16)
// Placeholder for get runfiles header.
#include "absl/strings/str_format.h"
#include "gtest/gtest.h"
#include "include/ghc/filesystem.hpp"
#include "lyra_config.h"
#include "sparse_matmul/sparse_matmul.h" // IWYU pragma: keep
namespace chromemedia {
namespace codec {
namespace {
static const int kNumThreads[] = {1, 2, 4};
static const char kPrefixTemplate[] = "lyra_%dkhz";
#ifdef USE_FIXED16
using ComputeType = csrblocksparse::fixed16_type;
#elif USE_BFLOAT16
using ComputeType = csrblocksparse::bfloat16;
#else
using ComputeType = float;
#endif // USE_FIXED16
class LyraWavegruTest
: public testing::TestWithParam<testing::tuple<int, int>> {
protected:
LyraWavegruTest()
: sample_rate_hz_(GetInternalSampleRate(std::get<1>(GetParam()))),
lyra_wavegru_(LyraWavegru<ComputeType>::Create(
std::get<0>(GetParam()),
ghc::filesystem::current_path() / "wavegru",
absl::StrFormat(kPrefixTemplate, sample_rate_hz_ / 1000))) {}
const int sample_rate_hz_;
std::unique_ptr<LyraWavegru<ComputeType>> lyra_wavegru_;
};
TEST_P(LyraWavegruTest, ModelExistsProdFeatures) {
EXPECT_NE(lyra_wavegru_, nullptr);
}
INSTANTIATE_TEST_SUITE_P(
ThreadsAndSampleRates, LyraWavegruTest,
testing::Combine(testing::ValuesIn(kNumThreads),
testing::ValuesIn(kSupportedSampleRates)));
// Test that exported layers with fixed-point and float weights produce
// matching results.
// Run only in the first of the three related test targets: {lyra_wavegru_test,
// lyra_wavegru_test_fixed16, lyra_wavegru_test_bfloat16}.
#if !defined(USE_FIXED16) && !defined(USE_BFLOAT16)
using csrblocksparse::fixed16_type;
static constexpr int kNumGruHiddens = 1024;
static constexpr int kNumSplitBands = 4;
struct ArLayerTypes {
using FloatLayerType = LyraWavegru<float>::ArLayerType;
using FixedLayerType = LyraWavegru<fixed16_type>::ArLayerType;
static LayerParams Params(const std::string& model_path) {
return LayerParams{
.num_input_channels = kNumSplitBands,
.num_filters = 3 * kNumGruHiddens,
.length = 1,
.kernel_size = 1,
.dilation = 1,
.stride = 1,
.relu = false,
.skip_connection = false,
.type = LayerType::kConv1D,
.num_threads = 1,
.per_column_barrier = false,
.from = LayerParams::FromDisk{.path = model_path, .zipped = true},
.prefix = "lyra_16khz_ar_to_gates_"};
}
};
struct GruLayerTypes {
using FloatLayerType = LyraWavegru<float>::GruLayerType;
using FixedLayerType = LyraWavegru<fixed16_type>::GruLayerType;
static LayerParams Params(const std::string& model_path) {
return LayerParams{
.num_input_channels = kNumGruHiddens,
.num_filters = 3 * kNumGruHiddens,
.length = 1,
.kernel_size = 1,
.dilation = 1,
.stride = 1,
.relu = false,
.skip_connection = false,
.type = LayerType::kConv1D,
.num_threads = 1,
.per_column_barrier = false,
.from = LayerParams::FromDisk{.path = model_path, .zipped = true},
.prefix = "lyra_16khz_gru_layer_"};
}
};
using LayerTypesList = testing::Types<ArLayerTypes, GruLayerTypes>;
INSTANTIATE_TYPED_TEST_SUITE_P(Wavegru, ExportedLayersTest, LayerTypesList);
#endif // !defined(USE_FIXED16) && !defined(USE_BFLOAT16)
} // namespace
} // namespace codec
} // namespace chromemedia
| 1,697 |
2,205 | <reponame>repouniverse/tut
"""
Generate Fancytree test data.
Example:
$ python ft_fixtures.py
"""
import json
import os
from pprint import pformat
import random
from fabulist import Fabulist
class GenerateTreeData(object):
"""."""
def __init__(self):
self.fab = Fabulist()
self.count = 0
self.data = {}
@staticmethod
def get_count(count):
        if isinstance(count, (tuple, list)):
count = random.randrange(count[0], count[1])
return int(count)
def _generate(self, parent_node, child_def, common):
fab = self.fab
child_node_list = parent_node["children"] = []
child_count = self.get_count(child_def["count"])
sub_child_def = child_def.get("children")
for i in range(child_count):
node = {
"title": fab.get_quote(child_def["title"]),
"key": "{}.{}".format(parent_node["key"], i+1) if parent_node.get("key") else str(i+1)
}
child_node_list.append(node)
self.count += 1
if sub_child_def:
self._generate(node, sub_child_def, common)
return child_node_list
def generate(self, opts, meta=None):
self.data = {}
if meta:
self.data.update(meta)
common = opts.get("common", {})
child_def = opts.get("children", [])
self._generate(self.data, child_def, common)
if meta is None:
return self.data["children"]
return self.data
def write(self, path, opts, meta=None):
with open(path, "wt") as f:
res = self.generate(opts, meta)
json.dump(res, f)
return
def test():
g = GenerateTreeData()
opts = {
"common": {},
"children": {
"title": "$(Noun)",
"count": 10,
"children": {
"title": "$(Adj) $(noun)",
"count": 100,
"children": {
"title": "$(Verb:ing) $(adv)",
"count": 100,
},
},
},
}
# res = g.generate(opts)
# print(pformat(res))
path = "/Users/martin/prj/git/fabulist-demo-flask/fabulator/trees.json"
print("Generate data in {}...".format(path))
res = g.write(path, opts)
print("Generated {} nodes.".format(g.count))
if __name__ == "__main__":
test()
| 1,226 |
315 | <gh_stars>100-1000
/* Simple program -- figure out what kind of video display we have */
#include <stdlib.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "SDL.h"
#define NUM_BLITS 10
#define NUM_UPDATES 500
#define FLAG_MASK (SDL_HWSURFACE | SDL_FULLSCREEN | SDL_DOUBLEBUF | \
SDL_SRCCOLORKEY | SDL_SRCALPHA | SDL_RLEACCEL | \
SDL_RLEACCELOK)
void PrintFlags(Uint32 flags)
{
printf("0x%8.8x", (flags & FLAG_MASK));
if ( flags & SDL_HWSURFACE ) {
printf(" SDL_HWSURFACE");
} else {
printf(" SDL_SWSURFACE");
}
if ( flags & SDL_FULLSCREEN ) {
printf(" | SDL_FULLSCREEN");
}
if ( flags & SDL_DOUBLEBUF ) {
printf(" | SDL_DOUBLEBUF");
}
if ( flags & SDL_SRCCOLORKEY ) {
printf(" | SDL_SRCCOLORKEY");
}
if ( flags & SDL_SRCALPHA ) {
printf(" | SDL_SRCALPHA");
}
if ( flags & SDL_RLEACCEL ) {
printf(" | SDL_RLEACCEL");
}
if ( flags & SDL_RLEACCELOK ) {
printf(" | SDL_RLEACCELOK");
}
}
int RunBlitTests(SDL_Surface *screen, SDL_Surface *bmp, int blitcount)
{
int i, j;
int maxx;
int maxy;
SDL_Rect dst;
maxx = (int)screen->w - bmp->w + 1;
maxy = (int)screen->h - bmp->h + 1;
for ( i = 0; i < NUM_UPDATES; ++i ) {
for ( j = 0; j < blitcount; ++j ) {
if ( maxx ) {
dst.x = rand() % maxx;
} else {
dst.x = 0;
}
if ( maxy ) {
dst.y = rand() % maxy;
} else {
dst.y = 0;
}
dst.w = bmp->w;
dst.h = bmp->h;
SDL_BlitSurface(bmp, NULL, screen, &dst);
}
SDL_Flip(screen);
}
return i;
}
int RunModeTests(SDL_Surface *screen)
{
Uint32 then, now;
Uint32 frames;
float seconds;
int i;
Uint8 r, g, b;
SDL_Surface *bmp, *bmpcc, *tmp;
SDL_Event event;
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* First test fills and screen update speed */
printf("Running color fill and fullscreen update test\n");
then = SDL_GetTicks();
frames = 0;
for ( i = 0; i < 256; ++i ) {
r = i;
g = 0;
b = 0;
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, r, g, b));
SDL_Flip(screen);
++frames;
}
for ( i = 0; i < 256; ++i ) {
r = 0;
g = i;
b = 0;
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, r, g, b));
SDL_Flip(screen);
++frames;
}
for ( i = 0; i < 256; ++i ) {
r = 0;
g = 0;
b = i;
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, r, g, b));
SDL_Flip(screen);
++frames;
}
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d fills and flips in %2.2f seconds, %2.2f FPS\n", frames, seconds, (float)frames / seconds);
} else {
printf("%d fills and flips in zero seconds!n", frames);
}
/* clear the screen after fill test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the generic blit test */
bmp = SDL_LoadBMP("sample.bmp");
if ( ! bmp ) {
printf("Couldn't load sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running freshly loaded blit test: %dx%d at %d bpp, flags: ",
bmp->w, bmp->h, bmp->format->BitsPerPixel);
PrintFlags(bmp->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmp, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
/* clear the screen after blit test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the colorkeyed blit test */
bmpcc = SDL_LoadBMP("sample.bmp");
if ( ! bmpcc ) {
printf("Couldn't load sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running freshly loaded cc blit test: %dx%d at %d bpp, flags: ",
bmpcc->w, bmpcc->h, bmpcc->format->BitsPerPixel);
SDL_SetColorKey(bmpcc, SDL_SRCCOLORKEY | SDL_RLEACCEL, *(Uint8 *)bmpcc->pixels);
PrintFlags(bmpcc->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmpcc, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d cc blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d cc blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
/* clear the screen after cc blit test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the generic blit test */
tmp = bmp;
bmp = SDL_DisplayFormat(bmp);
SDL_FreeSurface(tmp);
if ( ! bmp ) {
printf("Couldn't convert sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running display format blit test: %dx%d at %d bpp, flags: ",
bmp->w, bmp->h, bmp->format->BitsPerPixel);
PrintFlags(bmp->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmp, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
/* clear the screen after blit test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the colorkeyed blit test */
tmp = bmpcc;
bmpcc = SDL_DisplayFormat(bmpcc);
SDL_FreeSurface(tmp);
if ( ! bmpcc ) {
printf("Couldn't convert sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running display format cc blit test: %dx%d at %d bpp, flags: ",
bmpcc->w, bmpcc->h, bmpcc->format->BitsPerPixel);
PrintFlags(bmpcc->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmpcc, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d cc blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d cc blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
/* clear the screen after cc blit test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the alpha blit test only if screen bpp>8 */
if (bmp->format->BitsPerPixel>8)
{
SDL_FreeSurface(bmp);
bmp = SDL_LoadBMP("sample.bmp");
SDL_SetAlpha(bmp, SDL_SRCALPHA, 85); /* 85 - 33% alpha */
tmp = bmp;
bmp = SDL_DisplayFormat(bmp);
SDL_FreeSurface(tmp);
if ( ! bmp ) {
printf("Couldn't convert sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running display format alpha blit test: %dx%d at %d bpp, flags: ",
bmp->w, bmp->h, bmp->format->BitsPerPixel);
PrintFlags(bmp->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmp, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d alpha blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d alpha blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
}
/* clear the screen after alpha blit test */
SDL_FillRect(screen, NULL, SDL_MapRGB(screen->format, 0, 0, 0));
SDL_Flip(screen);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
/* run the cc+alpha blit test only if screen bpp>8 */
if (bmp->format->BitsPerPixel>8)
{
SDL_FreeSurface(bmpcc);
bmpcc = SDL_LoadBMP("sample.bmp");
SDL_SetAlpha(bmpcc, SDL_SRCALPHA, 85); /* 85 - 33% alpha */
SDL_SetColorKey(bmpcc, SDL_SRCCOLORKEY | SDL_RLEACCEL, *(Uint8 *)bmpcc->pixels);
tmp = bmpcc;
bmpcc = SDL_DisplayFormat(bmpcc);
SDL_FreeSurface(tmp);
if ( ! bmpcc ) {
printf("Couldn't convert sample.bmp: %s\n", SDL_GetError());
return 0;
}
printf("Running display format cc+alpha blit test: %dx%d at %d bpp, flags: ",
bmpcc->w, bmpcc->h, bmpcc->format->BitsPerPixel);
PrintFlags(bmpcc->flags);
printf("\n");
then = SDL_GetTicks();
frames = RunBlitTests(screen, bmpcc, NUM_BLITS);
now = SDL_GetTicks();
seconds = (float)(now - then) / 1000.0f;
if ( seconds > 0.0f ) {
printf("%d cc+alpha blits / %d updates in %2.2f seconds, %2.2f FPS\n", NUM_BLITS*frames, frames, seconds, (float)frames / seconds);
} else {
printf("%d cc+alpha blits / %d updates in zero seconds!\n", NUM_BLITS*frames, frames);
}
}
SDL_FreeSurface(bmpcc);
SDL_FreeSurface(bmp);
while ( SDL_PollEvent(&event) ) {
if ( event.type == SDL_KEYDOWN )
return 0;
}
return 1;
}
void RunVideoTests()
{
static const struct {
int w, h, bpp;
} mode_list[] = {
{ 640, 480, 8 }, { 640, 480, 16 }, { 640, 480, 32 },
{ 800, 600, 8 }, { 800, 600, 16 }, { 800, 600, 32 },
{ 1024, 768, 8 }, { 1024, 768, 16 }, { 1024, 768, 32 }
};
static const Uint32 flags[] = {
(SDL_SWSURFACE),
(SDL_SWSURFACE | SDL_FULLSCREEN),
(SDL_HWSURFACE | SDL_FULLSCREEN),
(SDL_HWSURFACE | SDL_FULLSCREEN | SDL_DOUBLEBUF)
};
int i, j;
SDL_Surface *screen;
/* Test out several different video mode combinations */
SDL_WM_SetCaption("SDL Video Benchmark", "vidtest");
SDL_ShowCursor(0);
for ( i = 0; i < SDL_TABLESIZE(mode_list); ++i ) {
for ( j = 0; j < SDL_TABLESIZE(flags); ++j ) {
printf("===================================\n");
printf("Setting video mode: %dx%d at %d bpp, flags: ",
mode_list[i].w,
mode_list[i].h,
mode_list[i].bpp);
PrintFlags(flags[j]);
printf("\n");
screen = SDL_SetVideoMode(mode_list[i].w,
mode_list[i].h,
mode_list[i].bpp,
flags[j]);
if ( ! screen ) {
printf("Setting video mode failed: %s\n", SDL_GetError());
continue;
}
if ( (screen->flags & FLAG_MASK) != flags[j] ) {
printf("Flags didn't match: ");
PrintFlags(screen->flags);
printf("\n");
continue;
}
if ( ! RunModeTests(screen) ) {
return;
}
}
}
}
int main(int argc, char *argv[])
{
const SDL_VideoInfo *info;
int i;
SDL_Rect **modes;
char driver[128];
if ( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
fprintf(stderr,
"Couldn't initialize SDL: %s\n", SDL_GetError());
exit(1);
}
if ( SDL_VideoDriverName(driver, sizeof(driver)) ) {
printf("Video driver: %s\n", driver);
}
info = SDL_GetVideoInfo();
printf(
"Current display: %dx%d, %d bits-per-pixel\n",
info->current_w, info->current_h, info->vfmt->BitsPerPixel);
if ( info->vfmt->palette == NULL ) {
printf(" Red Mask = 0x%.8x\n", info->vfmt->Rmask);
printf(" Green Mask = 0x%.8x\n", info->vfmt->Gmask);
printf(" Blue Mask = 0x%.8x\n", info->vfmt->Bmask);
}
/* Print available fullscreen video modes */
modes = SDL_ListModes(NULL, SDL_FULLSCREEN);
if ( modes == (SDL_Rect **)0 ) {
printf("No available fullscreen video modes\n");
} else
if ( modes == (SDL_Rect **)-1 ) {
printf("No special fullscreen video modes\n");
} else {
printf("Fullscreen video modes:\n");
for ( i=0; modes[i]; ++i ) {
printf("\t%dx%dx%d\n", modes[i]->w, modes[i]->h, info->vfmt->BitsPerPixel);
}
}
if ( info->wm_available ) {
printf("A window manager is available\n");
}
if ( info->hw_available ) {
printf("Hardware surfaces are available (%dK video memory)\n",
info->video_mem);
}
if ( info->blit_hw ) {
printf(
"Copy blits between hardware surfaces are accelerated\n");
}
if ( info->blit_hw_CC ) {
printf(
"Colorkey blits between hardware surfaces are accelerated\n");
}
if ( info->blit_hw_A ) {
printf(
"Alpha blits between hardware surfaces are accelerated\n");
}
if ( info->blit_sw ) {
printf(
"Copy blits from software surfaces to hardware surfaces are accelerated\n");
}
if ( info->blit_sw_CC ) {
printf(
"Colorkey blits from software surfaces to hardware surfaces are accelerated\n");
}
if ( info->blit_sw_A ) {
printf(
"Alpha blits from software surfaces to hardware surfaces are accelerated\n");
}
if ( info->blit_fill ) {
printf(
"Color fills on hardware surfaces are accelerated\n");
}
if ( argv[1] && (strcmp(argv[1], "-benchmark") == 0) ) {
RunVideoTests();
}
SDL_Quit();
return(0);
}
| 5,804 |
2,381 | package com.github.dockerjava.core.command;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.InputStream;
import com.github.dockerjava.api.command.LoadImageCmd;
import javax.annotation.Nonnull;
public class LoadImageCmdImpl extends AbstrDockerCmd<LoadImageCmd, Void> implements LoadImageCmd {
private InputStream imageStream;
/**
* @param imageStream
* the InputStream of the tar file
*/
public LoadImageCmdImpl(LoadImageCmd.Exec exec, InputStream imageStream) {
super(exec);
withImageStream(imageStream);
}
@Override
public InputStream getImageStream() {
return imageStream;
}
/**
* @param imageStream
* the InputStream of the tar file
*/
@Override
public LoadImageCmdImpl withImageStream(@Nonnull InputStream imageStream) {
checkNotNull(imageStream, "imageStream was not specified");
this.imageStream = imageStream;
return this;
}
}
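// Usage sketch (not part of the original class; variable names are assumptions
// for illustration). A LoadImageCmd is normally obtained from a DockerClient
// rather than constructed directly:
//
//   try (InputStream tarStream = new FileInputStream("image.tar")) {
//       dockerClient.loadImageCmd(tarStream).exec();
//   }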
| 374 |
523 | #pragma once
#include "Common.h"
namespace UAlbertaBot
{
class AutoObserver
{
int m_cameraLastMoved = 0;
int m_unitFollowFrames = 0;
BWAPI::Unit m_observerFollowingUnit = nullptr;
public:
AutoObserver();
void onFrame();
};
} | 118 |
875 | <reponame>christeoh/sqoop
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sqoop.validation;
/**
* A specific implementation of ValidationFailureHandler that aborts the
* processing by throwing an exception with failure message and the reason.
*
* This is used as the default handler unless overridden in configuration.
*/
public class AbortOnFailureHandler implements ValidationFailureHandler {
static final ValidationFailureHandler INSTANCE = new AbortOnFailureHandler();
/**
* Method that handles the validation failure.
*
* @param validationContext validation context
* @return if failure was handled or not
* @throws ValidationException
*/
@Override
public boolean handle(ValidationContext validationContext)
throws ValidationException {
StringBuilder messageBuffer = new StringBuilder();
messageBuffer.append("Validation failed by ");
messageBuffer.append(validationContext.getMessage());
messageBuffer.append(". Reason: ").append(validationContext.getReason());
messageBuffer.append(", Row Count at Source: ");
messageBuffer.append(validationContext.getSourceRowCount());
messageBuffer.append(", Row Count at Target: ");
messageBuffer.append(validationContext.getTargetRowCount());
throw new ValidationException(messageBuffer.toString());
}
}
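// Usage sketch (not part of the original class; the context variable is an
// assumption for illustration). Because this handler always aborts, callers
// treat a normal return as "no failure" and catch the exception otherwise:
//
//   ValidationFailureHandler handler = new AbortOnFailureHandler();
//   try {
//       handler.handle(validationContext);   // throws ValidationException
//   } catch (ValidationException e) {
//       // log the mismatch and stop the transfer
//   }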
| 539 |
26,932 | //
// lzssdec.h
// img4tool
//
// Code borrowed from: http://newosxbook.com/src.jl?tree=listings&file=joker.c
// Coded by <NAME> (a.k.a @Morpheus______), http://newosxbook.com
#include "lzssdec.h"
#include <string.h>
#include <stdlib.h>
/**************************************************************
LZSS.C -- A Data Compression Program
***************************************************************
4/6/1989 <NAME>
Use, distribute, and modify this program freely.
Please send me your improved versions.
PC-VAN SCIENCE
NIFTY-Serve PAF01022
CompuServe 74050,1022
**************************************************************/
/*
* lzss.c - Package for decompressing lzss compressed objects
*
* Copyright (c) 2003 Apple Computer, Inc.
*
* DRI: <NAME>
*/
#define N 4096 /* size of ring buffer - must be power of 2 */
#define F 18 /* upper limit for match_length */
#define THRESHOLD 2 /* encode string into position and length
if match_length is greater than this */
#define NIL N /* index for root of binary search trees */
int decompress_lzss(u_int8_t *dst, u_int8_t *src, u_int32_t srclen){
/* ring buffer of size N, with extra F-1 bytes to aid string comparison */
u_int8_t text_buf[N + F - 1];
u_int8_t *dststart = dst;
u_int8_t *srcend = src + srclen;
int i, j, k, r, c;
unsigned int flags;
dst = dststart;
srcend = src + srclen;
for (i = 0; i < N - F; i++)
text_buf[i] = ' ';
r = N - F;
flags = 0;
for ( ; ; ) {
if (((flags >>= 1) & 0x100) == 0) {
if (src < srcend) c = *src++; else break;
flags = c | 0xFF00; /* uses higher byte cleverly */
} /* to count eight */
if (flags & 1) {
if (src < srcend) c = *src++; else break;
*dst++ = c;
text_buf[r++] = c;
r &= (N - 1);
} else {
if (src < srcend) i = *src++; else break;
if (src < srcend) j = *src++; else break;
i |= ((j & 0xF0) << 4);
j = (j & 0x0F) + THRESHOLD;
for (k = 0; k <= j; k++) {
c = text_buf[(i + k) & (N - 1)];
*dst++ = c;
text_buf[r++] = c;
r &= (N - 1);
}
}
}
return (int)(dst - dststart);
}
struct compHeader {
char sig[8] ; // "complzss"
uint32_t unknown; // Likely CRC32. But who cares, anyway?
uint32_t uncompressedSize;
uint32_t compressedSize;
uint32_t unknown1; // 1
};
char *tryLZSS(char *compressed, size_t *filesize){
struct compHeader *compHeader = (struct compHeader*)compressed;
if (!compHeader) return NULL;
int sig[2] = { 0xfeedfacf, 0x0100000c };
    char *decomp = malloc (ntohl(compHeader->uncompressedSize));
    if (!decomp)
        return NULL;
    char *feed = memmem(compressed+64, 1024, sig, sizeof(sig));
    if (!feed) {
        free(decomp);
        return NULL;
    }
    feed--;
    int rc = decompress_lzss((void*)decomp, (void*)feed, ntohl(compHeader->compressedSize));
    if (rc != ntohl(compHeader->uncompressedSize)) {
        free(decomp);
        return NULL;
    }
*filesize = rc;
return (decomp);
} // tryLZSS
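/*
 * Usage sketch (not part of the original file; buffer names are assumptions
 * for illustration). tryLZSS() expects a "complzss"-prefixed image already in
 * memory and hands back a freshly malloc()ed buffer on success:
 *
 *   size_t decompressedSize = 0;
 *   char *kernel = tryLZSS(compressedImage, &decompressedSize);
 *   if (kernel != NULL) {
 *       // ... use kernel[0 .. decompressedSize) ...
 *       free(kernel);
 *   }
 */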
| 1,465 |
1,694 | <gh_stars>1000+
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 17 2017 16:24:48).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by <NAME>.
//
#import <objc/NSObject.h>
@class WAWhatsNewConfig;
@interface WAWhatsNewInfo : NSObject
{
WAWhatsNewConfig *_config;
}
@property(retain, nonatomic) WAWhatsNewConfig *config; // @synthesize config=_config;
- (void).cxx_destruct;
- (id)description;
@end
| 178 |
475 | <reponame>shanteswarrao/allure_plugin<gh_stars>100-1000
package ru.yandex.qatools.allure.junit;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import ru.yandex.qatools.allure.config.AllureModelUtils;
import ru.yandex.qatools.allure.junit.testdata.SimpleTestClass;
import ru.yandex.qatools.allure.junit.testdata.TestClassWithExceptionInBefore;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Validator;
import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static ru.yandex.qatools.allure.commons.AllureFileUtils.listTestSuiteFiles;
/**
* @author <NAME> <EMAIL>
* Date: 20.01.14
*/
@RunWith(Parameterized.class)
public class AllureListenerXmlValidationTest extends BasicListenerTest {
public Class<?> testClass;
public AllureListenerXmlValidationTest(Class<?> testClass) {
this.testClass = testClass;
}
@Parameterized.Parameters
public static Collection<Object[]> data() {
return Arrays.asList(
new Object[]{SimpleTestClass.class},
new Object[]{TestClassWithExceptionInBefore.class}
);
}
@Test
public void suiteFilesCountTest() throws Exception {
assertThat(listTestSuiteFiles(resultsDirectory).size(), is(1));
}
@Test
public void validateSuiteFilesTest() throws Exception {
Validator validator = AllureModelUtils.getAllureSchemaValidator();
for (File each : listTestSuiteFiles(resultsDirectory)) {
validator.validate(new StreamSource(each));
}
}
@Override
public Class<?> getTestClass() {
return testClass;
}
}
| 719 |
587 | import test_util
import os
def before_feature(context, feature):
print("\nRunning go build")
cmd = ["go", "build", "../dump_db_stats.go"]
test_util.cli_call(context, cmd, expect_success=True)
print("go build complete")
def after_feature(context, feature):
print("Deleting utility binary")
os.remove("./dump_db_stats")
| 124 |
30,023 | """Calculates mold growth indication from temperature and humidity."""
| 14 |
488 | <reponame>ouankou/rose
#include "sage3basic.h"
#include "DLX/TileK/language.hpp"
#include "KLT/TileK/generator-basic.hpp"
namespace KLT {
namespace TileK {
void Generator::addUserStaticData(
MFB::Driver<MFB::Sage> & driver,
const std::string & klt_rtl_path, const std::string & user_rtl_path,
const std::string & static_file_name, MFB::file_id_t static_file_id,
const std::string & kernel_file_name, MFB::file_id_t kernel_file_id
) {}
Generator::Generator(MFB::Driver<MFB::KLT::KLT> & driver, ::MDCG::Tools::ModelBuilder & model_builder) :
KLT::Generator(driver, model_builder)
{}
void Generator::loadExtraModel(const std::string & usr_inc_dir) {
model_builder.add(tilek_model, "tilek-rtl", usr_inc_dir + "/RTL/Host", "h");
}
void Generator::insertUserConfig(::DLX::TileK::language_t::directive_t * directive, SgVariableSymbol * kernel_sym, KLT::API::host_t * host_api_, SgScopeStatement * scope) {}
std::string Generator::kernel_file_tag("kernel");
std::string Generator::kernel_file_ext("c");
std::string Generator::static_file_tag("static");
std::string Generator::static_file_ext("c");
} // namespace KLT::TileK
} // namespace KLT
| 435 |
1,338 | /*
* Copyright 2001-2008 pinc Software. All Rights Reserved.
* Released under the terms of the MIT license.
*/
//! BFS Inode classes
#include "Inode.h"
#include "BPlusTree.h"
#include <Directory.h>
#include <SymLink.h>
#include <Entry.h>
#include <Path.h>
#include <String.h>
#include <new>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
class NodeGetter {
public:
NodeGetter(Inode* inode)
:
fInode(inode)
{
fInode->AcquireBuffer();
}
~NodeGetter()
{
fInode->ReleaseBuffer();
}
private:
Inode* fInode;
};
// #pragma mark -
Inode::Inode(Disk* disk, bfs_inode* inode, bool ownBuffer)
:
fDisk(disk),
fInode(inode),
fOwnBuffer(ownBuffer),
fPath(NULL),
fRefCount(1),
fCurrentSmallData(NULL),
fAttributes(NULL),
fAttributeBuffer(NULL)
{
if (inode != NULL)
fBlockRun = inode->inode_num;
}
Inode::Inode(const Inode& inode)
:
fDisk(inode.fDisk),
fInode(inode.fInode),
fOwnBuffer(false),
fPath(NULL),
fBlockRun(inode.fBlockRun),
fRefCount(1),
fCurrentSmallData(NULL),
fAttributes(NULL),
fAttributeBuffer(NULL)
{
}
Inode::~Inode()
{
_Unset();
}
void
Inode::_Unset()
{
if (fOwnBuffer)
free(fInode);
fInode = NULL;
fBlockRun.SetTo(0, 0, 0);
free(fPath);
fPath = NULL;
delete fAttributes;
fAttributes = NULL;
}
status_t
Inode::SetTo(bfs_inode *inode)
{
_Unset();
fInode = inode;
fBlockRun = inode->inode_num;
return B_OK;
}
status_t
Inode::InitCheck() const
{
if (!fInode)
return B_ERROR;
// test inode magic and flags
if (fInode->magic1 != INODE_MAGIC1
|| !(fInode->flags & INODE_IN_USE)
|| fInode->inode_num.length != 1)
return B_ERROR;
if (fDisk->BlockSize()) {
// matches known block size?
if (fInode->inode_size != fDisk->SuperBlock()->inode_size
// parent resides on disk?
|| fInode->parent.allocation_group > fDisk->SuperBlock()->num_ags
|| fInode->parent.allocation_group < 0
|| fInode->parent.start > (1L << fDisk->SuperBlock()->ag_shift)
|| fInode->parent.length != 1
// attributes, too?
|| fInode->attributes.allocation_group > fDisk->SuperBlock()->num_ags
|| fInode->attributes.allocation_group < 0
|| fInode->attributes.start > (1L << fDisk->SuperBlock()->ag_shift))
return B_ERROR;
} else {
// is inode size one of the valid values?
switch (fInode->inode_size) {
case 1024:
case 2048:
case 4096:
case 8192:
break;
default:
return B_ERROR;
}
}
return B_OK;
	// is inode on a boundary matching its size?
//return (Offset() % fInode->inode_size) == 0 ? B_OK : B_ERROR;
}
status_t
Inode::CopyBuffer()
{
if (!fInode)
return B_ERROR;
bfs_inode *buffer = (bfs_inode *)malloc(fInode->inode_size);
if (!buffer)
return B_NO_MEMORY;
memcpy(buffer, fInode, fInode->inode_size);
fInode = buffer;
fOwnBuffer = true;
BufferClobbered();
// this must not be deleted anymore
return B_OK;
}
/*static*/ bool
Inode::_LowMemory()
{
static bigtime_t lastChecked;
static int32 percentUsed;
	if (system_time() > lastChecked + 1000000LL) {
		// refresh the cached value at most once per second
		lastChecked = system_time();

		system_info info;
		get_system_info(&info);
		percentUsed = 100 * info.used_pages / info.max_pages;
	}
return percentUsed > 75;
}
void
Inode::ReleaseBuffer()
{
if (atomic_add(&fRefCount, -1) != 1)
return;
if (fOwnBuffer) {
if (!_LowMemory())
return;
free(fInode);
fInode = NULL;
}
}
status_t
Inode::AcquireBuffer()
{
if (atomic_add(&fRefCount, 1) != 0)
return B_OK;
if (!fOwnBuffer || fInode != NULL)
return B_OK;
fInode = (bfs_inode*)malloc(fDisk->BlockSize());
if (fInode == NULL)
return B_NO_MEMORY;
ssize_t bytesRead = fDisk->ReadAt(Offset(), fInode, fDisk->BlockSize());
if (bytesRead < B_OK)
return bytesRead;
return B_OK;
}
void
Inode::BufferClobbered()
{
AcquireBuffer();
}
void
Inode::SetParent(const block_run& run)
{
fInode->parent = run;
BufferClobbered();
}
void
Inode::SetBlockRun(const block_run& run)
{
fInode->inode_num = run;
fBlockRun = run;
BufferClobbered();
}
void
Inode::SetMode(uint32 mode)
{
fInode->mode = mode;
BufferClobbered();
}
status_t
Inode::SetName(const char *name)
{
if (name == NULL || *name == '\0')
return B_BAD_VALUE;
small_data *data = fInode->small_data_start, *nameData = NULL;
BufferClobbered();
while (!data->IsLast(fInode)) {
if (data->type == FILE_NAME_TYPE
&& data->name_size == FILE_NAME_NAME_LENGTH
&& *data->Name() == FILE_NAME_NAME)
nameData = data;
data = data->Next();
}
int32 oldLength = nameData == NULL ? 0 : nameData->data_size;
int32 newLength = strlen(name) + (nameData == NULL ? sizeof(small_data) + 5 : 0);
	// make sure the renamed entry still fits into the inode's block
	if ((addr_t)data + newLength - oldLength
		>= (addr_t)fInode + fDisk->BlockSize())
return B_NO_MEMORY;
if (nameData == NULL) {
memmove(newLength + (uint8 *)fInode->small_data_start,
fInode->small_data_start,
(addr_t)data - (addr_t)fInode->small_data_start);
nameData = fInode->small_data_start;
} else {
memmove(newLength + (uint8 *)nameData, nameData,
(addr_t)data - (addr_t)fInode->small_data_start);
}
memset(nameData, 0, sizeof(small_data) + 5 + strlen(name));
nameData->type = FILE_NAME_TYPE;
nameData->name_size = FILE_NAME_NAME_LENGTH;
nameData->data_size = strlen(name);
*nameData->Name() = FILE_NAME_NAME;
strcpy((char *)nameData->Data(),name);
return B_OK;
}
const char *
Inode::Name() const
{
if (InitCheck() != B_OK) {
puts("Not getting name because node is invalid");
return NULL;
}
small_data *data = fInode->small_data_start;
while (!data->IsLast(fInode)) {
if (data->type == FILE_NAME_TYPE
&& data->name_size == FILE_NAME_NAME_LENGTH
&& *data->Name() == FILE_NAME_NAME)
return (const char *)data->Data();
data = data->Next();
}
return NULL;
}
status_t
Inode::GetNextSmallData(small_data **smallData)
{
if (!fInode)
return B_ERROR;
small_data *data = *smallData;
// begin from the start?
if (data == NULL)
data = fInode->small_data_start;
else
data = data->Next();
// is already last item?
if (data->IsLast(fInode))
return B_ENTRY_NOT_FOUND;
*smallData = data;
return B_OK;
}
status_t
Inode::RewindAttributes()
{
fCurrentSmallData = NULL;
if (fAttributes != NULL)
fAttributes->Rewind();
return B_OK;
}
status_t
Inode::GetNextAttribute(char *name, uint32 *type, void **data, size_t *length)
{
// read attributes out of the small data section
if (fCurrentSmallData == NULL || !fCurrentSmallData->IsLast(fInode)) {
if (fCurrentSmallData == NULL)
fCurrentSmallData = fInode->small_data_start;
else
fCurrentSmallData = fCurrentSmallData->Next();
// skip name attribute
if (!fCurrentSmallData->IsLast(fInode)
&& fCurrentSmallData->name_size == FILE_NAME_NAME_LENGTH
&& *fCurrentSmallData->Name() == FILE_NAME_NAME)
fCurrentSmallData = fCurrentSmallData->Next();
if (!fCurrentSmallData->IsLast(fInode)) {
strncpy(name,fCurrentSmallData->Name(), B_FILE_NAME_LENGTH);
*type = fCurrentSmallData->type;
*data = fCurrentSmallData->Data();
*length = fCurrentSmallData->data_size;
return B_OK;
}
}
// read attributes out of the attribute directory
if (Attributes().IsZero())
return B_ENTRY_NOT_FOUND;
if (fAttributes == NULL)
fAttributes = (Directory *)Inode::Factory(fDisk, Attributes());
status_t status = fAttributes ? fAttributes->InitCheck() : B_ERROR;
if (status < B_OK)
return status;
block_run run;
status = fAttributes->GetNextEntry(name, &run);
if (status < B_OK) {
free(fAttributeBuffer);
fAttributeBuffer = NULL;
return status;
}
Attribute *attribute = (Attribute *)Inode::Factory(fDisk, run);
if (attribute == NULL || attribute->InitCheck() < B_OK)
return B_IO_ERROR;
*type = attribute->Type();
void *buffer = realloc(fAttributeBuffer, attribute->Size());
if (buffer == NULL) {
free(fAttributeBuffer);
fAttributeBuffer = NULL;
delete attribute;
return B_NO_MEMORY;
}
fAttributeBuffer = buffer;
ssize_t size = attribute->Read(fAttributeBuffer, attribute->Size());
delete attribute;
*length = size;
*data = fAttributeBuffer;
return size < B_OK ? size : B_OK;
}
status_t
Inode::_FindPath(Inode::Source *source)
{
BString path;
block_run parent = Parent();
while (!parent.IsZero() && parent != fDisk->Root()) {
Inode *inode;
if (source)
inode = source->InodeAt(parent);
else
inode = Inode::Factory(fDisk, parent);
if (inode == NULL
|| inode->InitCheck() < B_OK
|| inode->Name() == NULL
|| !*inode->Name()) {
BString sub;
sub << "__recovered " << parent.allocation_group << ":"
<< (int32)parent.start << "/";
path.Prepend(sub);
delete inode;
break;
}
parent = inode->Parent();
path.Prepend("/");
path.Prepend(inode->Name());
delete inode;
}
fPath = strdup(path.String());
return B_OK;
}
const char *
Inode::Path(Inode::Source *source)
{
if (fPath == NULL)
_FindPath(source);
return fPath;
}
status_t
Inode::CopyTo(const char *root, bool fullPath, Inode::Source *source)
{
if (root == NULL)
return B_ENTRY_NOT_FOUND;
BString path;
if (fullPath)
path.Append(Path(source));
if (*(root + strlen(root) - 1) != '/')
path.Prepend("/");
path.Prepend(root);
return create_directory(path.String(), 0777);
}
status_t
Inode::CopyAttributesTo(BNode *node)
{
// copy attributes
RewindAttributes();
char name[B_FILE_NAME_LENGTH];
const uint32 kMaxBrokenAttributes = 64;
// sanity max value
uint32 count = 0;
uint32 type;
void *data;
size_t size;
status_t status;
while ((status = GetNextAttribute(name, &type, &data, &size))
!= B_ENTRY_NOT_FOUND) {
if (status != B_OK) {
printf("could not open attribute (possibly: %s): %s!\n",
name, strerror(status));
if (count++ > kMaxBrokenAttributes)
break;
continue;
}
ssize_t written = node->WriteAttr(name, type, 0, data, size);
if (written < B_OK) {
printf("could not write attribute \"%s\": %s\n", name,
strerror(written));
} else if ((size_t)written < size) {
printf("could only write %ld bytes (from %ld) at attribute \"%s\"\n",
written, size, name);
}
}
// copy stats
node->SetPermissions(fInode->mode);
node->SetOwner(fInode->uid);
node->SetGroup(fInode->gid);
node->SetModificationTime(fInode->last_modified_time >> 16);
node->SetCreationTime(fInode->create_time >> 16);
return B_OK;
}
Inode *
Inode::Factory(Disk *disk, bfs_inode *inode, bool ownBuffer)
{
// attributes (of a file)
if ((inode->mode & (S_ATTR | S_ATTR_DIR)) == S_ATTR)
return new Attribute(disk, inode, ownBuffer);
// directories, attribute directories, indices
if (S_ISDIR(inode->mode) || inode->mode & S_ATTR_DIR)
return new Directory(disk, inode, ownBuffer);
// regular files
if (S_ISREG(inode->mode))
return new File(disk, inode, ownBuffer);
// symlinks (short and link in data-stream)
if (S_ISLNK(inode->mode))
return new Symlink(disk, inode, ownBuffer);
return NULL;
}
Inode *
Inode::Factory(Disk *disk, block_run run)
{
bfs_inode *inode = (bfs_inode *)malloc(disk->BlockSize());
if (!inode)
return NULL;
if (disk->ReadAt(disk->ToOffset(run), inode, disk->BlockSize()) <= 0)
return NULL;
Inode *object = Factory(disk, inode);
if (object == NULL)
free(inode);
return object;
}
Inode *
Inode::Factory(Disk *disk, Inode *inode, bool copyBuffer)
{
bfs_inode *inodeBuffer = inode->fInode;
if (copyBuffer) {
bfs_inode *inodeCopy = (bfs_inode *)malloc(inodeBuffer->inode_size);
if (!inodeCopy)
return NULL;
memcpy(inodeCopy, inodeBuffer, inodeBuffer->inode_size);
inodeBuffer = inodeCopy;
}
return Factory(disk, inodeBuffer, copyBuffer);
}
Inode *
Inode::EmptyInode(Disk *disk, const char *name, int32 mode)
{
bfs_inode *inode = (bfs_inode *)malloc(disk->BlockSize());
if (!inode)
return NULL;
memset(inode, 0, sizeof(bfs_inode));
inode->magic1 = INODE_MAGIC1;
inode->inode_size = disk->BlockSize();
inode->mode = mode;
inode->flags = INODE_IN_USE | (mode & S_IFDIR ? INODE_LOGGED : 0);
if (name) {
small_data *data = inode->small_data_start;
data->type = FILE_NAME_TYPE;
data->name_size = FILE_NAME_NAME_LENGTH;
*data->Name() = FILE_NAME_NAME;
data->data_size = strlen(name);
strcpy((char *)data->Data(), name);
}
Inode *object = new (std::nothrow) Inode(disk, inode);
if (object == NULL) {
free(inode);
return NULL;
}
object->AcquireBuffer();
// this must not be deleted anymore!
return object;
}
// #pragma mark -
DataStream::DataStream(Disk *disk, bfs_inode *inode, bool ownBuffer)
: Inode(disk,inode,ownBuffer),
fCurrent(-1),
fPosition(0LL)
{
}
DataStream::DataStream(const Inode &inode)
: Inode(inode),
fCurrent(-1),
fPosition(0LL)
{
}
DataStream::~DataStream()
{
}
status_t
DataStream::FindBlockRun(off_t pos)
{
NodeGetter _(this);
if (pos > fInode->data.size)
return B_ENTRY_NOT_FOUND;
if (fCurrent < 0)
fLevel = 0;
fRunBlockEnd = fCurrent >= 0
? fRunFileOffset + (fRun.length << fDisk->BlockShift()) : 0LL;
// access in current block run?
if (fCurrent >= 0 && pos >= fRunFileOffset && pos < fRunBlockEnd)
return B_OK;
// find matching block run
if (fInode->data.max_direct_range > 0
&& pos >= fInode->data.max_direct_range) {
if (fInode->data.max_double_indirect_range > 0
&& pos >= fInode->data.max_indirect_range) {
// read from double indirect blocks
//printf("find double indirect block: %ld,%d!\n",fInode->data.double_indirect.allocation_group,fInode->data.double_indirect.start);
block_run *indirect = (block_run *)fDisk->ReadBlockRun(fInode->data.double_indirect);
if (indirect == NULL)
return B_ERROR;
off_t start = pos - fInode->data.max_indirect_range;
int32 indirectSize = fDisk->BlockSize() * 16 * (fDisk->BlockSize() / sizeof(block_run));
int32 directSize = fDisk->BlockSize() * 4;
int32 index = start / indirectSize;
//printf("\tstart = %Ld, indirectSize = %ld, directSize = %ld, index = %ld\n",start,indirectSize,directSize,index);
//printf("\tlook for indirect block at %ld,%d\n",indirect[index].allocation_group,indirect[index].start);
indirect = (block_run *)fDisk->ReadBlockRun(indirect[index]);
if (indirect == NULL)
return B_ERROR;
fCurrent = (start % indirectSize) / directSize;
fRunFileOffset = fInode->data.max_indirect_range + (index * indirectSize) + (fCurrent * directSize);
fRunBlockEnd = fRunFileOffset + directSize;
fRun = indirect[fCurrent];
//printf("\tfCurrent = %ld, fRunFileOffset = %Ld, fRunBlockEnd = %Ld, fRun = %ld,%d\n",fCurrent,fRunFileOffset,fRunBlockEnd,fRun.allocation_group,fRun.start);
} else {
// access from indirect blocks
block_run *indirect = (block_run *)fDisk->ReadBlockRun(fInode->data.indirect);
if (!indirect)
return B_ERROR;
int32 indirectRuns = (fInode->data.indirect.length << fDisk->BlockShift()) / sizeof(block_run);
if (fLevel != 1 || pos < fRunFileOffset) {
fRunBlockEnd = fInode->data.max_direct_range;
fCurrent = -1;
fLevel = 1;
}
while (++fCurrent < indirectRuns) {
if (indirect[fCurrent].IsZero())
break;
fRunFileOffset = fRunBlockEnd;
fRunBlockEnd += indirect[fCurrent].length << fDisk->BlockShift();
if (fRunBlockEnd > pos)
break;
}
if (fCurrent == indirectRuns || indirect[fCurrent].IsZero())
return B_ERROR;
fRun = indirect[fCurrent];
//printf("reading from indirect block: %ld,%d\n",fRun.allocation_group,fRun.start);
//printf("### indirect-run[%ld] = (%ld,%d,%d), offset = %Ld\n",fCurrent,fRun.allocation_group,fRun.start,fRun.length,fRunFileOffset);
}
} else {
// access from direct blocks
if (fRunFileOffset > pos) {
fRunBlockEnd = 0LL;
fCurrent = -1;
}
fLevel = 0;
while (++fCurrent < NUM_DIRECT_BLOCKS) {
if (fInode->data.direct[fCurrent].IsZero())
break;
fRunFileOffset = fRunBlockEnd;
fRunBlockEnd += fInode->data.direct[fCurrent].length << fDisk->BlockShift();
if (fRunBlockEnd > pos)
break;
}
if (fCurrent == NUM_DIRECT_BLOCKS || fInode->data.direct[fCurrent].IsZero())
return B_ERROR;
fRun = fInode->data.direct[fCurrent];
//printf("### run[%ld] = (%ld,%d,%d), offset = %Ld\n",fCurrent,fRun.allocation_group,fRun.start,fRun.length,fRunFileOffset);
}
return B_OK;
}
ssize_t
DataStream::ReadAt(off_t pos, void *buffer, size_t size)
{
NodeGetter _(this);
//printf("DataStream::ReadAt(pos = %Ld,buffer = %p,size = %ld);\n",pos,buffer,size);
// truncate size to read
if (pos + (off_t)size > fInode->data.size) {
if (pos > fInode->data.size) // reading outside the file
return B_ERROR;
size = fInode->data.size - pos;
if (!size) // there is nothing left to read
return 0;
}
ssize_t read = 0;
//printf("### read %ld bytes at %Ld\n",size,pos);
while (size > 0) {
status_t status = FindBlockRun(pos);
if (status < B_OK)
return status;
ssize_t bytes = min_c((off_t)size, fRunBlockEnd - pos);
//printf("### read %ld bytes from %Ld\n### --\n",bytes,fDisk->ToOffset(fRun) + pos - fRunFileOffset);
bytes = fDisk->ReadAt(fDisk->ToOffset(fRun) + pos - fRunFileOffset,
buffer, bytes);
if (bytes <= 0) {
if (bytes == 0) {
printf("could not read bytes at: %" B_PRId32 ",%d\n",
fRun.allocation_group, fRun.start);
}
return bytes < 0 ? bytes : B_BAD_DATA;
}
buffer = (void *)((uint8 *)buffer + bytes);
size -= bytes;
pos += bytes;
read += bytes;
}
if (read >= 0)
return read;
return B_IO_ERROR;
}
ssize_t
DataStream::WriteAt(off_t pos, const void *buffer, size_t size)
{
NodeGetter _(this);
// FIXME: truncate size -> should enlargen the file
if (pos + (off_t)size > fInode->data.size) {
if (pos > fInode->data.size) // writing outside the file
return B_ERROR;
size = fInode->data.size - pos;
if (!size) // there is nothing left to write
return 0;
}
ssize_t written = 0;
//printf("### write %ld bytes at %Ld\n",size,pos);
while (size > 0) {
status_t status = FindBlockRun(pos);
if (status < B_OK)
return status;
ssize_t bytes = min_c((off_t)size, fRunBlockEnd - pos);
//printf("### write %ld bytes to %Ld\n### --\n",bytes,fDisk->ToOffset(fRun) + pos - fRunFileOffset);
bytes = fDisk->WriteAt(fDisk->ToOffset(fRun) + pos - fRunFileOffset,buffer,bytes);
if (bytes < 0)
return bytes;
buffer = (void *)((uint8 *)buffer + bytes);
size -= bytes;
pos += bytes;
written += bytes;
}
if (written >= 0)
return written;
return B_IO_ERROR;
}
off_t
DataStream::Seek(off_t position, uint32 seekMode)
{
NodeGetter _(this);
if (seekMode == SEEK_SET)
fPosition = position;
else if (seekMode == SEEK_END)
fPosition = fInode->data.size + position;
else
fPosition += position;
return fPosition;
}
off_t
DataStream::Position() const
{
return fPosition;
}
status_t
DataStream::SetSize(off_t size)
{
NodeGetter _(this);
// FIXME: not yet supported
if (size > fInode->data.size || size > fInode->data.max_direct_range)
return B_ERROR;
if (size == fInode->data.size)
return B_OK;
BufferClobbered();
fInode->data.size = size;
fInode->data.max_direct_range = size;
fInode->data.max_indirect_range = 0;
fInode->data.max_double_indirect_range = 0;
for (int32 i = 0;i < NUM_DIRECT_BLOCKS;i++) {
if (size <= 0)
fInode->data.direct[i].SetTo(0, 0, 0);
else if ((fInode->data.direct[i].length << fDisk->BlockShift()) >= size) {
off_t blocks = (size + fDisk->BlockSize() - 1) / fDisk->BlockSize();
fInode->data.direct[i].length = blocks;
size = 0;
} else
size -= fInode->data.direct[i].length << fDisk->BlockShift();
}
return B_OK;
}
// #pragma mark -
File::File(Disk *disk, bfs_inode *inode,bool ownBuffer)
: DataStream(disk,inode,ownBuffer)
{
}
File::File(const Inode &inode)
: DataStream(inode)
{
}
File::~File()
{
}
status_t
File::InitCheck() const
{
status_t status = DataStream::InitCheck();
if (status == B_OK)
return IsFile() ? B_OK : B_ERROR;
return status;
}
status_t
File::CopyTo(const char *root, bool fullPath, Inode::Source *source)
{
status_t status = Inode::CopyTo(root, fullPath, source);
if (status < B_OK)
return status;
BPath path(root);
if (fullPath && Path(source))
path.Append(Path(source));
char *name = (char *)Name();
if (name != NULL) {
// changes the filename in the inode buffer (for deleted entries)
if (!*name)
*name = '_';
path.Append(name);
} else {
BString sub;
sub << "__untitled " << BlockRun().allocation_group << ":"
<< (int32)BlockRun().start;
path.Append(sub.String());
}
printf("%" B_PRId32 ",%d -> %s\n", BlockRun().allocation_group,
BlockRun().start, path.Path());
BFile file;
status = file.SetTo(path.Path(),
B_WRITE_ONLY | B_CREATE_FILE | B_FAIL_IF_EXISTS);
if (status < B_OK)
return status;
char buffer[fDisk->BlockSize()];
ssize_t size;
Seek(0, SEEK_SET);
while ((size = Read(buffer, sizeof(buffer))) > B_OK) {
ssize_t written = file.Write(buffer, size);
if (written < B_OK)
return written;
}
return CopyAttributesTo(&file);
}
// #pragma mark -
Attribute::Attribute(Disk *disk, bfs_inode *inode, bool ownBuffer)
: File(disk, inode, ownBuffer)
{
}
Attribute::Attribute(const Inode &inode)
: File(inode)
{
}
Attribute::~Attribute()
{
}
status_t
Attribute::InitCheck() const
{
status_t status = DataStream::InitCheck();
if (status == B_OK)
return IsAttribute() ? B_OK : B_ERROR;
return status;
}
status_t
Attribute::CopyTo(const char */*path*/, bool /*fullPath*/,
Inode::Source */*source*/)
{
// files and directories already copy all attributes
// eventually, this method should be implemented to recover lost
// attributes on the disk
return B_OK;
}
// #pragma mark -
Directory::Directory(Disk *disk, bfs_inode *inode, bool ownBuffer)
: DataStream(disk, inode, ownBuffer),
fTree(NULL)
{
}
Directory::Directory(const Inode &inode)
: DataStream(inode),
fTree(NULL)
{
}
Directory::~Directory()
{
delete fTree;
}
status_t
Directory::InitCheck() const
{
status_t status = DataStream::InitCheck();
if (status == B_OK)
return (IsDirectory() || IsAttributeDirectory()) ? B_OK : B_ERROR;
return status;
}
status_t
Directory::CopyTo(const char *root, bool fullPath, Inode::Source *source)
{
// don't copy attributes or indices
// the recovery program should make empty files to recover lost attributes
if (IsAttributeDirectory() || IsIndex())
return B_OK;
status_t status = Inode::CopyTo(root, fullPath, source);
if (status < B_OK)
return status;
BPath path(root);
if (fullPath && Path(source))
path.Append(Path(source));
char *name = (char *)Name();
if (name != NULL) {
// changes the filename in the inode buffer (for deleted entries)
if (!*name)
*name = '_';
path.Append(name);
} else {
// create unique name
BString sub;
sub << "__untitled " << BlockRun().allocation_group << ":"
<< (int32)BlockRun().start;
path.Append(sub.String());
}
BEntry entry(path.Path());
BDirectory directory;
if ((status = entry.GetParent(&directory)) < B_OK)
return status;
status = directory.CreateDirectory(path.Leaf(), NULL);
if (status < B_OK && status != B_FILE_EXISTS)
return status;
if ((status = directory.SetTo(&entry)) < B_OK)
return status;
return CopyAttributesTo(&directory);
}
status_t
Directory::Rewind()
{
if (!fTree) {
status_t status = CreateTree();
if (status < B_OK)
return status;
}
return fTree->Rewind();
}
status_t
Directory::GetNextEntry(char *name, block_run *run)
{
status_t status;
if (!fTree) {
if ((status = Rewind()) < B_OK)
return status;
}
uint16 length;
off_t offset;
if ((status = fTree->GetNextEntry(name, &length, B_FILE_NAME_LENGTH - 1,
&offset)) < B_OK)
return status;
*run = fDisk->ToBlockRun(offset);
return B_OK;
}
status_t
Directory::GetNextEntry(block_run *run)
{
char name[B_FILE_NAME_LENGTH];
return GetNextEntry(name, run);
}
status_t
Directory::Contains(const block_run *run)
{
status_t status;
if (!fTree) {
if ((status = Rewind()) < B_OK)
return status;
}
block_run searchRun;
while (GetNextEntry(&searchRun) == B_OK) {
if (searchRun == *run)
return B_OK;
}
return B_ENTRY_NOT_FOUND;
}
status_t
Directory::Contains(const Inode *inode)
{
status_t status;
if (!fTree) {
if ((status = CreateTree()) < B_OK)
return status;
}
off_t value;
const char *name = inode->Name();
status = B_ENTRY_NOT_FOUND;
if (name && (status = fTree->Find((uint8 *)name, (uint16)strlen(name),
&value)) == B_OK) {
if (fDisk->ToBlockRun(value) == inode->InodeBuffer()->inode_num)
return B_OK;
		printf("inode addresses do not match (%s)!\n", inode->Name());
}
if (status != B_OK && status != B_ENTRY_NOT_FOUND)
return status;
return Contains(&inode->InodeBuffer()->inode_num);
}
status_t
Directory::FindEntry(const char *name, block_run *run)
{
status_t status;
if (!name)
return B_BAD_VALUE;
if (!fTree) {
if ((status = CreateTree()) < B_OK)
return status;
}
off_t value;
if ((status = fTree->Find((uint8 *)name, (uint16)strlen(name),
&value)) >= B_OK) {
if (run)
*run = fDisk->ToBlockRun(value);
return B_OK;
}
return status;
}
status_t
Directory::AddEntry(Inode *inode)
{
status_t status;
bool created = false;
if (!fTree) {
status = CreateTree();
if (status == B_OK)
status = fTree->Validate();
if (status == B_BAD_DATA) {
//puts("bplustree corrupted!");
fTree = new BPlusTree(BPLUSTREE_STRING_TYPE, BPLUSTREE_NODE_SIZE,
false);
if ((status = fTree->InitCheck()) < B_OK) {
delete fTree;
fTree = NULL;
} else
created = true;
}
if (status < B_OK)
return status;
}
// keep all changes in memory
fTree->SetHoldChanges(true);
if (created) {
// add . and ..
fTree->Insert(".", Block());
fTree->Insert("..", fDisk->ToBlock(Parent()));
}
if (inode->Flags() & INODE_DELETED)
return B_ENTRY_NOT_FOUND;
BString name = inode->Name();
if (name == "") {
name << "__file " << inode->BlockRun().allocation_group << ":"
<< (int32)inode->BlockRun().start;
}
return fTree->Insert(name.String(), inode->Block());
}
status_t
Directory::CreateTree()
{
fTree = new BPlusTree(this);
status_t status = fTree->InitCheck();
if (status < B_OK) {
delete fTree;
fTree = NULL;
return status;
}
return B_OK;
}
status_t
Directory::GetTree(BPlusTree **tree)
{
if (!fTree) {
status_t status = CreateTree();
if (status < B_OK)
return status;
}
*tree = fTree;
return B_OK;
}
// #pragma mark -
Symlink::Symlink(Disk *disk, bfs_inode *inode,bool ownBuffer)
: Inode(disk,inode,ownBuffer)
{
}
Symlink::Symlink(const Inode &inode)
: Inode(inode)
{
}
Symlink::~Symlink()
{
}
status_t
Symlink::InitCheck() const
{
status_t status = Inode::InitCheck();
if (status == B_OK)
return IsSymlink() ? B_OK : B_ERROR;
return status;
}
status_t
Symlink::CopyTo(const char *root, bool fullPath,Inode::Source *source)
{
status_t status = Inode::CopyTo(root,fullPath,source);
if (status < B_OK)
return status;
BPath path(root);
if (fullPath && Path(source))
path.Append(Path(source));
char *name = (char *)Name();
if (name != NULL) {
// changes the filename in the inode buffer (for deleted entries)
if (!*name)
*name = '_';
path.Append(name);
} else {
// create unique name
BString sub;
sub << "__symlink " << BlockRun().allocation_group << ":"
<< (int32)BlockRun().start;
path.Append(sub.String());
}
BEntry entry(path.Path());
BDirectory directory;
if ((status = entry.GetParent(&directory)) < B_OK)
return status;
char to[2048];
if (LinksTo(to,sizeof(to)) < B_OK)
return B_ERROR;
BSymLink link;
status = directory.CreateSymLink(path.Leaf(),to,&link);
if (status < B_OK && status != B_FILE_EXISTS)
return status;
if ((status = link.SetTo(&entry)) < B_OK)
return status;
return CopyAttributesTo(&link);
}
status_t
Symlink::LinksTo(char *to,size_t maxLength)
{
if ((fInode->flags & INODE_LONG_SYMLINK) == 0) {
strcpy(to,fInode->short_symlink);
return B_OK;
}
DataStream stream(*this);
status_t status = stream.InitCheck();
if (status < B_OK)
return status;
status = stream.Read(to,maxLength);
return status < B_OK ? status : B_OK;
}
| 11,365 |
14,668 | <gh_stars>1000+
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_SCREENSHOT_SCREENSHOT_DELEGATE_H_
#define IOS_CHROME_BROWSER_SCREENSHOT_SCREENSHOT_DELEGATE_H_
#import <UIKit/UIKit.h>
@class ScreenshotDelegate;
// TODO(crbug.com/1045560): Refactor this class to not use
// BrowserInterfaceProvider when possible.
@protocol BrowserInterfaceProvider;
// ScreenshotDelegate provides methods for UIScreenshotServiceDelegate to create
// PDF content of the captured window scene.
@interface ScreenshotDelegate : NSObject <UIScreenshotServiceDelegate>
// Init the ScreenshotDelegate and set the |browserInterfaceProvider| to
// generate PDF screenshots from.
- (instancetype)initWithBrowserInterfaceProvider:
(id<BrowserInterfaceProvider>)browserInterfaceProvider
NS_DESIGNATED_INITIALIZER;
- (instancetype)init NS_UNAVAILABLE;
@end
#endif // IOS_CHROME_BROWSER_SCREENSHOT_SCREENSHOT_DELEGATE_H_
| 349 |
536 | <reponame>Han0nly/jazzer<filename>bazel/kotlin.bzl
# Copyright 2021 Code Intelligence GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@io_bazel_rules_kotlin//kotlin:jvm.bzl", "kt_jvm_test")
# A kt_jvm_test wrapped in a java_test for Windows compatibility.
# Workaround for https://github.com/bazelbuild/rules_kotlin/issues/599: rules_kotlin can only create
# a shell wrapper script for Java targets, not the native executable that is required on Windows.
def wrapped_kt_jvm_test(
name,
test_class,
size = None,
tags = None,
timeout = None,
visibility = None,
**kt_jvm_test_args):
kt_jvm_test_name = name + "_kt_"
# Modify a copy of the tags.
kt_jvm_test_tags = list(tags) if tags != None else []
kt_jvm_test_tags.append("manual")
kt_jvm_test(
name = kt_jvm_test_name,
test_class = test_class,
visibility = ["//visibility:private"],
tags = kt_jvm_test_tags,
**kt_jvm_test_args
)
native.java_test(
name = name,
size = size,
tags = tags,
test_class = test_class,
timeout = timeout,
visibility = visibility,
runtime_deps = [
":" + kt_jvm_test_name,
],
)
| 706 |
1,157 | <filename>server/modules/protocol/MariaDB/test/test_parse_kill.cc
/*
* Copyright (c) 2019 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2025-10-11
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/protocol/mariadb/client_connection.hh>
#include <inttypes.h>
#include <string>
#include <maxbase/alloc.h>
#include <maxbase/format.hh>
using kill_type_t = MariaDBClientConnection::kill_type_t;
constexpr auto KT_HARD = MariaDBClientConnection::KT_HARD;
constexpr auto KT_SOFT = MariaDBClientConnection::KT_SOFT;
constexpr auto KT_CONNECTION = MariaDBClientConnection::KT_CONNECTION;
constexpr auto KT_QUERY = MariaDBClientConnection::KT_QUERY;
using Type = MariaDBClientConnection::SpecialQueryDesc::Type;
struct test_t
{
std::string query;
Type type {Type::NONE};
uint64_t correct_id {0};
uint32_t correct_kt {0};
std::string correct_target;
};
int test_one_query(const test_t& test)
{
auto& sql = test.query;
auto len = sql.length();
auto query_desc = MariaDBClientConnection::parse_special_query(sql.c_str(), len);
auto found_type = query_desc.type;
auto found_kt = query_desc.kill_options;
auto found_id = query_desc.kill_id;
auto& found_target = query_desc.target;
std::string errmsg;
if (found_type != test.type)
{
errmsg = mxb::string_printf("Expected type '%i', got '%i'", (int)test.type, (int)found_type);
}
else if (found_kt != test.correct_kt)
{
errmsg = mxb::string_printf("Expected kill type '%u', got '%u'", test.correct_kt, found_kt);
}
else if (found_id != test.correct_id)
{
errmsg = mxb::string_printf("Expected thread id '%lu', got '%lu'", test.correct_id, found_id);
}
else if (found_target != test.correct_target)
{
errmsg = mxb::string_printf("Expected target '%s', got '%s'",
test.correct_target.c_str(), found_target.c_str());
}
if (errmsg.empty())
{
return 0;
}
else
{
printf("Result wrong on query: '%s': %s.\n", sql.c_str(), errmsg.c_str());
return 1;
}
}
int main(int argc, char** argv)
{
mxs_log_init(NULL, ".", MXS_LOG_TARGET_STDOUT);
MariaDBClientConnection::module_init();
    /*
     * Some of the cases below match the kill regex, but reading the thread id fails due to 64-bit
     * integer overflow. In those cases the expected id is 0, as the parser returns that by default;
     * 0 is not a valid connection id.
     */
auto KILL = Type::KILL;
auto NONE = Type::NONE;
auto ROLE = Type::SET_ROLE;
auto DB = Type::USE_DB;
test_t tests[] =
{
{" kill ConNectioN 123 ", KILL, 123, KT_CONNECTION },
{"kIlL coNNectioN 987654321 ;", KILL, 987654321, KT_CONNECTION },
{" Ki5L CoNNectioN 987654321 ", NONE, },
{"1", NONE, },
{"kILL 1 ;", KILL, 1, },
{"\n\t kill \nQueRy 456", KILL, 456, KT_QUERY },
{" A kill 1; ", NONE, },
{" kill connection 1A", NONE, },
{" kill connection 1 A ", NONE, },
{"kill query 7 ; select * ", KILL, 7, KT_QUERY },
// 64-bit integer overflow
{"KIll query 123456789012345678901", KILL, 0, KT_QUERY },
{"KIll query \t \t 21 \n \t ", KILL, 21, KT_QUERY },
{"KIll \t \n \t -6 \n \t ", NONE, },
{"KIll 12345678901234567890123456 \n \t", KILL, },
{"kill ;", NONE, 0, },
{" kill ConNectioN 123 HARD", NONE, 0},
{" kill ConNectioN SOFT 123", NONE, 0, },
{"/* \ncomment1\ncomment2*/ kill HARD ConNectioN 123",
KILL, 123, KT_CONNECTION | KT_HARD},
{"/*** star* *comm///*EnT ****/ \n--linecomment\n /***/kill 123",
KILL, 123, },
{"#line-comment\nkill SOFT ConNectioN 123",
KILL, 123, KT_CONNECTION | KT_SOFT},
{"--line comment USE test;\n #set role my_role\n kill HARD 123",
KILL, 123, KT_HARD },
{" kill SOFT 123", KILL, 123, KT_SOFT },
{"KIll soft query 21 ", KILL, 21, KT_QUERY | KT_SOFT },
{"KIll query soft 21 ", NONE, },
{"KIll query user maxuser ", KILL, 0, KT_QUERY, "maxuser" },
{"KIll user ", NONE, },
{" #line-comment\n KILL 2 /* ab */ ", KILL, 2},
{"KILL 42 \n --ab ", KILL, 42},
{"use ;", NONE, 0, 0, },
{"use db1;", DB, 0, 0, "db1" },
{" SET ASDF;", NONE, 0, 0, },
{"/** comment */ seT RolE my_role ;", ROLE, 0, 0, "my_role" },
};
int result = 0;
for (auto& elem : tests)
{
result += test_one_query(elem);
}
return result;
}
| 3,014 |
313 | """
Module used to produce generalized sql out of given query
"""
import re
import sqlparse
class Generalizator:
"""
Class used to produce generalized sql out of given query
"""
def __init__(self, sql: str = ""):
self._raw_query = sql
# SQL queries normalization (#16)
@staticmethod
def _normalize_likes(sql: str) -> str:
"""
Normalize and wrap LIKE statements
:type sql str
:rtype: str
"""
sql = sql.replace("%", "")
# LIKE '%bot'
sql = re.sub(r"LIKE '[^\']+'", "LIKE X", sql)
# or all_groups LIKE X or all_groups LIKE X
matches = re.finditer(r"(or|and) [^\s]+ LIKE X", sql, flags=re.IGNORECASE)
matches = [match.group(0) for match in matches] if matches else None
if matches:
for match in set(matches):
sql = re.sub(
r"(\s?" + re.escape(match) + ")+", " " + match + " ...", sql
)
return sql
@property
def without_comments(self) -> str:
"""
Removes comments from SQL query
:rtype: str
"""
sql = sqlparse.format(self._raw_query, strip_comments=True)
sql = re.sub(r"\s{2,}", " ", sql)
return sql
@property
def generalize(self) -> str:
"""
Removes most variables from an SQL query
and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
"""
if self._raw_query == "":
return ""
# MW comments
# e.g. /* CategoryDataService::getMostVisited N.N.N.N */
sql = self.without_comments
sql = sql.replace('"', "")
# multiple spaces
sql = re.sub(r"\s{2,}", " ", sql)
# handle LIKE statements
sql = self._normalize_likes(sql)
sql = re.sub(r"\\\\", "", sql)
sql = re.sub(r"\\'", "", sql)
sql = re.sub(r'\\"', "", sql)
sql = re.sub(r"'[^\']*'", "X", sql)
sql = re.sub(r'"[^\"]*"', "X", sql)
# All newlines, tabs, etc replaced by single space
sql = re.sub(r"\s+", " ", sql)
# All numbers => N
sql = re.sub(r"-?[0-9]+", "N", sql)
# WHERE foo IN ('880987','882618','708228','522330')
sql = re.sub(
r" (IN|VALUES)\s*\([^,]+,[^)]+\)", " \\1 (XYZ)", sql, flags=re.IGNORECASE
)
return sql.strip()
| 1,172 |
623 | // Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.patch;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static java.util.stream.Collectors.toList;
import com.google.common.collect.ImmutableSet;
import com.google.gerrit.entities.Patch;
import com.google.gerrit.server.patch.GitPositionTransformer.FileMapping;
import com.google.gerrit.server.patch.GitPositionTransformer.Mapping;
import com.google.gerrit.server.patch.GitPositionTransformer.Range;
import com.google.gerrit.server.patch.GitPositionTransformer.RangeMapping;
import com.google.gerrit.server.patch.filediff.Edit;
import com.google.gerrit.server.patch.filediff.FileEdits;
import java.util.List;
/** Mappings derived from diffs. */
public class DiffMappings {
private DiffMappings() {}
public static Mapping toMapping(PatchListEntry patchListEntry) {
FileMapping fileMapping = toFileMapping(patchListEntry);
ImmutableSet<RangeMapping> rangeMappings = toRangeMappings(patchListEntry);
return Mapping.create(fileMapping, rangeMappings);
}
public static Mapping toMapping(FileEdits fileEdits) {
FileMapping fileMapping = FileMapping.forFile(fileEdits.oldPath(), fileEdits.newPath());
ImmutableSet<RangeMapping> rangeMappings = toRangeMappings(fileEdits.edits());
return Mapping.create(fileMapping, rangeMappings);
}
private static FileMapping toFileMapping(PatchListEntry ple) {
return toFileMapping(ple.getChangeType(), ple.getOldName(), ple.getNewName());
}
private static FileMapping toFileMapping(
Patch.ChangeType changeType, String oldName, String newName) {
switch (changeType) {
case ADDED:
return FileMapping.forAddedFile(newName);
case MODIFIED:
case REWRITE:
return FileMapping.forModifiedFile(newName);
case DELETED:
// Name of deleted file is mentioned as newName.
return FileMapping.forDeletedFile(newName);
case RENAMED:
case COPIED:
return FileMapping.forRenamedFile(oldName, newName);
default:
throw new IllegalStateException("Unmapped diff type: " + changeType);
}
}
private static ImmutableSet<RangeMapping> toRangeMappings(PatchListEntry patchListEntry) {
return toRangeMappings(
patchListEntry.getEdits().stream().map(Edit::fromJGitEdit).collect(toList()));
}
private static ImmutableSet<RangeMapping> toRangeMappings(List<Edit> edits) {
return edits.stream()
.map(
edit ->
RangeMapping.create(
Range.create(edit.beginA(), edit.endA()),
Range.create(edit.beginB(), edit.endB())))
.collect(toImmutableSet());
}
}
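
// An illustrative sketch of how a caller might use DiffMappings, assuming the FileEdits
// instances are produced elsewhere by Gerrit's diff machinery. The helper class and method
// below are illustrative assumptions, not Gerrit API.
class DiffMappingsUsageSketch {
  private DiffMappingsUsageSketch() {}

  /** Derives one position Mapping per file from the given per-file edits. */
  static List<Mapping> toMappings(List<FileEdits> allFileEdits) {
    return allFileEdits.stream().map(DiffMappings::toMapping).collect(toList());
  }
}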
| 1,121 |
711 | <filename>java110-core/src/main/java/com/java110/core/context/AbstractOrderDataFlowContext.java
package com.java110.core.context;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.java110.entity.center.Business;
import com.java110.entity.center.DataFlowLinksCost;
import com.java110.entity.center.DataFlowLog;
import org.springframework.http.ResponseEntity;
import java.util.*;
/**
 * Data flow context
* Created by wuxw on 2018/5/18.
*/
public abstract class AbstractOrderDataFlowContext extends AbstractDataFlowContextPlus implements IOrderDataFlowContext{
protected AbstractOrderDataFlowContext(){}
protected AbstractOrderDataFlowContext(Date startDate, String code){}
/**
     * Build the object information
* @param reqInfo
* @param headerAll
* @return
* @throws Exception
*/
public <T> T builder(String reqInfo, Map<String,String> headerAll) throws Exception{
        //pre-process
        preBuilder(reqInfo, headerAll);
        //invoke doBuilder
        T dataFlowContext = (T)doBuilder(reqInfo, headerAll);
        //post-process
afterBuilder((IOrderDataFlowContext) dataFlowContext);
return dataFlowContext;
}
/**
     * Pre-processing
* @param reqInfo
* @param headerAll
*/
protected void preBuilder(String reqInfo, Map<String,String> headerAll) {
}
/**
     * Build the object
* @param reqInfo
* @param headerAll
* @return
* @throws Exception
*/
public abstract IOrderDataFlowContext doBuilder(String reqInfo, Map<String,String> headerAll) throws Exception;
protected void afterBuilder(IOrderDataFlowContext dataFlowContext){
}
}
| 642 |
3,212 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.util;
import org.apache.commons.lang3.StringUtils;
import javax.servlet.http.HttpServletRequest;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
/**
* Request URI Builder encapsulates URI construction handling supported HTTP proxy request headers
*/
public class RequestUriBuilder {
private final String scheme;
private final String host;
private final int port;
private final String contextPath;
private String path;
private RequestUriBuilder(final String scheme, final String host, final int port, final String contextPath) {
this.scheme = scheme;
this.host = host;
this.port = port;
this.contextPath = contextPath;
}
/**
* Return Builder from HTTP Servlet Request using Scheme, Host, Port, and Context Path reading from headers
*
* @param httpServletRequest HTTP Servlet Request
     * @param allowedContextPaths List of allowed context path values for proxy headers
* @return Request URI Builder
*/
public static RequestUriBuilder fromHttpServletRequest(final HttpServletRequest httpServletRequest, final List<String> allowedContextPaths) {
final String scheme = StringUtils.defaultIfEmpty(WebUtils.determineProxiedScheme(httpServletRequest), httpServletRequest.getScheme());
final String host = WebUtils.determineProxiedHost(httpServletRequest);
final int port = WebUtils.getServerPort(httpServletRequest);
final String contextPath = WebUtils.determineContextPath(httpServletRequest);
WebUtils.verifyContextPath(allowedContextPaths, contextPath);
return new RequestUriBuilder(scheme, host, port, contextPath);
}
/**
* Set Path appended to Context Path on build
*
* @param path Path may be null
* @return Request URI Builder
*/
public RequestUriBuilder path(final String path) {
this.path = path;
return this;
}
/**
* Build URI using configured properties
*
* @return URI
* @throws IllegalArgumentException Thrown on URI syntax exceptions
*/
public URI build() {
final String resourcePath = StringUtils.join(contextPath, path);
try {
return new URI(scheme, null, host, port, resourcePath, null, null);
} catch (final URISyntaxException e) {
throw new IllegalArgumentException("Build URI Failed", e);
}
}
}
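
// An illustrative sketch of how request-handling code behind a reverse proxy might use
// RequestUriBuilder, e.g. to build an absolute URI for a Location header. The example path and
// the origin of allowedContextPaths are illustrative assumptions, not NiFi API.
class RequestUriBuilderUsageSketch {

    private RequestUriBuilderUsageSketch() {
    }

    static URI buildExampleUri(final HttpServletRequest request, final List<String> allowedContextPaths) {
        // Scheme, host, port, and context path are resolved from proxy headers when present, and the
        // context path is verified against the allowed values before the URI is assembled
        return RequestUriBuilder.fromHttpServletRequest(request, allowedContextPaths)
                .path("/nifi-api/example")
                .build();
    }
}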
| 1,044 |
1,584 | <filename>libjulius/src/version.c<gh_stars>1000+
/**
* @file version.c
*
* <JA>
* @brief バージョンおよびコンパイル時設定の出力
*
* </JA>
*
* <EN>
* @brief Output version and compilation-time configuration.
*
* </EN>
*
* @author <NAME>
* @date Mon Sep 12 01:34:15 2005
*
* $Revision: 1.12 $
*
*/
/*
* Copyright (c) 1991-2016 Kawahara Lab., Kyoto University
* Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology
* Copyright (c) 2005-2016 Julius project team, Nagoya Institute of Technology
* All rights reserved
*/
#include <julius/julius.h>
/**
* <JA>
* ヘッダを出力する.
*
* @param strm [in] 出力ストリーム
* </JA>
* <EN>
* Output application header.
*
* @param strm [in] output stream
* </EN>
*/
void
j_put_header(FILE *strm){
if (strm == NULL) return;
fprintf(strm,"%s rev.%s (%s)\n\n", JULIUS_PRODUCTNAME, JULIUS_VERSION, JULIUS_SETUP);
}
/**
* <JA>
* バージョン情報を出力する
*
* @param strm [in] 出力ストリーム
* </JA>
* <EN>
* Output version information.
*
* @param strm [in] output stream
* </EN>
*/
void
j_put_version(FILE *strm){
if (strm == NULL) return;
fprintf(strm,"\n%s rev.%s (%s) built for %s\n\n",
JULIUS_PRODUCTNAME, JULIUS_VERSION, JULIUS_SETUP, JULIUS_HOSTINFO);
fprintf(strm,"Copyright (c) 1991-2020 Kawahara Lab., Kyoto University\n");
fprintf(strm,"Copyright (c) 1997-2000 Information-technology Promotion Agency, Japan\n");
fprintf(strm,"Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology\n");
fprintf(strm,"Copyright (c) 2005-2020 Julius project team, Nagoya Institute of Technology\n\n");
}
/**
* <JA>
* コンパイル時の設定を出力する.
*
* @param strm [in] 入力ストリーム
* </JA>
* <EN>
* Output compile-time settings.
*
* @param strm [in] input stream
* </EN>
*/
void
j_put_compile_defs(FILE *strm){
if (strm == NULL) return;
fprintf(strm,"Engine specification:\n");
fprintf(strm," - Base setup : %s\n", JULIUS_SETUP);
fprintf(strm," - Supported LM : DFA, N-gram, Word\n");
fprintf(strm," - Extension :");
#ifndef UNIGRAM_FACTORING
fprintf(strm, " 2gramFactoring");
#endif
# ifdef GRAPHOUT_DYNAMIC
# ifdef GRAPHOUT_SEARCH
/* this is default */
//fprintf(strm, " GraphOutSearch");
# else
fprintf(strm, " GraphOutNonSearchTermination");
# endif
# else
fprintf(strm, " GraphOutFromNBest");
# endif
# ifndef GRAPHOUT_PRECISE_BOUNDARY
fprintf(strm, " DisableGraphOutPostFitting");
# endif
#ifdef CM_SEARCH_LIMIT
# ifdef CM_SEARCH_LIMIT_AFTER
fprintf(strm, " CMPruning_OnlyAfterReached");
# else
fprintf(strm, " CMPruning");
# endif
# ifdef CM_SEARCH_LIMIT_POP
fprintf(strm, " CMPruningOnPOP");
# endif
#endif
# ifndef LM_FIX_DOUBLE_SCORING
fprintf(strm, " NoLMFix");
# endif
# ifndef CLASS_NGRAM
fprintf(strm, " NoClassNGram");
# endif
#ifdef WORDS_INT
fprintf(strm, " WordsInt");
#endif
# ifdef LOWMEM
fprintf(strm, " SingleTree");
# else
# ifdef LOWMEM2
/* fprintf(strm, " HiFreqLinearTree");*/
# else
fprintf(strm, " ShortWordTree");
# endif
# endif
# ifndef CATEGORY_TREE
//fprintf(strm, " NoCategoryTree");
# endif
#ifdef MONOTREE
fprintf(strm, " MonoTree1");
#endif
#ifndef SCAN_BEAM
fprintf(strm, " NoScoreEnvelope");
#endif
#ifndef PASS1_IWCD
fprintf(strm, " NoIWCD1");
#endif
#ifdef PASS2_STRICT_IWCD
fprintf(strm, " StrictIWCD2");
#endif
#ifdef WPAIR
# ifdef WPAIR_KEEP_NLIMIT
fprintf(strm, " WordPairNApprox");
# else
fprintf(strm, " WordPairApprox");
# endif
#endif
#ifdef WORD_GRAPH
fprintf(strm, " 1stPassWordGraph");
#endif
#ifndef CONFIDENCE_MEASURE
fprintf(strm, " NoCM");
#else
# ifdef CM_NBEST
fprintf(strm, " N-bestCM");
# endif
# ifdef CM_MULTIPLE_ALPHA
fprintf(strm, " MultiCMOutput");
# endif
#endif /* CONFIDENCE_MEASURE */
#ifndef USE_MIC
fprintf(strm, " NoMic");
#endif
#ifdef USE_NETAUDIO
fprintf(strm, " NetAudio");
#endif
#ifndef HAVE_PTHREAD
fprintf(strm, " NoPThread");
#endif
#ifdef HAVE_LIBSNDFILE
fprintf(strm, " LibSndFile");
#endif
#ifdef VISUALIZE
fprintf(strm, " Visualize");
#endif
#ifdef FORK_ADINNET
fprintf(strm, " ForkOnAdinnet");
#endif
#ifndef MFCC_SINCOS_TABLE
fprintf(strm, " DisableMFCCTable");
#endif
#ifndef LM_FIX_DOUBLE_SCORING
fprintf(strm, " DisableLMFix3.4");
#endif
#ifdef USE_LIBJCODE
fprintf(strm, " Libjcode");
#endif
#ifdef HAVE_ICONV
fprintf(strm, " IconvOutput");
#endif
#ifdef GMM_VAD
fprintf(strm, " GMMVAD");
#endif
#ifdef SPSEGMENT_NAIST
fprintf(strm, " DecoderVAD");
#endif
#ifdef POWER_REJECT
fprintf(strm, " PowerReject");
#endif
#ifndef USE_MBR
fprintf(strm," NoMBR");
#endif
fprintf(strm, "\n");
fprintf(strm," - Compiled by : %s\n", JULIUS_BUILD_INFO);
}
/**
* <JA>
* ライブラリの設定を出力する
*
* @param strm [in] 出力ストリーム
* </JA>
* <EN>
* Output library configuration.
*
* @param strm [in] output stream
* </EN>
*/
void
j_put_library_defs(FILE *strm) {
if (strm == NULL) return;
fprintf(strm, "Library configuration: ");
confout(strm);
fprintf(strm, "\n");
}
/* end of file */
| 2,204 |
2,151 | <reponame>bimawa/boringssl<gh_stars>1000+
/* Copyright (c) 2017, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include <openssl/ssl.h>
#include <assert.h>
#include <openssl/bytestring.h>
#include <openssl/err.h>
#include "internal.h"
#include "../crypto/internal.h"
BSSL_NAMESPACE_BEGIN
bool ssl_protocol_version_from_wire(uint16_t *out, uint16_t version) {
switch (version) {
case TLS1_VERSION:
case TLS1_1_VERSION:
case TLS1_2_VERSION:
case TLS1_3_VERSION:
*out = version;
return true;
case TLS1_3_DRAFT23_VERSION:
case TLS1_3_DRAFT28_VERSION:
*out = TLS1_3_VERSION;
return true;
case DTLS1_VERSION:
// DTLS 1.0 is analogous to TLS 1.1, not TLS 1.0.
*out = TLS1_1_VERSION;
return true;
case DTLS1_2_VERSION:
*out = TLS1_2_VERSION;
return true;
default:
return false;
}
}
// The follow arrays are the supported versions for TLS and DTLS, in order of
// decreasing preference.
static const uint16_t kTLSVersions[] = {
TLS1_3_VERSION,
TLS1_3_DRAFT28_VERSION,
TLS1_3_DRAFT23_VERSION,
TLS1_2_VERSION,
TLS1_1_VERSION,
TLS1_VERSION,
};
static const uint16_t kDTLSVersions[] = {
DTLS1_2_VERSION,
DTLS1_VERSION,
};
static void get_method_versions(const SSL_PROTOCOL_METHOD *method,
const uint16_t **out, size_t *out_num) {
if (method->is_dtls) {
*out = kDTLSVersions;
*out_num = OPENSSL_ARRAY_SIZE(kDTLSVersions);
} else {
*out = kTLSVersions;
*out_num = OPENSSL_ARRAY_SIZE(kTLSVersions);
}
}
bool ssl_method_supports_version(const SSL_PROTOCOL_METHOD *method,
uint16_t version) {
const uint16_t *versions;
size_t num_versions;
get_method_versions(method, &versions, &num_versions);
for (size_t i = 0; i < num_versions; i++) {
if (versions[i] == version) {
return true;
}
}
return false;
}
// The following functions map between API versions and wire versions. The
// public API works on wire versions, except that TLS 1.3 draft versions all
// appear as TLS 1.3. This will get collapsed back down when TLS 1.3 is
// finalized.
static const char *ssl_version_to_string(uint16_t version) {
switch (version) {
case TLS1_3_DRAFT23_VERSION:
case TLS1_3_DRAFT28_VERSION:
case TLS1_3_VERSION:
return "TLSv1.3";
case TLS1_2_VERSION:
return "TLSv1.2";
case TLS1_1_VERSION:
return "TLSv1.1";
case TLS1_VERSION:
return "TLSv1";
case DTLS1_VERSION:
return "DTLSv1";
case DTLS1_2_VERSION:
return "DTLSv1.2";
default:
return "unknown";
}
}
static uint16_t wire_version_to_api(uint16_t version) {
switch (version) {
// Report TLS 1.3 draft versions as TLS 1.3 in the public API.
case TLS1_3_DRAFT23_VERSION:
case TLS1_3_DRAFT28_VERSION:
case TLS1_3_VERSION:
return TLS1_3_VERSION;
default:
return version;
}
}
// api_version_to_wire maps |version| to some representative wire version. In
// particular, it picks an arbitrary TLS 1.3 representative. This should only be
// used in context where that does not matter.
static bool api_version_to_wire(uint16_t *out, uint16_t version) {
if (version == TLS1_3_DRAFT23_VERSION ||
version == TLS1_3_DRAFT28_VERSION) {
return false;
}
// Check it is a real protocol version.
uint16_t unused;
if (!ssl_protocol_version_from_wire(&unused, version)) {
return false;
}
*out = version;
return true;
}
static bool set_version_bound(const SSL_PROTOCOL_METHOD *method, uint16_t *out,
uint16_t version) {
if (!api_version_to_wire(&version, version) ||
!ssl_method_supports_version(method, version) ||
!ssl_protocol_version_from_wire(out, version)) {
OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_SSL_VERSION);
return false;
}
return true;
}
static bool set_min_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out,
uint16_t version) {
// Zero is interpreted as the default minimum version.
if (version == 0) {
// TLS 1.0 does not exist in DTLS.
*out = method->is_dtls ? TLS1_1_VERSION : TLS1_VERSION;
return true;
}
return set_version_bound(method, out, version);
}
static bool set_max_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out,
uint16_t version) {
// Zero is interpreted as the default maximum version.
if (version == 0) {
*out = TLS1_2_VERSION;
return true;
}
return set_version_bound(method, out, version);
}
const struct {
uint16_t version;
uint32_t flag;
} kProtocolVersions[] = {
{TLS1_VERSION, SSL_OP_NO_TLSv1},
{TLS1_1_VERSION, SSL_OP_NO_TLSv1_1},
{TLS1_2_VERSION, SSL_OP_NO_TLSv1_2},
{TLS1_3_VERSION, SSL_OP_NO_TLSv1_3},
};
bool ssl_get_version_range(const SSL_HANDSHAKE *hs, uint16_t *out_min_version,
uint16_t *out_max_version) {
// For historical reasons, |SSL_OP_NO_DTLSv1| aliases |SSL_OP_NO_TLSv1|, but
// DTLS 1.0 should be mapped to TLS 1.1.
uint32_t options = hs->ssl->options;
if (SSL_is_dtls(hs->ssl)) {
options &= ~SSL_OP_NO_TLSv1_1;
if (options & SSL_OP_NO_DTLSv1) {
options |= SSL_OP_NO_TLSv1_1;
}
}
uint16_t min_version = hs->config->conf_min_version;
uint16_t max_version = hs->config->conf_max_version;
// QUIC requires TLS 1.3.
if (hs->ssl->ctx->quic_method && min_version < TLS1_3_VERSION) {
min_version = TLS1_3_VERSION;
}
// OpenSSL's API for controlling versions entails blacklisting individual
// protocols. This has two problems. First, on the client, the protocol can
// only express a contiguous range of versions. Second, a library consumer
// trying to set a maximum version cannot disable protocol versions that get
// added in a future version of the library.
//
// To account for both of these, OpenSSL interprets the client-side bitmask
// as a min/max range by picking the lowest contiguous non-empty range of
// enabled protocols. Note that this means it is impossible to set a maximum
  // version of the highest supported TLS version in a future-proof way.
bool any_enabled = false;
for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kProtocolVersions); i++) {
// Only look at the versions already enabled.
if (min_version > kProtocolVersions[i].version) {
continue;
}
if (max_version < kProtocolVersions[i].version) {
break;
}
if (!(options & kProtocolVersions[i].flag)) {
// The minimum version is the first enabled version.
if (!any_enabled) {
any_enabled = true;
min_version = kProtocolVersions[i].version;
}
continue;
}
// If there is a disabled version after the first enabled one, all versions
// after it are implicitly disabled.
if (any_enabled) {
max_version = kProtocolVersions[i-1].version;
break;
}
}
if (!any_enabled) {
OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SUPPORTED_VERSIONS_ENABLED);
return false;
}
*out_min_version = min_version;
*out_max_version = max_version;
return true;
}
static uint16_t ssl_version(const SSL *ssl) {
// In early data, we report the predicted version.
if (SSL_in_early_data(ssl) && !ssl->server) {
return ssl->s3->hs->early_session->ssl_version;
}
return ssl->version;
}
uint16_t ssl_protocol_version(const SSL *ssl) {
assert(ssl->s3->have_version);
uint16_t version;
if (!ssl_protocol_version_from_wire(&version, ssl->version)) {
// |ssl->version| will always be set to a valid version.
assert(0);
return 0;
}
return version;
}
bool ssl_supports_version(SSL_HANDSHAKE *hs, uint16_t version) {
SSL *const ssl = hs->ssl;
uint16_t protocol_version;
if (!ssl_method_supports_version(ssl->method, version) ||
!ssl_protocol_version_from_wire(&protocol_version, version) ||
hs->min_version > protocol_version ||
protocol_version > hs->max_version) {
return false;
}
  // If the TLS 1.3 variant is set to |tls13_all|, all variants are enabled,
// otherwise only the matching version is enabled.
if (protocol_version == TLS1_3_VERSION) {
switch (ssl->tls13_variant) {
case tls13_draft23:
return version == TLS1_3_DRAFT23_VERSION;
case tls13_draft28:
return version == TLS1_3_DRAFT28_VERSION;
case tls13_rfc:
return version == TLS1_3_VERSION;
case tls13_all:
return true;
}
}
return true;
}
bool ssl_add_supported_versions(SSL_HANDSHAKE *hs, CBB *cbb) {
const uint16_t *versions;
size_t num_versions;
get_method_versions(hs->ssl->method, &versions, &num_versions);
for (size_t i = 0; i < num_versions; i++) {
if (ssl_supports_version(hs, versions[i]) &&
!CBB_add_u16(cbb, versions[i])) {
return false;
}
}
return true;
}
bool ssl_negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert,
uint16_t *out_version, const CBS *peer_versions) {
const uint16_t *versions;
size_t num_versions;
get_method_versions(hs->ssl->method, &versions, &num_versions);
for (size_t i = 0; i < num_versions; i++) {
if (!ssl_supports_version(hs, versions[i])) {
continue;
}
CBS copy = *peer_versions;
    while (CBS_len(&copy) != 0) {
      uint16_t version;
      if (!CBS_get_u16(&copy, &version)) {
OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
*out_alert = SSL_AD_DECODE_ERROR;
return false;
}
if (version == versions[i]) {
*out_version = version;
return true;
}
}
}
OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL);
*out_alert = SSL_AD_PROTOCOL_VERSION;
return false;
}
bool ssl_is_draft28(uint16_t version) {
return version == TLS1_3_DRAFT28_VERSION || version == TLS1_3_VERSION;
}
BSSL_NAMESPACE_END
using namespace bssl;
int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version) {
return set_min_version(ctx->method, &ctx->conf_min_version, version);
}
int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version) {
return set_max_version(ctx->method, &ctx->conf_max_version, version);
}
int SSL_set_min_proto_version(SSL *ssl, uint16_t version) {
if (!ssl->config) {
return 0;
}
return set_min_version(ssl->method, &ssl->config->conf_min_version, version);
}
int SSL_set_max_proto_version(SSL *ssl, uint16_t version) {
if (!ssl->config) {
return 0;
}
return set_max_version(ssl->method, &ssl->config->conf_max_version, version);
}
int SSL_version(const SSL *ssl) {
return wire_version_to_api(ssl_version(ssl));
}
const char *SSL_get_version(const SSL *ssl) {
return ssl_version_to_string(ssl_version(ssl));
}
const char *SSL_SESSION_get_version(const SSL_SESSION *session) {
return ssl_version_to_string(session->ssl_version);
}
uint16_t SSL_SESSION_get_protocol_version(const SSL_SESSION *session) {
return wire_version_to_api(session->ssl_version);
}
int SSL_SESSION_set_protocol_version(SSL_SESSION *session, uint16_t version) {
// This picks a representative TLS 1.3 version, but this API should only be
// used on unit test sessions anyway.
return api_version_to_wire(&session->ssl_version, version);
}
| 4,782 |
777 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.browser;
/**
* Used to register listeners that can be notified of changes to the position of a view.
*/
public interface PositionObserver {
public interface Listener {
/**
* Called during predraw if the position of the underlying view has changed.
*/
void onPositionChanged(int positionX, int positionY);
}
/**
* @return The current x position of the observed view.
*/
int getPositionX();
/**
* @return The current y position of the observed view.
*/
int getPositionY();
/**
* Register a listener to be called when the position of the underlying view changes.
*/
void addListener(Listener listener);
/**
* Remove a previously installed listener.
*/
void removeListener(Listener listener);
/**
     * Clears all registered listeners.
*/
void clearListener();
}
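// A minimal usage sketch (not part of Chromium): a helper that registers itself as a
// Listener so dependent UI, e.g. an anchored popup, can follow the observed view. The
// class below is an illustrative assumption, not an API in this package.
class ExamplePositionListener implements PositionObserver.Listener {
    private final PositionObserver mObserver;
    ExamplePositionListener(PositionObserver observer) {
        mObserver = observer;
        mObserver.addListener(this);
    }
    @Override
    public void onPositionChanged(int positionX, int positionY) {
        // React to the observed view moving, e.g. reposition the anchored popup.
    }
    void destroy() {
        mObserver.removeListener(this);
    }
}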
| 350 |
324 | /*
* Copyright 2016-2020 chronicle.software
*
* https://chronicle.software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.openhft.chronicle.bytes;
import java.util.List;
public interface BytesRingBufferStats {
/**
     * Each time the ring is read, the number of bytes remaining in the write buffer is
     * recorded; calling this method resets these statistics.
*
* @return Long.MAX_VALUE if no read calls were made since the last time this method was called.
*/
long minNumberOfWriteBytesRemaining();
/**
* @return the total capacity in bytes
*/
long capacity();
long getAndClearWriteCount();
long getAndClearMissedWriteCount();
long getAndClearContentionCount();
List<RingBufferReaderStats> readers();
}
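// A minimal usage sketch (not part of the API): a monitoring task could drain the
// statistics periodically like this. Printing to System.out is an illustrative
// assumption; real code would publish to its own metrics sink.
class BytesRingBufferStatsSampler {
    void sample(BytesRingBufferStats stats) {
        long minRemaining = stats.minNumberOfWriteBytesRemaining(); // also resets the statistic
        if (minRemaining != Long.MAX_VALUE) {
            double peakUsage = 1.0 - (double) minRemaining / stats.capacity();
            System.out.println("peak ring usage=" + peakUsage
                    + " writes=" + stats.getAndClearWriteCount()
                    + " missedWrites=" + stats.getAndClearMissedWriteCount()
                    + " contentions=" + stats.getAndClearContentionCount());
        }
    }
}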
| 377 |
372 | /****
**** NDR types for the S/390x Architecture - 64bit, Big Endian Mode
**** This is a modified version of the original header.
****/
#ifndef _NDRTYPES_H
#define _NDRTYPES_H
/*
* Use C99 stdint types if possible, as they have known sizes.
* The DCE/RPC porting guide defines the expected size of each
* of these types. Depending on compiler flags, int, long and
* pointer types may be 32 or 64 bit. (See gcc -m31 -m64 options.)
*
* n.b. for gcc this requires passing --std=gnu99/c99 (or greater)
* or else __STDC_VERSION__ is not defined.
*/
#if defined(__STDC__)
# if defined(__STDC_VERSION__)
# if (__STDC_VERSION__ >= 19901L)
# define C_STD_99
# endif
# endif
#endif
#if defined(C_STD_99)
#include <stdint.h>
#endif
typedef unsigned char ndr_boolean;
#define ndr_false false
#define ndr_true true
typedef unsigned char ndr_byte;
typedef unsigned char ndr_char;
typedef signed char ndr_small_int;
typedef unsigned char ndr_usmall_int;
#if defined(C_STD_99)
typedef int16_t ndr_short_int;
typedef uint16_t ndr_ushort_int;
typedef int32_t ndr_long_int;
typedef uint32_t ndr_ulong_int;
typedef int64_t ndr_hyper_int;
typedef uint64_t ndr_uhyper_int;
#else
typedef short int ndr_short_int;
typedef unsigned short int ndr_ushort_int;
typedef int ndr_long_int;
typedef unsigned int ndr_ulong_int;
typedef signed long int ndr_hyper_int;
typedef unsigned long int ndr_uhyper_int;
#endif
typedef float ndr_short_float;
typedef double ndr_long_float;
#endif /* NDRTYPES_H */
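/*
 * A minimal sketch (not part of the original header) of how the sizes expected by the
 * DCE/RPC porting guide could be verified at compile time on a C11 compiler; the
 * assertion messages are illustrative assumptions:
 *
 *   #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
 *   _Static_assert(sizeof(ndr_short_int) == 2, "NDR short must be 16 bits");
 *   _Static_assert(sizeof(ndr_long_int) == 4, "NDR long must be 32 bits");
 *   _Static_assert(sizeof(ndr_hyper_int) == 8, "NDR hyper must be 64 bits");
 *   #endif
 */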
| 689 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-7h75-f67f-9cpp",
"modified": "2022-05-13T01:45:50Z",
"published": "2022-05-13T01:45:50Z",
"aliases": [
"CVE-2017-3650"
],
"details": "Vulnerability in the MySQL Server component of Oracle MySQL (subcomponent: C API). Supported versions that are affected are 5.7.18 and earlier. Difficult to exploit vulnerability allows unauthenticated attacker with network access via multiple protocols to compromise MySQL Server. Successful attacks of this vulnerability can result in unauthorized read access to a subset of MySQL Server accessible data. CVSS 3.0 Base Score 3.7 (Confidentiality impacts). CVSS Vector: (CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:N/A:N).",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:N/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-3650"
},
{
"type": "WEB",
"url": "https://access.redhat.com/errata/RHSA-2017:2886"
},
{
"type": "WEB",
"url": "http://www.oracle.com/technetwork/security-advisory/cpujul2017-3236622.html"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/99808"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id/1038928"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 662 |
1,705 | {
"active": true,
"synopsis": "Amazon Elastic Transcoder is media transcoding in the cloud. It is designed to be a highly scalable, easy to use and a cost effective way for developers and businesses to convert (or 'transcode') media files from their source format into versions that will playback on devices like smartphones, tablets and PCs."
} | 84 |
2,143 | <gh_stars>1000+
package com.tngtech.archunit.core.importer.testexamples;
public enum SomeEnum {
SOME_VALUE, OTHER_VALUE;
void bar() {
}
}
| 63 |
2,504 | //
// MasterDetailPage.xaml.h
// Declaration of the MasterDetailPage class.
//
#pragma once
#include "MasterDetailPage.g.h"
namespace MasterDetailApp
{
/// <summary>
/// An empty page that can be used on its own or navigated to within a Frame.
/// </summary>
public ref class MasterDetailPage sealed
{
public:
MasterDetailPage();
protected:
virtual void OnNavigatedTo(Windows::UI::Xaml::Navigation::NavigationEventArgs^ e) override;
private:
::MasterDetailApp::ViewModels::ItemViewModel^ m_lastSelectedItem;
void UpdateForVisualState(Windows::UI::Xaml::VisualState^ newState, Windows::UI::Xaml::VisualState^ oldState = nullptr);
void EnableContentTransitions();
void DisableContentTransitions();
void LayoutRoot_Loaded(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e);
void AdaptiveStates_CurrentStateChanged(Platform::Object^ sender, Windows::UI::Xaml::VisualStateChangedEventArgs^ e);
void MasterListView_ItemClick(Platform::Object^ sender, Windows::UI::Xaml::Controls::ItemClickEventArgs^ e);
};
}
| 438 |
348 | {"nom":"Quincampoix","circ":"10ème circonscription","dpt":"Seine-Maritime","inscrits":2614,"abs":1287,"votants":1327,"blancs":87,"nuls":15,"exp":1225,"res":[{"nuance":"REM","nom":"<NAME>","voix":921},{"nuance":"FN","nom":"Mme <NAME>","voix":304}]} | 100 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _STOC_RDBTDP_TDENUMERATION_HXX
#define _STOC_RDBTDP_TDENUMERATION_HXX
#include <list>
#include <osl/mutex.hxx>
#include <rtl/ref.hxx>
#include <registry/refltype.hxx>
#include <com/sun/star/container/XHierarchicalNameAccess.hpp>
#include <com/sun/star/reflection/InvalidTypeNameException.hpp>
#include <com/sun/star/reflection/NoSuchTypeNameException.hpp>
#include <com/sun/star/reflection/TypeDescriptionSearchDepth.hpp>
#include <com/sun/star/reflection/XTypeDescriptionEnumeration.hpp>
#include <com/sun/star/uno/Sequence.hxx>
#include <com/sun/star/uno/TypeClass.hpp>
#include <cppuhelper/implbase1.hxx>
#include "base.hxx"
namespace stoc_rdbtdp
{
typedef ::std::list< ::com::sun::star::uno::Reference<
::com::sun::star::reflection::XTypeDescription > > TypeDescriptionList;
class TypeDescriptionEnumerationImpl
: public cppu::WeakImplHelper1<
com::sun::star::reflection::XTypeDescriptionEnumeration >
{
public:
static rtl::Reference< TypeDescriptionEnumerationImpl > createInstance(
const ::com::sun::star::uno::Reference<
::com::sun::star::container::XHierarchicalNameAccess > & xTDMgr,
const rtl::OUString & rModuleName,
const ::com::sun::star::uno::Sequence<
::com::sun::star::uno::TypeClass > & rTypes,
::com::sun::star::reflection::TypeDescriptionSearchDepth eDepth,
const RegistryKeyList & rBaseKeys )
throw ( ::com::sun::star::reflection::NoSuchTypeNameException,
::com::sun::star::reflection::InvalidTypeNameException,
::com::sun::star::uno::RuntimeException );
virtual ~TypeDescriptionEnumerationImpl();
// XEnumeration (base of XTypeDescriptionEnumeration)
virtual sal_Bool SAL_CALL hasMoreElements()
throw ( ::com::sun::star::uno::RuntimeException );
virtual ::com::sun::star::uno::Any SAL_CALL nextElement()
throw ( ::com::sun::star::container::NoSuchElementException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException );
// XTypeDescriptionEnumeration
virtual ::com::sun::star::uno::Reference<
::com::sun::star::reflection::XTypeDescription > SAL_CALL
nextTypeDescription()
throw ( ::com::sun::star::container::NoSuchElementException,
::com::sun::star::uno::RuntimeException );
private:
// Note: keys must be open (XRegistryKey->openKey(...)).
TypeDescriptionEnumerationImpl(
const ::com::sun::star::uno::Reference<
::com::sun::star::container::XHierarchicalNameAccess > & xTDMgr,
const RegistryKeyList & rModuleKeys,
const ::com::sun::star::uno::Sequence<
::com::sun::star::uno::TypeClass > & rTypes,
::com::sun::star::reflection::TypeDescriptionSearchDepth eDepth );
static bool match( ::RTTypeClass eType1,
::com::sun::star::uno::TypeClass eType2 );
bool queryMore();
::com::sun::star::uno::Reference<
::com::sun::star::reflection::XTypeDescription > queryNext();
// members
osl::Mutex m_aMutex;
RegistryKeyList m_aModuleKeys;
RegistryKeyList m_aCurrentModuleSubKeys;
TypeDescriptionList m_aTypeDescs;
::com::sun::star::uno::Sequence<
::com::sun::star::uno::TypeClass > m_aTypes;
::com::sun::star::reflection::TypeDescriptionSearchDepth m_eDepth;
::com::sun::star::uno::Reference<
::com::sun::star::container::XHierarchicalNameAccess > m_xTDMgr;
};
} // namespace stoc_rdbtdp
#endif /* _STOC_RDBTDP_TDENUMERATION_HXX */
| 1,665 |
1,383 | <reponame>Benatti1991/chrono<gh_stars>1000+
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: <NAME>
// =============================================================================
//
// Test of operations with 3d vectors
//
// =============================================================================
#include "gtest/gtest.h"
#include "chrono/core/ChVector.h"
using namespace chrono;
const double ABS_ERR_D = 1e-15;
const float ABS_ERR_F = 1e-6f;
TEST(ChVectorTest, normalize) {
ChVector<> ad(1.1, -2.2, 3.3);
ASSERT_NEAR(ad.GetNormalized().Length(), 1.0, ABS_ERR_D);
ASSERT_TRUE(ad.Normalize());
ASSERT_NEAR(ad.Length(), 1.0, ABS_ERR_D);
ChVector<> bd(0.0);
ASSERT_FALSE(bd.Normalize());
ChVector<float> af(1.1f, -2.2f, 3.3f);
ASSERT_NEAR(af.GetNormalized().Length(), 1.0f, ABS_ERR_F);
ASSERT_TRUE(af.Normalize());
ASSERT_NEAR(af.Length(), 1.0f, ABS_ERR_F);
ChVector<float> bf(0.0f);
ASSERT_FALSE(bf.Normalize());
}
TEST(ChVectorTest, dot) {
ChVector<> ad(1.1, -2.2, 3.3);
ASSERT_NEAR(ad.Dot(ad), ad.Length2(), ABS_ERR_D);
ASSERT_NEAR(ad.Dot(-ad), -ad.Length2(), ABS_ERR_D);
ASSERT_NEAR(ad.Dot(ad.GetOrthogonalVector()), 0.0, ABS_ERR_D);
ChVector<float> af(1.1f, -2.2f, 3.3f);
ASSERT_NEAR(af.Dot(af), af.Length2(), ABS_ERR_F);
ASSERT_NEAR(af.Dot(-af), -af.Length2(), ABS_ERR_F);
ASSERT_NEAR(af.Dot(af.GetOrthogonalVector()), 0.0f, ABS_ERR_F);
}
TEST(ChVectorTest, cross) {
ChVector<> ad(1.1, -2.2, 3.3);
ChVector<> bd(-0.5, 0.6, 0.7);
auto cd = ad.Cross(bd);
ASSERT_NEAR(cd.Dot(ad), 0.0, ABS_ERR_D);
ASSERT_NEAR(cd.Dot(bd), 0.0, ABS_ERR_D);
auto zd1 = ad.Cross(ad);
ASSERT_NEAR(zd1.x(), 0.0, ABS_ERR_D);
ASSERT_NEAR(zd1.y(), 0.0, ABS_ERR_D);
ASSERT_NEAR(zd1.z(), 0.0, ABS_ERR_D);
auto zd2 = ad.Cross(-ad);
ASSERT_NEAR(zd2.x(), 0.0, ABS_ERR_D);
ASSERT_NEAR(zd2.y(), 0.0, ABS_ERR_D);
ASSERT_NEAR(zd2.z(), 0.0, ABS_ERR_D);
auto pd = ad.GetOrthogonalVector();
ASSERT_NEAR(ad.Cross(pd).Length(), ad.Length() * pd.Length(), ABS_ERR_D);
ChVector<float> af(1.1f, -2.2f, 3.3f);
ChVector<float> bf(-0.5f, 0.6f, 0.7f);
auto cf = af.Cross(bf);
ASSERT_NEAR(cf.Dot(af), 0.0f, ABS_ERR_F);
ASSERT_NEAR(cf.Dot(bf), 0.0f, ABS_ERR_F);
auto zf1 = af.Cross(af);
ASSERT_NEAR(zf1.x(), 0.0f, ABS_ERR_F);
ASSERT_NEAR(zf1.y(), 0.0f, ABS_ERR_F);
ASSERT_NEAR(zf1.z(), 0.0f, ABS_ERR_F);
auto zf2 = af.Cross(-af);
ASSERT_NEAR(zf2.x(), 0.0f, ABS_ERR_F);
ASSERT_NEAR(zf2.y(), 0.0f, ABS_ERR_F);
ASSERT_NEAR(zf2.z(), 0.0f, ABS_ERR_F);
auto pf = af.GetOrthogonalVector();
ASSERT_NEAR(af.Cross(pf).Length(), af.Length() * pf.Length(), ABS_ERR_F);
}
| 1,484 |
694 | <reponame>phanisai4u/PdfBox-Android<filename>library/src/main/java/com/tom_roush/pdfbox/pdmodel/graphics/state/PDTextState.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tom_roush.pdfbox.pdmodel.graphics.state;
import com.tom_roush.pdfbox.pdmodel.font.PDFont;
/**
* This class will hold the current state of the text parameters when executing a
* content stream.
*
* @author <NAME>
*/
public class PDTextState implements Cloneable
{
private float characterSpacing = 0;
private float wordSpacing = 0;
private float horizontalScaling = 100;
private float leading = 0;
private PDFont font;
private float fontSize;
private RenderingMode renderingMode = RenderingMode.FILL;
private float rise = 0;
private boolean knockout = true;
/**
* Get the value of the characterSpacing.
*
* @return The current characterSpacing.
*/
public float getCharacterSpacing()
{
return characterSpacing;
}
/**
* Set the value of the characterSpacing.
*
* @param value The characterSpacing.
*/
public void setCharacterSpacing(float value)
{
characterSpacing = value;
}
/**
* Get the value of the wordSpacing.
*
* @return The wordSpacing.
*/
public float getWordSpacing()
{
return wordSpacing;
}
/**
* Set the value of the wordSpacing.
*
* @param value The wordSpacing.
*/
public void setWordSpacing(float value)
{
wordSpacing = value;
}
/**
* Get the value of the horizontalScaling. The default is 100. This value
* is the percentage value 0-100 and not 0-1. So for mathematical operations
* you will probably need to divide by 100 first.
*
* @return The horizontalScaling.
*/
public float getHorizontalScaling()
{
return horizontalScaling;
}
/**
* Set the value of the horizontalScaling.
*
* @param value The horizontalScaling.
*/
public void setHorizontalScaling(float value)
{
horizontalScaling = value;
}
/**
* Get the value of the leading.
*
* @return The leading.
*/
public float getLeading()
{
return leading;
}
/**
* Set the value of the leading.
*
* @param value The leading.
*/
public void setLeading(float value)
{
leading = value;
}
/**
* Get the value of the font.
*
* @return The font.
*/
public PDFont getFont()
{
return font;
}
/**
* Set the value of the font.
*
* @param value The font.
*/
public void setFont(PDFont value)
{
font = value;
}
/**
* Get the value of the fontSize.
*
* @return The fontSize.
*/
public float getFontSize()
{
return fontSize;
}
/**
* Set the value of the fontSize.
*
* @param value The fontSize.
*/
public void setFontSize(float value)
{
fontSize = value;
}
/**
* Get the value of the renderingMode.
*
* @return The renderingMode.
*/
public RenderingMode getRenderingMode()
{
return renderingMode;
}
/**
* Set the value of the renderingMode.
*
* @param renderingMode The renderingMode.
*/
public void setRenderingMode(RenderingMode renderingMode)
{
this.renderingMode = renderingMode;
}
/**
* Get the value of the rise.
*
* @return The rise.
*/
public float getRise()
{
return rise;
}
/**
* Set the value of the rise.
*
* @param value The rise.
*/
public void setRise(float value)
{
rise = value;
}
/**
* Get the value of the knockout.
*
* @return The knockout.
*/
public boolean getKnockoutFlag()
{
return knockout;
}
/**
* Set the value of the knockout.
*
* @param value The knockout.
*/
public void setKnockoutFlag(boolean value)
{
knockout = value;
}
@Override
public PDTextState clone()
{
try
{
return (PDTextState)super.clone();
}
catch (CloneNotSupportedException e)
{
// should not happen
throw new RuntimeException(e);
}
}
}
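/**
 * A minimal usage sketch (not part of the PDFBox API): converts the 0-100 horizontal
 * scaling percentage into the 0-1 factor typically needed for text matrix math. The
 * class name is an illustrative assumption.
 */
class PDTextStateScalingExample
{
    static float horizontalScalingFactor(PDTextState textState)
    {
        // getHorizontalScaling() returns a percentage (default 100), so divide by 100
        // before using it to scale glyph advances.
        return textState.getHorizontalScaling() / 100f;
    }
}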
| 2,045 |
3,913 | <reponame>cksspk/ruoyi-vue-pro<filename>yudao-admin-server/src/main/java/cn/iocoder/yudao/adminserver/modules/infra/controller/job/vo/job/InfJobPageReqVO.java
package cn.iocoder.yudao.adminserver.modules.infra.controller.job.vo.job;
import cn.iocoder.yudao.framework.common.pojo.PageParam;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.ToString;
@ApiModel("Scheduled Job Paging Request VO")
@Data
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class InfJobPageReqVO extends PageParam {
    @ApiModelProperty(value = "Job name", example = "test job", notes = "fuzzy match")
private String name;
    @ApiModelProperty(value = "Job status", example = "1", notes = "see the InfJobStatusEnum enum")
private Integer status;
    @ApiModelProperty(value = "Handler name", example = "sysUserSessionTimeoutJob", notes = "fuzzy match")
private String handlerName;
}
| 419 |
1,408 | import logging
from datetime import datetime
from response.core.models import Incident
from response.slack.models.notification import Notification
logger = logging.getLogger(__name__)
NOTIFICATION_HANDLERS = []
class NotificationHandler(object):
def __init__(self, key, callback, interval_mins, max_notifications):
self.key = key
self.callback = callback
self.interval_mins = interval_mins
self.max_notifications = max_notifications
def __str__(self):
return self.key
def single_notification(initial_delay_mins=0, func=None):
"""
Register a handler that'll be called once in each open incident
"""
def _wrapper(fn):
NOTIFICATION_HANDLERS.append(
NotificationHandler(
key=fn.__name__,
callback=fn,
interval_mins=initial_delay_mins,
max_notifications=0,
)
)
return fn
if func:
return _wrapper(func)
return _wrapper
def recurring_notification(interval_mins, max_notifications=1):
"""
Register a handler that'll be called periodically for all open incidents.
"""
def _wrapper(fn):
NOTIFICATION_HANDLERS.append(
NotificationHandler(
key=fn.__name__,
callback=fn,
interval_mins=interval_mins,
max_notifications=max_notifications - 1,
)
)
return fn
return _wrapper
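# A hypothetical usage sketch (not part of this module): downstream code registers
# handlers with the decorators above, for example:
#
#     @recurring_notification(interval_mins=30, max_notifications=5)
#     def remind_about_roles(incident):
#         ...  # e.g. post a reminder to the incident's comms channel
#
#     @single_notification(initial_delay_mins=10)
#     def nudge_for_summary(incident):
#         ...
#
# The handler names and bodies are illustrative assumptions; real handlers would send
# Slack messages for the given incident.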
def handle_notifications():
# Only notify open incidents with a comms channel
open_incidents = Incident.objects.filter(
end_time__isnull=True, commschannel__incident__isnull=False
)
for incident in open_incidents:
for handler in NOTIFICATION_HANDLERS:
try:
# get the last sent notification for this incident/handler pair
notification = Notification.objects.get(
incident=incident, key=handler.key
)
# if we can find a previous notification for this incident/handler pair and
# it's been set to not repeat, exit here
if (
notification.repeat_count >= handler.max_notifications
or notification.completed
):
continue
# it's not exhausted its max_notifications, so wait 'interval_mins' before sending again
mins_since_last_notify = int(
(datetime.now() - notification.time).total_seconds() / 60
)
if mins_since_last_notify >= handler.interval_mins:
try:
handler.callback(incident)
except Exception as e:
logger.error(
f"Error calling notification handler {handler}: {e}"
)
notification.time = datetime.now()
notification.repeat_count = notification.repeat_count + 1
notification.save()
except Notification.DoesNotExist:
# we've never sent a notification to this incident/handler pair,
# so wait until 'interval_mins' mins have elapsed from start
mins_since_started = int(
(datetime.now() - incident.start_time).total_seconds() / 60
)
if mins_since_started >= handler.interval_mins:
try:
handler.callback(incident)
except Exception as e:
logger.error(
f"Error calling notification handler {handler}: {e}"
)
notification = Notification(
incident=incident,
key=handler.key,
time=datetime.now(),
repeat_count=0,
)
notification.save()
| 1,994 |
671 | #pragma once
#include <Windows.h>
#include <winternl.h>
#include <sddl.h>
#include <TlHelp32.h>
#include <stdio.h>
#include <string>
#include <tchar.h>
#include <AclAPI.h>
//#include <Ntsecapi.h>
#include "util.h"
#include "s4u.h"
#include "MSFRottenPotato.h"
typedef NTSTATUS(NTAPI *_NtQuerySystemInformation)(
SYSTEM_INFORMATION_CLASS SystemInformationClass,
PVOID SystemInformation,
ULONG SystemInformationLength,
PULONG ReturnLength
);
typedef struct _KERNELINFO {
PUCHAR pKernelBase;
PUCHAR pFunctionAddress;
} KERNELINFO, *PKERNELINFO;
typedef struct _SYSTEM_MODULE_INFORMATION_ENTRY {
HANDLE Section;
PVOID MappedBase;
PVOID ImageBase;
ULONG ImageSize;
ULONG Flags;
USHORT LoadOrderIndex;
USHORT InitOrderIndex;
USHORT LoadCount;
USHORT OffsetToFileName;
UCHAR FullPathName[256];
} SYSTEM_MODULE_INFORMATION_ENTRY, *PSYSTEM_MODULE_INFORMATION_ENTRY;
typedef struct _SYSTEM_MODULE_INFORMATION {
ULONG NumberOfModules;
SYSTEM_MODULE_INFORMATION_ENTRY Module[1];
} SYSTEM_MODULE_INFORMATION, *PSYSTEM_MODULE_INFORMATION;
typedef struct _SYSTEM_HANDLE
{
ULONG ProcessId;
UCHAR ObjectTypeNumber;
UCHAR Flags;
USHORT Handle;
PVOID Object;
ACCESS_MASK GrantedAccess;
} SYSTEM_HANDLE, *PSYSTEM_HANDLE;
typedef struct _SYSTEM_HANDLE_INFORMATION // Size=20
{
ULONG NumberOfHandles; // Size=4 Offset=0
SYSTEM_HANDLE Handles[1]; // Size=16 Offset=4
} SYSTEM_HANDLE_INFORMATION, *PSYSTEM_HANDLE_INFORMATION;
typedef struct _SYSTEM_HANDLE_TABLE_ENTRY_INFO {
USHORT UniqueProcessId;
USHORT CreatorBackTraceIndex;
UCHAR ObjectTypeIndex;
UCHAR HandleAttributes;
USHORT HandleValue;
PVOID Object;
ULONG GrantedAccess;
} SYSTEM_HANDLE_TABLE_ENTRY_INFO, *PSYSTEM_HANDLE_TABLE_ENTRY_INFO;
typedef struct _SID_BUILTIN
{
UCHAR Revision;
UCHAR SubAuthorityCount;
SID_IDENTIFIER_AUTHORITY IdentifierAuthority;
ULONG SubAuthority[2];
} SID_BUILTIN, *PSID_BUILTIN;
static wchar_t *SYSTEM_SID = L"S-1-5-18";
#ifndef STATUS_SUCCESS
#define STATUS_SUCCESS ((NTSTATUS)0x00000000L)
#endif
typedef struct _SID_INTEGRITY
{
UCHAR Revision;
UCHAR SubAuthorityCount;
SID_IDENTIFIER_AUTHORITY IdentifierAuthority;
ULONG SubAuthority[1];
} SID_INTEGRITY, *PSID_INTEGRITY;
typedef NTSYSAPI
NTSTATUS
(NTAPI *_ZwCreateToken)(
OUT PHANDLE TokenHandle,
IN ACCESS_MASK DesiredAccess,
IN POBJECT_ATTRIBUTES ObjectAttributes,
IN TOKEN_TYPE Type,
IN PLUID AuthenticationId,
IN PLARGE_INTEGER ExpirationTime,
IN PTOKEN_USER User,
IN PTOKEN_GROUPS Groups,
IN PTOKEN_PRIVILEGES Privileges,
IN PTOKEN_OWNER Owner,
IN PTOKEN_PRIMARY_GROUP PrimaryGroup,
IN PTOKEN_DEFAULT_DACL DefaultDacl,
IN PTOKEN_SOURCE Source
);
#define NtCurrentProcess ((HANDLE)(LONG_PTR)-1)
typedef NTSYSAPI
NTSTATUS
(NTAPI *_NtSetInformationProcess)(
HANDLE ProcessHandle,
PROCESSINFOCLASS ProcessInformationClass,
PVOID ProcessInformation,
ULONG ProcessInformationLength
);
typedef NTSYSAPI
NTSTATUS
(NTAPI *_NtQueryInformationProcess)(
_In_ HANDLE ProcessHandle,
_In_ PROCESSINFOCLASS ProcessInformationClass,
_Out_ PVOID ProcessInformation,
_In_ ULONG ProcessInformationLength,
_Out_opt_ PULONG ReturnLength
);
// Spawn a shell using the SeImpersonatePrivilege, requires an elevated primary token
void se_impersonate_priv(HANDLE);
// Spawn a shell by using the SeAssignPrimaryTokenPrivilege, requires an elevated primary token
// TODO figure out how to modify/set the session ID so it could be interactive (currently on session0)
void se_assign_primary_priv(HANDLE);
// Generate a new SYSTEM level primary token and return it
// Demonstrates SeCreateTokenPrivilege
HANDLE se_create_token_privilege(HANDLE, BOOL);
void set_backup_priv_reg();
void se_restore_priv_reg();
typedef struct _PROCESS_ACCESS_TOKEN
{
HANDLE Token;
HANDLE Thread;
} PROCESS_ACCESS_TOKEN, *PPROCESS_ACCESS_TOKEN; | 1,531 |
5,460 | import botocore.client
import boto3
import json
import pytest
import time
import sure # noqa # pylint: disable=unused-import
import uuid
from moto import (
mock_dynamodb2,
mock_lambda,
mock_logs,
mock_sns,
mock_sqs,
)
from uuid import uuid4
from .utilities import (
get_role_name,
get_test_zip_file3,
wait_for_log_msg,
get_test_zip_file_error,
)
_lambda_region = "us-west-2"
boto3.setup_default_session(region_name=_lambda_region)
@mock_logs
@mock_lambda
@mock_sqs
def test_create_event_source_mapping():
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
)
assert response["EventSourceArn"] == queue.attributes["QueueArn"]
assert response["FunctionArn"] == func["FunctionArn"]
assert response["State"] == "Enabled"
@pytest.mark.network
@pytest.mark.parametrize("key", ["FunctionName", "FunctionArn"])
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs(key):
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
name_or_arn = func[key]
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=name_or_arn
)
assert response["EventSourceArn"] == queue.attributes["QueueArn"]
assert response["State"] == "Enabled"
sqs_client = boto3.client("sqs", region_name="us-east-1")
sqs_client.send_message(QueueUrl=queue.url, MessageBody="test")
expected_msg = "get_test_zip_file3 success"
log_group = f"/aws/lambda/{function_name}"
msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
assert msg_showed_up, (
expected_msg
+ " was not found after sending an SQS message. All logs: "
+ str(all_logs)
)
@pytest.mark.network
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_put():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
table_name = str(uuid4())[0:6] + "_table"
table = dynamodb.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
StreamSpecification={
"StreamEnabled": True,
"StreamViewType": "NEW_AND_OLD_IMAGES",
},
)
conn = boto3.client("lambda", region_name="us-east-1")
function_name = str(uuid4())[0:6]
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function executed after a DynamoDB table is updated",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=table["TableDescription"]["LatestStreamArn"],
FunctionName=func["FunctionArn"],
)
assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"]
assert response["State"] == "Enabled"
dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
expected_msg = "get_test_zip_file3 success"
log_group = f"/aws/lambda/{function_name}"
msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
assert msg_showed_up, (
expected_msg + " was not found after a DDB insert. All logs: " + str(all_logs)
)
@pytest.mark.network
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_update():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
table_name = str(uuid4())[0:6] + "_table"
table = dynamodb.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
StreamSpecification={
"StreamEnabled": True,
"StreamViewType": "NEW_AND_OLD_IMAGES",
},
)
conn = boto3.client("lambda", region_name="us-east-1")
function_name = str(uuid4())[0:6]
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function executed after a DynamoDB table is updated",
Timeout=3,
MemorySize=128,
Publish=True,
)
conn.create_event_source_mapping(
EventSourceArn=table["TableDescription"]["LatestStreamArn"],
FunctionName=func["FunctionArn"],
)
dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
log_group = f"/aws/lambda/{function_name}"
expected_msg = "get_test_zip_file3 success"
msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted"
dynamodb.update_item(
TableName=table_name,
Key={"id": {"S": "item 1"}},
UpdateExpression="set #attr = :val",
ExpressionAttributeNames={"#attr": "new_attr"},
ExpressionAttributeValues={":val": {"S": "new_val"}},
)
msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
assert msg_showed_up, (
expected_msg + " was not found after updating DDB. All logs: " + str(all_logs)
)
assert "Nr_of_records(1)" in all_logs, "Only one item should be updated"
assert (
"Nr_of_records(2)" not in all_logs
), "The inserted item should not show up again"
@pytest.mark.network
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs_exception():
function_name = str(uuid4())[0:6]
logs_conn = boto3.client("logs", region_name="us-east-1")
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file_error()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
)
assert response["EventSourceArn"] == queue.attributes["QueueArn"]
assert response["State"] == "Enabled"
entries = []
for i in range(3):
body = {"uuid": str(uuid.uuid4()), "test": "test_{}".format(i)}
entry = {"Id": str(i), "MessageBody": json.dumps(body)}
entries.append(entry)
queue.send_messages(Entries=entries)
start = time.time()
while (time.time() - start) < 30:
result = logs_conn.describe_log_streams(
logGroupName=f"/aws/lambda/{function_name}"
)
log_streams = result.get("logStreams")
if not log_streams:
time.sleep(1)
continue
assert len(log_streams) >= 1
result = logs_conn.get_log_events(
logGroupName=f"/aws/lambda/{function_name}",
logStreamName=log_streams[0]["logStreamName"],
)
for event in result.get("events"):
if "I failed!" in event["message"]:
messages = queue.receive_messages(MaxNumberOfMessages=10)
# Verify messages are still visible and unprocessed
assert len(messages) == 3
return
time.sleep(1)
assert False, "Test Failed"
@pytest.mark.network
@mock_logs
@mock_sns
@mock_lambda
def test_invoke_function_from_sns():
logs_conn = boto3.client("logs", region_name=_lambda_region)
sns_conn = boto3.client("sns", region_name=_lambda_region)
sns_conn.create_topic(Name="some-topic")
topics_json = sns_conn.list_topics()
topics = topics_json["Topics"]
topic_arn = topics[0]["TopicArn"]
conn = boto3.client("lambda", _lambda_region)
function_name = str(uuid4())[0:6]
result = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
sns_conn.subscribe(
TopicArn=topic_arn, Protocol="lambda", Endpoint=result["FunctionArn"]
)
result = sns_conn.publish(TopicArn=topic_arn, Message=json.dumps({}))
start = time.time()
events = []
while (time.time() - start) < 10:
result = logs_conn.describe_log_streams(
logGroupName=f"/aws/lambda/{function_name}"
)
log_streams = result.get("logStreams")
if not log_streams:
time.sleep(1)
continue
assert len(log_streams) == 1
result = logs_conn.get_log_events(
logGroupName=f"/aws/lambda/{function_name}",
logStreamName=log_streams[0]["logStreamName"],
)
events = result.get("events")
for event in events:
if event["message"] == "get_test_zip_file3 success":
return
time.sleep(1)
assert False, "Expected message not found in logs:" + str(events)
@mock_logs
@mock_lambda
@mock_sqs
def test_list_event_source_mappings():
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
)
mappings = conn.list_event_source_mappings(EventSourceArn="123")
mappings["EventSourceMappings"].should.have.length_of(0)
mappings = conn.list_event_source_mappings(
EventSourceArn=queue.attributes["QueueArn"]
)
assert len(mappings["EventSourceMappings"]) >= 1
assert mappings["EventSourceMappings"][0]["UUID"] == response["UUID"]
assert mappings["EventSourceMappings"][0]["FunctionArn"] == func["FunctionArn"]
@mock_lambda
@mock_sqs
def test_get_event_source_mapping():
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
)
mapping = conn.get_event_source_mapping(UUID=response["UUID"])
assert mapping["UUID"] == response["UUID"]
assert mapping["FunctionArn"] == func["FunctionArn"]
conn.get_event_source_mapping.when.called_with(UUID="1").should.throw(
botocore.client.ClientError
)
@mock_lambda
@mock_sqs
def test_update_event_source_mapping():
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func1 = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
func2 = conn.create_function(
FunctionName="testFunction2",
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
)
assert response["FunctionArn"] == func1["FunctionArn"]
assert response["BatchSize"] == 10
assert response["State"] == "Enabled"
mapping = conn.update_event_source_mapping(
UUID=response["UUID"], Enabled=False, BatchSize=2, FunctionName="testFunction2"
)
assert mapping["UUID"] == response["UUID"]
assert mapping["FunctionArn"] == func2["FunctionArn"]
assert mapping["State"] == "Disabled"
assert mapping["BatchSize"] == 2
@mock_lambda
@mock_sqs
def test_delete_event_source_mapping():
function_name = str(uuid4())[0:6]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName=f"{function_name}_queue")
conn = boto3.client("lambda", region_name="us-east-1")
func1 = conn.create_function(
FunctionName=function_name,
Runtime="python2.7",
Role=get_role_name(),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": get_test_zip_file3()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_event_source_mapping(
EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
)
assert response["FunctionArn"] == func1["FunctionArn"]
assert response["BatchSize"] == 10
assert response["State"] == "Enabled"
response = conn.delete_event_source_mapping(UUID=response["UUID"])
assert response["State"] == "Deleting"
conn.get_event_source_mapping.when.called_with(UUID=response["UUID"]).should.throw(
botocore.client.ClientError
)
| 6,708 |
5,169 | {
"name": "faceWindow",
"version": "1.0.0",
"summary": "SZ weking company use to group all items",
"description": "he is a good man,sz weking all people use this library,it is very easy to use",
"homepage": "https://github.com/1597538426/faceWindowDemo/tree/1.0.0",
"license": "MIT",
"authors": {
"1597538426": "<EMAIL>"
},
"platforms": {
"ios": null
},
"source": {
"git": "https://github.com/1597538426/faceWindowDemo.git",
"tag": "1.0.0"
},
"source_files": [
"faceWindow",
"faceWindow/**/*.{h,m}"
],
"exclude_files": "faceWindow/Exclude"
}
| 249 |
332 | <filename>sdk/src/main/java/com/vk/api/sdk/objects/store/StickersKeyword.java
// Autogenerated from vk-api-schema. Please don't edit it manually.
package com.vk.api.sdk.objects.store;
import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;
import com.vk.api.sdk.objects.Validable;
import com.vk.api.sdk.objects.annotations.Required;
import java.util.List;
import java.util.Objects;
/**
* StickersKeyword object
*/
public class StickersKeyword implements Validable {
@SerializedName("words")
@Required
private List<String> words;
@SerializedName("user_stickers")
private StickersKeywordStickers userStickers;
@SerializedName("promoted_stickers")
private StickersKeywordStickers promotedStickers;
@SerializedName("stickers")
private List<StickersKeywordSticker> stickers;
public List<String> getWords() {
return words;
}
public StickersKeyword setWords(List<String> words) {
this.words = words;
return this;
}
public StickersKeywordStickers getUserStickers() {
return userStickers;
}
public StickersKeyword setUserStickers(StickersKeywordStickers userStickers) {
this.userStickers = userStickers;
return this;
}
public StickersKeywordStickers getPromotedStickers() {
return promotedStickers;
}
public StickersKeyword setPromotedStickers(StickersKeywordStickers promotedStickers) {
this.promotedStickers = promotedStickers;
return this;
}
public List<StickersKeywordSticker> getStickers() {
return stickers;
}
public StickersKeyword setStickers(List<StickersKeywordSticker> stickers) {
this.stickers = stickers;
return this;
}
@Override
public int hashCode() {
return Objects.hash(promotedStickers, userStickers, words, stickers);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StickersKeyword stickersKeyword = (StickersKeyword) o;
return Objects.equals(promotedStickers, stickersKeyword.promotedStickers) &&
Objects.equals(userStickers, stickersKeyword.userStickers) &&
Objects.equals(words, stickersKeyword.words) &&
Objects.equals(stickers, stickersKeyword.stickers);
}
@Override
public String toString() {
final Gson gson = new Gson();
return gson.toJson(this);
}
public String toPrettyString() {
final StringBuilder sb = new StringBuilder("StickersKeyword{");
sb.append("promotedStickers=").append(promotedStickers);
sb.append(", userStickers=").append(userStickers);
sb.append(", words='").append(words).append("'");
sb.append(", stickers=").append(stickers);
sb.append('}');
return sb.toString();
}
}
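// A minimal usage sketch (not part of the SDK): populating the object through the
// fluent setters. The literal keyword values are illustrative assumptions.
class StickersKeywordExample {
    static StickersKeyword sample() {
        return new StickersKeyword()
                .setWords(java.util.Arrays.asList("hello", "hi"))
                .setStickers(java.util.Collections.<StickersKeywordSticker>emptyList());
    }
}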
| 1,139 |
387 | import time
import cv2
import numpy as np
from chainer import serializers, Variable
import chainer.functions as F
import argparse
from yolov2 import *
from lib.image_generator import *
from yolov2_predict import *
item_path = "./items"
background_path = "./backgrounds"
input_width, input_height = (416, 416)
loop = 10
# load image generator
print("loading image generator...")
generator = ImageGenerator(item_path, background_path)
animation = generator.generate_random_animation(loop=loop, bg_index=4, crop_width=input_width, crop_height=input_height, min_item_scale=1.0, max_item_scale=2.0)
for i in animation:
cv2.imshow("w", i)
cv2.waitKey(1)
# init video writer
codec = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
video_writer = cv2.VideoWriter('output.avi', codec, 25.0, (input_width, input_height))
# load predictor
predictor = AnimalPredictor()
for frame in animation:
orig_img = frame.copy()
nms_results = predictor(orig_img)
# draw result
for result in nms_results:
left, top = result["box"].int_left_top()
cv2.rectangle(
orig_img,
result["box"].int_left_top(), result["box"].int_right_bottom(),
(0, 255, 0),
3
)
text = '%s(%2d%%)' % (result["label"], result["probs"].max()*result["conf"]*100)
cv2.putText(orig_img, text, (left, top-5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
print(text)
cv2.imshow("w", orig_img)
cv2.waitKey(1)
video_writer.write(orig_img)
video_writer.release()
| 651 |
542 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <gio/gnetworking.h>
#include "dtls_transport.h"
#include "nice_agent_bio.h"
#include "rtp_packet.h"
#include "rtcp_packet.h"
#include "utils.h"
#include "peer_connection.h"
static const gchar *STATE_NAME[] = {"disconnected", "gathering", "connecting",
"connected", "ready", "failed"};
static const gchar *CANDIDATE_TYPE_NAME[] = {"host", "srflx", "prflx", "relay"};
static const gchar *STUN_ADDR = "172.16.17.32";
static const guint STUN_PORT = 3478;
struct PeerConnection {
NiceAgent *nice_agent;
gboolean controlling;
guint stream_id;
guint component_id;
GMainLoop *gloop;
GThread *gthread;
uint32_t audio_ssrc, video_ssrc;
DtlsTransport *dtls_transport;
SessionDescription *sdp;
Transceiver transceiver;
MediaStream *media_stream;
void (*onicecandidate)(char *sdp, void *userdata);
void (*oniceconnectionstatechange)(IceConnectionState state, void *userdata);
void (*ontrack)(uint8_t *packet, size_t bytes, void *userdata);
void *onicecandidate_userdata;
void *oniceconnectionstatechange_userdata;
void *ontrack_userdata;
void (*on_transport_ready)(void *userdata);
void *on_transport_ready_userdata;
};
void* peer_connection_gather_thread(void *data) {
PeerConnection *pc = (PeerConnection*)data;
g_main_loop_run(pc->gloop);
return NULL;
}
static void peer_connection_new_selected_pair_full_cb(NiceAgent* agent, guint stream_id,
guint component_id, NiceCandidate *lcandidate, NiceCandidate* rcandidate, gpointer data) {
PeerConnection *pc = (PeerConnection*)data;
dtls_transport_do_handshake(pc->dtls_transport);
}
static void* peer_connection_component_state_chanaged_cb(NiceAgent *agent,
guint stream_id, guint component_id, guint state, gpointer data) {
PeerConnection *pc = (PeerConnection*)data;
//LOG_INFO("SIGNAL: state changed %d %d %s[%d]",
// stream_id, component_id, STATE_NAME[state], state);
if(pc->oniceconnectionstatechange != NULL) {
pc->oniceconnectionstatechange(state, pc->oniceconnectionstatechange_userdata);
  }
  return NULL;
}
static void* peer_connection_candidate_gathering_done_cb(NiceAgent *agent, guint stream_id,
gpointer data) {
PeerConnection *pc = (PeerConnection*)data;
gchar *local_ufrag = NULL;
gchar *local_password = NULL;
gchar ipaddr[INET6_ADDRSTRLEN];
GSList *nice_candidates = NULL;
int i = 0;
NiceCandidate *nice_candidate;
char nice_candidate_addr[INET6_ADDRSTRLEN];
SessionDescription *sdp = pc->sdp;
if(!nice_agent_get_local_credentials(pc->nice_agent,
pc->stream_id, &local_ufrag, &local_password)) {
LOG_ERROR("get local credentials failed");
return NULL;
}
session_description_append(sdp, "v=0");
session_description_append(sdp, "o=- 1495799811084970 1495799811084970 IN IP4 0.0.0.0");
session_description_append(sdp, "s=-");
session_description_append(sdp, "t=0 0");
session_description_append(sdp, "a=msid-semantic: WMS");
if(pc->media_stream->tracks_num > 1)
session_description_append(sdp, "a=group:BUNDLE 0 1");
else
session_description_append(sdp, "a=group:BUNDLE 0");
session_description_add_codec(sdp, pc->media_stream->audio_codec, pc->transceiver.audio, local_ufrag, local_password, dtls_transport_get_fingerprint(pc->dtls_transport));
session_description_add_codec(sdp, pc->media_stream->video_codec, pc->transceiver.video, local_ufrag, local_password, dtls_transport_get_fingerprint(pc->dtls_transport));
if(local_ufrag)
free(local_ufrag);
if(local_password)
free(local_password);
nice_candidates = nice_agent_get_local_candidates(pc->nice_agent,
pc->stream_id, pc->component_id);
for(i = 0; i < g_slist_length(nice_candidates); ++i) {
nice_candidate = (NiceCandidate *)g_slist_nth(nice_candidates, i)->data;
nice_address_to_string(&nice_candidate->addr, nice_candidate_addr);
if(utils_is_valid_ip_address(nice_candidate_addr) > 0) {
nice_candidate_free(nice_candidate);
continue;
}
session_description_append(sdp, "a=candidate:%s 1 udp %u %s %d typ %s",
nice_candidate->foundation,
nice_candidate->priority,
nice_candidate_addr,
nice_address_get_port(&nice_candidate->addr),
CANDIDATE_TYPE_NAME[nice_candidate->type]);
nice_candidate_free(nice_candidate);
}
if(pc->onicecandidate != NULL) {
char *answer = NULL;
const char *sdp_content = session_description_get_content(pc->sdp);
answer = (char*)malloc(strlen(sdp_content) + 30);
sprintf(answer, "{\"type\": \"answer\", \"sdp\": \"%s\"}", sdp_content);
pc->onicecandidate(answer, pc->onicecandidate_userdata);
if(answer)
free(answer);
}
if(nice_candidates)
g_slist_free(nice_candidates);
  return NULL;
}
int peer_connection_send_rtcp_pil(PeerConnection *pc, uint32_t ssrc) {
int ret = -1;
guint size = 12;
uint8_t plibuf[128];
rtcp_packet_get_pli(plibuf, 12, ssrc);
dtls_transport_encrypt_rctp_packet(pc->dtls_transport, plibuf, &size);
ret = nice_agent_send(pc->nice_agent, pc->stream_id, pc->component_id, size, (gchar*)plibuf);
return ret;
}
static void peer_connection_ice_recv_cb(NiceAgent *agent, guint stream_id, guint component_id,
guint len, gchar *buf, gpointer data) {
PeerConnection *pc = (PeerConnection*)data;
if(rtcp_packet_validate(buf, len)) {
if(pc->on_transport_ready != NULL) {
pc->on_transport_ready((void*)pc->on_transport_ready_userdata);
}
}
else if(dtls_transport_validate(buf)) {
dtls_transport_incomming_msg(pc->dtls_transport, buf, len);
}
else if(rtp_packet_validate(buf, len)) {
dtls_transport_decrypt_rtp_packet(pc->dtls_transport, buf, &len);
if(pc->ontrack != NULL) {
pc->ontrack(buf, len, pc->ontrack_userdata);
}
}
}
gboolean peer_connection_nice_agent_setup(PeerConnection *pc) {
pc->gloop = g_main_loop_new(NULL, FALSE);
pc->nice_agent = nice_agent_new(g_main_loop_get_context(pc->gloop),
NICE_COMPATIBILITY_RFC5245);
if(pc->nice_agent == NULL) {
LOG_ERROR("Failed to create agent");
return FALSE;
}
g_object_set(pc->nice_agent, "stun-server", STUN_ADDR, NULL);
g_object_set(pc->nice_agent, "stun-server-port", STUN_PORT, NULL);
g_object_set(pc->nice_agent, "controlling-mode", FALSE, NULL);
g_object_set(pc->nice_agent, "keepalive-conncheck", TRUE, NULL);
g_signal_connect(pc->nice_agent, "candidate-gathering-done",
G_CALLBACK(peer_connection_candidate_gathering_done_cb), pc);
g_signal_connect(pc->nice_agent, "component-state-changed",
G_CALLBACK(peer_connection_component_state_chanaged_cb), pc);
g_signal_connect(pc->nice_agent, "new-selected-pair-full",
G_CALLBACK(peer_connection_new_selected_pair_full_cb), pc);
pc->component_id = 1;
pc->stream_id = nice_agent_add_stream(pc->nice_agent, pc->component_id);
if(pc->stream_id == 0) {
LOG_ERROR("Failed to add stream");
return FALSE;
}
nice_agent_set_stream_name(pc->nice_agent, pc->stream_id, "video");
nice_agent_attach_recv(pc->nice_agent, pc->stream_id, pc->component_id,
g_main_loop_get_context(pc->gloop), peer_connection_ice_recv_cb, pc);
pc->gthread = g_thread_new("ice gather thread", &peer_connection_gather_thread, pc);
return TRUE;
}
PeerConnection* peer_connection_create(void) {
PeerConnection *pc = NULL;
pc = (PeerConnection*)malloc(sizeof(PeerConnection));
if(pc == NULL)
return pc;
pc->audio_ssrc = 0;
pc->video_ssrc = 0;
pc->onicecandidate = NULL;
pc->onicecandidate_userdata = NULL;
pc->oniceconnectionstatechange = NULL;
pc->oniceconnectionstatechange_userdata = NULL;
pc->on_transport_ready = NULL;
pc->on_transport_ready_userdata = NULL;
pc->transceiver.video = SENDONLY;
pc->transceiver.audio = SENDONLY;
if(peer_connection_nice_agent_setup(pc) == FALSE) {
peer_connection_destroy(pc);
return NULL;
}
pc->dtls_transport = dtls_transport_create(nice_agent_bio_new(pc->nice_agent, pc->stream_id, pc->component_id));
pc->sdp = session_description_create();
return pc;
}
void peer_connection_destroy(PeerConnection *pc) {
if(pc == NULL)
return;
g_main_loop_quit(pc->gloop);
g_thread_join(pc->gthread);
g_main_loop_unref(pc->gloop);
if(pc->nice_agent)
g_object_unref(pc->nice_agent);
if(pc->dtls_transport)
dtls_transport_destroy(pc->dtls_transport);
free(pc);
pc = NULL;
}
void peer_connection_add_stream(PeerConnection *pc, MediaStream *media_stream) {
pc->media_stream = media_stream;
}
int peer_connection_create_answer(PeerConnection *pc) {
if(!nice_agent_gather_candidates(pc->nice_agent, pc->stream_id)) {
LOG_ERROR("Failed to start candidate gathering");
return -1;
}
return 0;
}
void peer_connection_set_remote_description(PeerConnection *pc, char *remote_sdp_base64) {
guchar *remote_sdp = NULL;
gsize len;
gchar* ufrag = NULL;
gchar* pwd = NULL;
GSList *plist;
int i;
remote_sdp = g_base64_decode(remote_sdp_base64, &len);
pc->audio_ssrc = session_description_find_ssrc("audio", remote_sdp);
pc->video_ssrc = session_description_find_ssrc("video", remote_sdp);
// Remove mDNS
SessionDescription *sdp = NULL;
if(strstr(remote_sdp, "local") != NULL) {
sdp = session_description_create();
gchar **splits;
splits = g_strsplit(remote_sdp, "\r\n", 128);
for(i = 0; splits[i] != NULL; i++) {
if(strstr(splits[i], "candidate") != NULL && strstr(splits[i], "local") != NULL) {
char buf[256] = {0};
if(session_description_update_mdns_of_candidate(splits[i], buf, sizeof(buf)) != -1) {
session_description_append_newline(sdp, buf);
}
}
else {
session_description_append_newline(sdp, splits[i]);
}
}
free(remote_sdp);
remote_sdp = strdup(session_description_get_content(sdp));
}
plist = nice_agent_parse_remote_stream_sdp(pc->nice_agent,
pc->component_id, (gchar*)remote_sdp, &ufrag, &pwd);
if(ufrag && pwd && g_slist_length(plist) > 0) {
ufrag[strlen(ufrag) - 1] = '\0';
pwd[strlen(pwd) - 1] = '\0';
NiceCandidate* c = (NiceCandidate*)g_slist_nth(plist, 0)->data;
if(!nice_agent_set_remote_credentials(pc->nice_agent, 1, ufrag, pwd))
{
LOG_WARNING("failed to set remote credentials");
}
if(nice_agent_set_remote_candidates(pc->nice_agent, pc->stream_id,
pc->component_id, plist) < 1) {
LOG_WARNING("failed to set remote candidates");
}
g_free(ufrag);
g_free(pwd);
g_slist_free_full(plist, (GDestroyNotify)&nice_candidate_free);
}
if(sdp)
session_description_destroy(sdp);
if(remote_sdp)
free(remote_sdp);
}
int peer_connection_send_rtp_packet(PeerConnection *pc, uint8_t *packet, int bytes) {
dtls_transport_encrypt_rtp_packet(pc->dtls_transport, packet, &bytes);
int sent = nice_agent_send(pc->nice_agent, pc->stream_id, pc->component_id, bytes, (gchar*)packet);
if(sent < bytes) {
LOG_ERROR("only sent %d bytes? (was %d)\n", sent, bytes);
}
return sent;
}
void peer_connection_set_on_transport_ready(PeerConnection *pc, void (*on_transport_ready), void *data) {
pc->on_transport_ready = on_transport_ready;
pc->on_transport_ready_userdata = data;
}
void peer_connection_onicecandidate(PeerConnection *pc, void (*onicecandidate), void *userdata) {
pc->onicecandidate = onicecandidate;
pc->onicecandidate_userdata = userdata;
}
void peer_connection_oniceconnectionstatechange(PeerConnection *pc,
void (*oniceconnectionstatechange), void *userdata) {
pc->oniceconnectionstatechange = oniceconnectionstatechange;
pc->oniceconnectionstatechange_userdata = userdata;
}
void peer_connection_ontrack(PeerConnection *pc, void (*ontrack), void *userdata) {
pc->ontrack = ontrack;
pc->ontrack_userdata = userdata;
}
int peer_connection_add_transceiver(PeerConnection *pc, Transceiver transceiver) {
  pc->transceiver = transceiver;
  return 0;
}
uint32_t peer_connection_get_ssrc(PeerConnection *pc, const char *type) {
if(strcmp(type, "audio") == 0) {
return pc->audio_ssrc;
}
else if(strcmp(type, "video") == 0) {
return pc->video_ssrc;
}
return 0;
}
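/*
 * A minimal usage sketch (not part of this file): the typical call sequence for an
 * answering peer. The callback names, the media_stream variable and the signaling glue
 * are illustrative assumptions; a real application must supply its own signaling
 * channel to exchange the base64-encoded SDP strings.
 *
 *   PeerConnection *pc = peer_connection_create();
 *   peer_connection_onicecandidate(pc, on_local_answer_ready, user_data);
 *   peer_connection_ontrack(pc, on_decrypted_rtp, user_data);
 *   peer_connection_add_stream(pc, media_stream);
 *   peer_connection_set_remote_description(pc, remote_offer_base64);
 *   peer_connection_create_answer(pc);  // gathers candidates, then onicecandidate fires
 *   ...
 *   peer_connection_destroy(pc);
 */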
| 4,768 |
9,425 | <gh_stars>1000+
"""
:codeauthor: :email: `<NAME> <<EMAIL>>`
tests.integration.states.network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import pytest
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
@pytest.mark.destructive_test
class NetworkTest(ModuleCase, SaltReturnAssertsMixin):
"""
Validate network state module
"""
def setUp(self):
os_family = self.run_function("grains.get", ["os_family"])
if os_family not in ("RedHat", "Debian"):
self.skipTest(
"Network state only supported on RedHat and Debian based systems"
)
@pytest.mark.slow_test
def test_managed(self):
"""
network.managed
"""
state_key = "network_|-dummy0_|-dummy0_|-managed"
ret = self.run_function("state.sls", mods="network.managed", test=True)
self.assertEqual(
"Interface dummy0 is set to be added.", ret[state_key]["comment"]
)
@pytest.mark.slow_test
def test_routes(self):
"""
network.routes
"""
state_key = "network_|-routes_|-dummy0_|-routes"
expected_changes = "Interface dummy0 routes are set to be added."
ret = self.run_function("state.sls", mods="network.routes", test=True)
self.assertEqual(
ret[state_key]["comment"], "Interface dummy0 routes are set to be added."
)
@pytest.mark.slow_test
def test_system(self):
"""
network.system
"""
state_key = "network_|-system_|-system_|-system"
global_settings = self.run_function("ip.get_network_settings")
ret = self.run_function("state.sls", mods="network.system", test=True)
self.assertIn(
"Global network settings are set to be {}".format(
"added" if not global_settings else "updated"
),
ret[state_key]["comment"],
)
| 865 |
861 | <filename>spring-cloud-gray-server/src/main/java/cn/springcloud/gray/server/service/UserResourceAuthorityService.java
package cn.springcloud.gray.server.service;
import cn.springcloud.gray.server.dao.mapper.ModelMapper;
import cn.springcloud.gray.server.dao.mapper.UserResourceAuthorityMapper;
import cn.springcloud.gray.server.dao.model.UserResourceAuthorityDO;
import cn.springcloud.gray.server.dao.repository.UserResourceAuthorityRepository;
import cn.springcloud.gray.server.module.domain.ResourceAuthorityFlag;
import cn.springcloud.gray.server.module.user.domain.UserResourceAuthority;
import cn.springcloud.gray.server.module.user.domain.UserResourceAuthorityQuery;
import cn.springcloud.gray.server.utils.PaginationUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.domain.Specification;
import org.springframework.stereotype.Service;
import javax.persistence.criteria.CriteriaBuilder;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Predicate;
import javax.persistence.criteria.Root;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
@Service
public class UserResourceAuthorityService extends AbstraceCRUDService<UserResourceAuthority, UserResourceAuthorityRepository, UserResourceAuthorityDO, Long> {
@Autowired
private UserResourceAuthorityRepository repository;
@Autowired
private UserResourceAuthorityMapper mapper;
@Override
protected UserResourceAuthorityRepository getRepository() {
return repository;
}
@Override
protected ModelMapper<UserResourceAuthority, UserResourceAuthorityDO> getModelMapper() {
return mapper;
}
public UserResourceAuthority findUserResourceAuthority(String userId, String resource, String resourceId) {
return mapper.do2model(repository.findFirstByUserIdAndResourceAndResourceId(userId, resource, resourceId));
}
public Page<UserResourceAuthority> queryUserResourceAuthority(UserResourceAuthorityQuery uraQuery, Pageable pageable) {
Specification<UserResourceAuthorityDO> specification = new Specification<UserResourceAuthorityDO>() {
@Override
public Predicate toPredicate(Root<UserResourceAuthorityDO> root, CriteriaQuery<?> query, CriteriaBuilder cb) {
List<Predicate> predicates = new ArrayList<>();
if (StringUtils.isNotEmpty(uraQuery.getUserId())) {
predicates.add(cb.equal(root.get("userId").as(String.class), uraQuery.getUserId()));
}
if (Objects.nonNull(uraQuery.getResource())) {
predicates.add(cb.equal(root.get("resource").as(String.class), uraQuery.getResource()));
}
if (Objects.nonNull(uraQuery.getResourceId())) {
predicates.add(cb.equal(root.get("resourceId").as(String.class), uraQuery.getResourceId()));
}
if (Objects.nonNull(uraQuery.getAuthorityFlag())) {
predicates.add(cb.equal(root.get("authorityFlag").as(Integer.class), uraQuery.getAuthorityFlag().getFlag()));
}
if (Objects.nonNull(uraQuery.getDelFlag())) {
predicates.add(cb.equal(root.get("delFlag").as(Boolean.class), uraQuery.getDelFlag()));
}
query.where(predicates.toArray(new Predicate[predicates.size()]));
return query.getRestriction();
}
};
Page<UserResourceAuthorityDO> page = repository.findAll(specification, pageable);
return PaginationUtils.convert(pageable, page, mapper);
}
public String firstAuthorityResourceId(String userId, String resource) {
UserResourceAuthorityDO record = repository.findFirstByUserIdAndResourceAndAuthorityFlagAndDelFlag(
userId, resource, ResourceAuthorityFlag.OWNER.getFlag(), false);
if (Objects.nonNull(record)) {
return record.getResourceId();
}
record = repository.findFirstByUserIdAndResourceAndAuthorityFlagAndDelFlag(
userId, resource, ResourceAuthorityFlag.ADMIN.getFlag(), false);
return Objects.nonNull(record) ? record.getResourceId() : null;
}
}
| 1,687 |
488 | <reponame>maurizioabba/rose<filename>tests/CompileTests/OpenMP_tests/cvalidation/for_schedule_dynamic.c
/*
* Test for dynamic scheduling with chunk size
* Method: calculate how many times the iteration space is dispatched
* and check that each dispatch has the requested chunk size,
* unless it is the last one.
* It is possible for two adjacent chunks to be assigned to the same thread.
* Modified by <NAME>
*/
// Skip testing on 64 bit systems for now!
#ifndef __LP64__
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#define CFDMAX_SIZE 100
int
check_for_schedule_dynamic (FILE * logFile)
{
const int chunk_size = 7;
int tid;
int tids[CFDMAX_SIZE];
int count = 0;
int tmp_count = 0; /*dispatch times*/
int *tmp; /*store chunk size for each dispatch*/
int i;
int result = 0;
#pragma omp parallel private(tid) shared(tids)
{ /* begin of parallel */
tid = omp_get_thread_num ();
#pragma omp for schedule(dynamic,chunk_size)
for (i = 0; i < CFDMAX_SIZE; i++)
{
tids[i] = tid;
}
} /* end of parallel */
for (i = 0; i < CFDMAX_SIZE - 1; ++i)
{
if (tids[i] != tids[i + 1])
{
count++;
}
}
tmp = (int *) malloc (sizeof (int) * (count + 1));
tmp[0] = 1;
for (i = 0; i < CFDMAX_SIZE - 1; ++i)
{
if (tmp_count > count)
{
printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */
break;
}
if (tids[i] != tids[i + 1])
{
tmp_count++;
tmp[tmp_count] = 1;
}
else
{
tmp[tmp_count]++;
}
}
/*
printf("debug----\n");
for (i = 0; i < CFDMAX_SIZE; ++i)
printf("%d ",tids[i]);
printf("debug----\n");
*/
/* is dynamic statement working? */
for (i = 0; i < count; i++)
{
if ((tmp[i]%chunk_size)!=0)
/*it is possible for 2 adjacent chunks assigned to a same thread*/
{
result++;
fprintf(logFile,"The intermediate dispatch has wrong chunksize.\n");
/*result += ((tmp[i] / chunk_size) - 1);*/
}
}
if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size))
{
result++;
fprintf(logFile,"the last dispatch has wrong chunksize.\n");
}
/* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */
return (result==0);
}
int
crosscheck_for_schedule_dynamic (FILE * logFile)
{
const int chunk_size = 7;
int tid;
int tids[CFDMAX_SIZE];
int count = 0;
int tmp_count = 0; /*dispatch times*/
int *tmp; /*store chunk size for each dispatch*/
int i;
int result = 0;
#pragma omp parallel private(tid) shared(tids)
{ /* begin of parallel */
tid = omp_get_thread_num ();
#pragma omp for
for (i = 0; i < CFDMAX_SIZE; i++)
{
tids[i] = tid;
}
} /* end of parallel */
for (i = 0; i < CFDMAX_SIZE - 1; ++i)
{
if (tids[i] != tids[i + 1])
{
count++;
}
}
tmp = (int *) malloc (sizeof (int) * (count + 1));
tmp[0] = 1;
for (i = 0; i < CFDMAX_SIZE - 1; ++i)
{
if (tmp_count > count)
{
printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */
break;
}
if (tids[i] != tids[i + 1])
{
tmp_count++;
tmp[tmp_count] = 1;
}
else
{
tmp[tmp_count]++;
}
}
/*
printf("debug----\n");
for (i = 0; i < CFDMAX_SIZE; ++i)
printf("%d ",tids[i]);
printf("debug----\n");
*/
/* is dynamic statement working? */
for (i = 0; i < count; i++)
{
if ((tmp[i]%chunk_size)!=0)
/*it is possible for 2 adjacent chunks assigned to a same thread*/
{
result++;
fprintf(logFile,"The intermediate dispatch has wrong chunksize.\n");
/*result += ((tmp[i] / chunk_size) - 1);*/
}
}
if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size))
{
result++;
fprintf(logFile,"the last dispatch has wrong chunksize.\n");
}
/* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */
return (result==0);
}
#else
#warning "Not tested on 64 bit systems"
#endif
| 1,735 |
2,082 | from model_mommy import mommy
def make_comment(doc, user):
return mommy.make("Comment", example=doc, user=user)
def make_doc(project):
return mommy.make("Example", text="example", project=project)
def make_image(project, filepath):
return mommy.make("Example", filename=filepath, project=project)
def make_example_state(example, user):
return mommy.make("ExampleState", example=example, confirmed_by=user)
| 138 |
302 | <reponame>jacobkimmel/non-parametric-transformers
import math
import gpytorch
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.cluster import KMeans
from torch import nn
# MLP feature extractor
class MLP(nn.Module):
def __init__(
self, input_size, hidden_layer_sizes, output_size,
dropout_prob=None):
super(MLP, self).__init__()
fc_layers = []
all_layer_sizes = [input_size] + hidden_layer_sizes
for layer_size_idx in range(len(all_layer_sizes) - 1):
fc_layers.append(
nn.Linear(all_layer_sizes[layer_size_idx],
all_layer_sizes[layer_size_idx + 1]))
self.fc_layers = nn.ModuleList(fc_layers)
self.output_layer = nn.Linear(
hidden_layer_sizes[-1], output_size)
if dropout_prob is not None:
self.dropout = torch.nn.Dropout(p=dropout_prob)
else:
self.dropout = None
def forward(self, x):
for fc_layer in self.fc_layers:
x = fc_layer(x)
x = F.relu(x)
if self.dropout is not None:
x = self.dropout(x)
output = self.output_layer(x)
return output
# GP Layer
# Trains one GP per feature, as per the SV-DKL paper
# The outputs of those GPs are mixed in the Softmax Likelihood for classification
class GaussianProcessLayer(gpytorch.models.ApproximateGP):
def __init__(self, num_dim, grid_bounds=(-10., 10.), grid_size=64):
if num_dim > 1:
batch_shape = torch.Size([num_dim])
else:
batch_shape = torch.Size([])
variational_distribution = (
gpytorch.variational.CholeskyVariationalDistribution(
num_inducing_points=grid_size, batch_shape=batch_shape))
# Our base variational strategy is a GridInterpolationVariationalStrategy,
# which places variational inducing points on a Grid
# We wrap it with a MultitaskVariationalStrategy so that our output is a vector-valued GP
variational_strategy = gpytorch.variational.GridInterpolationVariationalStrategy(
self, grid_size=grid_size, grid_bounds=[grid_bounds],
variational_distribution=variational_distribution
)
if num_dim > 1:
variational_strategy = gpytorch.variational.IndependentMultitaskVariationalStrategy(
variational_strategy, num_tasks=num_dim)
super().__init__(variational_strategy)
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(
math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp
)
)
)
self.mean_module = gpytorch.means.ConstantMean()
self.grid_bounds = grid_bounds
def forward(self, x):
mean = self.mean_module(x)
covar = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean, covar)
# Stochastic Variational Deep Kernel Learning
# Wilson et al. 2016
# https://arxiv.org/abs/1611.00336
# https://docs.gpytorch.ai/en/v1.2.1/examples/06_PyTorch_NN_Integration_DKL/
# Deep_Kernel_Learning_DenseNet_CIFAR_Tutorial.html
class DKLClassificationModel(gpytorch.Module):
def __init__(self, feature_extractor, num_dim, grid_bounds=(-10., 10.)):
super(DKLClassificationModel, self).__init__()
self.feature_extractor = feature_extractor
self.gp_layer = GaussianProcessLayer(
num_dim=num_dim, grid_bounds=grid_bounds)
self.grid_bounds = grid_bounds
self.num_dim = num_dim
def forward(self, x):
features = self.feature_extractor(x)
features = gpytorch.utils.grid.scale_to_bounds(features, self.grid_bounds[0], self.grid_bounds[1])
# This next line makes it so that we learn a GP for each feature
features = features.transpose(-1, -2).unsqueeze(-1)
res = self.gp_layer(features)
return res
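# A minimal training-step sketch for the classification model above, assuming a
# gpytorch.likelihoods.SoftmaxLikelihood(num_features=model.num_dim, num_classes=...) and a
# gpytorch.mlls.VariationalELBO(likelihood, model.gp_layer, num_data=...) built elsewhere;
# the helper name and its arguments are illustrative, not part of the library.
def _example_svdkl_training_step(model, likelihood, mll, optimizer, x_batch, y_batch):
    model.train()
    likelihood.train()
    optimizer.zero_grad()
    output = model(x_batch)        # multitask GP output, one task per extracted feature
    loss = -mll(output, y_batch)   # negative variational ELBO
    loss.backward()
    optimizer.step()
    return loss.item()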
class DKLInducingPointGP(gpytorch.models.ApproximateGP):
def __init__(self, n_inducing_points, feature_extractor, batch_size, X_train):
inducing_points = self.get_inducing_points(X_train, n_inducing_points, feature_extractor, batch_size)
variational_distribution = (
gpytorch.variational.CholeskyVariationalDistribution(
inducing_points.size(0)))
variational_strategy = gpytorch.variational.VariationalStrategy(
self, inducing_points, variational_distribution,
learn_inducing_locations=True)
super(DKLInducingPointGP, self).__init__(variational_strategy)
self.feature_extractor_batch_size = batch_size
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def get_inducing_points(self, X_train, n_inducing_points,
feature_extractor, feature_extractor_batch_size):
# Run the feature extractor on GPU when one is available.
if torch.cuda.is_available():
feature_extractor = feature_extractor.cuda()
X_train = X_train.cuda()
n_inducing_points = min(X_train.size(0), n_inducing_points)
n_embeds = min(X_train.size(0), n_inducing_points * 10)
feature_extractor_embeds = []
# Input indices to embed
input_indices = np.random.choice(
np.arange(X_train.size(0)), size=n_embeds, replace=False)
with torch.no_grad():
# Embed every sampled index in batches of feature_extractor_batch_size.
for i in range(0, n_embeds, feature_extractor_batch_size):
batch_indices = input_indices[i:i + feature_extractor_batch_size]
feature_extractor_embeds.append(
feature_extractor(X_train[batch_indices]))
# Move embeddings to CPU before converting to numpy (they may live on GPU).
feature_extractor_embeds = torch.cat(feature_extractor_embeds).cpu().numpy()
# Cluster the embeddings; the cluster centers become the initial inducing points.
km = KMeans(n_clusters=n_inducing_points)
km.fit(feature_extractor_embeds)
inducing_points = torch.from_numpy(km.cluster_centers_)
return inducing_points
class DKLRegressionModel(gpytorch.Module):
def __init__(self, feature_extractor, n_inducing_points, batch_size, X_train):
super(DKLRegressionModel, self).__init__()
self.feature_extractor = feature_extractor
self.gp_layer = DKLInducingPointGP(n_inducing_points, feature_extractor, batch_size, X_train)
def forward(self, x):
features = self.feature_extractor(x)
res = self.gp_layer(features)
return res
| 3,021 |
453 | #include <_ansi.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "trap.h"
_kill (n, m)
{
return TRAP0 (SYS_exit, 0xdead, 0, 0);
}
| 71 |
1,821 | <reponame>hangqiu/pixie
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <vector>
#include "src/carnot/planner/objects/collection_object.h"
#include "src/carnot/planner/objects/dataframe.h"
#include "src/carnot/planner/objects/expr_object.h"
#include "src/carnot/planner/objects/funcobject.h"
#include "src/carnot/planner/objects/qlobject.h"
namespace px {
namespace carnot {
namespace planner {
namespace compiler {
std::string QLObjectTypeString(QLObjectType type) {
return std::string(absl::StripPrefix(absl::AsciiStrToLower(magic_enum::enum_name(type)), "k"));
}
void QLObject::AddSubscriptMethod(std::shared_ptr<FuncObject> func_object) {
DCHECK_EQ(func_object->name(), kSubscriptMethodName);
DCHECK(func_object->arguments() == std::vector<std::string>{"key"})
<< absl::StrJoin(func_object->arguments(), ",");
AddMethod(kSubscriptMethodName, func_object);
}
StatusOr<std::shared_ptr<QLObject>> QLObject::GetAttribute(const pypa::AstPtr& ast,
std::string_view attr) const {
if (HasMethod(attr)) {
return GetMethod(attr);
}
if (!HasNonMethodAttribute(attr)) {
return CreateAstError(ast, "'$0' object has no attribute '$1'", name(), attr);
}
return GetAttributeImpl(ast, attr);
}
Status QLObject::AssignAttribute(std::string_view attr_name, QLObjectPtr object) {
if (!CanAssignAttribute(attr_name)) {
return CreateError("Cannot assign attribute $0 to object of type $1", attr_name, name());
}
attributes_[attr_name] = object;
return Status::OK();
}
StatusOr<QLObjectPtr> QLObject::FromIRNode(IRNode* node, ASTVisitor* ast_visitor) {
if (Match(node, Operator())) {
return Dataframe::Create(static_cast<OperatorIR*>(node), ast_visitor);
} else if (Match(node, Expression())) {
return ExprObject::Create(static_cast<ExpressionIR*>(node), ast_visitor);
} else {
return node->CreateIRNodeError("Could not create QL object from IRNode of type $0",
node->type_string());
}
}
Status QLObject::SetDocString(const std::string& doc_string) {
// TODO(PP-2142) Support non ExprObject nodes to pass strings around so we can
// store it in __doc__.
doc_string_ = doc_string;
return Status::OK();
}
Status QLObject::SetDocString(QLObjectPtr doc_string) {
// TODO(PP-2142) Redo the doc_string requirements so StringIR isn't a requirement.
PL_RETURN_IF_ERROR(AssignAttribute(kDocStringAttributeName, doc_string));
DCHECK(doc_string->HasNode());
DCHECK_EQ(doc_string->node()->type(), IRNodeType::kString);
doc_string_ = static_cast<StringIR*>(doc_string->node())->str();
return Status::OK();
}
} // namespace compiler
} // namespace planner
} // namespace carnot
} // namespace px
| 1,190 |
679 | <filename>main/shell/source/win32/shlxthandler/util/iso8601_converter.cxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_shell.hxx"
#include "internal/iso8601_converter.hxx"
#include "internal/utilities.hxx"
#include <sstream>
#include <iomanip>
//-----------------------------------
/* Converts ISO 8601 conform date/time
representation to the representation
conforming to the current locale
*/
std::wstring iso8601_date_to_local_date(const std::wstring& isoDate )
{
const std::wstring CONST_SPACE(L" ");
::std::wstring ws8601DateTime(isoDate);
if ( ws8601DateTime.length() >= 19 )
{
// fill in the SYSTEMTIME structure;
std::string asDateTime = WStringToString( ws8601DateTime );
SYSTEMTIME DateTime;
DateTime.wYear = ( unsigned short )strtol( asDateTime.substr( 0, 4 ).c_str(), NULL, 10 );
DateTime.wMonth = ( unsigned short )strtol( asDateTime.substr( 5, 2 ).c_str(), NULL, 10 );
DateTime.wDayOfWeek = 0;
DateTime.wDay = ( unsigned short )strtol( asDateTime.substr( 8, 2 ).c_str(), NULL, 10 );
DateTime.wHour = ( unsigned short )strtol( asDateTime.substr( 11,2 ).c_str(), NULL, 10 );
DateTime.wMinute = ( unsigned short )strtol( asDateTime.substr( 14,2 ).c_str(), NULL, 10 );
DateTime.wSecond = ( unsigned short )strtol( asDateTime.substr( 17,2 ).c_str(), NULL, 10 );
DateTime.wMilliseconds = 0;
// get Date info from structure
WCHAR DateBuffer[ MAX_PATH ];
int DateSize = GetDateFormatW(
LOCALE_SYSTEM_DEFAULT,
0,
&DateTime,
NULL,
DateBuffer,
MAX_PATH );
if ( DateSize )
ws8601DateTime.assign(DateBuffer);
else
ws8601DateTime = StringToWString( asDateTime );
// get Time info from structure
WCHAR TimeBuffer[ MAX_PATH ];
int TimeSize = GetTimeFormatW(
LOCALE_SYSTEM_DEFAULT,
0,
&DateTime,
NULL,
TimeBuffer,
MAX_PATH );
if ( TimeSize )
{
ws8601DateTime.append(L" ");
ws8601DateTime.append(TimeBuffer);
}
else
ws8601DateTime = StringToWString( asDateTime );
}
return ws8601DateTime;
}
//------------------------------------
/* Converts ISO 8601 conform duration
representation to the representation
conforming to the current locale
Expect format PTnHnMnS according to
ISO 8601 where n is arbitrary number
of digits
*/
std::wstring iso8601_duration_to_local_duration(const std::wstring& iso8601duration)
{
std::wstring days;
std::wstring hours;
std::wstring minutes;
std::wstring seconds;
std::wstring::const_iterator iter = iso8601duration.begin();
std::wstring::const_iterator iter_end = iso8601duration.end();
std::wstring num;
for (/**/; iter != iter_end; ++iter)
{
if (isdigit(*iter))
{
num += *iter;
}
else
{
if (*iter == L'D' || *iter == L'd')
days = num;
else if (*iter == L'H' || *iter == L'h')
hours = num;
else if (*iter == L'M' || *iter == L'm')
minutes = num;
else if (*iter == L'S' || *iter == L's')
seconds = num;
num.clear();
}
}
if (days.length() > 0)
{
int h = ((_wtoi(days.c_str()) * 24) + _wtoi(hours.c_str()));
wchar_t buff[10];
_itow(h, buff, 10);
hours = buff;
}
#if defined(_MSC_VER) //&& defined(_M_X64)
std::wostringstream oss;
oss << std::setw(2) << std::setfill(wchar_t('0')) << hours << L":" <<
std::setw(2) << std::setfill(wchar_t('0')) << minutes << L":" <<
std::setw(2) << std::setfill(wchar_t('0')) << seconds;
return oss.str();
#elif defined( __MINGW32__ )
#define ADD_AS_PREFILLED( st, out ) \
if ( st.length() == 0 ) \
out += L"00"; \
else if ( st.length() == 1 ) \
out += L"0"; \
out += st;
std::wstring result;
ADD_AS_PREFILLED( hours, result )
result += L":";
ADD_AS_PREFILLED( minutes, result )
result += L":";
ADD_AS_PREFILLED( seconds, result )
return result;
#undef ADD_AS_PREFILLED
/*
#else
std::wostringstream oss;
oss << std::setw(2) << std::setfill('0') << hours << L":" <<
std::setw(2) << std::setfill('0') << minutes << L":" <<
std::setw(2) << std::setfill('0') << seconds;
return oss.str();
*/
#endif
}
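/* Worked example of the conversion above, assuming the default wide-string literals:
   iso8601_duration_to_local_duration(L"PT1H5M30S") yields L"01:05:30", and
   iso8601_duration_to_local_duration(L"P1DT2H") yields L"26:00:00" (days folded into hours). */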
/* vim: set noet sw=4 ts=4: */
| 1,895 |
301 | <reponame>ZeroMagic/Community-Policy<gh_stars>100-1000
{
"allowedVnets": {
"type": "array",
"metadata": {
"displayName": "Allowed VNets",
"description": "Enter list of allowed VNets using a semi-coling ';' seperated list. VNets must be entered using their resource ID. Example: /subscriptions/{SubID}/resourceGroups/resourceGroupName/providers/Microsoft.Network/virtualNetworks/resourceGroupName/virtualNetworkPeerings/vnetName"
},
"defaultvalue": "None"
},
"effect": {
"type": "String",
"metadata": {
"displayName": "Effect",
"description": "Enable or disable the execution of the policy"
},
"allowedValues": [
"Deny",
"Disabled"
],
"defaultValue": "Deny"
}
} | 366 |
488 | <gh_stars>100-1000
public class cave3_rshift0 {
public static void main(String[] args) {
int i = -1 >>> 24;
}
}
| 47 |
718 | <reponame>mikeleber/j2html
package j2html.rendering;
import j2html.Config;
import j2html.utils.Indenter;
import j2html.utils.TextEscaper;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Deque;
/**
* Composes HTML with line breaks and indentation between tags and text.
*
* @param <T> The type of the Appendable to which HTML will be appended.
*/
public class IndentedHtml<T extends Appendable> implements HtmlBuilder<T> {
/**
* Returns an HtmlBuilder that will generate indented HTML using
* Config defaults.
*
* @param out The Appendable to which HTML will be appended.
* @param <T> The type of the Appendable to which HTML will be appended.
* @return An HtmlBuilder for indented HTML.
*/
public static final <T extends Appendable> IndentedHtml<T> into(T out) {
return new IndentedHtml<>(out, Config.defaults());
}
/**
* Returns an HtmlBuilder that will generate indented HTML using
* the given Config.
*
* @param out The Appendable to which HTML will be appended.
* @param config The Config which will specify indentation, text escapement, tag closing, etc.
* @param <T> The type of the Appendable to which HTML will be appended.
* @return An HtmlBuilder for indented HTML.
*/
public static final <T extends Appendable> IndentedHtml<T> into(T out, Config config) {
return new IndentedHtml<>(out, config);
}
/**
* Returns an HtmlBuilder that will generate indented HTML in memory using
* Config defaults.
*
* @return An HtmlBuilder for indented HTML.
*/
public static final IndentedHtml<StringBuilder> inMemory() {
return into(new StringBuilder());
}
/**
* Returns an HtmlBuilder that will generate indented HTML in memory using
* the given Config.
*
* @param config The Config which will specify indentation, text escapement, tag closing, etc.
* @return An HtmlBuilder for indented HTML.
*/
public static final IndentedHtml<StringBuilder> inMemory(Config config) {
return into(new StringBuilder(), config);
}
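    /**
     * A minimal usage sketch of the builder methods above; in ordinary j2html use these
     * calls are made by the tag classes, so this helper is illustrative only and not part
     * of the published API.
     */
    static String exampleIndentedDiv() throws IOException {
        IndentedHtml<StringBuilder> html = IndentedHtml.inMemory();
        html.appendStartTag("div").appendAttribute("class", "note").completeTag();
        html.appendEscapedText("Hello & welcome");
        html.appendEndTag("div");
        // With Config defaults this renders roughly as:
        // <div class="note">
        //     Hello &amp; welcome
        // </div>
        return html.output().toString();
    }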
private final T out;
private final Indenter indenter;
private final TextEscaper textEscaper;
private final TagBuilder enclosingElementAttributes;
private final TagBuilder emptyElementAttributes;
// Dealing with preformatted elements (pre and textarea) requires
// that we know what our parent elements are. To do that we use
// a stack; adding items as start tags are created, and removing them
// as those tags are closed. Determining whether or not we are
// currently rendering into a preformatted element is as simple as
// asking if any tags on the stack match a preformatted element name.
private final Deque<String> trace = new ArrayDeque<>();
private IndentedHtml(T out, Config config) {
this.out = out;
this.indenter = config.indenter();
this.textEscaper = config.textEscaper();
this.enclosingElementAttributes = new IndentedTagBuilder(false);
this.emptyElementAttributes = new IndentedTagBuilder(config.closeEmptyTags());
}
private boolean isContentSelfFormatting() {
return trace.contains("pre") || trace.contains("textarea");
}
private int lvl() {
return trace.size();
}
@Override
public TagBuilder appendStartTag(String name) throws IOException {
if (!isContentSelfFormatting()) {
out.append(indenter.indent(lvl(), ""));
}
trace.push(name);
out.append("<").append(name);
return enclosingElementAttributes;
}
@Override
public HtmlBuilder<T> appendEndTag(String name) throws IOException {
if (!name.equals(trace.peek())) {
throw new RuntimeException("Incorrect element closed: " + name + ". Expected: " + trace.peek());
}
if (!isContentSelfFormatting()) {
trace.pop();
out.append(indenter.indent(lvl(), ""));
} else {
trace.pop();
}
out.append("</").append(name).append(">");
if (!isContentSelfFormatting()) {
out.append("\n");
}
return this;
}
@Override
public TagBuilder appendEmptyTag(String name) throws IOException {
if (!isContentSelfFormatting()) {
out.append(indenter.indent(lvl(), ""));
}
out.append("<").append(name);
return emptyElementAttributes;
}
private void appendLines(String txt) throws IOException {
if (!isContentSelfFormatting()) {
String[] lines = txt.split("\n");
for (String line : lines) {
out.append(indenter.indent(lvl(), line)).append("\n");
}
} else {
out.append(txt);
}
}
@Override
public HtmlBuilder<T> appendEscapedText(String txt) throws IOException {
appendLines(textEscaper.escape(txt));
return this;
}
@Override
public HtmlBuilder<T> appendUnescapedText(String txt) throws IOException {
appendLines(txt);
return this;
}
@Override
public T output() {
return out;
}
@Override
@Deprecated
public HtmlBuilder<T> append(CharSequence csq) throws IOException {
out.append(csq);
return this;
}
@Override
@Deprecated
public HtmlBuilder<T> append(CharSequence csq, int start, int end) throws IOException {
out.append(csq, start, end);
return this;
}
@Override
@Deprecated
public HtmlBuilder<T> append(char c) throws IOException {
out.append(c);
return this;
}
private class IndentedTagBuilder implements TagBuilder {
private final boolean closeTag;
private IndentedTagBuilder(boolean closeTag) {
this.closeTag = closeTag;
}
@Override
public TagBuilder appendAttribute(String name, String value) throws IOException {
out.append(" ")
.append(name)
.append("=\"")
.append(textEscaper.escape(value))
.append("\"");
return this;
}
@Override
public TagBuilder appendBooleanAttribute(String name) throws IOException {
out.append(" ").append(name);
return this;
}
@Override
public HtmlBuilder<T> completeTag() throws IOException {
if (closeTag) {
out.append("/");
}
out.append(">");
if (!isContentSelfFormatting()) {
out.append("\n");
}
return IndentedHtml.this;
}
@Override
@Deprecated
public TagBuilder append(CharSequence csq) throws IOException {
out.append(csq);
return this;
}
@Override
@Deprecated
public TagBuilder append(CharSequence csq, int start, int end) throws IOException {
out.append(csq, start, end);
return this;
}
@Override
@Deprecated
public TagBuilder append(char c) throws IOException {
out.append(c);
return this;
}
}
}
| 2,974 |
190,993 | /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Classes for keeping track of on-device state for TPUs.
#ifndef TENSORFLOW_COMPILER_XRT_XRT_TPU_DEVICE_H_
#define TENSORFLOW_COMPILER_XRT_XRT_TPU_DEVICE_H_
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/stream_executor/tpu/tpu_node_context.h"
namespace tensorflow {
// This accessor is used for XLA TPU. It uses the distributed TPU compilation
// cache infrastructure which it accesses via the TPU_SYSTEM resource manager.
class XRTTpuDeviceAccessor {
public:
static Status GetResourceManager(OpKernelContext* ctx, ResourceMgr** rm);
class ScopedRef {
public:
ScopedRef() {}
~ScopedRef() {}
ScopedRef(const ScopedRef&) = delete;
ScopedRef& operator=(const ScopedRef&) = delete;
// Returns the XLA device properties from the TpuNodeContext object
// protected by this ScopedRef.
xla::Backend* backend() { return node_context_->backend(); }
int device_ordinal() { return ordinal_; }
se::DeviceMemoryAllocator* allocator() {
return backend()->memory_allocator();
}
private:
// XRTTpuDeviceAccessor::InitScopedRef is the only way to initialize
// ScopedRef.
friend class XRTTpuDeviceAccessor;
Status Acquire(int device_ordinal);
Status Acquire(OpKernelContext* ctx);
std::unique_ptr<tpu::TpuNodeContext> node_context_;
int ordinal_ = 0;
};
static Status InitScopedRef(OpKernelContext* ctx, int device_ordinal,
ScopedRef* scoped_ref);
static Status InitScopedRef(OpKernelContext* ctx, ScopedRef* scoped_ref);
};
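// A minimal sketch of how a kernel might use the accessor above, assuming `ctx` is the
// OpKernelContext passed to Compute(); shown as a comment because the concrete kernels
// live elsewhere in the XRT sources.
//
//   XRTTpuDeviceAccessor::ScopedRef ref;
//   OP_REQUIRES_OK(ctx, XRTTpuDeviceAccessor::InitScopedRef(ctx, &ref));
//   xla::Backend* backend = ref.backend();
//   se::DeviceMemoryAllocator* allocator = ref.allocator();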
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_XRT_XRT_TPU_DEVICE_H_
| 816 |
1,414 | <reponame>redscientistlabs/Bizhawk50X-Vanguard<filename>Real-Time Corruptor/BizHawk_RTC/waterbox/libsnes/bsnes/snes/chip/superfx/memory/memory.hpp
unsigned rom_mask; //rom_size - 1
unsigned ram_mask; //ram_size - 1
uint8 bus_read(unsigned addr);
void bus_write(unsigned addr, uint8 data);
uint8 op_read(uint16 addr);
alwaysinline uint8 peekpipe();
alwaysinline uint8 pipe();
void cache_flush();
uint8 cache_mmio_read(uint16 addr);
void cache_mmio_write(uint16 addr, uint8 data);
void memory_reset();
| 183 |
459 | package pocketknife;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.util.Log;
import pocketknife.internal.BundleBinding;
import pocketknife.internal.IntentBinding;
import pocketknife.internal.Memoizer;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import static pocketknife.internal.GeneratedAdapters.ANDROID_PREFIX;
import static pocketknife.internal.GeneratedAdapters.BUNDLE_ADAPTER_SUFFIX;
import static pocketknife.internal.GeneratedAdapters.INTENT_ADAPTER_SUFFIX;
import static pocketknife.internal.GeneratedAdapters.JAVA_PREFIX;
public final class PocketKnife {
private static final String TAG = "PocketKnife";
public static final String COULD_NOT_CREATE_INSTANCE_BUNDLE_ADAPTER_CLASS = "Could not create an instance of the bundle adapter for class ";
public static final String COULD_NOT_CREATE_INSTANCE_INTENT_ADAPTER_CLASS = "Could not create an instance of the intent adapter for class ";
private static boolean debug;
private PocketKnife() {
throw new AssertionError("No instances.");
}
/**
* Control whether debug logging is enabled.
*/
public static void setDebug(boolean debug) {
PocketKnife.debug = debug;
}
/**
* Save annotated fields in the specified {@code target} to the {@link Bundle}.
*
* @param target Target class for field saving.
* @param bundle Bundle to save the field values.
*/
public static <T> void saveInstanceState(T target, Bundle bundle) {
@SuppressWarnings("unchecked")
BundleBinding<T> binding = (BundleBinding<T>) getBundleBinding(target.getClass().getClassLoader(), target.getClass());
if (binding != null) {
binding.saveInstanceState(target, bundle);
}
}
/**
* Restore annotated fields in the specified {@code target} from the {@link Bundle}.
*
* @param target Target class to restore fields
* @param bundle Bundle to restore field values.
*/
public static <T> void restoreInstanceState(T target, Bundle bundle) {
@SuppressWarnings("unchecked")
BundleBinding<T> binding = (BundleBinding<T>) getBundleBinding(target.getClass().getClassLoader(), target.getClass());
if (binding != null) {
binding.restoreInstanceState(target, bundle);
}
}
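    /*
     * A minimal usage sketch of the two methods above from an Activity; the annotation
     * name (@SaveState) is an assumption about the companion annotations, and the field
     * shown is hypothetical.
     *
     *   public class CounterActivity extends Activity {
     *       @SaveState int counter;
     *
     *       @Override protected void onCreate(Bundle savedInstanceState) {
     *           super.onCreate(savedInstanceState);
     *           if (savedInstanceState != null) {
     *               PocketKnife.restoreInstanceState(this, savedInstanceState);
     *           }
     *       }
     *
     *       @Override protected void onSaveInstanceState(Bundle outState) {
     *           super.onSaveInstanceState(outState);
     *           PocketKnife.saveInstanceState(this, outState);
     *       }
     *   }
     */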
/**
* Bind annotated fields in the specified {@link android.app.Fragment} from its arguments.
*
* @param fragment fragment to bind the arguments;
*/
public static void bindArguments(android.app.Fragment fragment) {
bindArguments(fragment, fragment.getArguments());
}
/**
* Bind annotated fields in the specified {@link android.support.v4.app.Fragment} from its arguments.
*
* @param fragment fragment to bind the arguments;
*/
public static void bindArguments(android.support.v4.app.Fragment fragment) {
bindArguments(fragment, fragment.getArguments());
}
/**
* Bind annotated fields in the specified {@code target} from the {@link android.os.Bundle}.
*
* @param target Target object for bind arguments
* @param bundle Bundle containing arguments;
*/
public static <T> void bindArguments(T target, Bundle bundle) {
@SuppressWarnings("unchecked")
BundleBinding<T> binding = (BundleBinding<T>) getBundleBinding(target.getClass().getClassLoader(), target.getClass());
if (binding != null) {
binding.bindArguments(target, bundle);
}
}
/**
* Bind annotated fields in the specified {@link android.app.Activity} from its intent.
*
* @param activity activity to bind the extras.
*/
public static void bindExtras(Activity activity) {
bindExtras(activity, activity.getIntent());
}
/**
* Bind annotated fields in the specified {@code target} from the {@link android.content.Intent}.
*
* @param target Target object to bind the extras.
* @param intent Intent containing the extras.
*/
public static <T> void bindExtras(T target, Intent intent) {
@SuppressWarnings("unchecked")
IntentBinding<T> binding = (IntentBinding<T>) getIntentBinding(target.getClass().getClassLoader(), target.getClass());
if (binding != null) {
binding.bindExtras(target, intent);
}
}
private static BundleBinding<?> getBundleBinding(ClassLoader classLoader, Class<?> cls) {
String clsName = cls.getName();
if (clsName.startsWith(ANDROID_PREFIX) || clsName.startsWith(JAVA_PREFIX)) {
if (debug) {
Log.d(TAG, "MISS: Reached framework class. Abandoning search.");
}
return null;
}
Class<?> adapterClass = loadClass(classLoader, clsName.concat(BUNDLE_ADAPTER_SUFFIX));
if (!adapterClass.equals(Void.class)) {
if (debug) {
Log.d(TAG, "Found loadable bundle adapter for " + clsName);
}
try {
@SuppressWarnings("unchecked")
Constructor<BundleBinding<?>> constructor = (Constructor<BundleBinding<?>>) adapterClass.getConstructor();
return constructor.newInstance();
} catch (NoSuchMethodException e) {
throw new IllegalStateException(
"Couldn't find default constructor in the generated bundle adapter for class "
+ clsName);
} catch (InvocationTargetException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_BUNDLE_ADAPTER_CLASS + clsName, e);
} catch (InstantiationException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_BUNDLE_ADAPTER_CLASS + clsName, e);
} catch (IllegalAccessException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_BUNDLE_ADAPTER_CLASS + clsName, e);
}
}
// Search for Parent Class adapter
if (debug) {
Log.d(TAG, String.format("%s not found. Trying superclass %s", clsName, cls.getSuperclass().getName()));
}
return getBundleBinding(classLoader, cls.getSuperclass());
}
private static IntentBinding<?> getIntentBinding(ClassLoader classLoader, Class cls) {
String clsName = cls.getName();
if (clsName.startsWith(ANDROID_PREFIX) || clsName.startsWith(JAVA_PREFIX)) {
if (debug) {
Log.d(TAG, "MISS: Reached framework class. Abandoning search.");
}
return null;
}
Class<?> adapterClass = loadClass(classLoader, clsName.concat(INTENT_ADAPTER_SUFFIX));
if (!adapterClass.equals(Void.class)) {
if (debug) {
Log.d(TAG, "Found loadable intent adapter for " + clsName);
}
try {
@SuppressWarnings("unchecked")
Constructor<IntentBinding<?>> constructor = (Constructor<IntentBinding<?>>) adapterClass.getConstructor();
return constructor.newInstance();
} catch (NoSuchMethodException e) {
throw new IllegalStateException(
"Couldn't find default constructor in the generated intent adapter for class "
+ clsName);
} catch (InvocationTargetException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_INTENT_ADAPTER_CLASS + clsName, e);
} catch (InstantiationException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_INTENT_ADAPTER_CLASS + clsName, e);
} catch (IllegalAccessException e) {
throw new IllegalStateException(
COULD_NOT_CREATE_INSTANCE_INTENT_ADAPTER_CLASS + clsName, e);
}
}
// Search for Parent Class adapter
if (debug) {
Log.d(TAG, String.format("%s not found. Trying superclass %s", clsName, cls.getSuperclass().getName()));
}
return getIntentBinding(classLoader, cls.getSuperclass());
}
private static Class<?> loadClass(ClassLoader classLoader, String name) {
// a null classloader is the system classloader.
if (classLoader == null) {
if (debug) {
Log.d(TAG, "Class loader is null using system class loader");
}
classLoader = ClassLoader.getSystemClassLoader();
}
return CACHES.get(classLoader).get(name);
}
private static final Memoizer<ClassLoader, Memoizer<String, Class<?>>> CACHES = new Memoizer<ClassLoader, Memoizer<String, Class<?>>>() {
@Override
protected Memoizer<String, Class<?>> create(final ClassLoader classLoader) {
return new Memoizer<String, Class<?>>() {
@Override
protected Class<?> create(String className) {
try {
Class<?> cls = classLoader.loadClass(className);
if (debug) {
Log.d(TAG, "Successfully loaded class " + className);
}
return cls;
} catch (ClassNotFoundException e) {
if (debug) {
Log.d(TAG, "Failed to load class " + className);
}
return Void.class; // Cache the failure (negative case).
}
}
};
}
};
}
| 4,203 |
338 | <reponame>lilesper/SparseVoxelOctree
#include "DeviceCreateInfo.hpp"
#include "Queue.hpp"
#include "Surface.hpp"
#include <memory>
#include <set>
#include <string>
namespace myvk {
void DeviceCreateInfo::Initialize(const std::shared_ptr<PhysicalDevice> &physical_device,
const QueueSelectorFunc &queue_selector_func,
const std::vector<const char *> &extensions, bool use_allocator,
bool use_pipeline_cache) {
m_physical_device_ptr = physical_device;
m_use_allocator = use_allocator;
m_use_pipeline_cache = use_pipeline_cache;
// CHECK EXTENSIONS
m_extensions = extensions;
{
uint32_t extension_count;
vkEnumerateDeviceExtensionProperties(m_physical_device_ptr->GetHandle(), nullptr, &extension_count, nullptr);
std::vector<VkExtensionProperties> extension_properties(extension_count);
vkEnumerateDeviceExtensionProperties(m_physical_device_ptr->GetHandle(), nullptr, &extension_count,
extension_properties.data());
std::set<std::string> extension_set(m_extensions.begin(), m_extensions.end());
for (const VkExtensionProperties &i : extension_properties) {
extension_set.erase(i.extensionName);
}
m_extension_support = extension_set.empty();
}
// PROCESS QUEUES
m_queue_selections.clear();
m_present_queue_selections.clear();
m_queue_support = queue_selector_func(physical_device, &m_queue_selections, &m_present_queue_selections);
generate_queue_creations();
}
void DeviceCreateInfo::generate_queue_creations() {
m_queue_creations.clear();
// transform index_specifiers to queue_indices
std::map<uint32_t, std::set<uint32_t>> queue_sets;
std::map<uint32_t, std::map<uint32_t, uint32_t>> queue_tables;
for (const auto &i : m_present_queue_selections)
queue_sets[i.family].insert(i.index_specifier);
for (const auto &i : m_queue_selections)
queue_sets[i.family].insert(i.index_specifier);
for (const auto &i : queue_sets) {
uint32_t queue_count = m_physical_device_ptr->GetQueueFamilyProperties()[i.first].queueCount;
m_queue_creations[i.first].resize(std::min((uint32_t)i.second.size(), queue_count));
std::map<uint32_t, uint32_t> table;
uint32_t cnt = 0;
for (uint32_t x : i.second)
table[x] = (cnt++) % queue_count;
queue_tables[i.first] = std::move(table);
}
for (const auto &i : m_queue_selections) {
uint32_t queue_index = queue_tables[i.family][i.index_specifier];
m_queue_creations[i.family][queue_index].first.push_back(&i);
}
for (const auto &i : m_present_queue_selections) {
uint32_t queue_index = queue_tables[i.family][i.index_specifier];
m_queue_creations[i.family][queue_index].second.push_back(&i);
}
}
void DeviceCreateInfo::enumerate_device_queue_create_infos(std::vector<VkDeviceQueueCreateInfo> *out_create_infos,
std::vector<float> *out_priorities) const {
out_create_infos->clear();
out_priorities->clear();
if (m_queue_creations.empty())
return;
uint32_t max_queue_count = 0;
for (const auto &i : m_queue_creations) {
if (i.second.size() > max_queue_count)
max_queue_count = i.second.size();
}
out_priorities->resize(max_queue_count, 1.0f);
for (const auto &i : m_queue_creations) {
VkDeviceQueueCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
info.queueFamilyIndex = i.first;
info.queueCount = i.second.size();
info.pQueuePriorities = out_priorities->data();
out_create_infos->push_back(info);
}
}
void DeviceCreateInfo::fetch_queues(const std::shared_ptr<Device> &device) const {
for (const auto &creation : m_queue_creations) {
uint32_t family = creation.first;
for (uint32_t index = 0; index < creation.second.size(); ++index) {
std::shared_ptr<UniqueQueue> unique_queue = UniqueQueue::Create(device, family, index);
for (const QueueSelection *selection : creation.second[index].first)
(*selection->target) = Queue::Create(unique_queue);
for (const PresentQueueSelection *selection : creation.second[index].second)
(*selection->target) = PresentQueue::Create(unique_queue, selection->surface);
}
}
}
} // namespace myvk
| 1,700 |
414 | <filename>ios-sealtalk/RCloudMessage/Sections/Contact/AddFriend/View/RCDSelectAddressBookCell.h
//
// RCDSelectAddressBookCell.h
// SealTalk
//
// Created by 孙浩 on 2019/7/12.
// Copyright © 2019 RongCloud. All rights reserved.
//
#import "RCDTableViewCell.h"
#import "RCDContactsInfo.h"
NS_ASSUME_NONNULL_BEGIN
typedef NS_ENUM(NSInteger, RCDSelectedStatus) {
RCDSelectedStatusUnSelected,
RCDSelectedStatusSelected,
};
@interface RCDSelectAddressBookCell : RCDTableViewCell
@property (nonatomic, assign) RCDSelectedStatus selectStatus;
- (void)setModel:(RCDContactsInfo *)model;
@end
NS_ASSUME_NONNULL_END
| 238 |
415 | <filename>pkg/keyidentifier/testdata/specs/XCRBWoAGuKurE3jr.json
{
"Comment": "comment -- rsa/1024 by putty aCgsTB91FXgEdIfC",
"FingerprintSHA256": "9RxBvOhUGHJmjCnASxd8HvyVz1jST7Mwm5mQKDODXHI",
"FingerprintMD5": "b6:39:39:e9:c9:4b:c7:37:6d:62:f7:ec:55:3a:4a:ac",
"Type": "rsa",
"Bits": 1024,
"Encrypted": false,
"command": "puttygen --random-device /dev/urandom -t rsa -b 1024 -o specs/XCRBWoAGuKurE3jr -O private-sshcom --new-passphrase /dev/fd/63 -C comment -- rsa/1024 by putty aCgsTB91FXgEdIfC",
"Format": "sshcom",
"Source": "putty"
}
| 296 |
1,091 | /*
* Copyright 2015-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.net.driver;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.junit.Test;
import java.util.ArrayList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.onosproject.net.driver.DefaultDriverDataTest.DEVICE_ID;
public class DefaultDriverTest {
public static final String MFR = "mfr";
public static final String HW = "hw";
public static final String SW = "sw";
public static final String KEY = "key";
public static final String VALUE = "value";
public static final String ROOT = "rootDriver";
public static final String CHILD = "childDriver";
public static final String GRAND_CHILD = "grandChildDriver";
@Test
public void basics() {
DefaultDriver ddp = new DefaultDriver("foo.base", new ArrayList<>(), "Circus", "lux", "1.2a",
ImmutableMap.of(TestBehaviour.class,
TestBehaviourImpl.class,
TestBehaviourTwo.class,
TestBehaviourTwoImpl.class),
ImmutableMap.of("foo", "bar"));
DefaultDriver ddc = new DefaultDriver("foo.bar", ImmutableList.of(ddp), "Circus", "lux", "1.2a",
ImmutableMap.of(),
ImmutableMap.of("foo", "bar"));
assertEquals("incorrect name", "foo.bar", ddc.name());
assertEquals("incorrect parent", ddp, ddc.parent());
assertEquals("incorrect empty parent", ImmutableList.of(), ddp.parents());
assertEquals("incorrect mfr", "Circus", ddc.manufacturer());
assertEquals("incorrect hw", "lux", ddc.hwVersion());
assertEquals("incorrect sw", "1.2a", ddc.swVersion());
assertEquals("incorrect behaviour count", 2, ddp.behaviours().size());
assertEquals("incorrect behaviour count", 0, ddc.behaviours().size());
assertTrue("incorrect behaviour", ddc.hasBehaviour(TestBehaviour.class));
Behaviour b1 = ddc.createBehaviour(new DefaultDriverData(ddc, DEVICE_ID), TestBehaviour.class);
assertTrue("incorrect behaviour class", b1 instanceof TestBehaviourImpl);
Behaviour b2 = ddc.createBehaviour(new DefaultDriverHandler(new DefaultDriverData(ddc, DEVICE_ID)),
TestBehaviourTwo.class);
assertTrue("incorrect behaviour class", b2 instanceof TestBehaviourTwoImpl);
assertEquals("incorrect property count", 1, ddc.properties().size());
assertEquals("incorrect key count", 1, ddc.keys().size());
assertEquals("incorrect property", "bar", ddc.value("foo"));
assertTrue("incorrect toString", ddc.toString().contains("lux"));
}
@Test
public void merge() {
DefaultDriver one = new DefaultDriver("foo.bar", new ArrayList<>(), "Circus", "lux", "1.2a",
ImmutableMap.of(TestBehaviour.class,
TestBehaviourImpl.class),
ImmutableMap.of("foo", "bar"));
Driver ddc =
one.merge(new DefaultDriver("foo.bar", new ArrayList<>(), "", "", "",
ImmutableMap.of(TestBehaviourTwo.class,
TestBehaviourTwoImpl.class),
ImmutableMap.of("goo", "wee")));
assertEquals("incorrect name", "foo.bar", ddc.name());
assertEquals("incorrect mfr", "Circus", ddc.manufacturer());
assertEquals("incorrect hw", "lux", ddc.hwVersion());
assertEquals("incorrect sw", "1.2a", ddc.swVersion());
assertEquals("incorrect behaviour count", 2, ddc.behaviours().size());
assertTrue("incorrect behaviour", ddc.hasBehaviour(TestBehaviourTwo.class));
assertEquals("incorrect property count", 2, ddc.properties().size());
assertEquals("incorrect key count", 2, ddc.keys().size());
assertEquals("incorrect property", "wee", ddc.value("goo"));
assertTrue("incorrect toString", ddc.toString().contains("Circus"));
}
@Test
public void testGetProperty() throws Exception {
DefaultDriver root = new DefaultDriver(ROOT, Lists.newArrayList(), MFR, HW, SW,
ImmutableMap.of(), ImmutableMap.of());
DefaultDriver child = new DefaultDriver(CHILD, Lists.newArrayList(root), MFR, HW, SW,
ImmutableMap.of(), ImmutableMap.of(KEY, VALUE));
DefaultDriver grandChild = new DefaultDriver(GRAND_CHILD, Lists.newArrayList(child),
MFR, HW, SW, ImmutableMap.of(), ImmutableMap.of());
assertNull(root.getProperty(KEY));
assertEquals(VALUE, child.getProperty(KEY));
assertEquals(VALUE, grandChild.getProperty(KEY));
}
}
| 2,518 |
475 | from vedastr.utils import build_from_cfg
from .registry import METRICS
def build_metric(cfg, default_args=None):
metric = build_from_cfg(cfg, METRICS, default_args)
return metric
| 68 |
746 | package org.protege.editor.core.editorkit;
import org.protege.editor.core.editorkit.plugin.EditorKitHook;
import org.protege.editor.core.editorkit.plugin.EditorKitHookPlugin;
import org.protege.editor.core.editorkit.plugin.EditorKitHookPluginLoader;
/**
* <NAME>
* Stanford Center for Biomedical Informatics Research
* 13 Sep 16
*/
public class Initializers {
/**
* Instantiate, initialise and install EditorKitHook plugins for the specified EditorKit
* @param editorKit The EditorKit. Not {@code null}.
*/
public static void loadEditorKitHooks(EditorKit editorKit) {
if(editorKit == null) {
throw new RuntimeException("EditorKit must not be null");
}
for (EditorKitHookPlugin editorKitHookPlugin : new EditorKitHookPluginLoader(editorKit).getPlugins()) {
try {
EditorKitHook instance = editorKitHookPlugin.newInstance();
instance.initialise();
editorKit.put(editorKitHookPlugin.getId(), instance);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
| 450 |
3,065 | package com.github.mzule.activityrouter.module;
import com.github.mzule.activityrouter.annotation.Module;
/**
* Created by CaoDongping on 30/10/2016.
*/
@Module("sdk")
public class SdkModule {
}
| 72 |
921 | <filename>src/sqlancer/mariadb/oracle/MariaDBNoRECOracle.java
package sqlancer.mariadb.oracle;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import sqlancer.IgnoreMeException;
import sqlancer.common.oracle.NoRECBase;
import sqlancer.common.oracle.TestOracle;
import sqlancer.common.query.SQLQueryAdapter;
import sqlancer.common.query.SQLancerResultSet;
import sqlancer.mariadb.MariaDBProvider.MariaDBGlobalState;
import sqlancer.mariadb.MariaDBSchema;
import sqlancer.mariadb.MariaDBSchema.MariaDBColumn;
import sqlancer.mariadb.MariaDBSchema.MariaDBDataType;
import sqlancer.mariadb.MariaDBSchema.MariaDBTable;
import sqlancer.mariadb.ast.MariaDBAggregate;
import sqlancer.mariadb.ast.MariaDBAggregate.MariaDBAggregateFunction;
import sqlancer.mariadb.ast.MariaDBColumnName;
import sqlancer.mariadb.ast.MariaDBExpression;
import sqlancer.mariadb.ast.MariaDBPostfixUnaryOperation;
import sqlancer.mariadb.ast.MariaDBPostfixUnaryOperation.MariaDBPostfixUnaryOperator;
import sqlancer.mariadb.ast.MariaDBSelectStatement;
import sqlancer.mariadb.ast.MariaDBSelectStatement.MariaDBSelectType;
import sqlancer.mariadb.ast.MariaDBText;
import sqlancer.mariadb.ast.MariaDBVisitor;
import sqlancer.mariadb.gen.MariaDBExpressionGenerator;
public class MariaDBNoRECOracle extends NoRECBase<MariaDBGlobalState> implements TestOracle {
private final MariaDBSchema s;
private static final int NOT_FOUND = -1;
public MariaDBNoRECOracle(MariaDBGlobalState globalState) {
super(globalState);
this.s = globalState.getSchema();
errors.add("is out of range");
// regex
errors.add("unmatched parentheses");
errors.add("nothing to repeat at offset");
errors.add("missing )");
errors.add("missing terminating ]");
errors.add("range out of order in character class");
errors.add("unrecognized character after ");
errors.add("Got error '(*VERB) not recognized or malformed");
errors.add("must be followed by");
errors.add("malformed number or name after");
errors.add("digit expected after");
}
@Override
public void check() throws SQLException {
MariaDBTable randomTable = s.getRandomTable();
List<MariaDBColumn> columns = randomTable.getColumns();
MariaDBExpressionGenerator gen = new MariaDBExpressionGenerator(state.getRandomly()).setColumns(columns)
.setCon(con).setState(state.getState());
MariaDBExpression randomWhereCondition = gen.getRandomExpression();
List<MariaDBExpression> groupBys = Collections.emptyList(); // getRandomExpressions(columns);
int optimizedCount = getOptimizedQuery(randomTable, randomWhereCondition, groupBys);
int unoptimizedCount = getUnoptimizedQuery(randomTable, randomWhereCondition, groupBys);
if (optimizedCount == NOT_FOUND || unoptimizedCount == NOT_FOUND) {
throw new IgnoreMeException();
}
if (optimizedCount != unoptimizedCount) {
state.getState().getLocalState().log(optimizedQueryString + ";\n" + unoptimizedQueryString + ";");
throw new AssertionError(optimizedCount + " " + unoptimizedCount);
}
}
private int getUnoptimizedQuery(MariaDBTable randomTable, MariaDBExpression randomWhereCondition,
List<MariaDBExpression> groupBys) throws SQLException {
MariaDBSelectStatement select = new MariaDBSelectStatement();
select.setGroupByClause(groupBys);
MariaDBPostfixUnaryOperation isTrue = new MariaDBPostfixUnaryOperation(MariaDBPostfixUnaryOperator.IS_TRUE,
randomWhereCondition);
MariaDBText asText = new MariaDBText(isTrue, " as count", false);
select.setFetchColumns(Arrays.asList(asText));
select.setFromTables(Arrays.asList(randomTable));
select.setSelectType(MariaDBSelectType.ALL);
int secondCount = 0;
unoptimizedQueryString = "SELECT SUM(count) FROM (" + MariaDBVisitor.asString(select) + ") as asdf";
SQLQueryAdapter q = new SQLQueryAdapter(unoptimizedQueryString, errors);
try (SQLancerResultSet rs = q.executeAndGet(state)) {
if (rs == null) {
return NOT_FOUND;
} else {
while (rs.next()) {
secondCount = rs.getInt(1);
}
}
}
return secondCount;
}
private int getOptimizedQuery(MariaDBTable randomTable, MariaDBExpression randomWhereCondition,
List<MariaDBExpression> groupBys) throws SQLException {
MariaDBSelectStatement select = new MariaDBSelectStatement();
select.setGroupByClause(groupBys);
MariaDBAggregate aggr = new MariaDBAggregate(
new MariaDBColumnName(new MariaDBColumn("*", MariaDBDataType.INT, false, 0)),
MariaDBAggregateFunction.COUNT);
select.setFetchColumns(Arrays.asList(aggr));
select.setFromTables(Arrays.asList(randomTable));
select.setWhereClause(randomWhereCondition);
select.setSelectType(MariaDBSelectType.ALL);
int firstCount;
optimizedQueryString = MariaDBVisitor.asString(select);
SQLQueryAdapter q = new SQLQueryAdapter(optimizedQueryString, errors);
try (SQLancerResultSet rs = q.executeAndGet(state)) {
if (rs == null) {
firstCount = NOT_FOUND;
} else {
rs.next();
firstCount = rs.getInt(1);
}
} catch (Exception e) {
throw new AssertionError(optimizedQueryString, e);
}
return firstCount;
}
}
| 2,269 |
317 | //========================================================================================
//
// $File: //ai_stream/rel_23_0/devtech/sdk/public/samplecode/common/win/pragma.h $
//
// $Revision: #1 $
//
// Copyright 1987 Adobe Systems Incorporated. All rights reserved.
//
// NOTICE: Adobe permits you to use, modify, and distribute this file in accordance
// with the terms of the Adobe license agreement accompanying it. If you have received
// this file from a source other than Adobe, then your use, modification, or
// distribution of it requires the prior written permission of Adobe.
//
//========================================================================================
#pragma warning (disable: 4290) // C++ exception specification ignored except
// to indicate a function is not __declspec(nothrow)
#pragma warning (disable: 4800) // Ignore "'AIBool8' : forcing value to bool 'true' or
// 'false' (performance warning)" warning
| 261 |
2,151 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
#include <tuple>
#include "ipc/ipc_test_sink.h"
#include "ppapi/c/dev/ppp_class_deprecated.h"
#include "ppapi/proxy/plugin_var_tracker.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/ppapi_proxy_test.h"
#include "ppapi/proxy/proxy_object_var.h"
#include "ppapi/shared_impl/proxy_lock.h"
namespace ppapi {
namespace proxy {
namespace {
PP_Var MakeObject(int32_t object_id) {
PP_Var ret;
ret.type = PP_VARTYPE_OBJECT;
ret.value.as_id = object_id;
return ret;
}
// A Deallocate() function for PPP_Class that just increments the integer
// referenced by the pointer so we know how often Deallocate was called.
void MarkOnDeallocate(void* object) {
(*static_cast<int*>(object))++;
}
// A class that just implements MarkOnDeallocate on destruction.
PPP_Class_Deprecated mark_on_deallocate_class = {
NULL, // HasProperty,
NULL, // HasMethod,
NULL, // GetProperty,
NULL, // GetAllPropertyNames,
NULL, // SetProperty,
NULL, // RemoveProperty,
NULL, // Call,
NULL, // Construct,
&MarkOnDeallocate
};
} // namespace
class PluginVarTrackerTest : public PluginProxyTest {
public:
PluginVarTrackerTest() {}
protected:
// Asserts that there is a unique "release object" IPC message in the test
// sink. This will return the var ID from the message or -1 if none found.
int32_t GetObjectIDForUniqueReleaseObject() {
const IPC::Message* release_msg = sink().GetUniqueMessageMatching(
PpapiHostMsg_PPBVar_ReleaseObject::ID);
if (!release_msg)
return -1;
std::tuple<int64_t> id;
PpapiHostMsg_PPBVar_ReleaseObject::Read(release_msg, &id);
return std::get<0>(id);
}
};
TEST_F(PluginVarTrackerTest, GetHostObject) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
// Round-trip through the tracker to make sure the host object comes out the
// other end.
PP_Var plugin_object = var_tracker().ReceiveObjectPassRef(
host_object, plugin_dispatcher());
PP_Var host_object2 = var_tracker().GetHostObject(plugin_object);
EXPECT_EQ(PP_VARTYPE_OBJECT, host_object2.type);
EXPECT_EQ(host_object.value.as_id, host_object2.value.as_id);
var_tracker().ReleaseVar(plugin_object);
}
TEST_F(PluginVarTrackerTest, ReceiveObjectPassRef) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
// Receive the object, we should have one ref and no messages.
PP_Var plugin_object = var_tracker().ReceiveObjectPassRef(
host_object, plugin_dispatcher());
EXPECT_EQ(0u, sink().message_count());
EXPECT_EQ(1, var_tracker().GetRefCountForObject(plugin_object));
EXPECT_EQ(0,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_object));
// Receive the same object again, we should get the same plugin ID out.
PP_Var plugin_object2 = var_tracker().ReceiveObjectPassRef(
host_object, plugin_dispatcher());
EXPECT_EQ(plugin_object.value.as_id, plugin_object2.value.as_id);
EXPECT_EQ(2, var_tracker().GetRefCountForObject(plugin_object));
EXPECT_EQ(0,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_object));
// It should have sent one message to decrement the refcount in the host.
// This is because it only maintains one host refcount for all references
// in the plugin, but the host just sent the second one.
EXPECT_EQ(host_object.value.as_id, GetObjectIDForUniqueReleaseObject());
sink().ClearMessages();
// Release the object, one ref at a time. The second release should free
// the tracking data and send a release message to the browser.
var_tracker().ReleaseVar(plugin_object);
EXPECT_EQ(1, var_tracker().GetRefCountForObject(plugin_object));
var_tracker().ReleaseVar(plugin_object);
EXPECT_EQ(-1, var_tracker().GetRefCountForObject(plugin_object));
EXPECT_EQ(host_object.value.as_id, GetObjectIDForUniqueReleaseObject());
}
// Tests freeing objects that have both refcounts and "tracked with no ref".
TEST_F(PluginVarTrackerTest, FreeTrackedAndReferencedObject) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
// Phase one: First receive via a "pass ref", then a tracked with no ref.
PP_Var plugin_var = var_tracker().ReceiveObjectPassRef(
host_object, plugin_dispatcher());
PP_Var plugin_var2 = var_tracker().TrackObjectWithNoReference(
host_object, plugin_dispatcher());
EXPECT_EQ(plugin_var.value.as_id, plugin_var2.value.as_id);
EXPECT_EQ(1, var_tracker().GetRefCountForObject(plugin_var));
EXPECT_EQ(1,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
// Free via the refcount, this should release the object to the browser but
// maintain the tracked object.
var_tracker().ReleaseVar(plugin_var);
EXPECT_EQ(0, var_tracker().GetRefCountForObject(plugin_var));
EXPECT_EQ(1u, sink().message_count());
EXPECT_EQ(host_object.value.as_id, GetObjectIDForUniqueReleaseObject());
// Now free via the tracked object, this should free it.
var_tracker().StopTrackingObjectWithNoReference(plugin_var);
EXPECT_EQ(-1, var_tracker().GetRefCountForObject(plugin_var));
// Phase two: Receive via a tracked, then get an addref.
sink().ClearMessages();
plugin_var = var_tracker().TrackObjectWithNoReference(
host_object, plugin_dispatcher());
plugin_var2 = var_tracker().ReceiveObjectPassRef(
host_object, plugin_dispatcher());
EXPECT_EQ(plugin_var.value.as_id, plugin_var2.value.as_id);
EXPECT_EQ(1, var_tracker().GetRefCountForObject(plugin_var));
EXPECT_EQ(1,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
// Free via the tracked object, this should have no effect.
var_tracker().StopTrackingObjectWithNoReference(plugin_var);
EXPECT_EQ(0,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
EXPECT_EQ(0u, sink().message_count());
// Now free via the refcount, this should delete it.
var_tracker().ReleaseVar(plugin_var);
EXPECT_EQ(-1, var_tracker().GetRefCountForObject(plugin_var));
EXPECT_EQ(host_object.value.as_id, GetObjectIDForUniqueReleaseObject());
}
TEST_F(PluginVarTrackerTest, RecursiveTrackWithNoRef) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
// Receive a tracked object twice.
PP_Var plugin_var = var_tracker().TrackObjectWithNoReference(
host_object, plugin_dispatcher());
EXPECT_EQ(1,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
PP_Var plugin_var2 = var_tracker().TrackObjectWithNoReference(
host_object, plugin_dispatcher());
EXPECT_EQ(plugin_var.value.as_id, plugin_var2.value.as_id);
EXPECT_EQ(0, var_tracker().GetRefCountForObject(plugin_var));
EXPECT_EQ(2,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
// Now release those tracked items, the reference should be freed.
var_tracker().StopTrackingObjectWithNoReference(plugin_var);
EXPECT_EQ(1,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
var_tracker().StopTrackingObjectWithNoReference(plugin_var);
EXPECT_EQ(-1,
var_tracker().GetTrackedWithNoReferenceCountForObject(plugin_var));
}
// Tests that objects implemented by the plugin that have no references by
// the plugin get their Deallocate function called on destruction.
TEST_F(PluginVarTrackerTest, PluginObjectInstanceDeleted) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
PP_Instance pp_instance = 0x12345;
int deallocate_called = 0;
void* user_data = &deallocate_called;
// Make a var with one reference.
scoped_refptr<ProxyObjectVar> object(
new ProxyObjectVar(plugin_dispatcher(), host_object.value.as_id));
PP_Var plugin_var = MakeObject(var_tracker().AddVar(object.get()));
var_tracker().PluginImplementedObjectCreated(
pp_instance, plugin_var, &mark_on_deallocate_class, user_data);
// Release the plugin ref to the var. WebKit hasn't called destroy so
// we won't get a destroy call.
object = NULL;
var_tracker().ReleaseVar(plugin_var);
EXPECT_EQ(0, deallocate_called);
// Synthesize an instance destruction; this should call Deallocate.
var_tracker().DidDeleteInstance(pp_instance);
EXPECT_EQ(1, deallocate_called);
}
// Tests what happens when a plugin keeps a ref to a plugin-implemented
// object var longer than the instance. We should not call the destructor until
// the plugin releases its last ref.
TEST_F(PluginVarTrackerTest, PluginObjectLeaked) {
ProxyAutoLock lock;
PP_Var host_object = MakeObject(12345);
PP_Instance pp_instance = 0x12345;
int deallocate_called = 0;
void* user_data = &deallocate_called;
// Make a var with one reference.
scoped_refptr<ProxyObjectVar> object(
new ProxyObjectVar(plugin_dispatcher(), host_object.value.as_id));
PP_Var plugin_var = MakeObject(var_tracker().AddVar(object.get()));
var_tracker().PluginImplementedObjectCreated(
pp_instance, plugin_var, &mark_on_deallocate_class, user_data);
// Destroy the instance. This should not call deallocate since the plugin
// still has a ref.
var_tracker().DidDeleteInstance(pp_instance);
EXPECT_EQ(0, deallocate_called);
// Release the plugin ref to the var. Since the instance is gone this should
// call deallocate.
object = NULL;
var_tracker().ReleaseVar(plugin_var);
EXPECT_EQ(1, deallocate_called);
}
} // namespace proxy
} // namespace ppapi
| 3,277 |