max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
572 | // (C) Copyright <NAME> and <NAME> 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Repository at: https://github.com/djeedjay/DebugViewPP/
#include "stdafx.h"
#include "Win32/Win32Lib.h"
#include "DebugView++Lib/DBWinBuffer.h"
namespace fusion {
namespace debugviewpp {
// this method is used to prevent acquiring the global DBWIN_BUFFER on XP, which will otherwise popup a MessageBox with a tip to 'Run As Administator'
// however, as there are no 'global' messages there, this does not apply to WindowsXP
// Reports whether the OS major version is greater than 5, i.e. Windows
// Vista / Server 2008 or newer. Used to decide whether acquiring the global
// DBWIN_BUFFER is applicable (it is not on Windows XP, see note above).
bool IsWindowsVistaOrGreater()
{
    // consider using ::AtlIsOldWindows? needs to be tested on XP
    OSVERSIONINFO osvi = {};
    // dwOSVersionInfoSize must be set before calling GetVersionEx.
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
    // http://stackoverflow.com/questions/27246562/how-to-get-the-os-version-in-win8-1-as-getversion-getversionex-are-deprecated
    // it looks like we can safely suppress this warning
#pragma warning(suppress : 4996)
    GetVersionEx(&osvi);
    // NOTE(review): on Windows 8.1+ GetVersionEx may report a capped version
    // for unmanifested apps, but the result is still > 5 on any modern OS.
    return (osvi.dwMajorVersion > 5);
}
bool IsDBWinViewerActive()
{
Win32::Handle hMap(::OpenFileMapping(FILE_MAP_READ, 0, L"DBWIN_BUFFER"));
return hMap != nullptr;
}
// Returns true when this process is allowed to create the global
// "Global\DBWIN_BUFFER" section (typically requires elevated rights /
// SeCreateGlobalPrivilege).
bool HasGlobalDBWinReaderRights()
{
    // Bug fix: a pagefile-backed section must be created with
    // INVALID_HANDLE_VALUE as the hFile argument; NULL/nullptr is not a
    // valid file handle and makes CreateFileMapping fail with
    // ERROR_INVALID_HANDLE regardless of the caller's rights.
    Win32::Handle hMap(::CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0, sizeof(DbWinBuffer), L"Global\\DBWIN_BUFFER"));
    return hMap != nullptr;
}
} // namespace debugviewpp
} // namespace fusion
| 510 |
555 | {
"id": "<KEY>",
"livemode": false,
"object": "checkout.session"
}
| 32 |
373 | <filename>Platform/ARM/SgiPkg/Library/NorFlashLib/StandaloneMmNorFlashLib.c
/** @file
* NOR flash platform library to be used in StandaloneMM context
*
* This file provides platform callbacks for the NOR flash module that executes
* in the StandaloneMM context. The third NOR flash instance of 64MB size on the
* reference design platform is assigned to be used in the StandaloneMM context.
*
* Copyright (c) 2021, ARM Ltd. All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
#include <Library/DebugLib.h>
#include <Library/IoLib.h>
#include <Library/NorFlashPlatformLib.h>
#include <PiMm.h>
#include <SgiPlatform.h>
//
// 64MB NOR flash connected to CS2 is assigned to be used in StandaloneMM
// context.
//
STATIC NOR_FLASH_DESCRIPTION mNorFlashDevices[] = {
  {
    // NOR-Flash2 assigned for secure storage.
    FixedPcdGet64 (PcdSmcCs2Base),    // Device base address (CS2)
    FixedPcdGet64 (PcdSmcCs2Base),    // Region base address (same as device base)
    SIZE_256KB * 256,                 // Total size: 256 blocks x 256KB = 64MB
    SIZE_256KB,                       // Block (erase unit) size
  },
};
/** Allow access to NOR flash

  On the reference design platforms, the access to NOR flash has to be
  explicitly permitted by writing to the FLASH_RWEN bit of the SYSPH_SYS_REG
  register.

  @retval EFI_SUCCESS  Initialization required to access NOR flash is complete.
**/
EFI_STATUS
NorFlashPlatformInitialization (
  VOID
  )
{
  UINT64 SysRegFlash;

  // Address of the system peripheral register that gates flash access.
  SysRegFlash = FixedPcdGet64 (PcdSysPeriphSysRegBase) + SGI_SYSPH_SYS_REG_FLASH;
  // Set the FLASH_RWEN bit to enable read/write access to the NOR flash.
  MmioOr32 (SysRegFlash, SGI_SYSPH_SYS_REG_FLASH_RWEN);
  return EFI_SUCCESS;
}
/** Returns the list of available NOR flash devices

  For the StandaloneMM execution context, return the list of available NOR
  flash devices that are available for use.

  @param[out] NorFlashDevices    Pointer to array of NOR flash devices.
  @param[out] Count              Number of elements in the NOR flash devices
                                 array.

  @retval EFI_SUCCESS            Valid set of NOR flash devices is returned.
  @retval EFI_INVALID_PARAMETER  Pointers to NOR flash devices and/or count is
                                 invalid.
**/
EFI_STATUS
NorFlashPlatformGetDevices (
  OUT NOR_FLASH_DESCRIPTION   **NorFlashDevices,
  OUT UINT32                  *Count
  )
{
  if ((NorFlashDevices == NULL) || (Count == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  *NorFlashDevices = mNorFlashDevices;
  *Count = ARRAY_SIZE (mNorFlashDevices);
  return EFI_SUCCESS;
}
| 974 |
335 | {
"word": "Spermatogenesis",
"definitions": [
"The production or development of mature spermatozoa."
],
"parts-of-speech": "Noun"
} | 65 |
1,442 | #include "abscissa_title_cell.h"
#include <apps/shared/hideable_even_odd_editable_text_cell.h>
using namespace Escher;
namespace Graph {
// Paints the message text, then overlays the left separator band (when
// enabled) using the same hide color as the editable cells so the columns
// line up visually.
void AbscissaTitleCell::drawRect(KDContext * ctx, KDRect rect) const {
    EvenOddMessageTextCell::drawRect(ctx, rect);
    // Draw the separator
    if (m_separatorLeft) {
        KDRect r = separatorRect(bounds());
        ctx->fillRect(r, Shared::HideableEvenOddEditableTextCell::hideColor());
    }
}
// Lays out the text view in the area remaining after the separator strip
// has been subtracted from the cell bounds.
void AbscissaTitleCell::layoutSubviews(bool force) {
    m_messageTextView.setFrame(rectWithoutSeparator(bounds()), force);
}
// Hook called after the separator flag changes: request a redraw so the
// separator band appears/disappears immediately.
void AbscissaTitleCell::didSetSeparator() {
    reloadCell();
}
}
| 227 |
5,249 | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import Image
import numpy as np
# Returns a numpy buffer of shape (num_images, 1, 28, 28)
def load_mnist_data(filepath):
    """Load an IDX3 image file (MNIST format) and return normalized images.

    Args:
        filepath: Path to an IDX3 image file (magic number 2051).

    Returns:
        A contiguous float32 array of shape (num_images, 1, height, width)
        with pixel values scaled to the range [0, 1].
    """
    with open(filepath, "rb") as f:
        # np.fromstring is deprecated (and removed for binary input in new
        # NumPy releases); np.frombuffer is the supported equivalent.
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
    # Make sure the magic number is what we expect
    assert raw_buf[0:4].view(">i4")[0] == 2051
    num_images = raw_buf[4:8].view(">i4")[0]
    image_c = 1
    image_h = raw_buf[8:12].view(">i4")[0]
    image_w = raw_buf[12:16].view(">i4")[0]
    # Need to scale all values to the range of [0, 1]
    return np.ascontiguousarray((raw_buf[16:] / 255.0).astype(np.float32).reshape(num_images, image_c, image_h, image_w))
# Returns a numpy buffer of shape (num_images)
def load_mnist_labels(filepath):
    """Load an IDX1 label file (MNIST format, magic 2049).

    Args:
        filepath: Path to an IDX1 label file.

    Returns:
        A contiguous int32 array of shape (num_labels,).
    """
    with open(filepath, "rb") as f:
        # np.fromstring is deprecated for binary input; use np.frombuffer.
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
    # Make sure the magic number is what we expect
    assert raw_buf[0:4].view(">i4")[0] == 2049
    num_labels = raw_buf[4:8].view(">i4")[0]
    return np.ascontiguousarray(raw_buf[8:].astype(np.int32).reshape(num_labels))
class MNISTEntropyCalibrator(trt.IInt8EntropyCalibrator2):
    """INT8 entropy calibrator that feeds MNIST batches to TensorRT.

    The whole training set is loaded into host memory up front; batches are
    served to TensorRT one at a time through a single reusable device
    allocation. A calibration cache file is read/written to skip repeated
    calibration runs.
    """

    def __init__(self, training_data, cache_file, batch_size=64):
        # Whenever you specify a custom constructor for a TensorRT class,
        # you MUST call the constructor of the parent explicitly.
        trt.IInt8EntropyCalibrator2.__init__(self)

        # Path of the calibration cache (read in read_calibration_cache,
        # written in write_calibration_cache).
        self.cache_file = cache_file

        # Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
        self.data = load_mnist_data(training_data)
        self.batch_size = batch_size
        self.current_index = 0

        # Allocate enough memory for a whole batch.
        self.device_input = cuda.mem_alloc(self.data[0].nbytes * self.batch_size)

    def get_batch_size(self):
        # Batch size TensorRT should expect from each get_batch() call.
        return self.batch_size

    # TensorRT passes along the names of the engine bindings to the get_batch function.
    # You don't necessarily have to use them, but they can be useful to understand the order of
    # the inputs. The bindings list is expected to have the same ordering as 'names'.
    def get_batch(self, names):
        # Returning None signals TensorRT that calibration data is exhausted.
        if self.current_index + self.batch_size > self.data.shape[0]:
            return None

        current_batch = int(self.current_index / self.batch_size)
        if current_batch % 10 == 0:
            print("Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))

        # Flatten the next slice of images and copy it into the persistent
        # device buffer allocated in __init__.
        batch = self.data[self.current_index:self.current_index + self.batch_size].ravel()
        cuda.memcpy_htod(self.device_input, batch)
        self.current_index += self.batch_size
        return [self.device_input]

    def read_calibration_cache(self):
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()

    def write_calibration_cache(self, cache):
        # Persist the calibration table so subsequent engine builds can
        # reuse it via read_calibration_cache.
        with open(self.cache_file, "wb") as f:
            f.write(cache)
| 1,426 |
785 | <gh_stars>100-1000
/*
* Copyright © 2018 Apple Inc. and the ServiceTalk project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicetalk.concurrent.api.internal;
import io.servicetalk.buffer.api.Buffer;
import io.servicetalk.concurrent.CloseableIterator;
import io.servicetalk.concurrent.internal.AbstractCloseableIteratorAsInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.annotation.Nullable;
import static io.servicetalk.buffer.api.ReadOnlyBufferAllocators.DEFAULT_RO_ALLOCATOR;
/**
 * Conversion from a {@link CloseableIterator} of {@link Buffer}s to an {@link InputStream}.
 */
public final class CloseableIteratorBufferAsInputStream extends AbstractCloseableIteratorAsInputStream<Buffer> {
    // Sentinel stored in 'leftover' to mark the stream as closed; compared
    // by identity in isClosed().
    private static final Buffer CLOSED = DEFAULT_RO_ALLOCATOR.fromAscii("");
    // Unread remainder of the most recently fetched Buffer; null when drained.
    @Nullable
    private Buffer leftover;

    /**
     * Create a new instance.
     * @param iterator The {@link CloseableIterator} providing data.
     */
    public CloseableIteratorBufferAsInputStream(CloseableIterator<Buffer> iterator) {
        super(iterator);
    }

    @Override
    protected int leftOverReadableBytes() {
        assert leftover != null;
        return leftover.readableBytes();
    }

    @Override
    protected void leftOverReadBytes(final byte[] dst, final int offset, final int length) {
        assert leftover != null;
        leftover.readBytes(dst, offset, length);
    }

    @Override
    protected boolean hasLeftOver() {
        return leftover != null;
    }

    @Override
    protected void leftOverCheckReset() {
        assert leftover != null;
        // Drop the buffer once fully consumed so hasLeftOver() reports false.
        if (leftover.readableBytes() == 0) {
            leftover = null;
        }
    }

    @Override
    protected void leftOverReset() {
        leftover = null;
    }

    @Override
    protected void nextLeftOver(final CloseableIterator<Buffer> iterator) {
        leftover = iterator.next();
    }

    @Override
    protected byte leftOverReadSingleByte() {
        assert leftover != null;
        final byte b = leftover.readByte();
        leftOverCheckReset();
        return b;
    }

    @Override
    protected boolean isClosed() {
        return leftover == CLOSED;
    }

    @Override
    public void close() throws IOException {
        // Mark closed before delegating so isClosed() is accurate even if
        // the underlying iterator's close throws.
        leftover = CLOSED;
        super.close();
    }
}
| 958 |
680 | package org.ff4j.couchdb;
/*
* #%L
* ff4j-store-couchbase
* %%
* Copyright (C) 2013 - 2017 FF4J
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/**
 * Default settings to connect to couchDB.
 *
 * @author <NAME> (@drizztguen77)
 */
public class CouchDbConstants {

    /**
     * Hide default constructor.
     */
    private CouchDbConstants() { }

    /**
     * Default type marker for event documents.
     */
    public static final String DEFAULT_EVENT_TYPE = "ff4j_event";

    /**
     * Default type marker for feature documents.
     */
    public static final String DEFAULT_FEATURE_TYPE = "ff4j_feature";

    /**
     * Default type marker for property documents.
     */
    public static final String DEFAULT_PROPERTY_TYPE = "ff4j_property";

    /**
     * Default database name (use ff4j).
     */
    public static final String DEFAULT_DBNAME = "ff4j";

    /**
     * Default SSL protocol
     */
    public static final String SSL_PROTOCOL = "https";

    /**
     * Default plaintext protocol
     */
    public static final String PLAINTEXT_PROTOCOL = "http";

    /**
     * Default SSL port
     */
    public static final int SSL_PORT = 443;

    /**
     * Default database port
     */
    public static final int DEFAULT_DATABASE_PORT = 5984;
}
| 609 |
335 | {
"word": "Deceptively",
"definitions": [
"In a way or to an extent that gives a misleading impression; to a lesser or greater extent than appears the case."
],
"parts-of-speech": "Adverb"
} | 77 |
310 | <filename>org/domterm/util/PrintNested.java
package org.domterm.util;
import java.io.*;
import java.util.*;
/* https://news.ycombinator.com/item?id=17307123
*/
/** Convenience class for pretty-printing structured data, with folding.
 * The class can be used as-is, or you can customize it by extending it.
 *
 * This class handles nested objects that can be a mix of:
 * - Lists (java.util.List objects): A sequence of values, each prefixed
 *   with an integer index, surrounded by "[" and "]".
 * - Maps (java.util.Map objects): A sequence of values, each prefixed
 *   with a key (commonly a string), surrounded by "[" and "]".
 * - Trees (the Tree interface in this class): A header object (commonly a
 *   string), followed by one or more children.
 */
public class PrintNested
{
    /** Demo/test method. */
    public static void main(String[] args) {
        PrintWriter out = new PrintWriter(System.out);
        PrintNested pn = new PrintNested(out);
        LinkedHashMap<Object,Object> map1 = new LinkedHashMap<Object,Object>();
        map1.put("key1", 98789767);
        map1.put("key2", "<KEY>");
        Object obj =
            pn.makeTree("info: GET /foo/endpoint",
                "info: user <user ID> valid auth",
                map1,
                Arrays.asList(
                    3, 4,
                    pn.makeTree("info1 - request to service A",
                        "info1a - request took 1 seconds",
                        "info1b - request took 1 seconds"),
                    "info2",
                    Arrays.asList(11099011,
                        Arrays.asList(13008821,
                            13008822),
                        12099012)
                ),
                pn.makeTree("info3 - request to service B",
                    "debug - opening new connection\nnext line",
                    "debug - success, result = ...",
                    "info - request took 1 seconds"),
                "info - http request took 3 seconds abcdefghi",
                "info - preparing result took 1 seconds"
            );
        pn.printObject(obj, false);
        out.println();
        out.flush();
    }

    protected PrintWriter out;
    // True when running inside a DomTerm terminal: emit DomTerm escape
    // sequences for folding/indentation instead of plain box-drawing text.
    protected boolean isDomTerm;

    // DomTerm "newline kind" escape codes used by newline()/printElementSeparator.
    public static final int NL_LINEAR = 116;
    public static final int NL_FILL = 115;
    public static final int NL_MISER = 117;
    public static final int NL_MANDATORY = 118;

    /** Where hide/show (folding) buttons are printed.
     * If false, buttons are printed between header and children.
     * (This is probably more "logical".)
     * If true, buttons are printed before header.
     * (This does print the parts in a less logical order,
     * but it looks pretty and may be more familiar.)
     */
    public boolean prefixButtons = true;

    /* The character icon to use for the "hide" button.
     * The default is “black down-pointing triangle”.
     * Must be a single Unicode code point. */
    public String hideButtonChar = "\u25BC";
    /* The character icon to use for the "show" button.
     * The default is “black right-pointing triangle”.
     * Must be a single Unicode code point. */
    public String showButtonChar = "\u25B6";
    /** Horizontal line.
     * Defaults to x2500 "box drawings light horizontal". */
    public String boxHorizonal = "\u2500";
    /** Vertical line.
     * Defaults to x2502 "box drawings light vertical". */
    public String boxVertical = "\u2502";
    /** Vertical line for last child.
     * Defaults to x250a "box drawings light quadruple dash vertical". */
    public String boxVerticalLast = "\u250a";
    /** Left "margin" to indicate a child element (except the last one).
     * Defaults to x251C "box drawings light vertical and right". */
    public String boxChild = "\u251C";
    /** Left "margin" to indicate the last child element.
     * Defaults to x2514 "box drawings light up and right". */
    public String boxChildLast = "\u2514";
    public String nobreakSeparator = " " + boxChild;
    /** How many columns to indent for each nesting level.
     * Must be at least 1. */
    public int indentStep = 2;
    // Accumulated left-margin text for the non-DomTerm (plain text) mode.
    private StringBuilder indentation = new StringBuilder();
    // Saved indentation lengths, pushed/popped by start/endLogicalBlock.
    private Stack<Integer> indentationLengthStack = new Stack<Integer>();

    public PrintNested(PrintWriter out) {
        this.out = out;
        // DomTerm mode is detected from the environment, not configurable.
        this.isDomTerm = System.console() != null
            && System.getenv("DOMTERM") != null;
    }

    /** A generalized tree node.
     */
    public static interface Tree {
        Object getHeader();
        Iterable getChildren();
    };

    /** A simple implementation of Tree. */
    public static class SimpleTree implements Tree {
        Object header;
        List children;
        public Object getHeader() { return header; }
        public Iterable getChildren() { return children; }
    }

    /** Create a Tree from the given arguments. */
    public Tree makeTree(Object header, Object... children) {
        SimpleTree node = new SimpleTree();
        node.header = header;
        node.children = Arrays.asList(children);
        return node;
    }

    public boolean isTree(Object obj) {
        return obj instanceof Tree;
    }

    public boolean isList(Object obj) {
        return obj instanceof List;
    }

    // Open a pretty-printing block: DomTerm escape in DomTerm mode,
    // otherwise remember the current indentation length for restoring.
    public void startLogicalBlock() {
        if (isDomTerm)
            out.print("\033]110\007");
        else {
            indentationLengthStack.add(indentation.length());
        }
    }

    public void endLogicalBlock() {
        if (isDomTerm)
            out.print("\033]111\007");
        else {
            indentation.setLength(indentationLengthStack.pop());
        }
    }

    /** Print a leaf value; override to customize scalar formatting. */
    public void printSimple(Object obj) {
        out.print(obj == null ? "(null)" : obj.toString());
    }

    /** Emit a DomTerm newline escape of the given NL_* kind. */
    public void newline(int kind) {
        out.print("\033]"+kind+"\007");
    }

    // Dispatch on the object kind. 'element' is true when called from
    // printElement, which has already printed the hide button.
    public void printObject(Object obj, boolean element) {
        if (isList(obj) || obj instanceof Map) {
            if (prefixButtons && ! element)
                printHideButton();
            if (isList(obj))
                printList(obj);
            else
                printMap(obj);
        } else if (isTree(obj)) {
            if (prefixButtons && ! element)
                printHideButton();
            printTree((Tree) obj);
        } else {
            printSimple(obj);
        }
    }

    // Break before a child element, drawing the branch connector for either
    // a middle child (boxChild) or the last child (boxChildLast).
    public void printElementSeparator(String nobreak, boolean last) {
        String postbreak = (last ? boxChildLast : boxChild)
            + repeat(boxHorizonal,indentStep-1);
        if (isDomTerm) {
            int kindCode = NL_LINEAR;
            out.print("\033]"+kindCode+";\"\",\""+postbreak+"\","+nobreak+"\007");
        } else {
            out.print("\n" + indentation + postbreak);
        }
    }

    public void printHideButton() {
        // Only meaningful under DomTerm; plain terminals cannot fold.
        if (isDomTerm)
            out.print("\033[16u"+hideButtonChar+showButtonChar+"\033[17u");
    }

    protected void printIndentation(boolean last) {
        // negative indentation to compensate for the 'postbreak' string.
        String indent = (last ? boxVerticalLast : boxVertical)
            + repeat(" ", indentStep-1);
        if (isDomTerm) {
            out.print("\033]112;"+(-indentStep)+"\007");
            out.print("\033]114;\""+indent+"\"\007"); // indentation
        } else {
            indentation.append(indent);
        }
    }

    public void printTree(Tree obj) {
        out.print(obj.getHeader());
        if (! prefixButtons)
            printHideButton();
        Iterator it = obj.getChildren().iterator();
        boolean first = true;
        boolean more = it.hasNext();
        while (more) {
            Object child = it.next();
            more = it.hasNext();
            printElementSeparator("\""+boxChild+"\"", ! more);
            first = false;
            startLogicalBlock();
            printIndentation(!more);
            printObject(child, false);
            endLogicalBlock();
        }
    }

    public void printListHeader(List obj) {
        out.print("array("+obj.size()+")[");
    }

    public void printListTail() {
        out.print("]");
    }

    public void printMapHeader(Map obj) {
        out.print("{");
    }

    public void printMapTail() {
        out.print("}");
    }

    protected void printKey(Object key) {
        if (key != null) {
            out.print(key.toString()+":");
            // After the key, allow a fill break (DomTerm) or a plain space.
            if (isDomTerm)
                out.print("\033]"+NL_FILL+";\"\",\"\",\" \"\007");
            else
                out.print(" ");
        }
    }

    // Print one list/map element: separator, optional button, key, value.
    protected void printElement(Object key, Object obj,
                                boolean first, boolean last) {
        printElementSeparator(first ? "\"\"" : "\";\"", last);
        startLogicalBlock();
        printIndentation(last);
        boolean isList = isList(obj);
        boolean isTree = isTree(obj);
        boolean isMap = obj instanceof Map;
        if (prefixButtons) {
            if (isList || isTree || isMap)
                printHideButton();
            else if (isDomTerm)
                out.print(" ");
        }
        printKey(key);
        printObject(obj, true);
        endLogicalBlock();
    }

    public void printList(Object arg) {
        List obj = (List) arg;
        printListHeader(obj);
        if (! prefixButtons)
            printHideButton();
        Iterator it = obj.iterator();
        boolean more = it.hasNext();
        boolean first = true;
        int index = 0;
        while (more) {
            Object child = it.next();
            more = it.hasNext();
            printElement(Integer.valueOf(index), child, first, !more);
            first = false;
            index++;
        }
        printListTail();
    }

    public void printMap(Object arg) {
        Map obj = (Map) arg;
        printMapHeader(obj);
        if (! prefixButtons)
            printHideButton();
        Iterator it = obj.entrySet().iterator();
        boolean more = it.hasNext();
        boolean first = true;
        int index = 0;
        while (more) {
            Map.Entry child = (Map.Entry) it.next();
            more = it.hasNext();
            printElement(child.getKey(), child.getValue(), first, !more);
            first = false;
            index++;
        }
        printMapTail();
    }

    // Concatenate 'count' copies of 'str'.
    private static String repeat(String str, int count) {
        StringBuilder sb = new StringBuilder();
        while (--count >= 0)
            sb.append(str);
        return sb.toString();
    }
}
| 4,968 |
11,058 | """
A shim of the os module containing only simple path-related utilities
"""
try:
from os import *
except ImportError:
import abc
def __getattr__(name):
raise OSError("no os specific module found")
def _shim():
import _dummy_os, sys
sys.modules['os'] = _dummy_os
sys.modules['os.path'] = _dummy_os.path
import posixpath as path
import sys
sys.modules['os.path'] = path
del sys
sep = path.sep
def fspath(path):
    """Return the path representation of a path-like object.

    str and bytes values are returned unchanged. Anything else must
    implement the os.PathLike protocol (``__fspath__``); a TypeError is
    raised when the object is not path-like, or when its ``__fspath__``
    returns something other than str or bytes.
    """
    if isinstance(path, (str, bytes)):
        return path

    # Resolve __fspath__ on the type, mirroring how other magic methods
    # are looked up (instance attributes are deliberately ignored).
    cls = type(path)
    try:
        rendered = cls.__fspath__(path)
    except AttributeError:
        if hasattr(cls, '__fspath__'):
            raise
        raise TypeError("expected str, bytes or os.PathLike object, "
                        "not " + cls.__name__)
    if not isinstance(rendered, (str, bytes)):
        raise TypeError("expected {}.__fspath__() to return str or bytes, "
                        "not {}".format(cls.__name__,
                                        type(rendered).__name__))
    return rendered
class PathLike(abc.ABC):
    """ABC for objects implementing the file system path protocol."""

    @abc.abstractmethod
    def __fspath__(self):
        """Return the file system path representation of this object."""
        raise NotImplementedError

    @classmethod
    def __subclasshook__(cls, other):
        # Duck-typed membership: anything exposing __fspath__ qualifies
        # as a virtual subclass.
        return hasattr(other, '__fspath__')
| 939 |
453 | #ifndef ATA_DRIVER_H
#define ATA_DRIVER_H
#include <stdint.h>
#include <stdlibadd/array.h>
#endif
| 48 |
530 | from typing import Any
from tartiflette.types.type import GraphQLWrappingType
__all__ = ("GraphQLNonNull",)
class GraphQLNonNull(GraphQLWrappingType):
    """
    Definition of a GraphQL non-null container.
    """

    is_non_null_type = True

    # Introspection attributes
    kind = "NON_NULL"

    def __eq__(self, other: Any) -> bool:
        """
        Determines whether `other` denotes the same non-null type as `self`.
        :param other: object instance to compare to `self`
        :type other: Any
        :return: whether or not `other` is identical to `self`
        :rtype: bool
        """
        if self is other:
            return True
        return (
            isinstance(other, GraphQLNonNull)
            and self.gql_type == other.gql_type
        )

    def __repr__(self) -> str:
        """
        Builds the developer-oriented representation of this instance.
        :return: the representation of a GraphQLNonNull instance
        :rtype: str
        """
        return "GraphQLNonNull(gql_type={!r})".format(self.gql_type)

    def __str__(self) -> str:
        """
        Builds the GraphQL SDL notation of the non-null type (trailing "!").
        :return: a human-readable representation of the non-null type
        :rtype: str
        """
        return "{!s}!".format(self.gql_type)
| 531 |
1,483 | /*
* Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.lealone.sql.dml;
import org.lealone.common.util.StatementBuilder;
import org.lealone.db.api.Trigger;
import org.lealone.db.async.AsyncHandler;
import org.lealone.db.async.AsyncResult;
import org.lealone.db.auth.Right;
import org.lealone.db.result.ResultTarget;
import org.lealone.db.session.ServerSession;
import org.lealone.db.table.Column;
import org.lealone.db.value.Value;
import org.lealone.sql.SQLStatement;
/**
 * This class represents the statement
 * INSERT
 *
 * @author <NAME>
 * @author zhh
 */
public class Insert extends InsertBase {

    public Insert(ServerSession session) {
        super(session);
    }

    @Override
    public int getType() {
        return SQLStatement.INSERT;
    }

    @Override
    public String getPlanSQL() {
        // Reconstruct "INSERT INTO table(col, ...)" followed by the values
        // (or DIRECT select) part produced by getValuesPlanSQL().
        StatementBuilder buff = new StatementBuilder("INSERT INTO ");
        buff.append(table.getSQL()).append('(');
        for (Column c : columns) {
            buff.appendExceptFirst(", ");
            buff.append(c.getSQL());
        }
        buff.append(")\n");
        if (insertFromSelect) {
            buff.append("DIRECT ");
        }
        getValuesPlanSQL(buff);
        return buff.toString();
    }

    @Override
    public int update() {
        // Synchronous execution path: wrap in a yieldable and wait for it.
        YieldableInsert yieldable = new YieldableInsert(this, null);
        return syncExecute(yieldable);
    }

    @Override
    public YieldableInsert createYieldableUpdate(AsyncHandler<AsyncResult<Integer>> asyncHandler) {
        return new YieldableInsert(this, asyncHandler); // uniformly handles standalone, replication and sharding modes
    }

    private static class YieldableInsert extends YieldableInsertBase implements ResultTarget {

        public YieldableInsert(Insert statement, AsyncHandler<AsyncResult<Integer>> asyncHandler) {
            super(statement, asyncHandler);
        }

        @Override
        protected boolean startInternal() {
            // Returning true means "yield and retry later" (lock not acquired).
            if (!table.trySharedLock(session))
                return true;
            session.getUser().checkRight(table, Right.INSERT);
            table.fire(session, Trigger.INSERT, true);
            statement.setCurrentRowNumber(0);
            if (statement.query != null) {
                // INSERT ... SELECT: when insertFromSelect is set, rows are
                // added directly while the query runs (this as ResultTarget).
                yieldableQuery = statement.query.createYieldableQuery(0, false, null,
                        statement.insertFromSelect ? this : null);
            }
            return false;
        }

        @Override
        protected void stopInternal() {
            table.fire(session, Trigger.INSERT, false);
        }

        @Override
        protected void executeLoopUpdate() {
            if (!isReplicationAppendMode && session.isReplicationMode() && table.getScanIndex(session).isAppendMode()) {
                startKey = table.getScanIndex(session).getStartKey(session.getReplicationName());
                if (startKey != -1) {
                    session.setFinalResult(true);
                    isReplicationAppendMode = true;
                } else {
                    // When appending rows in replication mode, first obtain a rowId range;
                    // the client must then check that range for conflicts and finally
                    // return the correct rowId range.
                    handleReplicationAppend();
                    return;
                }
            }
            if (yieldableQuery == null) {
                // Plain INSERT ... VALUES: add the precomputed row list.
                while (pendingException == null && index < listSize) {
                    addRowInternal(createNewRow());
                    if (yieldIfNeeded(++index)) {
                        return;
                    }
                }
                onLoopEnd();
            } else {
                if (statement.insertFromSelect) {
                    // Rows are fed through addRow() while the query runs.
                    yieldableQuery.run();
                    if (yieldableQuery.isStopped()) {
                        onLoopEnd();
                    }
                } else {
                    // Materialize the query result first, then insert from it.
                    if (rows == null) {
                        yieldableQuery.run();
                        if (!yieldableQuery.isStopped()) {
                            return;
                        }
                        rows = yieldableQuery.getResult();
                    }
                    while (pendingException == null && rows.next()) {
                        Value[] values = rows.currentRow();
                        if (addRow(values)) {
                            return;
                        }
                    }
                    rows.close();
                    onLoopEnd();
                }
            }
        }

        // ResultTarget implementation below: allows adding new rows while the
        // query is still producing them (insert-from-select streaming).
        @Override
        public boolean addRow(Value[] values) {
            addRowInternal(createNewRow(values));
            if (yieldIfNeeded(updateCount.get() + 1)) {
                return true;
            }
            return false;
        }

        @Override
        public int getRowCount() {
            return updateCount.get();
        }
    }
}
| 2,590 |
387 | import numpy as np
from nltk.corpus import brown
# Split the input text into chunks, where
# each chunk contains N words
def chunker(input_data, N):
    """Split text into chunks of at most N words each.

    Args:
        input_data: Input text with words separated by single spaces.
        N: Maximum number of words per chunk.

    Returns:
        A list of strings, each containing up to N space-joined words; the
        last chunk carries the remainder and is omitted when the word count
        divides evenly by N.
    """
    input_words = input_data.split(' ')
    output = []

    cur_chunk = []
    count = 0
    for word in input_words:
        cur_chunk.append(word)
        count += 1
        if count == N:
            output.append(' '.join(cur_chunk))
            count, cur_chunk = 0, []

    # Bug fix: only emit the trailing partial chunk when it is non-empty;
    # previously an empty string was appended whenever len(words) % N == 0.
    if cur_chunk:
        output.append(' '.join(cur_chunk))

    return output
if __name__=='__main__':
    # Read the first 12000 words from the Brown corpus
    input_data = ' '.join(brown.words()[:12000])

    # Define the number of words in each chunk
    chunk_size = 700

    chunks = chunker(input_data, chunk_size)
    print('\nNumber of text chunks =', len(chunks), '\n')
    # Preview only the first 50 characters of each chunk.
    for i, chunk in enumerate(chunks):
        print('Chunk', i+1, '==>', chunk[:50])
| 360 |
2,989 | {
"name" : "person",
"id" : 1,
"uri" : "jdbc:oracle:thin:person/person@devdb:1521:db",
"slowSourceQueryThreshold" : 2000,
"sources" :
[
{"id" : 101,
"name" : "com.linkedin.events.example.person.Person",
"uri": "person.person",
"partitionFunction" : "constant:1"
}
]
}
| 143 |
848 | package esform.domain;
/**
 * Domain object representing a class (group) entity.
 *
 * Created by
 *
 * @name:孙证杰
 * @email:<EMAIL> on 2017/10/18.
 */
public class Clazz extends BaseDomain {
    /** Display name of the class. */
    private String name;
    /** Type discriminator; semantics defined by callers — TODO confirm. */
    private Integer type;
    /** Sort order used when listing. */
    private Integer sort;

    public Clazz(String name) {
        this.name = name;
    }

    /** No-arg constructor (required by frameworks/serialization). */
    public Clazz() {
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Integer getType() {
        return type;
    }

    public void setType(Integer type) {
        this.type = type;
    }

    public Integer getSort() {
        return sort;
    }

    public void setSort(Integer sort) {
        this.sort = sort;
    }
}
| 299 |
1,433 | /**
* @file
* @author <NAME>, <NAME>, <NAME>
* @date 2012
* @brief Datalink for MS/TP module
*
* @section LICENSE
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mstpmodule.h"
#include "bacint.h"
#include "dlmstp_linux.h"
#include <termios.h>
#define MSTP_THREAD_PRINT_ENABLED
#ifdef MSTP_THREAD_PRINT_ENABLED
#define mstp_thread_debug(...) fprintf(stderr, __VA_ARGS__)
#else
#define mstp_thread_debug(...)
#endif
/* Router-port thread for an MS/TP datalink: configures the RS-485 serial
 * parameters from the port settings, initializes the MS/TP state machine,
 * then loops forwarding PDUs between the router's message box and the wire
 * until a SHUTDOWN service message arrives. Returns NULL on exit. */
void *dl_mstp_thread(
    void *pArgs)
{
    ROUTER_PORT *port = (ROUTER_PORT *) pArgs;
    struct mstp_port_struct_t mstp_port = { (MSTP_RECEIVE_STATE) 0 };
    volatile SHARED_MSTP_DATA shared_port_data = { 0 };
    uint16_t pdu_len;
    uint8_t shutdown = 0;

    /* Fixed MS/TP timing parameters (milliseconds). */
    shared_port_data.Treply_timeout = 260;
    shared_port_data.MSTP_Packets = 0;
    shared_port_data.Tusage_timeout = 50;
    shared_port_data.RS485_Handle = -1;
    /* Initial value only; the configured rate is applied below via
     * dlmstp_set_baud_rate(). */
    shared_port_data.RS485_Baud = B38400;
    shared_port_data.RS485MOD = 0;
    /* Map the configured word size onto termios character-size flags. */
    switch (port->params.mstp_params.databits) {
        case 5:
            shared_port_data.RS485MOD = CS5;
            break;
        case 6:
            shared_port_data.RS485MOD = CS6;
            break;
        case 7:
            shared_port_data.RS485MOD = CS7;
            break;
        default:
            shared_port_data.RS485MOD = CS8;
            break;
    }
    /* Parity: even sets PARENB, odd adds PARODD, otherwise none. */
    switch (port->params.mstp_params.parity) {
        case PARITY_EVEN:
            shared_port_data.RS485MOD |= PARENB;
            break;
        case PARITY_ODD:
            shared_port_data.RS485MOD |= PARENB | PARODD;
            break;
        default:
            break;
    }
    if (port->params.mstp_params.stopbits == 2)
        shared_port_data.RS485MOD |= CSTOPB;
    mstp_port.UserData = (void *) &shared_port_data;
    dlmstp_set_baud_rate(&mstp_port, port->params.mstp_params.baudrate);
    dlmstp_set_mac_address(&mstp_port, port->route_info.mac[0]);
    dlmstp_set_max_info_frames(&mstp_port,
        port->params.mstp_params.max_frames);
    dlmstp_set_max_master(&mstp_port, port->params.mstp_params.max_master);
    /* NOTE(review): on init failure only a message is printed; the thread
     * still continues to create its message box below. */
    if (!dlmstp_init(&mstp_port, port->iface))
        printf("MSTP %s init failed. Stop.\n", port->iface);
    port->port_id = create_msgbox();
    if (port->port_id == INVALID_MSGBOX_ID) {
        port->state = INIT_FAILED;
        return NULL;
    }
    port->state = RUNNING;
    while (!shutdown) {
        /* message loop */
        BACMSG msg_storage, *bacmsg;
        MSG_DATA *msg_data;
        /* Outbound direction: a message from the router core to transmit. */
        bacmsg = recv_from_msgbox(port->port_id, &msg_storage);
        if (bacmsg) {
            switch (bacmsg->type) {
                case DATA:
                    msg_data = (MSG_DATA *) bacmsg->data;
                    if (msg_data->dest.net == BACNET_BROADCAST_NETWORK) {
                        dlmstp_get_broadcast_address(&(msg_data->dest));
                    } else {
                        /* MS/TP MAC addresses are a single octet. */
                        msg_data->dest.mac[0] = msg_data->dest.adr[0];
                        msg_data->dest.mac_len = 1;
                    }
                    dlmstp_send_pdu(&mstp_port, &(msg_data->dest),
                        msg_data->pdu, msg_data->pdu_len);
                    check_data(msg_data);
                    break;
                case SERVICE:
                    switch (bacmsg->subtype) {
                        case SHUTDOWN:
                            shutdown = 1;
                            break;
                        default:
                            break;
                    }
                    break;
                default:
                    continue;
                    break;
            }
        } else {
            /* Inbound direction: poll the wire (1000ms timeout) and forward
             * any received PDU to the router's main message box. */
            pdu_len = dlmstp_receive(&mstp_port, NULL, NULL, 0, 1000);
            if (pdu_len > 0) {
                msg_data = (MSG_DATA *) malloc(sizeof(MSG_DATA));
                memmove(&(msg_data->src),
                    (const void *) &(shared_port_data.Receive_Packet.address),
                    sizeof(shared_port_data.Receive_Packet.address));
                msg_data->src.adr[0] = msg_data->src.mac[0];
                msg_data->src.len = 1;
                msg_data->pdu = (uint8_t *) malloc(pdu_len);
                memmove(msg_data->pdu,
                    (const void *) &(shared_port_data.Receive_Packet.pdu),
                    pdu_len);
                msg_data->pdu_len = pdu_len;
                msg_storage.type = DATA;
                msg_storage.subtype = (MSGSUBTYPE) 0;
                msg_storage.origin = port->port_id;
                msg_storage.data = msg_data;
                /* On delivery failure, release the copied PDU ourselves. */
                if (!send_to_msgbox(port->main_id, &msg_storage)) {
                    free_data(msg_data);
                }
            }
        }
    }
    dlmstp_cleanup(&mstp_port);
    port->state = FINISHED;
    return NULL;
}
| 2,985 |
3,269 | <reponame>judocode/sorbet
#include "libfuzzer/libfuzzer_macro.h"
#include "test/fuzz/TextDocumentPositionParamsWithoutTextDocumentIdentifier.pb.h"
#include <cxxopts.hpp>
// ^^^ should go first as they violate our poisons
#include "common/common.h"
using namespace std;
// Set once in LLVMFuzzerInitialize from command-line flags, read-only afterwards.
static bool disableFastPath = false;  // mirrors --lsp-disable-fastpath
static string fileName;               // mirrors --single_test
// One-time libFuzzer hook: parses --single_test and --lsp-disable-fastpath
// from the command line before fuzzing begins and stores them in the
// file-scope globals above. Returns non-zero when the mandatory
// --single_test flag is missing or repeated.
extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv) {
    cxxopts::Options options("fuzz_hover", "Fuzz all potential LSP hovers given a file");
    options.allow_unrecognised_options().add_options()(
        "single_test", "run over single test.", cxxopts::value<std::string>()->default_value(""), "testpath");
    options.add_options()("lsp-disable-fastpath", "disable fastpath in lsp tests");
    const auto parsedOptions = options.parse(*argc, *argv);
    const bool hasExactlyOneTest = parsedOptions.count("single_test") == 1;
    if (!hasExactlyOneTest) {
        printf("--single_test=<filename> argument expected\n");
        return 1;
    }
    fileName = parsedOptions["single_test"].as<std::string>();
    disableFastPath = parsedOptions["lsp-disable-fastpath"].as<bool>();
    return 0;
}
// Fuzz entry point, expanded by libprotobuf-mutator's DEFINE_PROTO_FUZZER.
// The ENFORCE is a deliberate tripwire: the run "finds a bug" as soon as the
// fuzzer learns to synthesize an input with line == 42, which demonstrates
// that the harness and corpus mutation are wired up correctly.
DEFINE_PROTO_FUZZER(
    const com::stripe::rubytyper::fuzz::TextDocumentPositionParamsWithoutTextDocumentIdentifier &input) {
    ENFORCE(input.line() != 42);
}
| 492 |
361 | /**
* 문제: 기능개발 (https://programmers.co.kr/learn/courses/30/lessons/42586)
* Difficulty: Level 2
* Comment: "뒤에 있는 기능은 앞에 있는 기능이 배포될 때 함께 배포된다." 라는 말에 주목하세요.
* 즉, 모든 기능이 순차적으로 배포되어야 합니다.
* 순차적이면 어떤 자료구조를 사용하면 될까요? 큐죠?
*/
#include <string>
#include <vector>
#include <queue>
#include <iostream>
using namespace std;
// Groups features into releases: a feature ships on the first day its
// progress reaches 100, but never before every feature queued ahead of it.
// Returns the number of features shipped per release, in order.
//
// Improvement: the original simulated day-by-day with a queue
// (`while (X + Y * time < 100) time++`), which is O(total days). Each
// feature's completion day is computable directly with a ceiling division,
// so a single O(n) sweep suffices: a new release starts whenever a feature
// finishes later than the current release day; earlier-or-equal finishers
// ride along in the current batch.
std::vector<int> solution(std::vector<int> progresses, std::vector<int> speeds) {
    std::vector<int> answer;
    int release_day = 0;  // day the current batch's slowest leader finishes
    int batch = 0;        // features accumulated in the current batch
    for (std::size_t i = 0; i < progresses.size(); i++) {
        // Days until feature i reaches 100%: ceil((100 - progress) / speed).
        int days = (100 - progresses[i] + speeds[i] - 1) / speeds[i];
        if (days > release_day) {
            if (batch > 0) answer.push_back(batch);
            release_day = days;
            batch = 1;
        } else {
            batch++;  // already done by the current release day
        }
    }
    if (batch > 0) answer.push_back(batch);
    return answer;
}
int main() { // Driver
vector<int> progress = {95, 90, 99, 99, 80, 99};
vector<int> speeds = {1, 1, 1, 1, 1, 1};
vector<int> result = solution(progress, speeds);
for(int i : result) cout << i << ' ';
} | 719 |
310 | {
"name": "MultiMix 8",
"description": "A USB audio interface/mixer.",
"url": "https://www.alesis.com/multimix8usb"
} | 48 |
3,056 | <gh_stars>1000+
/*
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LYRA_CODEC_SPARSE_MATMUL_COMPUTE_GRU_GATES_ARM_H_
#define LYRA_CODEC_SPARSE_MATMUL_COMPUTE_GRU_GATES_ARM_H_
#if defined __ARM_NEON || defined __aarch64__
#include <arm_neon.h>
#endif
#include <cstdint>
#include "sparse_matmul/compute/ar_inputs.h"
#include "sparse_matmul/numerics/fast_transcendentals.h"
namespace csrblocksparse {
static constexpr int kNeonSIMDWidth = 4;
// ------ Scalar calculation --------
// See "Efficient Neural Audio Synthesis" for a description of the calculation.
// https://arxiv.org/abs/1802.08435
//
// NOTE:
// |sample| = (|coarse_at_sminus1|, |fine_at_sminus1|,
// |coarse_at_sminus1|, |fine_at_sminus1|)
// |w_sample| = (|coarse_at_s|, |coarse_at_s|, |coarse_at_s|, |coarse_at_s|)
//
// CHEATSHEET:
// vld1q_f32 = load 4 32-bit floats
// vmulq_f32(a, b) : return a * b;
// vaddq_f32(a, b) : return a + b;
// vmlaq_f32(c, a, b) : return c + a * b;
// vpaddq_f32(a, b) : return (a0 + a1, a2 + a3, b0 + b1, b2 + b3)
// vsubq_f32(a, b) : return a - b;
// vst1q_f32 = store 4 32-bit floats
#if defined __ARM_NEON || defined __aarch64__
#if !defined __aarch64__
// Backport of the AArch64-only vpaddq_f32 intrinsic to 32-bit ARM.
// Pairwise-adds adjacent lanes: returns (a0+a1, a2+a3, b0+b1, b2+b3),
// assembled from two 64-bit pairwise adds stitched back together.
inline float32x4_t vpaddq_f32(float32x4_t a, float32x4_t b) {
  const float32x2_t a_pairs = vpadd_f32(vget_low_f32(a), vget_high_f32(a));
  const float32x2_t b_pairs = vpadd_f32(vget_low_f32(b), vget_high_f32(b));
  return vcombine_f32(a_pairs, b_pairs);
}
#endif
// Fused GRU gate nonlinearity for float data, vectorized with NEON.
// For elements [start, end), combines the precomputed recurrent matmul
// accumulators in |gru_gates_ptr| (plus a second accumulator when
// SplitGates), the |conditioning_ptr| input, and — depending on
// kInputsMode — the autoregressive QR / w-hat sample terms, then writes the
// updated hidden state in place into |gru_h_ptr|. All pointers advance by
// kNeonSIMDWidth per iteration, so (end - start) is assumed to be a
// multiple of kNeonSIMDWidth.
template <ARInputsMode kInputsMode, bool SplitGates>
void GoThroughGatesFloat(int start, int end, const float* qr_ptr,
                         const float* gru_gates_ptr,
                         const float* gru_gates_other_ptr,
                         const float* conditioning_ptr, float* gru_h_ptr,
                         const float* w_hat, int proj_size,
                         const float* coarse_at_sminus1,
                         const float* fine_at_sminus1,
                         const float* coarse_at_s) {
  // Increment all the pointers to save on pointer arithmetic in the loop.
  conditioning_ptr += start;
  gru_h_ptr += start;
  gru_gates_ptr += start;
  if (SplitGates) {
    DCHECK_NE(gru_gates_other_ptr, nullptr);
    gru_gates_other_ptr += start;
  }
  if (kInputsMode != ARInputsMode::k0ARInputs) {
    DCHECK_NE(qr_ptr, nullptr);
    qr_ptr += 2 * start;
    DCHECK_NE(coarse_at_sminus1, nullptr);
    DCHECK_NE(fine_at_sminus1, nullptr);
    if (kInputsMode == ARInputsMode::k3ARInputs) {
      DCHECK_NE(w_hat, nullptr);
      DCHECK_NE(coarse_at_s, nullptr);
      w_hat += start;
    }
  }
  for (int i = start; i < end; i += kNeonSIMDWidth) {
    float32x4_t reset = vld1q_f32(gru_gates_ptr);
    float32x4_t update = vld1q_f32(gru_gates_ptr + proj_size);
    float32x4_t cell = vld1q_f32(gru_gates_ptr + 2 * proj_size);
    float32x4_t qr_cell;  // Set and used only when kInputsMode != k0ARInputs.
    if (SplitGates) {
      // Merge the second (split) accumulator into each gate.
      reset = vaddq_f32(reset, vld1q_f32(gru_gates_other_ptr));
      update = vaddq_f32(update, vld1q_f32(gru_gates_other_ptr + proj_size));
      cell = vaddq_f32(cell, vld1q_f32(gru_gates_other_ptr + 2 * proj_size));
    }
    if (kInputsMode != ARInputsMode::k0ARInputs) {
      // Setup the sample vector.
      float32x4_t sample = vdupq_n_f32(*coarse_at_sminus1);
      sample = vsetq_lane_f32(*fine_at_sminus1, sample, 1);
      sample = vsetq_lane_f32(*fine_at_sminus1, sample, 3);
      // All auto types are float32x4_t, auto used to fit statements on one line
      // for readability. Do two rows of QR at once.
      auto qr_reset_0 = vmulq_f32(vld1q_f32(qr_ptr), sample);
      auto qr_reset_1 = vmulq_f32(vld1q_f32(qr_ptr + 4), sample);
      auto qr_reset = vpaddq_f32(qr_reset_0, qr_reset_1);
      auto qr_update_0 = vmulq_f32(vld1q_f32(qr_ptr + 2 * proj_size), sample);
      auto qr_update_1 =
          vmulq_f32(vld1q_f32(qr_ptr + 4 + 2 * proj_size), sample);
      auto qr_update = vpaddq_f32(qr_update_0, qr_update_1);
      auto qr_cell_0 = vmulq_f32(vld1q_f32(qr_ptr + 4 * proj_size), sample);
      auto qr_cell_1 = vmulq_f32(vld1q_f32(qr_ptr + 4 + 4 * proj_size), sample);
      qr_cell = vpaddq_f32(qr_cell_0, qr_cell_1);
      if (kInputsMode == ARInputsMode::k3ARInputs) {
        // Third AR input: add the w-hat projection of the coarse sample at s.
        float32x4_t w_sample = vdupq_n_f32(*coarse_at_s);
        qr_reset = vmlaq_f32(qr_reset, vld1q_f32(w_hat), w_sample);
        qr_update =
            vmlaq_f32(qr_update, vld1q_f32(w_hat + proj_size), w_sample);
        qr_cell =
            vmlaq_f32(qr_cell, vld1q_f32(w_hat + 2 * proj_size), w_sample);
      }
      reset = vaddq_f32(reset, qr_reset);
      update = vaddq_f32(update, qr_update);
    }
    auto reset_conditioning = vld1q_f32(conditioning_ptr);
    auto update_conditioning = vld1q_f32(conditioning_ptr + proj_size);
    auto cell_conditioning = vld1q_f32(conditioning_ptr + 2 * proj_size);
    reset = fast_sigmoid(vaddq_f32(reset, reset_conditioning));
    update = fast_sigmoid(vaddq_f32(update, update_conditioning));
    if (kInputsMode == ARInputsMode::k0ARInputs) {
      cell = vmulq_f32(reset, cell);
    } else {
      cell = vmlaq_f32(qr_cell, reset, cell);
    }
    // Standard GRU blend: h = hbar + (prev_h - hbar) * update.
    auto hbar = fast_tanh(vaddq_f32(cell, cell_conditioning));
    auto prev_h = vld1q_f32(gru_h_ptr);
    auto diff = vsubq_f32(prev_h, hbar);
    auto new_h = vmlaq_f32(hbar, diff, update);
    vst1q_f32(gru_h_ptr, new_h);
    // Increment all the pointers.
    conditioning_ptr += kNeonSIMDWidth;
    gru_h_ptr += kNeonSIMDWidth;
    gru_gates_ptr += kNeonSIMDWidth;
    if (SplitGates) gru_gates_other_ptr += kNeonSIMDWidth;
    if (kInputsMode != ARInputsMode::k0ARInputs) {
      qr_ptr += 2 * kNeonSIMDWidth;
      if (kInputsMode == ARInputsMode::k3ARInputs) w_hat += kNeonSIMDWidth;
    }
  }
}
// This version should only be used if all of the 32-bit fixed point
// representations have the same number of mantissa bits.
// |ar_at_sminus1| packs sample 0 and 1 into a pair because the QR weights are
// formatted with the weights interleaved for sample 0 and 1. The two samples
// represent coarse and fine for WaveRNN.
//
// Fixed-point variant of GoThroughGatesFloat: the matmul accumulators and
// conditioning arrive as 32-bit fixed point (GRUMatMulOutType), the hidden
// state is stored as 16-bit fixed point (GRUStateType), and intermediate
// math is done in fp32 via vcvtq_n_* conversions. Processes elements
// [start, end) in steps of kNeonSIMDWidth.
template <typename GRUStateType, typename GRUMatMulOutType,
          ARInputsMode kInputsMode, bool SplitGates>
void GoThroughGatesFixed(int start, int end, const float* qr_ptr,
                         const int32_t* gru_gates_ptr,
                         const int32_t* gru_gates_other_ptr,
                         const int32_t* conditioning_ptr, int16_t* gru_h_ptr,
                         const float* w_hat, int proj_size,
                         const std::pair<float, float>* ar_at_sminus1,
                         const float* coarse_at_s) {
  // Increment all the pointers to save on pointer arithmetic in the loop.
  conditioning_ptr += start;
  gru_h_ptr += start;
  gru_gates_ptr += start;
  if (SplitGates) {
    DCHECK_NE(gru_gates_other_ptr, nullptr);
    gru_gates_other_ptr += start;
  }
  // Loop-invariant sample vectors; only initialized when AR inputs are used.
  float32x4_t sample01;
  float32x4_t w_sample;
  if (kInputsMode != ARInputsMode::k0ARInputs) {
    DCHECK_NE(qr_ptr, nullptr);
    qr_ptr += 2 * start;
    DCHECK_NE(ar_at_sminus1, nullptr);
    sample01 = vdupq_n_f32(ar_at_sminus1->first);
    sample01 = vsetq_lane_f32(ar_at_sminus1->second, sample01, 1);
    sample01 = vsetq_lane_f32(ar_at_sminus1->second, sample01, 3);
    if (kInputsMode == ARInputsMode::k3ARInputs) {
      DCHECK_NE(w_hat, nullptr);
      DCHECK_NE(coarse_at_s, nullptr);
      w_hat += start;
      w_sample = vdupq_n_f32(*coarse_at_s);
    }
  }
  for (int i = start; i < end; i += kNeonSIMDWidth) {
    auto reset = vld1q_s32(gru_gates_ptr);
    auto update = vld1q_s32(gru_gates_ptr + proj_size);
    // vcvtq_n_f32_s32 = convert 32-bit fixed point to fp32
    auto cell_int = vld1q_s32(gru_gates_ptr + 2 * proj_size);
    if (SplitGates) {
      reset = vaddq_s32(reset, vld1q_s32(gru_gates_other_ptr));
      update = vaddq_s32(update, vld1q_s32(gru_gates_other_ptr + proj_size));
      cell_int =
          vaddq_s32(cell_int, vld1q_s32(gru_gates_other_ptr + 2 * proj_size));
    }
    float32x4_t cell =
        vcvtq_n_f32_s32(cell_int, GRUMatMulOutType::kMantissaBits);
    float32x4_t qr_cell;  // Set and used only when kInputsMode != k0ARInputs.
    if (kInputsMode != ARInputsMode::k0ARInputs) {
      // Do two rows of QR at once.
      float32x4_t qr_reset_0 = vmulq_f32(vld1q_f32(qr_ptr), sample01);
      float32x4_t qr_reset_1 = vmulq_f32(vld1q_f32(qr_ptr + 4), sample01);
      float32x4_t qr_reset = vpaddq_f32(qr_reset_0, qr_reset_1);
      float32x4_t qr_update_0 =
          vmulq_f32(vld1q_f32(qr_ptr + 2 * proj_size), sample01);
      float32x4_t qr_update_1 =
          vmulq_f32(vld1q_f32(qr_ptr + 4 + 2 * proj_size), sample01);
      float32x4_t qr_update = vpaddq_f32(qr_update_0, qr_update_1);
      float32x4_t qr_cell_0 =
          vmulq_f32(vld1q_f32(qr_ptr + 4 * proj_size), sample01);
      float32x4_t qr_cell_1 =
          vmulq_f32(vld1q_f32(qr_ptr + 4 + 4 * proj_size), sample01);
      qr_cell = vpaddq_f32(qr_cell_0, qr_cell_1);
      if (kInputsMode == ARInputsMode::k3ARInputs) {
        // NOTE(review): this shadows the w_sample hoisted above the loop, so
        // the precomputed copy is dead — harmless (vdupq_n_f32 is cheap) but
        // one of the two should be removed.
        float32x4_t w_sample = vdupq_n_f32(*coarse_at_s);
        qr_reset = vmlaq_f32(qr_reset, vld1q_f32(w_hat), w_sample);
        qr_update =
            vmlaq_f32(qr_update, vld1q_f32(w_hat + proj_size), w_sample);
        qr_cell =
            vmlaq_f32(qr_cell, vld1q_f32(w_hat + 2 * proj_size), w_sample);
      }
      reset = vaddq_s32(
          reset, vcvtq_n_s32_f32(qr_reset, GRUMatMulOutType::kMantissaBits));
      update = vaddq_s32(
          update, vcvtq_n_s32_f32(qr_update, GRUMatMulOutType::kMantissaBits));
    }
    auto reset_conditioning = vld1q_s32(conditioning_ptr);
    auto update_conditioning = vld1q_s32(conditioning_ptr + proj_size);
    float32x4_t cell_conditioning =
        vcvtq_n_f32_s32(vld1q_s32(conditioning_ptr + 2 * proj_size),
                        GRUMatMulOutType::kMantissaBits);
    float32x4_t reset_f32 = fast_sigmoid<GRUMatMulOutType::kExponentBits>(
        vaddq_s32(reset, reset_conditioning));
    float32x4_t update_f32 = fast_sigmoid<GRUMatMulOutType::kExponentBits>(
        vaddq_s32(update, update_conditioning));
    if (kInputsMode == ARInputsMode::k0ARInputs) {
      cell = vmulq_f32(reset_f32, cell);
    } else {
      cell = vmlaq_f32(qr_cell, reset_f32, cell);
    }
    // Standard GRU blend in fp32, then requantize the state to int16.
    float32x4_t hbar = fast_tanh(vaddq_f32(cell, cell_conditioning));
    float32x4_t prev_h = vcvtq_n_f32_s32(vmovl_s16(vld1_s16(gru_h_ptr)),
                                         GRUStateType::kMantissaBits);
    float32x4_t diff = vsubq_f32(prev_h, hbar);
    float32x4_t new_h = vmlaq_f32(hbar, diff, update_f32);
    // vcvtq_n_s32_f32 = convert fp32 to signed 32-bit fixed point
    // vqrshrn_n_s32 = saturating, rounding, narrowing right shift - used to
    // convert a 32-bit fixed point value to a 16-bit fixed point value
    vst1_s16(gru_h_ptr,
             vqrshrn_n_s32(
                 vcvtq_n_s32_f32(new_h, GRUStateType::kMantissaBits + 16), 16));
    // Increment all the pointers.
    conditioning_ptr += kNeonSIMDWidth;
    gru_h_ptr += kNeonSIMDWidth;
    gru_gates_ptr += kNeonSIMDWidth;
    if (SplitGates) gru_gates_other_ptr += kNeonSIMDWidth;
    if (kInputsMode != ARInputsMode::k0ARInputs) {
      qr_ptr += 2 * kNeonSIMDWidth;
      if (kInputsMode == ARInputsMode::k3ARInputs) w_hat += kNeonSIMDWidth;
    }
  }
}
#endif // defined __ARM_NEON || defined __aarch64__
} // namespace csrblocksparse
#endif // LYRA_CODEC_SPARSE_MATMUL_COMPUTE_GRU_GATES_ARM_H_
| 5,879 |
2,960 | #include <bits/stdc++.h>
using namespace std;
/**
* Your LRUCache object will be instantiated and called as such:
* LRUCache* obj = new LRUCache(capacity);
* int param_1 = obj->get(key);
* obj->put(key,value);
*/
// Fixed-capacity least-recently-used cache.
//
// |li_| keeps (key, value) pairs ordered most- to least-recently used;
// |ht_| maps each key to its node in |li_| for O(1) lookup. Both get() and
// put() run in amortized O(1).
//
// Bug fixed: put() tested `ht_.count(key) == 0` where it meant "key already
// present". Updating an existing key therefore left a stale list node behind,
// and inserting a new key touched ht_[key] for a missing entry (default-
// constructing a garbage iterator and erasing through it — undefined
// behavior) while the capacity/eviction branch ran for the wrong case.
class LRUCache {
public:
    explicit LRUCache(int capacity) : cap_(capacity) {}

    // Returns the value for |key| and marks it most recently used,
    // or -1 if the key is absent.
    int get(int key) {
        auto it = ht_.find(key);
        if (it == ht_.end()) {
            return -1;
        }
        int value = it->second->second;
        if (li_.front().first != key) {
            // Move the node to the front and refresh the stored iterator.
            li_.erase(it->second);
            li_.push_front(std::make_pair(key, value));
            it->second = li_.begin();
        }
        return value;
    }

    // Inserts or updates |key| -> |value|, evicting the least recently used
    // entry when a brand-new key would exceed capacity.
    void put(int key, int value) {
        if (cap_ <= 0) {
            return;
        }
        auto it = ht_.find(key);
        if (it != ht_.end()) {
            // Key already present: drop its old node before re-inserting.
            li_.erase(it->second);
        } else if (static_cast<int>(li_.size()) >= cap_) {
            // Cache full: evict the least recently used (back) entry.
            auto lru = li_.back();
            li_.pop_back();
            ht_.erase(lru.first);
        }
        li_.push_front(std::make_pair(key, value));
        ht_[key] = li_.begin();
    }

private:
    int cap_;                                                       // maximum entries
    std::list<std::pair<int, int>> li_;                             // MRU front, LRU back
    std::unordered_map<int, std::list<std::pair<int, int>>::iterator> ht_;
};
| 637 |
429 | /*
* Copyright 2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.digitalpetri.modbus;
import java.util.Optional;
/**
 * Modbus function codes and their numeric wire values.
 *
 * <p>Improvement: {@link #fromCode(int)} previously duplicated the whole
 * code-to-constant mapping in a hand-maintained switch that had to be kept in
 * sync with the constant list above; it now derives the lookup from
 * {@code values()} itself, so adding a constant cannot silently break lookup.
 */
public enum FunctionCode {

    ReadCoils(0x01),
    ReadDiscreteInputs(0x02),
    ReadHoldingRegisters(0x03),
    ReadInputRegisters(0x04),
    WriteSingleCoil(0x05),
    WriteSingleRegister(0x06),
    ReadExceptionStatus(0x07),
    Diagnostics(0x08),
    GetCommEventCounter(0x0B),
    GetCommEventLog(0x0C),
    WriteMultipleCoils(0x0F),
    WriteMultipleRegisters(0x10),
    ReportSlaveId(0x11),
    ReadFileRecord(0x14),
    WriteFileRecord(0x15),
    MaskWriteRegister(0x16),
    ReadWriteMultipleRegisters(0x17),
    ReadFifoQueue(0x18),
    EncapsulatedInterfaceTransport(0x2B);

    private final int code;

    FunctionCode(int code) {
        this.code = code;
    }

    /** @return the numeric function code as sent on the wire. */
    public int getCode() {
        return code;
    }

    /**
     * Looks up the constant for a numeric function code.
     *
     * @param code the numeric code read from a frame.
     * @return the matching {@link FunctionCode}, or empty if unknown.
     */
    public static Optional<FunctionCode> fromCode(int code) {
        for (FunctionCode fc : values()) {
            if (fc.code == code) {
                return Optional.of(fc);
            }
        }
        return Optional.empty();
    }

    /**
     * @param code a candidate exception code.
     * @return true if {@code code} is the exception response (0x80 + function
     *     code) of a known function code.
     */
    public static boolean isExceptionCode(int code) {
        return fromCode(code - 0x80).isPresent();
    }

}
| 1,111 |
743 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.guacamole.tunnel;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Map-like storage for intercepted Guacamole streams.
 *
 * <p>Thread-safe: the underlying storage is a ConcurrentMap, and waitFor()
 * polls with a bounded timeout so a notify() that fires before the waiter
 * enters wait() cannot block it forever.
 *
 * @param <T>
 *     The type of object which will produce or consume the data sent over the
 *     intercepted Guacamole stream. Usually, this will be either InputStream
 *     or OutputStream.
 */
public class InterceptedStreamMap<T extends Closeable> {

    /**
     * Logger for this class.
     */
    private static final Logger logger = LoggerFactory.getLogger(InterceptedStreamMap.class);

    /**
     * The maximum number of milliseconds to wait for notification that a
     * stream has closed before explicitly checking for closure ourselves.
     */
    private static final long STREAM_WAIT_TIMEOUT = 1000;

    /**
     * Mapping of the indexes of all streams whose associated "blob" and "end"
     * instructions should be intercepted.
     */
    private final ConcurrentMap<String, InterceptedStream<T>> streams =
            new ConcurrentHashMap<String, InterceptedStream<T>>();

    /**
     * Closes the given stream, logging any errors that occur during closure.
     * The monitor of the stream is notified via a single call to notify() once
     * the attempt to close has been made.
     *
     * @param stream
     *     The stream to close and notify.
     */
    private void close(T stream) {

        // Attempt to close stream
        try {
            stream.close();
        }
        catch (IOException e) {
            // Closure failure is logged but deliberately not propagated:
            // waiters must still be released below.
            logger.warn("Unable to close intercepted stream: {}", e.getMessage());
            logger.debug("I/O error prevented closure of intercepted stream.", e);
        }

        // Notify waiting threads that the stream has ended
        synchronized (stream) {
            stream.notify();
        }

    }

    /**
     * Closes the stream object associated with the stream having the given
     * index, if any, removing it from the map, logging any errors that occur
     * during closure, and unblocking any in-progress calls to waitFor() for
     * that stream. If no such stream exists within this map, then this
     * function has no effect.
     *
     * @param index
     *     The index of the stream whose associated stream object should be
     *     closed.
     *
     * @return
     *     The stream associated with the given index, if the stream was stored
     *     within this map, or null if no such stream exists.
     */
    public InterceptedStream<T> close(String index) {

        // Remove associated stream
        InterceptedStream<T> stream = streams.remove(index);
        if (stream == null)
            return null;

        // Close stream if it exists
        close(stream.getStream());
        return stream;

    }

    /**
     * Closes the given stream, logging any errors that occur during closure,
     * and unblocking any in-progress calls to waitFor() for the given stream.
     * If the given stream is stored within this map, it will also be removed.
     *
     * @param stream
     *     The stream to close.
     *
     * @return
     *     true if the given stream was stored within this map, false
     *     otherwise.
     */
    public boolean close(InterceptedStream<T> stream) {

        // Remove stream if present (only when still mapped to this instance)
        boolean wasRemoved = streams.remove(stream.getIndex(), stream);

        // Close provided stream
        close(stream.getStream());

        return wasRemoved;

    }

    /**
     * Removes and closes all streams stored within this map, logging any errors
     * that occur during closure, and unblocking any in-progress calls to
     * waitFor().
     */
    public void closeAll() {

        // Close any active streams
        for (InterceptedStream<T> stream : streams.values())
            close(stream.getStream());

        // Remove now-useless references
        streams.clear();

    }

    /**
     * Blocks until the given stream is closed, or until another stream with
     * the same index replaces it.
     *
     * @param stream
     *     The stream to wait for.
     */
    public void waitFor(InterceptedStream<T> stream) {

        T underlyingStream = stream.getStream();

        // Wait for stream to close; the bounded wait re-checks the map on
        // timeout so a notify() delivered before we started waiting (or a
        // spurious wakeup) cannot leave this thread blocked indefinitely.
        synchronized (underlyingStream) {
            while (streams.get(stream.getIndex()) == stream) {
                try {
                    underlyingStream.wait(STREAM_WAIT_TIMEOUT);
                }
                catch (InterruptedException e) {
                    // Ignore
                }
            }
        }

    }

    /**
     * Returns the stream stored in this map under the given index.
     *
     * @param index
     *     The index of the stream to return.
     *
     * @return
     *     The stream having the given index, or null if no such stream is
     *     stored within this map.
     */
    public InterceptedStream<T> get(String index) {
        return streams.get(index);
    }

    /**
     * Adds the given stream to this map, storing it under its associated
     * index. If another stream already exists within this map having the same
     * index, that stream will be closed and replaced.
     *
     * @param stream
     *     The stream to store within this map.
     */
    public void put(InterceptedStream<T> stream) {

        // Add given stream to map
        InterceptedStream<T> oldStream =
                streams.put(stream.getIndex(), stream);

        // If a previous stream DID exist, close it
        if (oldStream != null)
            close(oldStream.getStream());

    }

}
| 2,340 |
852 | <filename>RecoMuon/MuonIdentification/python/muonShowerInformation_cfi.py
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
# Parameters consumed by MuonShowerInformationFiller: the rec-hit/segment
# input collections for each muon subdetector plus the transient rec-hit
# builder labels used when filling muon shower information.
MuonShowerParameters = cms.PSet(
    MuonShowerInformationFillerParameters = cms.PSet(
        MuonServiceProxy,
        # Per-subdetector reconstructed hits.
        DTRecSegmentLabel = cms.InputTag("dt1DRecHits"),
        CSCRecSegmentLabel = cms.InputTag("csc2DRecHits"),
        RPCRecSegmentLabel = cms.InputTag("rpcRecHits"),
        # Higher-level segments built from those hits.
        DT4DRecSegmentLabel = cms.InputTag("dt4DSegments"),
        CSCSegmentLabel = cms.InputTag("cscSegments"),
        # Transient rec-hit builders, referenced by ESProducer label.
        TrackerRecHitBuilder = cms.string('WithTrackAngle'),
        MuonRecHitBuilder = cms.string('MuonRecHitBuilder'),
    )
)
| 288 |
61,676 | <reponame>ni-ning/django
from django.forms import ChoiceField, Field, Form, Select
from django.test import SimpleTestCase
class BasicFieldsTests(SimpleTestCase):

    def test_field_sets_widget_is_required(self):
        """The widget mirrors the field's `required` flag."""
        required_field = Field(required=True)
        optional_field = Field(required=False)
        self.assertTrue(required_field.widget.is_required)
        self.assertFalse(optional_field.widget.is_required)

    def test_cooperative_multiple_inheritance(self):
        """Field.__init__() calls super().__init__(), so sibling mixins run."""
        class A:
            def __init__(self):
                self.class_a_var = True
                super().__init__()

        class ComplexField(Field, A):
            def __init__(self):
                super().__init__()

        field = ComplexField()
        self.assertTrue(field.class_a_var)

    def test_field_deepcopies_widget_instance(self):
        """Each field instance gets its own copy of a class-level widget."""
        class CustomChoiceField(ChoiceField):
            widget = Select(attrs={'class': 'my-custom-class'})

        class TestForm(Form):
            field1 = CustomChoiceField(choices=[])
            field2 = CustomChoiceField(choices=[])

        form = TestForm()
        form.fields['field1'].choices = [('1', '1')]
        form.fields['field2'].choices = [('2', '2')]
        self.assertEqual(form.fields['field1'].widget.choices, [('1', '1')])
        self.assertEqual(form.fields['field2'].widget.choices, [('2', '2')])
class DisabledFieldTests(SimpleTestCase):

    def test_disabled_field_has_changed_always_false(self):
        """A disabled field ignores submitted data, so it never changes."""
        disabled = Field(disabled=True)
        self.assertFalse(disabled.has_changed('x', 'y'))
565 | <reponame>dumpmemory/zenml
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
After executing a pipeline, the user needs to be able to fetch it from history
and perform certain tasks. The post_execution submodule provides a set of
interfaces with which the user can interact with artifacts, the pipeline, steps,
and the post-run pipeline object.
"""
from zenml.post_execution.artifact import ArtifactView
from zenml.post_execution.pipeline import PipelineView
from zenml.post_execution.pipeline_run import PipelineRunView
from zenml.post_execution.step import StepView
# Public post-execution API surface re-exported from the submodules above.
__all__ = ["PipelineView", "PipelineRunView", "StepView", "ArtifactView"]
| 343 |
1,847 | <filename>src/SymbolPaths/QSettingsWrapperTest.cpp
// Copyright (c) 2021 The Orbit Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <gtest/gtest.h>
#include <QCoreApplication>
#include <QSettings>
#include <filesystem>
#include "SymbolPaths/QSettingsWrapper.h"
// Fixture inputs: three representative symbol paths, including a
// Windows-style one, round-tripped through QSettings.
const std::filesystem::path path0{"/path/to/symbols/path"};
const std::filesystem::path path1{"/home/src/project/build/"};
const std::filesystem::path path2{R"(c:\project\build\)"};
constexpr const char* kOrgName = "The Orbit Authors";
namespace orbit_symbol_paths {
// Verifies SavePaths/LoadPaths round-trip through an empty QSettings store.
TEST(SymbolPathsManager, LoadAndSave) {
  QCoreApplication::setOrganizationDomain(kOrgName);
  // NOTE(review): the application name says "SetAndGet" while the test is
  // named LoadAndSave — looks like a copy/paste leftover. Confirm before
  // renaming, since this string determines where QSettings stores the data.
  QCoreApplication::setApplicationName("SymbolPathsManager.SetAndGet");
  { // clear before test;
    QSettings settings;
    settings.clear();
  }
  // An empty store must yield an empty path list.
  EXPECT_EQ(LoadPaths(), std::vector<std::filesystem::path>{});
  std::vector<std::filesystem::path> paths{
      path0,
      path1,
      path2,
  };
  SavePaths(paths);
  EXPECT_EQ(LoadPaths(), paths);
}
}  // namespace orbit_symbol_paths
501 | #!/usr/bin/env python
'''
This example shows how to use P-junction screening in the SGX module.
P-junction screening (pjs in the module) allows the SGX method to
achieve linear scaling for large systems.
'''
from pyscf import gto
from pyscf import scf
from pyscf import dft
from pyscf import sgx
import time
def _timed_energy(method, density):
    """Return (total energy, wall-clock seconds) for one energy evaluation."""
    start = time.monotonic()
    energy = method.energy_tot(dm=density)
    return energy, time.monotonic() - start

# Build the molecule and a converged PBE density to evaluate with SGX.
molecule = gto.M(atom='a12.xyz', basis='sto-3g')
ks = dft.RKS(molecule)
ks.xc = 'PBE'
ks.kernel()
density = ks.make_rdm1()

# Reference run: SGX with DF Coulomb but without P-junction screening.
mf = sgx.sgx_fit(scf.RHF(molecule), pjs=False)
mf.with_df.dfj = True
mf.build()
energy_ref, time_ref = _timed_energy(mf, density)
print('Without P-junction screening:', time_ref, 's')
print('Energy:', energy_ref)

# Turn on P-junction screening. dfj must also be true.
mf.with_df.pjs = True
# Set larger screening tolerance to demonstrate speedup.
mf.direct_scf_tol = 1e-10
mf.build()
energy_pjs, time_pjs = _timed_energy(mf, density)
print('With P-junction screening:', time_pjs, 's')
print('Energy:', energy_pjs)
print('P-junction screening error:', abs(energy_pjs - energy_ref))
print('P-junction screening speedup:', time_ref / time_pjs)
| 436 |
310 | <filename>gear/software/a/air-video.json
{
"name": "Air Video",
"description": "Software to stream video to your iDevice.",
"url": "http://www.inmethod.com/air-video/"
} | 61 |
1,830 | <filename>protocol-impl/src/main/java/io/camunda/zeebe/protocol/impl/encoding/ExecuteQueryResponse.java
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH under
* one or more contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright ownership.
* Licensed under the Zeebe Community License 1.1. You may not use this file
* except in compliance with the Zeebe Community License 1.1.
*/
package io.camunda.zeebe.protocol.impl.encoding;
import io.camunda.zeebe.protocol.record.ExecuteQueryResponseDecoder;
import io.camunda.zeebe.protocol.record.ExecuteQueryResponseEncoder;
import io.camunda.zeebe.protocol.record.MessageHeaderDecoder;
import io.camunda.zeebe.protocol.record.MessageHeaderEncoder;
import io.camunda.zeebe.util.buffer.BufferReader;
import io.camunda.zeebe.util.buffer.BufferUtil;
import io.camunda.zeebe.util.buffer.BufferWriter;
import org.agrona.DirectBuffer;
import org.agrona.MutableDirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;
/**
 * SBE codec wrapper for the ExecuteQueryResponse message, which carries a
 * single variable-length BPMN process id.
 *
 * <p>Bug fixed: {@link #getLength()} previously returned only the header plus
 * the fixed-size block, omitting the variable-length bpmnProcessId field
 * (its length header and data) that {@link #write} actually emits — so
 * callers sizing a buffer from getLength() allocated too little.
 */
public final class ExecuteQueryResponse implements BufferReader, BufferWriter {

  private final MessageHeaderEncoder headerEncoder = new MessageHeaderEncoder();
  private final MessageHeaderDecoder headerDecoder = new MessageHeaderDecoder();
  private final ExecuteQueryResponseEncoder bodyEncoder = new ExecuteQueryResponseEncoder();
  private final ExecuteQueryResponseDecoder bodyDecoder = new ExecuteQueryResponseDecoder();

  /** View over the BPMN process id bytes; never null, empty after reset(). */
  private final DirectBuffer rawBpmnProcessId = new UnsafeBuffer();

  public ExecuteQueryResponse() {
    reset();
  }

  /** Clears the wrapped process id. Returns this instance for chaining. */
  public ExecuteQueryResponse reset() {
    rawBpmnProcessId.wrap(0, 0);
    return this;
  }

  /** @return the BPMN process id decoded as a String. */
  public String getBpmnProcessId() {
    return BufferUtil.bufferAsString(rawBpmnProcessId);
  }

  /** Wraps (does not copy) the given buffer as the BPMN process id. */
  public ExecuteQueryResponse setBpmnProcessId(final DirectBuffer bpmnProcessId) {
    rawBpmnProcessId.wrap(bpmnProcessId);
    return this;
  }

  @Override
  public int getLength() {
    // Must match the bytes produced by write(): message header, fixed block,
    // then the var-length field's length header plus its data.
    return headerEncoder.encodedLength()
        + bodyEncoder.sbeBlockLength()
        + ExecuteQueryResponseEncoder.bpmnProcessIdHeaderLength()
        + rawBpmnProcessId.capacity();
  }

  @Override
  public void write(final MutableDirectBuffer buffer, final int offset) {
    bodyEncoder
        .wrapAndApplyHeader(buffer, offset, headerEncoder)
        .putBpmnProcessId(rawBpmnProcessId, 0, rawBpmnProcessId.capacity());
  }

  @Override
  public void wrap(final DirectBuffer buffer, final int offset, final int length) {
    bodyDecoder.wrapAndApplyHeader(buffer, offset, headerDecoder);
    bodyDecoder.wrapBpmnProcessId(rawBpmnProcessId);
  }
}
| 775 |
643 | from pysteel import cloudfoundry
def setup(context):
    """Reset the RabbitMQ connector app and (re)provision its service.

    :type context: behave.runner.Context
    """
    cf = cloudfoundry.CloudFoundry(context)

    # Drop any connector app left over from a previous run.
    cf.delete_app('rabbitmq-connector')

    # Recreate the RabbitMQ service instance from scratch.
    instance_name = 'myRabbitMQService'
    cf.delete_service(instance_name)
    cf.create_service('p-rabbitmq', 'standard', instance_name)
| 160 |
555 | <reponame>maxtomCMU/hfnet<gh_stars>100-1000
import numpy as np
import tensorflow as tf
import logging
import random
from pathlib import Path
from .base_dataset import BaseDataset
from .utils import pipeline
from hfnet.settings import DATA_PATH
# Convenience shims monkey-patched onto tf.data.Dataset:
#  - map_parallel: map with a fixed parallelism of 10 concurrent calls
#  - keys: list the dict keys of the dataset's output structure
tf.data.Dataset.map_parallel = lambda self, fn: self.map(
    fn, num_parallel_calls=10)
tf.data.Dataset.keys = lambda self: list(self.output_types.keys())
class Distillation(BaseDataset):
default_config = {
'validation_size': 200,
'load_targets': True,
'image_dirs': [],
'targets': [],
'truncate': None,
'shuffle': True,
'preprocessing': {
'resize': [480, 640],
'grayscale': True},
'cache_in_memory': False,
'augmentation': {
'photometric': {
'enable': False,
'primitives': 'all',
'params': {},
'random_order': True,
},
'homographic': {
'enable': False,
'params': {},
'valid_border_margin': 0,
},
},
'for_batching': True,
}
    def _init_dataset(self, **config):
        """Builds the list of image paths and matching target .npz paths.

        Returns a dict of parallel lists: 'names', 'images', and — when
        ``load_targets`` is set — one integer key per entry of
        ``config['targets']`` holding the per-sample target file paths.
        Samples missing any target file are silently dropped.
        """
        data = {'names': [], 'images': []}
        if config['load_targets']:
            # Fail early if any target directory is missing.
            for i, target in enumerate(config['targets']):
                for im in config['image_dirs']:
                    assert Path(Path(DATA_PATH, im).parent,
                                target['dir']).exists()
                data[i] = []
        logging.info('Listing image files')
        im_paths = []
        names = []
        for i, image_dir in enumerate(config['image_dirs']):
            # Sort for a deterministic order before any optional truncation.
            paths = Path(DATA_PATH, image_dir).glob('*.jpg')
            paths = sorted([str(p) for p in paths])
            if config['truncate'] is not None:
                t = config['truncate'][i]
                if t is not None:
                    paths = paths[:t]
            im_paths.extend(paths)
            names.extend([Path(p).stem for p in paths])
        if config['load_targets']:
            logging.info('Listing target files')
            # Keep only samples for which every target file exists.
            for im, n in zip(im_paths, names):
                ok = True
                target_paths = []
                for target in config['targets']:
                    target_path = Path(
                        Path(im).parent.parent, target['dir'], f'{n}.npz')
                    # target_path = Path(DATA_PATH, target['dir'], f'{n}.npz')
                    ok &= target_path.exists()
                    target_paths.append(target_path.as_posix())
                if not ok:
                    continue
                data['images'].append(im)
                data['names'].append(n)
                for i, p in enumerate(target_paths):
                    data[i].append(p)
        else:
            data['names'].extend(names)
            data['images'].extend(im_paths)
        # Transpose dict-of-lists -> list-of-dicts so samples can be shuffled
        # as units, then transpose back. Seed 0 keeps the split reproducible.
        data_list = [dict(zip(data, d)) for d in zip(*data.values())]
        if config['shuffle']:
            random.Random(0).shuffle(data_list)
        data = {k: [dic[k] for dic in data_list] for k in data_list[0]}
        logging.info(f'Dataset size: {len(data["images"])}')
        return data
def _get_data(self, paths, split_name, **config):
is_training = split_name == 'training'
def _read_image(path):
image = tf.read_file(path)
image = tf.image.decode_jpeg(image, channels=3)
return image
# Python function
def _create_npz_reader(keys):
def _read_npz(keys, path):
npz = np.load(path.decode('utf-8'))
return [npz[k].astype(np.float32) for k in keys]
return lambda x: _read_npz(keys, x)
def _preprocess(image):
if config['preprocessing']['resize']:
image = tf.image.resize_images(
image, config['preprocessing']['resize'],
method=tf.image.ResizeMethod.BILINEAR)
if config['preprocessing']['grayscale']:
image = tf.image.rgb_to_grayscale(image)
return image
def _delete_keys(data):
keys = ['keypoints']
for k in keys:
data.pop(k, None)
return data
names = tf.data.Dataset.from_tensor_slices(paths['names'])
images = tf.data.Dataset.from_tensor_slices(paths['images'])
images = images.map_parallel(_read_image)
images = images.map_parallel(_preprocess)
dataset = tf.data.Dataset.zip({'image': images, 'name': names})
if config['load_targets']:
for i, target in enumerate(config['targets']):
t = tf.data.Dataset.from_tensor_slices(paths[i])
reader = _create_npz_reader(target['keys'])
types = [tf.float32]*len(target['keys'])
t = t.map_parallel(lambda p: tf.py_func(reader, [p], types))
dataset = tf.data.Dataset.zip((dataset, t)).map(
lambda da, de: {**da, **{k: de[j]
for j, k in enumerate(target['keys'])}})
# Reversed convention...
if 'keypoints' in dataset.keys():
dataset = dataset.map(
lambda d: {
**d, 'keypoints': tf.reshape(
d['keypoints'][:, ::-1], [-1, 2])})
if split_name == 'validation':
dataset = dataset.take(config['validation_size'])
if split_name == 'training':
dataset = dataset.skip(config['validation_size'])
if config['cache_in_memory']:
tf.logging.info('Caching dataset, fist access will take some time')
dataset = dataset.cache()
if is_training:
if config['augmentation']['photometric']['enable']:
dataset = dataset.map_parallel(
lambda d: pipeline.photometric_augmentation(
d, **config['augmentation']['photometric']))
if config['augmentation']['homographic']['enable']:
dataset = dataset.map_parallel(
lambda d: pipeline.homographic_augmentation(
d, **config['augmentation']['homographic']))
if 'keypoints' in dataset.keys():
dataset = dataset.map_parallel(pipeline.add_keypoint_map)
if config['for_batching']:
dataset = dataset.map_parallel(_delete_keys)
return dataset
| 3,430 |
572 | from django.contrib.auth.models import AbstractUser
class SalesRep(AbstractUser):
    """Custom user model for sales representatives.

    Inherits all fields and behavior from Django's ``AbstractUser``; login
    is by username (the ``AbstractUser`` default, restated explicitly here).
    """

    # Field used as the unique identifier at authentication time.
    USERNAME_FIELD = 'username'
| 36 |
1,142 | # Copyright (c) Fairlearn contributors.
# Licensed under the MIT License.
"""
This script generates an html table of contributors, with names and avatars.
The list is generated from Fairlearn's teams on GitHub.
It's originally copied from scikit-learn and adapted for Fairlearn.
"""
from pathlib import Path
from os import path
# Repository root: two levels up from this script's own location.
REPO_FOLDER = Path(path.abspath(__file__)).parent.parent

# GitHub alias -> display name for everyone shown on the maintainers page.
MAINTAINERS = {
    "adrinjalali": "<NAME>",
    "hildeweerts": "<NAME>",
    "mmadaio": "<NAME>",
    "MiroDudik": "<NAME>",
    "riedgar-ms": "<NAME>",
    "romanlutz": "<NAME>"
}
def generate_table(maintainers):
    """Render the maintainers mapping as an rst ``.. raw :: html`` block.

    Args:
        maintainers: mapping of GitHub alias -> display name; one HTML
            ``<div>`` with avatar, profile link and name is emitted per entry.

    Returns:
        The generated reStructuredText/HTML as a single newline-joined string.
    """
    lines = [
        ".. raw :: html\n",
        " <!-- Generated by generate_maintainers_table.py -->",
        ' <div class="maintainers-container">',
        " <style>",
        " img.avatar {border-radius: 10px;}",
        " </style>",
    ]
    for alias, name in maintainers.items():
        # f-strings throughout (previously a mix of %-formatting and f-strings).
        profile = f"https://github.com/{alias}"
        lines.append(" <div>")
        lines.append(
            f" <a href='{profile}'><img src='{profile}.png' class='avatar' /></a> <br />"
        )
        lines.append(f" <p>{name}</p>")
        lines.append(" </div>")
    lines.append(" </div>")
    return "\n".join(lines)
if __name__ == "__main__":
    # Overwrite the generated maintainers page in the docs tree.
    with open(REPO_FOLDER / "docs" / "about" / "maintainers.rst", "w+") as rst_file:
        rst_file.write(generate_table(MAINTAINERS))
| 633 |
4,901 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package libcore.java.io;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import junit.framework.TestCase;
/**
 * Legacy harmony tests for {@link java.io.FileWriter}: constructor failure
 * modes (writing to a directory) and two regression tests for premature EOF
 * when reading back written data.
 */
public class OldFileWriterTest extends TestCase {

    FileWriter fw;

    FileInputStream fis;

    BufferedWriter bw;

    File f;

    /** Constructing a FileWriter from a directory must fail. */
    public void test_ConstructorLjava_io_File_IOException() {
        File dir = new File(System.getProperty("java.io.tmpdir"));

        try {
            fw = new FileWriter(dir);
            fail("Test 1: IOException expected.");
        } catch (IOException e) {
            // Expected: a directory cannot be opened for writing.
        }
    }

    /** Same as above, with the append-mode constructor. */
    public void test_ConstructorLjava_io_FileZ_IOException() {
        File dir = new File(System.getProperty("java.io.tmpdir"));

        try {
            fw = new FileWriter(dir, true);
            fail("Test 1: IOException expected.");
        } catch (IOException e) {
            // Expected.
        }
    }

    /** String-path constructor pointed at a directory must fail. */
    public void test_ConstructorLjava_lang_String_IOException() {
        try {
            fw = new FileWriter(System.getProperty("java.io.tmpdir"));
            fail("Test 1: IOException expected.");
        } catch (IOException e) {
            // Expected.
        }
    }

    /** String-path append-mode constructor pointed at a directory must fail. */
    public void test_ConstructorLjava_lang_StringZ_IOException() {
        try {
            fw = new FileWriter(System.getProperty("java.io.tmpdir"), false);
            fail("Test 1: IOException expected.");
        } catch (IOException e) {
            // Expected.
        }
    }

    /**
     * Writes many copies of a string char-by-char and reads them back one
     * char at a time, failing on any premature EOF.
     */
    public void test_handleEarlyEOFChar_1() {
        String str = "All work and no play makes Jack a dull boy\n";
        int NUMBER = 2048;
        int j = 0;
        int len = str.length() * NUMBER;
        /* == 88064 */ /* NUMBER compulsively written copies of the same string */
        char[] strChars = new char[len];
        for (int i = 0; i < NUMBER; ++i) {
            for (int k = 0; k < str.length(); ++k) {
                strChars[j++] = str.charAt(k);
            }
        }

        File f = null;
        FileWriter fw = null;
        try {
            f = File.createTempFile("ony", "by_one");
            fw = new FileWriter(f);
            fw.write(strChars);
            fw.close();
            InputStreamReader in = null;
            FileInputStream fis = new FileInputStream(f);
            in = new InputStreamReader(fis);
            try {
                int b;
                int errors = 0;
                for (int offset = 0; offset < strChars.length; ++offset) {
                    b = in.read();
                    if (b == -1) {
                        fail("Early EOF at offset " + offset + "\n");
                        return;
                    }
                }
                assertEquals(0, errors);
            } finally {
                // Fix: the reader (and underlying stream) were previously leaked.
                in.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Fix: the temporary file was previously left behind.
            if (f != null) {
                f.delete();
            }
        }
    }

    /**
     * Writes a 64 KiB pattern in one call and verifies the file length and
     * content round-trip unchanged.
     */
    public void test_handleEarlyEOFChar_2() throws IOException {
        int capacity = 65536;
        byte[] bytes = new byte[capacity];
        byte[] bs = {
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
        };
        // 8 KiB runs of each letter.
        for (int i = 0; i < bytes.length; i++) {
            bytes[i] = bs[i / 8192];
        }
        String inputStr = new String(bytes);
        int len = inputStr.length();

        File f = File.createTempFile("FileWriterBugTest ", null);
        FileWriter writer = new FileWriter(f);
        writer.write(inputStr);
        writer.close();
        long flen = f.length();

        FileReader reader = new FileReader(f);
        char[] outChars = new char[capacity];
        int outCount = reader.read(outChars);
        String outStr = new String(outChars, 0, outCount);
        // Fix: the reader was previously leaked.
        reader.close();
        f.deleteOnExit();

        assertEquals(len, flen);
        assertEquals(inputStr, outStr);
    }

    protected void setUp() throws Exception {
        // Reserve a fresh temp-file name; the tests create the file themselves.
        f = File.createTempFile("writer", ".tst");

        if (f.exists())
            if (!f.delete()) {
                fail("Unable to delete test file");
            }
    }

    protected void tearDown() {
        // Best-effort cleanup; fields may never have been assigned.
        try {
            if (bw != null) {
                bw.close();
            }
        } catch (Exception e) {
        }
        try {
            if (fis != null) {
                fis.close();
            }
        } catch (Exception e) {
        }
        f.delete();
    }
}
| 2,268 |
945 | <reponame>arobert01/ITK
/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkFileListVideoIO_h
#define itkFileListVideoIO_h
#include "itkVideoIOBase.h"
#include "ITKVideoIOExport.h"
namespace itk
{
/**
*\class FileListVideoIO
*
* \brief VideoIO object for reading and writing videos as a sequence of frame
* files.
*
* This VideoIO treats a sequential list of file names as the frames of a
* video. The frames must be specified in a comma-separated list. Also, the
* SplitFileNames(...) static method is made public in order to allow the
* splitting functionality to be accessed publicly.
*
* \ingroup ITKVideoIO
*
*/
class ITKVideoIO_EXPORT FileListVideoIO : public VideoIOBase
{
public:
  ITK_DISALLOW_COPY_AND_MOVE(FileListVideoIO);

  /** Standard class type aliases. */
  using Self = FileListVideoIO;
  using Superclass = VideoIOBase;
  using Pointer = SmartPointer<Self>;
  using ConstPointer = SmartPointer<const Self>;

  /** Method for creation through the object factory. */
  itkNewMacro(Self);

  /** Run-time type information (and related methods). */
  itkTypeMacro(FileListVideoIO, VideoIOBase);

  /** Get the internal ImageIOBase object. */
  itkGetConstObjectMacro(ImageIO, ImageIOBase);

  /** Get the list of files to read. */
  itkGetConstMacro(FileNames, std::vector<std::string>);

  /** Override SetFileName to do parsing of the comma-separated list. */
  void
  SetFileName(const std::string & fileList) override;

  void
  SetFileName(const char * fileList) override;

  /** Close the reader and writer and reset members. */
  void
  FinishReadingOrWriting() override;

  /** Split up the input file names using comma (',') as the separator character.
   * This method is made public so that places where FileListVideoIO is used
   * can access the individual file names. This is mostly an issue for testing. */
  static std::vector<std::string>
  SplitFileNames(const std::string & fileList);

  /** Set to reading from file. */
  void
  SetReadFromFile() override;

  /** Set to reading from a camera. */
  void
  SetReadFromCamera() override;

  /** Determine the file type. Returns true if this ImageIO can read the
   * file specified. */
  bool
  CanReadFile(const char *) override;

  /** Return whether or not the VideoIO can read from a camera. */
  bool
  CanReadCamera(CameraIDType cameraID) const override;

  /** Set the spacing and dimension information for the set filename. */
  void
  ReadImageInformation() override;

  /** Reads the data from disk into the memory buffer provided. */
  void
  Read(void * buffer) override;

  /** Set the next frame that should be read. Returns true if the operation
   * was successful. */
  bool
  SetNextFrameToRead(FrameOffsetType frameNumber) override;

  /** Accessor functions for video specific information. */
  TemporalOffsetType
  GetPositionInMSec() const override
  {
    return this->m_PositionInMSec;
  }

  TemporalOffsetType
  GetRatio() const override
  {
    return this->m_Ratio;
  }

  FrameOffsetType
  GetFrameTotal() const override
  {
    return this->m_FrameTotal;
  }

  TemporalRatioType
  GetFramesPerSecond() const override
  {
    return this->m_FramesPerSecond;
  }

  FrameOffsetType
  GetCurrentFrame() const override
  {
    return this->m_CurrentFrame;
  }

  itkGetConstMacro(IFrameInterval, FrameOffsetType);

  FrameOffsetType
  GetLastIFrame() const override
  {
    return this->m_LastIFrame;
  }

  /** Override accessors to pass through to internal image reader. */
  double
  GetSpacing(unsigned int i) const override;

  double
  GetOrigin(unsigned int i) const override;

  std::vector<double>
  GetDirection(unsigned int i) const override;

  /** Determine the file type. Returns true if this ImageIO can write the
   * file specified. */
  bool
  CanWriteFile(const char *) override;

  /** Writes the spacing and dimensions of the image.
   * Assumes SetFileName has been called with a valid file name. */
  void
  WriteImageInformation() override;

  /** Writes the data to disk from the memory buffer provided. Make sure
   * that the IORegion has been set properly. */
  void
  Write(const void * buffer) override;

  /** Set Writer parameters. */
  void
  SetWriterParameters(TemporalRatioType framesPerSecond,
                      const std::vector<SizeValueType> & dim,
                      const char * fourCC,
                      unsigned int nChannels,
                      IOComponentEnum componentType) override;

protected:
  FileListVideoIO();
  ~FileListVideoIO() override;

  void
  PrintSelf(std::ostream & os, Indent indent) const override;

  /** Reset member variables to empty state closed. */
  void
  ResetMembers();

  /** Open the reader if the reader and writer are not open. */
  void
  OpenReader();

  /** Open the writer if the reader and writer are not open. */
  void
  OpenWriter();

  /** Verify that all file names in the list have the same extension. */
  bool
  VerifyExtensions(const std::vector<std::string> & fileList) const;

private:
  /** Delegate ImageIO used to read/write the individual frame files. */
  ImageIOBase::Pointer m_ImageIO;

  /** Parsed list of frame file names (from the comma-separated input). */
  std::vector<std::string> m_FileNames;
};
} // end namespace itk
#endif // itkFileListVideoIO_h
| 1,817 |
728 | /**
* Copyright (c) 2011, University of Konstanz, Distributed Systems Group All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met: * Redistributions of source code must retain the
* above copyright notice, this list of conditions and the following disclaimer. * Redistributions
* in binary form must reproduce the above copyright notice, this list of conditions and the
* following disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the University of Konstanz nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sirix.service.xml.xpath.filter;
// /*
// * Copyright (c) 2008, <NAME> (Master Thesis), University of Konstanz
// *
// * Permission to use, copy, modify, and/or distribute this software for any
// * purpose with or without fee is hereby granted, provided that the above
// * copyright notice and this permission notice appear in all copies.
// *
// * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// *
// * $Id: PosFilter.java 4238 2008-07-03 13:32:30Z scherer $
// */
//
// package org.sirix.xpath.filter;
//
// import org.sirix.api.IAxis;
// import org.sirix.api.IReadTransaction;
// import org.sirix.axislayer.AbstractAxis;
//
// /**
// * @author <NAME>
// */
// public class PosFilter extends AbstractAxis implements IAxis {
//
// private final int mExpectedPos;
//
// /** The position of the current item. */
// private int mPosCount;
//
// /**
// * Constructor. Initializes the internal state.
// *
// * @param rtx
// * Exclusive (immutable) trx to iterate with.
// * @param expectedPos
// * he expected position
// */
// public PosFilter(final IReadTransaction rtx, final int expectedPos) {
//
// super(rtx);
// mExpectedPos = expectedPos;
// mPosCount = 0;
// }
//
// /**
// * {@inheritDoc}
// */
// @Override
// public final void reset(final long nodeKey) {
//
// super.reset(nodeKey);
// mPosCount = 0;
// }
//
// /**
// * {@inheritDoc}
// */
// public final boolean hasNext() {
//
// resetToLastKey();
//
// // a predicate has to evaluate to true only once.
// if (mExpectedPos == ++mPosCount) {
// return true;
// }
//
// resetToStartKey();
// return false;
//
// }
//
// }
| 1,090 |
373 | <reponame>ADLINK/edk2-platforms
/** @file
*
* Copyright (c) 2016-2018, Hisilicon Limited. All rights reserved.
* Copyright (c) 2016-2018, Linaro Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
#include <OemNicConfig.h>
//
// Read the MAC address of the given NIC port, for the "2P" (two-port,
// per the naming) board variant. Thin adapter over the OEM helper.
// Always reports EFI_SUCCESS; the result of OemGetMac itself is not
// propagated -- NOTE(review): confirm OemGetMac cannot fail, otherwise
// its status should be returned here.
//
EFI_STATUS
EFIAPI OemGetMac2P (
  IN OUT EFI_MAC_ADDRESS *Mac,
  IN UINTN Port
  )
{
  OemGetMac (Mac, Port);
  return EFI_SUCCESS;
}
//
// Program the MAC address of the given NIC port ("2P" board variant).
// Always reports EFI_SUCCESS; the result of OemSetMac itself is not
// propagated -- NOTE(review): confirm OemSetMac cannot fail.
//
EFI_STATUS
EFIAPI OemSetMac2P (
  IN EFI_MAC_ADDRESS *Mac,
  IN UINTN Port
  )
{
  OemSetMac (Mac, Port);
  return EFI_SUCCESS;
}
//
// Board NIC protocol instance exposing the two-port MAC accessors above.
//
HISI_BOARD_NIC_PROTOCOL mHisiBoardNicProtocol2P = {
  .GetMac = OemGetMac2P,
  .SetMac = OemSetMac2P,
};
//
// Driver entry point: publishes mHisiBoardNicProtocol2P on this image's
// handle so platform code can look up the MAC get/set callbacks.
//
EFI_STATUS
EFIAPI
OemNicConfigEntry (
  IN EFI_HANDLE ImageHandle,
  IN EFI_SYSTEM_TABLE *SystemTable
  )
{
  EFI_STATUS Status;

  Status = gBS->InstallProtocolInterface (
             &ImageHandle,
             &gHisiBoardNicProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mHisiBoardNicProtocol2P
             );
  if (EFI_ERROR (Status)) {
    // Log and propagate the failure; nothing to clean up at this point.
    DEBUG ((DEBUG_ERROR, "[%a]:[%dL] Install Protocol failed %r\n",
      __FUNCTION__, __LINE__, Status));
    return Status;
  }

  return EFI_SUCCESS;
}
| 676 |
481 | <reponame>admariner/datapane
from typing import Union
from packaging import version as v
from packaging.specifiers import SpecifierSet
from datapane.common import log
class VersionMismatch(Exception):
    """Raised when consumer and provider API versions are incompatible."""
    pass
def is_version_compatible(
    provider_v_in: Union[str, v.Version],
    consumer_v_in: Union[str, v.Version],
    raise_exception: bool = True,
) -> bool:
    """Check that the provider version supports the consumer version.

    The compatibility spec pins the consumer to a micro/patch release of the
    provider (``~=major.minor.0``).

    NOTE - this isn't semver - breaks when have > v1 release as then treats
    minor as breaking, e.g. 2.2.5 is not compat with 2.1.5.

    Args:
        provider_v_in: provider version, as a string or parsed Version.
        consumer_v_in: consumer version, as a string or parsed Version.
        raise_exception: when True, raise VersionMismatch on incompatibility
            instead of returning False.

    Returns:
        True when compatible, False otherwise (unless raising).
    """
    def _as_version(value: Union[str, v.Version]) -> v.Version:
        return v.Version(value) if isinstance(value, str) else value

    provider_v = _as_version(provider_v_in)
    consumer_v = _as_version(consumer_v_in)

    provider_spec = SpecifierSet(f"~={provider_v.major}.{provider_v.minor}.0")
    log.debug(f"Provider spec {provider_spec}, Consumer version {consumer_v}")

    if consumer_v in provider_spec:
        return True
    if raise_exception:
        raise VersionMismatch(f"Consumer ({consumer_v}) and Provider ({provider_spec}) API versions not compatible")
    return False
| 435 |
462 | <reponame>Hacky-DH/op<filename>libop/com/dllmain.h
// dllmain.h: declaration of the module class.
#include <atlbase.h>
#include <atlcom.h>
#include <atlctl.h>
// ATL module class for the "op" COM DLL: wires the type library (LIBID_opLib)
// to the registry APPID resource used at (un)registration time.
class CopModule : public ATL::CAtlDllModuleT< CopModule >
{
public :
    DECLARE_LIBID(LIBID_opLib)
    DECLARE_REGISTRY_APPID_RESOURCEID(IDR_OP, "{66b9c175-82f2-45e9-af86-58ad5ded5adc}")
};

// Single module instance; defined elsewhere (its translation unit).
extern class CopModule _AtlModule;
| 177 |
414 | <reponame>pangang107/sealtalk-ios
//
// RCDChatBackgroundCell.h
// SealTalk
//
// Created by 孙浩 on 2019/8/8.
// Copyright © 2019 RongCloud. All rights reserved.
//
#import <UIKit/UIKit.h>
NS_ASSUME_NONNULL_BEGIN
/// Collection-view cell used to pick a chat background.
@interface RCDChatBackgroundCell : UICollectionViewCell

/// Name of the background image resource shown by the cell.
@property (nonatomic, strong) NSString *imageName;

/// Hides the image content — presumably toggles the cell's internal image
/// view; confirm in the implementation.
@property (nonatomic, assign) BOOL imgHidden;

@end
NS_ASSUME_NONNULL_END
| 157 |
439 | /**
* Baidu.com,Inc.
* Copyright (c) 2000-2013 All Rights Reserved.
*/
package com.baidu.hsb.parser.ast.stmt.mts;
import com.baidu.hsb.parser.ast.expression.primary.Identifier;
import com.baidu.hsb.parser.ast.stmt.SQLStatement;
import com.baidu.hsb.parser.visitor.SQLASTVisitor;
/**
* @author <EMAIL>
*/
/**
 * AST node for the multi-statement-transaction statement that releases a
 * savepoint ({@code RELEASE SAVEPOINT <identifier>}).
 */
public class MTSReleaseStatement implements SQLStatement {

    /** Identifier of the savepoint to release; never null. */
    private final Identifier savepoint;

    /**
     * @param savepoint the savepoint identifier; must not be null
     * @throws IllegalArgumentException if {@code savepoint} is null
     */
    public MTSReleaseStatement(Identifier savepoint) {
        if (savepoint == null) throw new IllegalArgumentException("savepoint is null");
        this.savepoint = savepoint;
    }

    public Identifier getSavepoint() {
        return savepoint;
    }

    @Override
    public void accept(SQLASTVisitor visitor) {
        visitor.visit(this);
    }
}
| 287 |
359 | /**
* Copyright (C) 2016-2017 Xilinx, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
* License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
// Copyright 2016 Xilinx, Inc. All rights reserved.
#include "xocl/config.h"
#include "xocl/core/param.h"
#include "xocl/core/error.h"
#include "xocl/core/device.h"
#include "xocl/core/platform.h"
#include "detail/device.h"
#include <limits>
#include "plugin/xdp/profile_v2.h"
#ifdef _WIN32
# pragma warning ( disable : 4267 )
#endif
namespace xocl {
const size_t maxuint = std::numeric_limits<unsigned int>::max();
// Validate the device argument when API checking is enabled; no-op otherwise.
static void
validOrError(const cl_device_id device)
{
  if (config::api_checks())
    detail::device::validOrError(device);
}
// Implementation of clGetDeviceInfo for xocl devices.
//
// Validates the device (when API checks are enabled), then writes the value
// for param_name into the caller-provided buffer. Most values are fixed
// capabilities of the accelerator platform; device-specific ones are pulled
// from the xocl::device object. Throws xocl::error(CL_INVALID_VALUE) for an
// unknown param_name; the extern "C" wrapper below translates exceptions
// into OpenCL error codes.
cl_int
clGetDeviceInfo(cl_device_id device,
                cl_device_info param_name,
                size_t param_value_size,
                void * param_value,
                size_t * param_value_size_ret)
{
  xocl::validOrError(device);

  // param_buffer does the param_value_size bookkeeping and fills in
  // param_value_size_ret as values are assigned.
  xocl::param_buffer buffer { param_value, param_value_size, param_value_size_ret };

  auto xdevice = xocl::xocl(device);

  switch(param_name) {
  case CL_DEVICE_TYPE:
    buffer.as<cl_device_type>() = CL_DEVICE_TYPE_ACCELERATOR;
    break;
  case CL_DEVICE_VENDOR_ID:
    buffer.as<cl_uint>() = 0;
    break;
  case CL_DEVICE_MAX_COMPUTE_UNITS:
    buffer.as<cl_uint>() = xdevice->get_num_cus();
    break;
  case CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS:
    buffer.as<cl_uint>() = 3;
    break;
  case CL_DEVICE_MAX_WORK_ITEM_SIZES:
    // Effectively unbounded in all three dimensions.
    buffer.as<size_t>() = xocl::get_range(std::initializer_list<size_t>({maxuint,maxuint,maxuint}));
    break;
  case CL_DEVICE_MAX_WORK_GROUP_SIZE:
    buffer.as<size_t>() = maxuint;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE:
    buffer.as<cl_uint>() = 0;
    break;
  case CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_INT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_MAX_CLOCK_FREQUENCY:
    buffer.as<cl_uint>() = xdevice->get_max_clock_frequency();
    break;
  case CL_DEVICE_ADDRESS_BITS:
    buffer.as<cl_uint>() = 64;
    break;
  case CL_DEVICE_MAX_MEM_ALLOC_SIZE:
    buffer.as<cl_ulong>() =
#ifdef __x86_64__
      4ULL *1024*1024*1024; // 4GB
#else
      128*1024*1024; //128 MB
#endif
    break;
  case CL_DEVICE_IMAGE_SUPPORT:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_MAX_READ_IMAGE_ARGS:
    buffer.as<cl_uint>() = 128;
    break;
  case CL_DEVICE_MAX_WRITE_IMAGE_ARGS:
    buffer.as<cl_uint>() = 8;
    break;
  case CL_DEVICE_IMAGE2D_MAX_WIDTH:
    buffer.as<size_t>() = 8192;
    break;
  case CL_DEVICE_IMAGE2D_MAX_HEIGHT:
    buffer.as<size_t>() = 8192;
    break;
  case CL_DEVICE_IMAGE3D_MAX_WIDTH:
    buffer.as<size_t>() = 2048;
    break;
  case CL_DEVICE_IMAGE3D_MAX_HEIGHT:
    buffer.as<size_t>() = 2048;
    break;
  case CL_DEVICE_IMAGE3D_MAX_DEPTH:
    buffer.as<size_t>() = 2048;
    break;
  case CL_DEVICE_IMAGE_MAX_BUFFER_SIZE:
    buffer.as<size_t>() = 65536;
    break;
  case CL_DEVICE_IMAGE_MAX_ARRAY_SIZE:
    buffer.as<size_t>() = 2048;
    break;
  case CL_DEVICE_MAX_SAMPLERS:
    buffer.as<cl_uint>() = 0;
    break;
  case CL_DEVICE_MAX_PARAMETER_SIZE:
    buffer.as<size_t>() = 2048;
    break;
  case CL_DEVICE_MEM_BASE_ADDR_ALIGN:
    buffer.as<cl_uint>() = xocl(device)->get_alignment() << 3; // in bits
    break;
  case CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE:
    buffer.as<cl_uint>() = 128;
    break;
  case CL_DEVICE_SINGLE_FP_CONFIG:
    buffer.as<cl_device_fp_config>() = CL_FP_ROUND_TO_NEAREST | CL_FP_INF_NAN;
    break;
  case CL_DEVICE_DOUBLE_FP_CONFIG:
    buffer.as<cl_device_fp_config>() = 0;
    break;
  case CL_DEVICE_GLOBAL_MEM_CACHE_TYPE:
    buffer.as<cl_device_mem_cache_type>() = CL_NONE;
    break;
  case CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE:
    buffer.as<cl_uint>() = 64;
    break;
  case CL_DEVICE_GLOBAL_MEM_CACHE_SIZE:
    buffer.as<cl_ulong>() = 0;
    break;
  case CL_DEVICE_GLOBAL_MEM_SIZE:
    buffer.as<cl_ulong>() = xdevice->get_xdevice()->getDdrSize();
    break;
  case CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE:
    buffer.as<cl_ulong>() = 4*1024*1024;
    break;
  case CL_DEVICE_MAX_CONSTANT_ARGS:
    buffer.as<cl_uint>() = 8;
    break;
  case CL_DEVICE_LOCAL_MEM_TYPE:
    buffer.as<cl_device_local_mem_type>() = CL_LOCAL;
    break;
  case CL_DEVICE_LOCAL_MEM_SIZE:
    buffer.as<cl_ulong>() = 16*1024;
    break;
  case CL_DEVICE_ERROR_CORRECTION_SUPPORT:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_HOST_UNIFIED_MEMORY:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_PROFILING_TIMER_RESOLUTION:
    buffer.as<size_t>() = 1;
    break;
  case CL_DEVICE_ENDIAN_LITTLE:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_AVAILABLE:
    buffer.as<cl_bool>() = xdevice->is_available();
    break;
  case CL_DEVICE_COMPILER_AVAILABLE:
    // No online compiler; kernels come from precompiled binaries.
    buffer.as<cl_bool>() = CL_FALSE;
    break;
  case CL_DEVICE_LINKER_AVAILABLE:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_EXECUTION_CAPABILITIES:
    buffer.as<cl_device_exec_capabilities>() = CL_EXEC_KERNEL;
    break;
  case CL_DEVICE_QUEUE_PROPERTIES:
    buffer.as<cl_command_queue_properties>() =
      (
       CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE
       | CL_QUEUE_PROFILING_ENABLE
      );
    break;
  case CL_DEVICE_BUILT_IN_KERNELS:
    buffer.as<char>() = "";
    break;
  case CL_DEVICE_PLATFORM:
    buffer.as<cl_platform_id>() = xdevice->get_platform();
    break;
  case CL_DEVICE_NAME:
    buffer.as<char>() = xdevice->get_name();
    break;
  case CL_DEVICE_VENDOR:
    buffer.as<char>() = "Xilinx";
    break;
  case CL_DRIVER_VERSION:
    buffer.as<char>() = "1.0";
    break;
  case CL_DEVICE_PROFILE:
    buffer.as<char>() = "EMBEDDED_PROFILE";
    break;
  case CL_DEVICE_VERSION:
    buffer.as<char>() = "OpenCL 1.0";
    break;
  case CL_DEVICE_OPENCL_C_VERSION:
    buffer.as<char>() = "OpenCL C 1.0";
    break;
  case CL_DEVICE_EXTENSIONS:
    buffer.as<char>() = "";
    //12: "cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_byte_addressable_store";
    break;
  case CL_DEVICE_PRINTF_BUFFER_SIZE:
    buffer.as<size_t>() = 0;
    break;
  case CL_DEVICE_PREFERRED_INTEROP_USER_SYNC:
    buffer.as<cl_bool>() = CL_TRUE;
    break;
  case CL_DEVICE_PARENT_DEVICE:
    buffer.as<cl_device_id>() = xdevice->get_parent_device();
    break;
  case CL_DEVICE_PARTITION_MAX_SUB_DEVICES:
    buffer.as<cl_uint>() = xdevice->get_num_cus();
    break;
  case CL_DEVICE_PARTITION_PROPERTIES:
    buffer.as<cl_device_partition_property>() =
      xocl::get_range(std::initializer_list<cl_device_partition_property>({0,0,0,0}));
    break;
  case CL_DEVICE_PARTITION_AFFINITY_DOMAIN:
    buffer.as<cl_device_affinity_domain>() = 0;
    break;
  case CL_DEVICE_PARTITION_TYPE:
    buffer.as<cl_device_partition_property>() =
      xocl::get_range(std::initializer_list<cl_device_partition_property>({0,0,0,0}));
    break;
  case CL_DEVICE_REFERENCE_COUNT:
    buffer.as<cl_uint>() = xdevice->count();
    break;
  // deprecated in OpenCL 1.2
  case CL_DEVICE_MAX_PIPE_ARGS:
    buffer.as<cl_uint>() = 16;
    break;
  case CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS:
    buffer.as<cl_uint>() = 1;
    break;
  case CL_DEVICE_PIPE_MAX_PACKET_SIZE:
    buffer.as<cl_uint>() = 1024;
    break;
  case CL_DEVICE_SVM_CAPABILITIES:
    buffer.as<cl_device_svm_capabilities>() = CL_DEVICE_SVM_COARSE_GRAIN_BUFFER;
    break;
  // Xilinx vendor extensions below.
  case CL_DEVICE_PCIE_BDF:
    buffer.as<char>() = xdevice->get_bdf();
    break;
  case CL_DEVICE_HANDLE:
    buffer.as<void*>() = xdevice->get_handle();
    break;
  case CL_DEVICE_NODMA:
    buffer.as<cl_bool>() = xdevice->is_nodma();
    break;
  case CL_DEVICE_KDMA_COUNT:
    buffer.as<cl_uint>() = static_cast<cl_uint>(xdevice->get_num_cdmas());
    break;
  default:
    throw error(CL_INVALID_VALUE,"clGetDeviceInfo: invalid param_name");
    break;
  }

  return CL_SUCCESS;
}
namespace api {

// Internal (non extern "C") entry point; forwards to xocl::clGetDeviceInfo
// without the exception-to-error-code translation done by the public symbol.
cl_int
clGetDeviceInfo(cl_device_id device,
                cl_device_info param_name,
                size_t param_value_size,
                void * param_value,
                size_t * param_value_size_ret)
{
  return ::xocl::clGetDeviceInfo
    (device,param_name,param_value_size,param_value,param_value_size_ret);
}

} // api
} // xocl
// Public OpenCL entry point. Records the call for profiling and converts
// C++ exceptions into OpenCL error codes: xocl::error keeps its embedded
// code, anything else maps to CL_OUT_OF_HOST_MEMORY.
cl_int
clGetDeviceInfo(cl_device_id device,
                cl_device_info param_name,
                size_t param_value_size,
                void * param_value,
                size_t * param_value_size_ret)
{
  try {
    PROFILE_LOG_FUNCTION_CALL;
    LOP_LOG_FUNCTION_CALL;
    return xocl::clGetDeviceInfo
      (device, param_name, param_value_size,param_value, param_value_size_ret);
  }
  catch (const xocl::error& ex) {
    xocl::send_exception_message(ex.what());
    return ex.get_code();
  }
  catch (const std::exception& ex) {
    xocl::send_exception_message(ex.what());
    return CL_OUT_OF_HOST_MEMORY;
  }
}
| 4,934 |
887 | <filename>source/neuropod/bindings/java/src/main/native/com_uber_neuropod_LibraryLoader.h
/* Copyright (c) 2020 UATC, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_uber_neuropod_LibraryLoader */
#ifndef _Included_com_uber_neuropod_LibraryLoader
#define _Included_com_uber_neuropod_LibraryLoader
#ifdef __cplusplus
extern "C" {
#endif
#undef com_uber_neuropod_LibraryLoader_BUFFER_SIZE
#define com_uber_neuropod_LibraryLoader_BUFFER_SIZE 1048576L
/*
* Class: com_uber_neuropod_LibraryLoader
* Method: nativeIsLoaded
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_com_uber_neuropod_LibraryLoader_nativeIsLoaded(JNIEnv *, jclass);
/*
* Class: com_uber_neuropod_LibraryLoader
* Method: nativeExport
* Signature: (Ljava/lang/String;)V
*/
JNIEXPORT void JNICALL Java_com_uber_neuropod_LibraryLoader_nativeExport(JNIEnv *, jclass, jstring);
#ifdef __cplusplus
}
#endif
#endif
| 494 |
320 | <reponame>SanggunLee/edgetpu<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ctypes
import platform
class EdgeTpuDevice(ctypes.Structure):
    """ctypes mirror of libedgetpu's device record: an int device type
    (see the EDGETPU_APEX_* constants below) and a C-string device path."""
    _fields_ = [("type", ctypes.c_int),
                ("path", ctypes.c_char_p)]
EDGETPU_APEX_PCI = 0
EDGETPU_APEX_USB = 1
def edgetpu_type(t):
    """Map a numeric device-type code to a human-readable transport name."""
    names = {
        EDGETPU_APEX_PCI: 'PCI',
        EDGETPU_APEX_USB: 'USB',
    }
    return names.get(t, 'Unknown')
def edgetpulib_default():
    """Return the default Edge TPU runtime library filename for this OS."""
    libraries = {
        'Windows': 'edgetpu.dll',
        'Darwin': 'libedgetpu.1.dylib',
        'Linux': 'libedgetpu.so.1',
    }
    try:
        return libraries[platform.system()]
    except KeyError:
        raise Exception('This operating system is not supported.')
def main():
    """List every Edge TPU device visible to the runtime library.

    Prints one line per device: index, transport type and device path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--edgetpulib',
                        default=edgetpulib_default(),
                        help='Path to edgetpu dynamic library')
    args = parser.parse_args()
    # BUGFIX: use CDLL, not ctypes.pydll. libedgetpu is a plain C library;
    # pydll returns a PyDLL, which is only for libraries that call the
    # Python C-API and which keeps the GIL held around every call.
    lib = ctypes.CDLL(args.edgetpulib)
    lib.edgetpu_list_devices.argtypes = [ctypes.POINTER(ctypes.c_size_t)]
    lib.edgetpu_list_devices.restype = ctypes.POINTER(EdgeTpuDevice)
    num_devices = ctypes.c_size_t()
    devices = lib.edgetpu_list_devices(ctypes.byref(num_devices))
    for i in range(num_devices.value):
        print(i, edgetpu_type(devices[i].type), devices[i].path.decode('utf-8'))
# Script entry point.
if __name__ == '__main__':
    main()
| 710 |
940 | {
"token": "<PASSWORD>",
"full_token": "<PASSWORD>",
"tap": "homebrew/cask",
"name": [
"Isolator"
],
"desc": null,
"homepage": "https://www.willmore.eu/software/isolator/",
"url": "https://willmore.eu/software/download/Isolator-4.99beta.dmg",
"appcast": null,
"version": "4.99beta",
"versions": {
},
"installed": null,
"outdated": false,
"sha256": "8ab9344efd503606c88dbd5a139e932f37d25ec9b8d7c574a6af9f6cf1d9bcaf",
"artifacts": [
[
"Isolator.app"
]
],
"caveats": null,
"depends_on": {
},
"conflicts_with": null,
"container": null,
"auto_updates": null
}
| 289 |
716 | from .anchor_config import *
from config.utils import *
def isAnchor(context, seen_strings, seen_consts, functions_list, logger):
    """Check if the given context represents an Anchor function.

    Args:
        context (FunctionContext): canonical representation of a source function
        seen_strings (list): strings observed across all sources, used for the
            scoring. Must support .count() -- note a real set would not;
            callers appear to pass a list despite the historical "set" wording.
        seen_consts (list): (numeric) consts observed across all sources, used
            for the scoring (same .count() requirement as seen_strings)
        functions_list (list): list of all source functions names
        logger (logger): logger instance

    Return Value:
        is string criteria (True / False), threshold count, Matching anchor criteria (list of string for instance), or None if not an anchor
    """
    # A string is "unique" if it appears exactly once across all sources.
    unique_strings = [s for s in context.strings if seen_strings.count(s) == 1]
    # Case #1. Huge unique string
    huge_strings = [s for s in unique_strings if len(s) >= STRING_HUGE_LIMIT]
    if len(huge_strings) >= STRING_HUGE_GROUP:
        logger.debug(f"Found an Anchor: {context.name} ==> Unique HUGE string ({len(huge_strings[0])})")
        return True, STRING_HUGE_GROUP, huge_strings
    # Case #2. Unique string with a function name in it
    for unique_str in unique_strings:
        for func_name in functions_list:
            if func_name in unique_str:
                logger.debug(f"Found an Anchor: {context.name} ==> Unique string ({unique_str}) containing a function name ({func_name})")
                return True, 1, [unique_str]
    # Case #3. X unique strings with long length
    unique_long_strings = [s for s in unique_strings if len(s) >= STRING_LONG_LIMIT]
    if len(unique_long_strings) >= STRING_LONG_GROUP:
        logger.debug(f"Found an Anchor: {context.name} ==> {len(unique_long_strings)} unique long strings")
        return True, STRING_LONG_GROUP, unique_long_strings
    # Case #4. X unique strings with medium length
    unique_medium_strings = [s for s in unique_strings if len(s) >= STRING_MEDIUM_LIMIT]
    if len(unique_medium_strings) >= STRING_MEDIUM_GROUP:
        logger.debug(f"Found an Anchor: {context.name} ==> {len(unique_medium_strings)} unique medium strings")
        return True, STRING_MEDIUM_GROUP, unique_medium_strings
    # Case #5. Unique const with high entropy
    unique_complex_consts = [c for c in context.consts if rankConst(c, context) >= CONST_COMPLEX_LIMIT and seen_consts.count(c) == 1]
    if len(unique_complex_consts) >= CONST_COMPLEX_GROUP:
        # BUGFIX: the count was logged as the literal text
        # "len(unique_complex_consts)" -- the f-string braces were missing.
        logger.debug(f"Found an Anchor: {context.name} ==> {len(unique_complex_consts)} unique complex consts: 0x{unique_complex_consts[0]:x}")
        return False, CONST_COMPLEX_GROUP, unique_complex_consts
    # If we reached this line it means we found nothing :(
    return False, 0, None
def isAgent(context, unique_strings, unique_consts, logger):
    """Decide whether the given source function qualifies as an Agent within its file.

    Args:
        context (FunctionContext): canonical representation of a source function
        unique_strings (set): set of unique strings to be used for the scoring
        unique_consts (set): set of unique (numeric) consts to be used for the scoring
        logger (logger): logger instance

    Return Value:
        is string criteria (True / False), threshold count, Matching agent criteria (list of string for instance), or None if not an agent
    """
    # Restrict the global unique strings to those of this function.
    unique_local_strings = unique_strings & context.strings
    # Case #1. Medium unique string
    medium_strings = [s for s in unique_local_strings if len(s) >= STRING_MEDIUM_LIMIT]
    if medium_strings:
        logger.debug(f"Found an Agent: {context.name} ==> Unique medium string ({len(medium_strings[0])})")
        return True, 1, medium_strings
    # Case #2. X unique strings with short length
    unique_short_strings = [s for s in unique_local_strings if len(s) >= STRING_SHORT_LIMIT]
    if len(unique_short_strings) >= STRING_SHORT_GROUP:
        logger.debug(f"Found an Agent: {context.name} ==> {len(unique_short_strings)} unique long strings")
        return True, STRING_SHORT_GROUP, unique_short_strings
    # Case #3. Unique const with medium entropy
    unique_medium_consts = [c for c in unique_consts & context.consts if rankConst(c, context) >= CONST_MEDIUM_LIMIT]
    if unique_medium_consts:
        logger.debug(f"Found an Agent: {context.name} ==> {len(unique_medium_consts)} unique medium consts")
        return False, 1, unique_medium_consts
    # No criterion matched -- not an agent.
    return False, 0, None
| 1,591 |
1,013 | /*!
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
*/
#pragma once
#include <vector>
#include <cstddef>
#include <pyclustering/definitions.hpp>
namespace pyclustering {
namespace nnet {
/*!

@class  som_conn_type som.hpp pyclustering/nnet/som.hpp

@brief  Connection structures that can be established between neurons in self-organized feature map.

*/
enum class som_conn_type {
    /*!< Each node is connected with four neighbors: left, upper, right and lower. */
    SOM_GRID_FOUR = 0,

    /*!< Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors. */
    SOM_GRID_EIGHT = 1,

    /*!< Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors. */
    SOM_HONEYCOMB = 2,

    /*!< Grid type of connections when existence of each connection is defined by the SOM rule on each step of simulation. */
    SOM_FUNC_NEIGHBOR = 3
};
/**
 *
 * @brief   Types of initialization of weights in self-organized feature map.
 *
 */
enum class som_init_type {
    /*!< Weights are randomly distributed using Gaussian distribution (0, 1). */
    SOM_RANDOM = 0,

    /*!< Weights are randomly distributed using Gaussian distribution (input data centroid, 1). */
    SOM_RANDOM_CENTROID = 1,

    /*!< Weights are randomly distributed using Gaussian distribution (input data centroid, surface of input data). */
    SOM_RANDOM_SURFACE = 2,

    /*!< Weights are distributed as a uniform grid that covers whole surface of the input data. */
    SOM_UNIFORM_GRID = 3
};
/*!

@class  som_parameters som.hpp pyclustering/nnet/som.hpp

@brief  Parameters of self-organized feature map.

*/
struct som_parameters {
    som_init_type init_type = som_init_type::SOM_UNIFORM_GRID;  /**< Defines an initialization way for neuron weights (random, random in center of the input data, randomly distributed in data, distributed in line with uniform grid). */
    double init_radius = 0.0;                                   /**< Initial radius. If the initial radius is not specified (equals to `0.0`) then it will be calculated by SOM. */
    double init_learn_rate = 0.1;                               /**< Rate of learning. */
    double adaptation_threshold = 0.01;                         /**< Condition that defines when the learning process should be stopped. It is used when the autostop mode is on. */
    long long random_state = RANDOM_STATE_CURRENT_TIME;         /**< Seed for random state (by default is `RANDOM_STATE_CURRENT_TIME`, current system time is used). */

public:
    /*!
    @brief Default constructor of SOM parameters.
    */
    som_parameters() = default;

    /*!
    @brief Default move constructor of SOM parameters.
    */
    som_parameters(som_parameters && p_other) = default;

    /*!
    @brief Default copy constructor of SOM parameters.
    */
    som_parameters(const som_parameters & p_other) = default;

    /*!
    @brief Default destructor of SOM parameters.
    */
    ~som_parameters() = default;

public:
    /*!
    @brief Set parameters by copying them from another object.
    @param[in] p_other: another SOM parameters.
    */
    som_parameters & operator=(const som_parameters & p_other);
};
using som_award_sequence = std::vector<size_t>;
using som_gain_sequence = std::vector<std::vector<size_t> >;
using som_neighbor_sequence = std::vector<std::vector<size_t> >;
/*!

@class  som som.hpp pyclustering/nnet/som.hpp

@brief  Self-Organized Feature Map based on Kohonen's description of SOM.

*/
class som {
private:
    /* network description */
    std::size_t m_rows;
    std::size_t m_cols;
    std::size_t m_size;
    som_conn_type m_conn_type;

    dataset m_weights;
    dataset m_previous_weights;
    som_award_sequence m_awards;

    /* store pointer to training data for convenience */
    const dataset * m_data = nullptr;

    /* just for convenience (avoid excess calculation during learning) */
    dataset m_location;
    dataset m_sqrt_distances;
    som_gain_sequence m_capture_objects;
    som_neighbor_sequence m_neighbors;

    /* describe learning process and internal state */
    std::size_t m_epouchs = 0;
    som_parameters m_params;

    /* dynamically changing learning parameters */
    double m_local_radius = 0.0;
    double m_learn_rate = 0.0;

public:
    /**
     *
     * @brief   Constructor of self-organized map.
     *
     * @param[in] num_rows: number of neurons in the column (number of rows).
     * @param[in] num_cols: number of neurons in the row (number of columns).
     * @param[in] type_conn: type of connection between oscillators in the network.
     * @param[in] parameters: other parameters of the network.
     *
     */
    som(const std::size_t num_rows, const std::size_t num_cols, const som_conn_type type_conn, const som_parameters & parameters);

    /**
     *
     * @brief   Copy constructor.
     *
     * @param[in] p_other: self-organized map that should be copied.
     *
     */
    som(const som & p_other);

    /**
     *
     * @brief   Default destructor.
     *
     */
    ~som();

public:
    /**
     *
     * @brief   Trains self-organized feature map (SOM).
     *
     * @param[in] input_data: input dataset for training.
     * @param[in] num_epochs: number of epochs for training.
     * @param[in] autostop: stop learning when convergence is too low.
     *
     * @return  Returns number of learning iterations.
     *
     */
    std::size_t train(const dataset & input_data, const size_t num_epochs, bool autostop);

    /**
     *
     * @brief   Initialize SOM network by loading weights.
     * @details This method is provided as a service to load trained network parameters to avoid network training that may take
     *           a lot of time.
     *
     * @param[in] p_weights: neuron weights.
     * @param[in] p_awards: amount of captured objects by each neuron during training (can be empty if it is not required).
     * @param[in] p_capture_objects: captured objects by each neuron during training (can be empty if it is not required).
     *
     */
    void load(const dataset & p_weights, const som_award_sequence & p_awards, const som_gain_sequence & p_capture_objects);

    /**
     *
     * @brief   Processes input pattern (no learning) and returns index of neuron-winner.
     * @details Using the index of the winning neuron, its captured objects can be obtained by get_capture_objects().
     *
     * @param[in] input_pattern: input pattern for processing.
     *
     * @return  Returns index of neuron-winner.
     *
     */
    std::size_t simulate(const pattern & input_pattern) const;

    /**
     *
     * @return  Returns number of winners at the last step of the learning process.
     *
     */
    std::size_t get_winner_number() const;

    /**
     *
     * @return  Returns size of self-organized map (number of neurons).
     *
     */
    inline size_t get_size() const { return m_size; }

    /**
     *
     * @return  Constant reference to neuron weights for read-only purposes.
     *
     */
    inline const dataset & get_weights() const {
        return m_weights;
    }

    /**
     *
     * @return  Constant reference to sequence of captured objects by each neuron during training for read-only purposes.
     *
     */
    inline const som_gain_sequence & get_capture_objects() const {
        return m_capture_objects;
    }

    /**
     *
     * @return  Constant reference to neighbors of each neuron for read-only purposes.
     *
     */
    inline const som_neighbor_sequence & get_neighbors() const {
        return m_neighbors;
    }

    /**
     *
     * @return  Constant reference to amount of captured objects by each neuron during training for read-only purposes.
     *
     */
    inline const som_award_sequence & get_awards() const {
        return m_awards;
    }

    /**
     *
     * @return  Reference to amount of captured objects by each neuron during training.
     *
     */
    inline som_award_sequence & get_awards() {
        return m_awards;
    }

private:
    /**
     *
     * @brief   Create connections in line with input rule (grid four, grid eight, honeycomb,
     *          function neighbour).
     *
     * @param[in] type: type of connection between oscillators in the network.
     *
     */
    void create_connections(const som_conn_type type);

    /**
     *
     * @brief   Creates initial weights for neurons in line with the specified initialization.
     *
     * @param[in] type: type of initialization of initial neuron weights (random,
     *                   random in center of the input data, randomly distributed in
     *                   data, distributed in line with uniform grid).
     *
     */
    void create_initial_weights(const som_init_type type);

    /**
     *
     * @brief   Returns neuron winner (distance, neuron index).
     *
     * @param[in] input_pattern: input pattern from the input data set, for example it can be
     *                            coordinates of point.
     *
     * @return  Returns index of neuron that is winner.
     *
     */
    std::size_t competition(const pattern & input_pattern) const;

    /**
     *
     * @brief   Change weights of neurons in line with the winning neuron.
     *
     * @param[in] index_winner: index of neuron-winner.
     * @param[in] input_pattern: input pattern from the input data set.
     *
     */
    std::size_t adaptation(const size_t index_winner, const pattern & input_pattern);

    /**
     *
     * @brief   Returns maximum change of weight in line with comparison between previous weights
     *          and current weights.
     *
     * @return  Returns value that represents maximum change of weight after adaptation process.
     *
     */
    double calculate_maximal_adaptation() const;

    /**
     *
     * @brief   Calculates appropriate initial radius.
     *
     * @param[in] p_rows: amount of rows in the map.
     * @param[in] p_cols: amount of columns in the map.
     *
     * @return  Initial radius.
     *
     */
    static double calculate_init_radius(const size_t p_rows, const size_t p_cols);

public:
    /**
     *
     * @brief   Store network to stream.
     *
     * @param[in] p_stream: stream that is used to store network.
     * @param[in] p_network: SOM network that is stored to the stream 'p_stream'.
     *
     * @return  Stream where network is stored.
     *
     */
    friend std::ostream & operator<<(std::ostream & p_stream, const som & p_network);

    /**
     *
     * @brief   Overloaded assignment operator to make deep copy of SOM.
     *
     * @param[in] p_other: another instance of SOM.
     *
     * @return  Reference to updated SOM instance.
     *
     */
    som & operator=(const som & p_other);
};
}
} | 4,586 |
774 | package com.darwinsys.cplist;
import java.util.ArrayList;
import java.util.List;
import android.app.ListActivity;
import android.content.Intent;
import android.content.pm.ProviderInfo;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ArrayAdapter;
import android.widget.ListAdapter;
public class CpListActivity extends ListActivity {

    private static final String TAG = "Cp24"; // Toronto joke

    List<ProviderInfo> mProviders;

    /** Shows every content provider known to the PackageManager; tapping one opens its detail view. */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_cp_list);

        mProviders = getPackageManager().queryContentProviders(null, 0, 0);
        final List<String> providerNames = new ArrayList<>(mProviders.size());
        for (ProviderInfo info : mProviders) {
            Log.d(TAG, "Provider " + info.name);
            providerNames.add(info.name);
        }
        setListAdapter(new ArrayAdapter<>(this, R.layout.cp_list_item, providerNames));

        getListView().setOnItemClickListener((parent, view, position, id) -> {
            ProviderInfo chosen = mProviders.get(position);
            Intent detail = new Intent(CpListActivity.this, CpDetailActivity.class);
            detail.putExtra("provider", chosen);
            startActivity(detail);
        });
    }
}
| 515 |
473 | <filename>challenges/BIRC/poller/for-release/machine.py
from generator.actions import Actions
import random
import string
import struct
import numpy as np
import math
import datetime as dt
import ctypes
MAX_USERS = 256
REGISTER_USER_CMD_ID = struct.pack("H", 11382)
SEARCH_FOR_USER_CMD_ID = struct.pack("H", 11591)
DELETE_USER_CMD_ID = struct.pack("H", 11498)
JOIN_CHANNEL_CMD_ID = struct.pack("H", 22193)
SEARCH_FOR_CHANNEL_CMD_ID = struct.pack("H", 2201)
PART_CHANNEL_CMD_ID = struct.pack("H", 22881)
DELETE_CHANNEL_CMD_ID = struct.pack("H", 22117)
CHANNEL_CHAT_CMD_ID = struct.pack("H", 3190)
USER_CHAT_CMD_ID = struct.pack("H", 31900)
EXIT_SERVER_CMD_ID = struct.pack("H", 9999)
REMOTE_ADMIN_TOKEN_REQUEST_CMD_ID = struct.pack("H", 7777)
LOGIN_CMD_ID = struct.pack("H", 8711)
LOGOUT_CMD_ID = struct.pack("H", 8719)
CLIENT_SUCCESS = struct.pack("H", 1234)
CLIENT_FAIL = struct.pack("H", 4321)
CLIENT_LOGOUT_ALERT = struct.pack("H", 8791)
CLIENT_SESSION_TIMEOUT_ALERT = struct.pack("H", 2431)
pack_param = lambda x: struct.pack("H{0}s".format(len(x)), len(x), x)
def random_word(max_size=10, min_size=2):
    """Return a random lowercase alphanumeric word.

    Args:
        max_size (int): maximum word length (raised to min_size if smaller)
        min_size (int): minimum word length

    Return Value:
        str: word of length between min_size and max(max_size, min_size)
    """
    # 4 out of 5 calls allow digits in addition to letters.
    # BUGFIX/portability: string.letters is Python-2-only; string.ascii_letters
    # is identical under the default locale and works on both Python 2 and 3.
    if random.randint(0, 4):
        characters = string.ascii_letters + string.digits
    else:
        characters = string.ascii_letters
    # Guard against callers passing max_size < min_size.
    max_size = max_size if max_size >= min_size else min_size
    max_size = random.randint(min_size, max_size)
    return ("".join(random.choice(characters) for c in range(max_size))).lower()
def random_text(max_words=10, min_words=3):
    """Return a sentence of random words, each followed by a single space.

    Args:
        max_words (int): maximum number of words (raised to min_words if smaller)
        min_words (int): minimum number of words

    Return Value:
        str: space-terminated concatenation of random words
    """
    max_words = max_words if max_words >= min_words else min_words
    text = ''
    # Portability: range() instead of Python-2-only xrange(); behaviour here
    # is identical (the loop variable is unused).
    for x in range(random.randint(min_words, max_words)):
        text += random_word() + ' '
    return text
def rand_bytes(min_size=3, max_size=30):
    """Return a string of random byte-valued characters (codes 0-255).

    Args:
        min_size (int): minimum length
        max_size (int): maximum length

    Return Value:
        str: random string of length between min_size and max_size
    """
    rndbytes = ''
    # Portability: range() instead of Python-2-only xrange().
    for x in range(random.randint(min_size, max_size)):
        rndbytes += chr(random.randint(0, 255))
    return rndbytes
class User():
    """A registered chat user: a name plus the server-assigned password.

    The *_msg helpers serialize the wire payloads for the user-related
    commands; each field is encoded as <u16 little/native length><bytes>.
    """

    def __init__(self):
        self.name = ''      # user name sent at registration
        self.password = ''  # password mirrored from the server's PRNG

    def register_msg(self):
        """Payload for REGISTER_USER: <u16 len><name>."""
        msg = struct.pack("H", len(self.name))
        msg += struct.pack("{0}s".format(len(self.name)), self.name)
        return msg

    def search_for_user_msg(self):
        """Payload for SEARCH_FOR_USER: <u16 len><name>.

        BUGFIX: the method was defined without 'self', so any call via an
        instance raised TypeError.
        """
        msg = struct.pack("H", len(self.name))
        msg += struct.pack("{0}s".format(len(self.name)), self.name)
        return msg

    def delete_msg(self):
        """Payload for DELETE_USER: <u16 len><password>.

        BUGFIX: the method was defined without 'self' and never returned the
        message it built (implicitly returning None).
        """
        msg = struct.pack("H", len(self.password))
        msg += struct.pack("{0}s".format(len(self.password)), self.password)
        return msg

    def login_msg(self):
        """Payload for LOGIN: <u16 len><name><u16 len><password>.

        BUGFIX: the method was defined without 'self'.
        """
        msg = struct.pack("H", len(self.name))
        msg += struct.pack("{0}s".format(len(self.name)), self.name)
        msg += struct.pack("H", len(self.password))
        msg += struct.pack("{0}s".format(len(self.password)), self.password)
        return msg

    def password_resp_msg(self):
        """Expected server response carrying the password: <u16 len><password>."""
        msg = struct.pack("H", len(self.password))
        msg += struct.pack("{0}s".format(len(self.password)), self.password)
        return msg

    @classmethod
    def random(cls):
        """Create a user with a random binary name (password set later)."""
        user = cls()
        user.name = rand_bytes()
        return user
class Channel():
    """A chat channel: its name, its creator, and the member list."""

    def __init__(self, creator):
        self.name = ''
        self.creator = creator
        self.users = [creator]

    def _name_payload(self):
        """Wire format shared by every channel command: <u16 len><name>."""
        header = struct.pack("H", len(self.name))
        body = struct.pack("{0}s".format(len(self.name)), self.name)
        return header + body

    def join_msg(self):
        """Payload for JOIN_CHANNEL."""
        return self._name_payload()

    def search_for_channel_msg(self):
        """Payload for SEARCH_FOR_CHANNEL."""
        return self._name_payload()

    def part_msg(self):
        """Payload for PART_CHANNEL."""
        return self._name_payload()

    def delete_msg(self):
        """Payload for DELETE_CHANNEL."""
        return self._name_payload()

    @classmethod
    def random(cls, creator):
        """Create a channel with a random binary name owned by 'creator'."""
        channel = cls(creator)
        channel.name = rand_bytes()
        return channel
class Brc(Actions):
    def _get_random_byte(self):
        """Return one byte of the magic page, selected by a deterministic PRNG.

        The two multiply-with-carry style streams (constants 36969/18000,
        kept in 32-bit wrap-around arithmetic) mirror the challenge binary's
        generator so the poller predicts the same bytes as the server.
        """
        self.prng1 = ctypes.c_uint32(36969 * (self.prng1 & 65535)).value + ctypes.c_uint32((self.prng1 >> 16)).value
        self.prng2 = ctypes.c_uint32(18000 * (self.prng2 & 65535)).value + ctypes.c_uint32((self.prng2 >> 16)).value
        # Combine both streams into an index into the 4096-byte magic page.
        byte_idx = (ctypes.c_uint32((self.prng1 << 16)).value + self.prng2) % 4096;
        return struct.unpack('<B', self.magic_page[byte_idx])[0]
    def _generate_password(self):
        """Build a 10-byte password from consecutive PRNG-selected magic-page bytes."""
        pw = ''
        for x in xrange(10):
            pw += chr(self._get_random_byte())
        return pw
    def _search_all_users(self, user):
        """Return True if a user with the same name is already registered."""
        for u in self.all_users:
            if user.name == u.name:
                return True
        return False
    def _delete_cur_user(self):
        """Remove the current user plus every channel/membership referencing it."""
        if not self.cur_user:
            return
        # Drop the user from the global user list.
        for x in xrange(len(self.all_users)):
            if self.cur_user.name == self.all_users[x].name:
                self.all_users.pop(x)
                break
        chans_to_delete = []
        for x in xrange(len(self.all_channels)):
            if self.cur_user.name == self.all_channels[x].creator.name:
                # Channels the user created are deleted entirely; collect the
                # indices in descending order so the pops below stay valid.
                chans_to_delete.insert(0,x)
            else:
                # Otherwise just remove the user's membership.
                for y in xrange(len(self.all_channels[x].users)):
                    if self.cur_user.name == self.all_channels[x].users[y].name:
                        self.all_channels[x].users.pop(y)
                        break
        for x in chans_to_delete:
            self.all_channels.pop(x)
        self.cur_user = None
    def _search_all_channels(self, chan):
        """Return True if a channel with the same name already exists."""
        for c in self.all_channels:
            if chan.name == c.name:
                return True
        return False
    def _join_channel(self, chan):
        """Track a join locally; return False if cur_user is already a member."""
        channel = None
        for c in self.all_channels:
            if chan.name == c.name:
                channel = c
                break
        if channel:
            for u in channel.users:
                if self.cur_user.name == u.name:
                    return False
            channel.users.append(self.cur_user)
        else:
            # Joining an unknown channel implicitly creates it.
            self.all_channels.append(chan)
        return True
    def _part_channel(self, chan):
        """Remove cur_user from the named channel; return True on success."""
        channel = None
        for c in self.all_channels:
            if chan.name == c.name:
                channel = c
        if channel:
            for x in xrange(len(channel.users)):
                if self.cur_user.name == channel.users[x].name:
                    channel.users.pop(x)
                    return True
        return False
    def _delete_channel(self, chan):
        """Delete the named channel, but only if cur_user is its creator."""
        for x in xrange(len(self.all_channels)):
            if chan.name == self.all_channels[x].name:
                if self.cur_user.name == self.all_channels[x].creator.name:
                    self.all_channels.pop(x)
                    return True
        return False
    def _read(self, data):
        """Expect exactly the given bytes from the challenge binary."""
        self.read(length=len(data), expect=data)

    def _read_success_resp(self):
        # Server acknowledged the command.
        self._read(CLIENT_SUCCESS)

    def _read_fail_resp(self):
        # Server rejected the command.
        self._read(CLIENT_FAIL)

    def _read_logout_resp(self):
        # Server notified that the session has been logged out.
        self._read(CLIENT_LOGOUT_ALERT)

    def _read_timeout_alert(self):
        # Server challenged us after the session timer expired.
        self._read(CLIENT_SESSION_TIMEOUT_ALERT)

    def _write_param(self, param):
        """Send one length-prefixed parameter (<u16 len><bytes>)."""
        self.write(pack_param(param))
    def start(self):
        """Poller entry state: reset bookkeeping and seed the PRNG from the magic page."""
        #self.delay(100)
        self.all_users = []
        self.all_channels = []
        self.cur_user = None
        self.session_timeout = 30
        # Seed both PRNG streams from the first 8 bytes of the magic page,
        # matching the server's initialization.
        self.prng1 = ctypes.c_uint32(struct.unpack('<I', self.magic_page[0:4])[0]).value
        self.prng2 = ctypes.c_uint32(struct.unpack('<I', self.magic_page[4:8])[0]).value
        self.session_timeout -= 1
    def awaiting_input(self):
        """Tick the session timer; answer the re-auth challenge when it fires."""
        self.session_timeout -= 1
        if self.cur_user and self.session_timeout == 0:
            self._read_timeout_alert()
            if random.randint(0,9) > 2:
                # 7/10: answer with the correct password to keep the session.
                self._write_param(self.cur_user.password)
                self._read_success_resp()
                self.session_timeout = 30
            else:
                # 3/10: fail re-auth on purpose and expect a forced logout.
                self._write_param(rand_bytes(33,40))
                self._read_fail_resp()
                self._read_logout_resp()
                self.cur_user = None
def register_user(self):
self.write(REGISTER_USER_CMD_ID)
if self.cur_user:
self._read_fail_resp()
return
user = User.random()
self.write(user.register_msg())
if not self._search_all_users(user):
self._read_success_resp()
user.password = <PASSWORD>._<PASSWORD>_password()
self._read(user.password_resp_msg())
self.all_users.append(user)
self.cur_user = user
self.session_timeout = 30
else:
self._read_fail_resp()
    def search_for_user(self):
        """Search for an existing user (6/10 of the time) or for garbage."""
        self.write(SEARCH_FOR_USER_CMD_ID)
        if self.all_users and random.randint(0,9) > 3:
            user = self.all_users[random.randint(0, len(self.all_users) - 1)]
            self._write_param(user.name)
            self._read_success_resp()
        else:
            # Oversized random name that cannot match any registered user.
            self._write_param(rand_bytes(33,40))
            self._read_fail_resp()
    def delete_user(self):
        """Delete the logged-in user; requires the correct password."""
        self.write(DELETE_USER_CMD_ID)
        if not self.cur_user:
            self._read_fail_resp()
            return
        if random.randint(0,9) > 3:
            self._write_param(self.cur_user.password)
            self._delete_cur_user()
            self._read_success_resp()
        else:
            # Wrong password -> deletion must fail.
            self._write_param(rand_bytes(33,40))
            self._read_fail_resp()
    def join_channel(self):
        """Join an existing channel (7/10 of the time) or create a new one."""
        self.write(JOIN_CHANNEL_CMD_ID)
        if not self.cur_user:
            self._read_fail_resp()
            return
        if self.all_channels and random.randint(0,9) > 2:
            chan = self.all_channels[random.randint(0, len(self.all_channels) - 1)]
        else:
            chan = Channel.random(self.cur_user)
        self.write(chan.join_msg())
        # Joining only fails when cur_user is already a member.
        if self._join_channel(chan):
            self._read_success_resp()
        else:
            self._read_fail_resp()
    def search_for_channel(self):
        """Search for an existing channel (4/10 of the time) or for garbage."""
        self.write(SEARCH_FOR_CHANNEL_CMD_ID)
        if self.all_channels and random.randint(0,9) > 5:
            chan = self.all_channels[random.randint(0, len(self.all_channels) - 1)]
            self._write_param(chan.name)
            self._read_success_resp()
        else:
            # Oversized random name that cannot match any channel.
            self._write_param(rand_bytes(33,40))
            self._read_fail_resp()
    def part_channel(self):
        """Leave a channel; fails if cur_user is not a member of it."""
        self.write(PART_CHANNEL_CMD_ID)
        if not self.cur_user:
            self._read_fail_resp()
            return
        if self.all_channels and random.randint(0,9) > 4:
            chan = self.all_channels[random.randint(0, len(self.all_channels) - 1)]
        else:
            # Fresh random channel -> parting it is expected to fail.
            chan = Channel.random(self.cur_user)
        self.write(chan.part_msg())
        if self._part_channel(chan):
            self._read_success_resp()
        else:
            self._read_fail_resp()
    def delete_channel(self):
        """Delete a channel; only succeeds when cur_user is its creator."""
        self.write(DELETE_CHANNEL_CMD_ID)
        if not self.cur_user:
            self._read_fail_resp()
            return
        if self.all_channels and random.randint(0,9) > 3:
            chan = self.all_channels[random.randint(0, len(self.all_channels) - 1)]
        else:
            # Fresh random channel -> deleting it is expected to fail.
            chan = Channel.random(self.cur_user)
        self.write(chan.delete_msg())
        if self._delete_channel(chan):
            self._read_success_resp()
        else:
            self._read_fail_resp()
    def login(self):
        """Log in as a random registered user, or fail with random credentials."""
        self.write(LOGIN_CMD_ID)
        if self.cur_user:
            # Already logged in -> server rejects a second login.
            self._read_fail_resp()
            return
        if self.all_users:
            self.cur_user = self.all_users[random.randint(0, len(self.all_users) - 1)]
            self._write_param(self.cur_user.name)
            self._write_param(self.cur_user.password)
            self._read_success_resp()
            self.session_timeout = 30
        else:
            # No users registered yet -> any credentials must fail.
            self._write_param(rand_bytes(33, 40))
            self._write_param(rand_bytes(33, 40))
            self._read_fail_resp()
    def logout(self):
        """Log the current user out; expect an ack followed by a logout alert."""
        self.write(LOGOUT_CMD_ID)
        if not self.cur_user:
            self._read_fail_resp()
            return
        self.cur_user = None
        self._read_success_resp()
        self._read_logout_resp()
    def exit_server(self):
        """Ask the challenge binary to terminate."""
        self.write(EXIT_SERVER_CMD_ID)
def remote_admin_token_request():
page_idx = random.randint(0, 1023)
msg = REMOTE_ADMIN_TOKEN_REQUEST_CMD_ID
msg += struct.pack("H", page_idx)
| 6,222 |
324 | package com.bfwg.security.auth;
import com.bfwg.model.User;
import com.bfwg.model.UserTokenState;
import com.bfwg.security.TokenHelper;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.security.core.Authentication;
import org.springframework.security.web.authentication.SimpleUrlAuthenticationSuccessHandler;
import org.springframework.stereotype.Component;
import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
/**
* Created by fan.jin on 2016-11-07.
*/
@Component
public class AuthenticationSuccessHandler extends SimpleUrlAuthenticationSuccessHandler {

    private final TokenHelper tokenHelper;
    private final ObjectMapper objectMapper;

    // Token lifetime in seconds, injected from application properties.
    @Value("${jwt.expires_in}")
    private int EXPIRES_IN;

    // Name of the cookie that carries the JWT.
    @Value("${jwt.cookie}")
    private String TOKEN_COOKIE;

    @Autowired
    public AuthenticationSuccessHandler(TokenHelper tokenHelper, ObjectMapper objectMapper) {
        this.tokenHelper = tokenHelper;
        this.objectMapper = objectMapper;
    }

    /**
     * Issues a JWT for the freshly authenticated user and delivers it twice:
     * as an HttpOnly cookie and as JSON in the response body.
     */
    @Override
    public void onAuthenticationSuccess(HttpServletRequest request, HttpServletResponse response,
                                        Authentication authentication) throws IOException, ServletException {
        clearAuthenticationAttributes(request);
        User user = (User) authentication.getPrincipal();
        String jws = tokenHelper.generateToken(user.getUsername());

        // Create token auth Cookie
        Cookie authCookie = new Cookie(TOKEN_COOKIE, (jws));
        // HttpOnly keeps the token out of reach of page JavaScript.
        authCookie.setHttpOnly(true);
        // NOTE(review): the cookie is not marked Secure; confirm the app is
        // only served over HTTPS, or add authCookie.setSecure(true).
        authCookie.setMaxAge(EXPIRES_IN);
        authCookie.setPath("/");
        // Add cookie to response
        response.addCookie(authCookie);

        // JWT is also in the response
        UserTokenState userTokenState = new UserTokenState(jws, EXPIRES_IN);
        String jwtResponse = objectMapper.writeValueAsString(userTokenState);
        response.setContentType("application/json");
        response.getWriter().write(jwtResponse);
    }
}
| 793 |
1,662 | """
Example of serving a Flexx app using a regular web server. In this case aiohttp.
Flexx' own server does quite a lot of things for each connection, which
makes it less suited for long running and/or heavy duty server
processes. Firstly, we don't expect the Flexx server to scale well to
say thousands of connections (tens to a few hundred at a time should
work fine though). Secondly, the amount of work and complexity of each
connection may make the server less stable and potentially vulnerable.
Part of these concerns can be alleviated by running the Flexx server
in an auto-restarting Docker container (as we do with our demo server).
Nevertheless, we want to offer a simple path to build reliable and
performant websites using Flexx. The way that this works is that one
builds the client-side of the app in Flexx, which is then "dumped" (say
exported in-memory) to its bare html/js/css assets, which can be served
by any kind of web server.
"""
import mimetypes
from aiohttp import web
from flexx import flx
from flexxamples.howtos.editor_cm import CodeEditor
# Define an app
class MyApp(flx.Widget):
    """Demo app: a CodeMirror editor beside an empty stretchable widget."""

    def init(self):
        with flx.HBox():
            CodeEditor(flex=1)
            flx.Widget(flex=1)
# Dump it to a dictionary of assets that we can serve. Make the main
# page index.html. The link=2 means to use seperate files. We can also
# use link=0 to pack the whole app into a single html page (note that
# data (e.g. images) will still be separate).
app = flx.App(MyApp)
assets = app.dump('index.html', link=2)
# Define a request handler for aiohttp
def handler(request):
    """Serve one asset from the in-memory dump produced by app.dump().

    Returns 404 for unknown paths; the root path maps to index.html.
    """
    requested = request.path.lstrip('/') or 'index.html'
    print(request.method, requested)
    # Assets are bytes objects keyed by relative path.
    body = assets.get(requested, None)
    if body is None:
        return web.Response(status=404, text='not found')
    # Guess the content type from the filename extension.
    content_type = mimetypes.guess_type(requested)[0] or 'application/octet-stream'
    return web.Response(body=body, content_type=content_type)
if __name__ == '__main__':
# Here are some aiohttp specifics. Note that all assets except the
# main app are prefixed with "flexx/...", we can use that in the routing.
app = web.Application()
app.router.add_get('/', handler)
app.router.add_get('/{tail:flexx/.*}', handler)
web.run_app(app, host='0.0.0.0', port=8080)
| 794 |
362 | // Copyright (c) 2016, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BAIDU_GALAXY_CLIENT_JOB_ACTION_H
#define BAIDU_GALAXY_CLIENT_JOB_ACTION_H
#include "galaxy_util.h"
#include <gflags/gflags.h>
#include "sdk/galaxy_sdk_appmaster.h"
#include "sdk/galaxy_sdk_resman.h"
namespace baidu {
namespace galaxy {
namespace client {
// Implements the job-related subcommands of the galaxy client CLI.
// Each public operation returns true on success.
class JobAction {
public:
    JobAction();
    ~JobAction();
    bool SubmitJob(const std::string& json_file);
    bool UpdateJob(const std::string& json_file, const std::string& jobid,
                   const std::string& operation, uint32_t update_break_count);
    bool StopJob(const std::string& jobid);
    bool RemoveJob(const std::string& jobid);
    bool ListJobs(const std::string& soptions);
    bool ShowJob(const std::string& jobid, const std::string& soptions, bool show_meta);
    bool RecoverInstance(const std::string& jobid, const std::string& podid);
    bool ExecuteCmd(const std::string& jobid, const std::string& cmd);
    bool CalRes(const std::string& json_file, const std::string& soptions);
    bool GenerateJson(const std::string& jobid);

private:
    bool Init();
    bool InitResman();
    // Presumably formats a raw number with a human-readable unit suffix
    // (implementation not visible here) -- confirm against the .cc file.
    std::string StringUnit(int64_t num);
    // Thread entry points (void* (*)(void*) signatures).
    static void* ListContainers(void* param);
    static void* ListJobs(void* param);
    static void* ShowJob(void* param);
    static void* ShowContainerGroup(void* param);
    // Return value:
    //-1 cpu not enough
    //-2 memory not enough
    //-3 disk not enough
    int CalResPolicy(int64_t need_cpu, int64_t need_mem,
                     const ::baidu::galaxy::sdk::VolumRequired& workspace_volum,
                     const std::vector< ::baidu::galaxy::sdk::VolumRequired>& data_volums,
                     const ::baidu::galaxy::sdk::AgentStatistics& agent,
                     int* max_per_host);

private:
    ::baidu::galaxy::sdk::AppMaster* app_master_;
    ::baidu::galaxy::sdk::User user_;
    ::baidu::galaxy::sdk::ResourceManager* resman_;
}; // end class JobAction
} // end namespace client
} // end namespace galaxy
} // end namespace baidu
#endif // BAIDU_GALAXY_CLIENT_JOB_ACTION_H
/* vim: set ts=4 sw=4 sts=4 tw=100 */
| 869 |
12,252 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.adapters.saml.config.parsers;
import static org.hamcrest.CoreMatchers.*;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import org.junit.Test;
import org.keycloak.adapters.saml.config.IDP;
import org.keycloak.adapters.saml.config.Key;
import org.keycloak.adapters.saml.config.KeycloakSamlAdapter;
import org.keycloak.adapters.saml.config.SP;
import org.keycloak.saml.common.util.StaxParserUtil;
import java.io.InputStream;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
import org.keycloak.saml.common.exceptions.ParsingException;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.hamcrest.Matchers;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
* @version $Revision: 1 $
*/
public class KeycloakSamlAdapterXMLParserTest {

    private static final String CURRENT_XSD_LOCATION = "/schema/keycloak_saml_adapter_1_13.xsd";

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    /** Validates the named classpath resource against the current adapter XSD. */
    private void testValidationValid(String fileName) throws Exception {
        InputStream schema = getClass().getResourceAsStream(CURRENT_XSD_LOCATION);
        InputStream is = getClass().getResourceAsStream(fileName);
        assertThat(is, Matchers.notNullValue());
        assertThat(schema, Matchers.notNullValue());
        StaxParserUtil.validate(is, schema);
    }

    @Test
    public void testValidationSimpleFile() throws Exception {
        testValidationValid("keycloak-saml.xml");
    }

    @Test
    public void testValidationMultipleKeys() throws Exception {
        testValidationValid("keycloak-saml-multiple-signing-keys.xml");
    }

    @Test
    public void testValidationWithHttpClient() throws Exception {
        testValidationValid("keycloak-saml-wth-http-client-settings.xml");
    }

    @Test
    public void testValidationWithMetadataUrl() throws Exception {
        testValidationValid("keycloak-saml-with-metadata-url.xml");
    }

    @Test
    public void testValidationWithAllowedClockSkew() throws Exception {
        testValidationValid("keycloak-saml-with-allowed-clock-skew-with-unit.xml");
    }

    @Test
    public void testValidationWithRoleMappingsProvider() throws Exception {
        testValidationValid("keycloak-saml-with-role-mappings-provider.xml");
    }

    @Test
    public void testValidationWithKeepDOMAssertion() throws Exception {
        testValidationValid("keycloak-saml-keepdomassertion.xml");
        // check keep dom assertion is TRUE
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-keepdomassertion.xml", KeycloakSamlAdapter.class);
        assertThat(config, Matchers.notNullValue());
        assertThat(config.getSps(), hasSize(1));
        SP sp = config.getSps().get(0);
        assertThat(sp.isKeepDOMAssertion(), is(true));
    }

    @Test
    public void testValidationKeyInvalid() throws Exception {
        InputStream schemaIs = KeycloakSamlAdapterV1Parser.class.getResourceAsStream(CURRENT_XSD_LOCATION);
        InputStream is = getClass().getResourceAsStream("keycloak-saml-invalid.xml");
        assertThat(is, Matchers.notNullValue());
        assertThat(schemaIs, Matchers.notNullValue());

        expectedException.expect(ParsingException.class);
        StaxParserUtil.validate(is, schemaIs);
    }

    @Test
    public void testParseSimpleFileNoNamespace() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-no-namespace.xml", KeycloakSamlAdapter.class);
        // Bug fix: the parsed config was previously discarded, so this test could
        // never fail on a broken parse result. Assert that parsing produced one.
        assertThat(config, notNullValue());
    }

    @Test
    public void testXmlParserBaseFile() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), hasSize(1));

        SP sp = config.getSps().get(0);
        assertThat(sp.getEntityID(), is("sp"));
        assertThat(sp.getSslPolicy(), is("EXTERNAL"));
        assertThat(sp.getNameIDPolicyFormat(), is("format"));
        assertThat(sp.isForceAuthentication(), is(true));
        assertThat(sp.isIsPassive(), is(true));
        assertThat(sp.isAutodetectBearerOnly(), is(false));
        assertThat(sp.isKeepDOMAssertion(), is(false));
        assertThat(sp.getKeys(), hasSize(2));

        Key signing = sp.getKeys().get(0);
        assertThat(signing.isSigning(), is(true));
        Key.KeyStoreConfig keystore = signing.getKeystore();
        assertThat(keystore, notNullValue());
        assertThat(keystore.getFile(), is("file"));
        assertThat(keystore.getResource(), is("cp"));
        assertThat(keystore.getPassword(), is("pw"));
        assertThat(keystore.getPrivateKeyAlias(), is("private alias"));
        assertThat(keystore.getPrivateKeyPassword(), is("private pw"));
        assertThat(keystore.getCertificateAlias(), is("cert alias"));

        Key encryption = sp.getKeys().get(1);
        assertThat(encryption.isEncryption(), is(true));
        assertThat(encryption.getPrivateKeyPem(), is("private pem"));
        assertThat(encryption.getPublicKeyPem(), is("public pem"));
        assertThat(sp.getPrincipalNameMapping().getPolicy(), is("FROM_ATTRIBUTE"));
        assertThat(sp.getPrincipalNameMapping().getAttributeName(), is("attribute"));
        assertThat(sp.getRoleAttributes(), hasSize(1));
        assertThat(sp.getRoleAttributes(), Matchers.contains("member"));

        IDP idp = sp.getIdp();
        assertThat(idp.getEntityID(), is("idp"));
        assertThat(idp.getSignatureAlgorithm(), is("RSA_SHA256"));
        assertThat(idp.getSignatureCanonicalizationMethod(), is("canon"));
        assertThat(idp.getSingleSignOnService().isSignRequest(), is(true));
        assertThat(idp.getSingleSignOnService().isValidateResponseSignature(), is(true));
        assertThat(idp.getSingleSignOnService().getRequestBinding(), is("POST"));
        assertThat(idp.getSingleSignOnService().getBindingUrl(), is("url"));

        assertThat(idp.getSingleLogoutService().isSignRequest(), is(false));
        assertThat(idp.getSingleLogoutService().isSignResponse(), is(true));
        assertThat(idp.getSingleLogoutService().isValidateRequestSignature(), is(true));
        assertThat(idp.getSingleLogoutService().isValidateResponseSignature(), is(true));
        assertThat(idp.getSingleLogoutService().getRequestBinding(), is("REDIRECT"));
        assertThat(idp.getSingleLogoutService().getResponseBinding(), is("POST"));
        assertThat(idp.getSingleLogoutService().getPostBindingUrl(), is("posturl"));
        assertThat(idp.getSingleLogoutService().getRedirectBindingUrl(), is("redirecturl"));

        assertThat(idp.getKeys(), hasSize(1));
        assertThat(idp.getKeys().get(0).isSigning(), is(true));
        assertThat(idp.getKeys().get(0).getCertificatePem(), is("cert pem"));
    }

    /** Parses the named classpath resource and casts the result to the given type. */
    private <T> T parseKeycloakSamlAdapterConfig(String fileName, Class<T> targetClass) throws ParsingException, IOException {
        try (InputStream is = getClass().getResourceAsStream(fileName)) {
            KeycloakSamlAdapterParser parser = KeycloakSamlAdapterParser.getInstance();
            return targetClass.cast(parser.parse(is));
        }
    }

    @Test
    public void testXmlParserMultipleSigningKeys() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-multiple-signing-keys.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), hasSize(1));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(idp.getKeys(), hasSize(4));
        for (int i = 0; i < 4; i++) {
            Key key = idp.getKeys().get(i);
            assertThat(key.isSigning(), is(true));
            assertThat(idp.getKeys().get(i).getCertificatePem(), is("cert pem " + i));
        }
    }

    @Test
    public void testXmlParserHttpClientSettings() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-wth-http-client-settings.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), hasSize(1));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(idp.getHttpClientConfig(), notNullValue());
        assertThat(idp.getHttpClientConfig().getClientKeystore(), is("ks"));
        assertThat(idp.getHttpClientConfig().getClientKeystorePassword(), is("<PASSWORD>"));
        assertThat(idp.getHttpClientConfig().getProxyUrl(), is("pu"));
        assertThat(idp.getHttpClientConfig().getTruststore(), is("ts"));
        assertThat(idp.getHttpClientConfig().getTruststorePassword(), is("<PASSWORD>"));
        assertThat(idp.getHttpClientConfig().getConnectionPoolSize(), is(42));
        assertThat(idp.getHttpClientConfig().isAllowAnyHostname(), is(true));
        assertThat(idp.getHttpClientConfig().isDisableTrustManager(), is(true));
        assertThat(idp.getHttpClientConfig().getSocketTimeout(), is(6000L));
        assertThat(idp.getHttpClientConfig().getConnectionTimeout(), is(7000L));
        assertThat(idp.getHttpClientConfig().getConnectionTTL(), is(200L));
    }

    @Test
    public void testXmlParserSystemPropertiesNoPropertiesSet() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-properties.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(sp.getEntityID(), is("sp"));
        assertThat(sp.getSslPolicy(), is("${keycloak-saml-properties.sslPolicy}"));

        assertThat(idp.isSignaturesRequired(), is(false));

        assertThat(idp.getSingleLogoutService().isSignRequest(), is(true));
        assertThat(idp.getSingleLogoutService().isSignResponse(), is(false));

        assertThat(idp.getSingleSignOnService().isSignRequest(), is(true));
        assertThat(idp.getSingleSignOnService().isValidateResponseSignature(), is(true));

        // These should take default from IDP.signaturesRequired
        assertThat(idp.getSingleLogoutService().isValidateRequestSignature(), is(false));
        assertThat(idp.getSingleLogoutService().isValidateResponseSignature(), is(false));

        assertThat(idp.getSingleSignOnService().isValidateAssertionSignature(), is(false));
    }

    @Test
    public void testXmlParserSystemPropertiesWithPropertiesSet() throws Exception {
        try {
            System.setProperty("keycloak-saml-properties.entityID", "meid");
            System.setProperty("keycloak-saml-properties.sslPolicy", "INTERNAL");
            System.setProperty("keycloak-saml-properties.signaturesRequired", "true");

            KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-properties.xml", KeycloakSamlAdapter.class);

            assertThat(config, notNullValue());
            assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

            SP sp = config.getSps().get(0);
            IDP idp = sp.getIdp();

            assertThat(sp.getEntityID(), is("meid"));
            assertThat(sp.getSslPolicy(), is("INTERNAL"));
            assertThat(idp.isSignaturesRequired(), is(true));

            assertThat(idp.getSingleLogoutService().isSignRequest(), is(true));
            assertThat(idp.getSingleLogoutService().isSignResponse(), is(false));

            assertThat(idp.getSingleSignOnService().isSignRequest(), is(true));
            assertThat(idp.getSingleSignOnService().isValidateResponseSignature(), is(true));

            // These should take default from IDP.signaturesRequired
            assertThat(idp.getSingleLogoutService().isValidateRequestSignature(), is(true));
            assertThat(idp.getSingleLogoutService().isValidateResponseSignature(), is(true));

            // This is false by default
            assertThat(idp.getSingleSignOnService().isValidateAssertionSignature(), is(false));
        } finally {
            System.clearProperty("keycloak-saml-properties.entityID");
            System.clearProperty("keycloak-saml-properties.sslPolicy");
            System.clearProperty("keycloak-saml-properties.signaturesRequired");
        }
    }

    @Test
    public void testMetadataUrl() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-with-metadata-url.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(idp.getMetadataUrl(), is("https:///example.com/metadata.xml"));
    }

    @Test
    public void testAllowedClockSkewDefaultUnit() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-with-allowed-clock-skew-default-unit.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(idp.getAllowedClockSkew(), is(3));
        assertThat(idp.getAllowedClockSkewUnit(), is(TimeUnit.SECONDS));
    }

    @Test
    public void testAllowedClockSkewWithUnit() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-with-allowed-clock-skew-with-unit.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

        SP sp = config.getSps().get(0);
        IDP idp = sp.getIdp();

        assertThat(idp.getAllowedClockSkew(), is(3500));
        assertThat(idp.getAllowedClockSkewUnit(), is(TimeUnit.MILLISECONDS));
    }

    @Test
    public void testParseRoleMappingsProvider() throws Exception {
        KeycloakSamlAdapter config = parseKeycloakSamlAdapterConfig("keycloak-saml-with-role-mappings-provider.xml", KeycloakSamlAdapter.class);

        assertThat(config, notNullValue());
        assertThat(config.getSps(), Matchers.contains(instanceOf(SP.class)));

        SP sp = config.getSps().get(0);
        SP.RoleMappingsProviderConfig roleMapperConfig = sp.getRoleMappingsProviderConfig();
        assertThat(roleMapperConfig, notNullValue());
        assertThat(roleMapperConfig.getId(), is("properties-based-role-mapper"));
        Properties providerConfig = roleMapperConfig.getConfiguration();
        assertThat(providerConfig.size(), is(2));
        assertThat(providerConfig.containsKey("properties.resource.location"), is(true));
        assertThat(providerConfig.getProperty("properties.resource.location"), is("role-mappings.properties"));
        assertThat(providerConfig.containsKey("another.property"), is(true));
        assertThat(providerConfig.getProperty("another.property"), is("another.value"));
    }
}
| 6,035 |
1,099 | /*
* This source file is part of RmlUi, the HTML/CSS Interface Middleware
*
* For the latest information, see http://github.com/mikke89/RmlUi
*
* Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
* Copyright (c) 2019 The RmlUi Team, and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "XmlNodeHandlers.h"
#include <RmlUi/Core/Types.h>
#include <RmlUi/Core/XMLNodeHandler.h>
#include <RmlUi/Core/XMLParser.h>
using namespace Rml;
// Nothing to set up; collected meta items live in the meta_list member.
XMLNodeHandlerMeta::XMLNodeHandlerMeta()
{}

XMLNodeHandlerMeta::~XMLNodeHandlerMeta()
{}
/// Called when a new element start is opened
Element* XMLNodeHandlerMeta::ElementStart(XMLParser* /*parser*/, const String& /*name*/, const XMLAttributes& attributes)
{
MetaItem item;
auto it_name = attributes.find("name");
if (it_name != attributes.end())
item.name = it_name->second.Get<String>();
auto it_content = attributes.find("content");
if (it_content != attributes.end())
item.content = it_content->second.Get<String>();
if (!item.name.empty() && !item.content.empty())
meta_list.push_back(std::move(item));
return nullptr;
}
/// Called when an element is closed
bool XMLNodeHandlerMeta::ElementEnd(XMLParser* /*parser*/, const String& /*name*/)
{
return true;
}
/// Called for element data
bool XMLNodeHandlerMeta::ElementData(XMLParser* /*parser*/, const String& /*data*/, XMLDataType /*type*/)
{
return true;
}
XMLNodeHandlerLink::XMLNodeHandlerLink()
{
	// Cache the built-in 'head' handler so style sheet / template links can be
	// delegated to it from ElementStart.
	node_handler_head = XMLParser::GetNodeHandler("head");
	RMLUI_ASSERT(node_handler_head);
}

XMLNodeHandlerLink::~XMLNodeHandlerLink() {}
// Called for each <link> element. Style sheets and templates are delegated to
// the 'head' node handler; everything else is recorded for the application.
Element* XMLNodeHandlerLink::ElementStart(XMLParser* parser, const String& name, const XMLAttributes& attributes)
{
	RMLUI_ASSERT(name == "link");

	const String href = Get<String>(attributes, "href", "");
	const String rel = Get<String>(attributes, "rel", "");
	const String type = StringUtilities::ToLower(Get<String>(attributes, "type", ""));

	const bool handled_by_head = !href.empty() &&
		(type == "text/rcss" || type == "text/css" || type == "text/template");
	if (handled_by_head)
		return node_handler_head->ElementStart(parser, name, attributes);

	link_list.push_back(LinkItem{ rel, href });
	return nullptr;
}
// Forward element-end to the 'head' handler so delegated links are closed correctly.
bool XMLNodeHandlerLink::ElementEnd(XMLParser* parser, const String& name)
{
	return node_handler_head->ElementEnd(parser, name);
}
// Forward element data to the 'head' handler for the same reason.
bool XMLNodeHandlerLink::ElementData(XMLParser* parser, const String& data, XMLDataType type)
{
	return node_handler_head->ElementData(parser, data, type);
}
| 1,115 |
845 | {
"mixin": true,
"group": "users",
"name": "users.id",
"desc": "This method returns the ID of a team user.",
"args": {
"user": {
"required": true,
"example": "#general",
"desc": "User to get ID for, prefixed with @.",
"type": "user"
}
},
"errors": {
"user_not_found": "Value passed for user was invalid.",
"not_authed": "No authentication token provided.",
"invalid_auth": "Invalid authentication token.",
"account_inactive": "Authentication token is for a deleted user or team."
}
}
| 205 |
341 | <gh_stars>100-1000
"""Custom TF 'ops' as meant in the TensorFlow definition of ops."""
import numpy as np
import tensorflow as tf
from utils import io_utils
from tensorflow.python.util import nest
def dynamic_sampled_softmax_loss(labels, logits, output_projection, vocab_size,
                                 from_scratch=False, num_samples=512, name=None):
    """Sampled softmax loss function able to accept 3D Tensors as input,
    as opposed to the official TensorFlow support for <= 2D. This is
    dynamic because it can be applied across variable-length sequences,
    which are unspecified at initialization with size 'None'.

    Args:
        labels: 2D integer tensor of shape [batch_size, None] containing
            the word ID labels for each individual rnn state from logits.
        logits: 3D float tensor of shape [batch_size, None, state_size] as
            output by a DynamicDecoder instance.
        output_projection: (tuple) (w, b) projection pair mapping decoder
            states to the vocabulary.
        vocab_size: (int) total number of output classes.
        from_scratch: (bool) Whether to use the version I wrote from scratch, or to use
            the version I wrote that applies map_fn(sampled_softmax) across timeslices, which
            is probably less efficient. (Currently testing)
        num_samples: (int) number of classes sampled per step for the
            sampled-softmax approximation.
        name: (str) optional name for the op's name scope.

    Returns:
        loss as a scalar Tensor, computed as the mean over all batches and sequences.
    """
    # Dispatch to one of two equivalent-in-intent implementations below.
    if from_scratch:
        return _dynamic_sampled_from_scratch(labels, logits, output_projection, vocab_size,
                                             num_samples=num_samples, name=name)
    else:
        return _dynamic_sampled_map(labels, logits, output_projection, vocab_size,
                                    num_samples=num_samples, name=name)
def _dynamic_sampled_map(labels, logits, output_projection, vocab_size,
                         num_samples=512, name=None):
    """Sampled softmax loss function able to accept 3D Tensors as input,
    as opposed to the official TensorFlow support for <= 2D. This is
    dynamic because it can be applied across variable-length sequences,
    which are unspecified at initialization with size 'None'.

    Applies tf.nn.sampled_softmax_loss to each timestep slice via tf.map_fn.

    Args:
        labels: 2D integer tensor of shape [batch_size, None] containing
            the word ID labels for each individual rnn state from logits.
        logits: 3D float tensor of shape [batch_size, None, state_size] as
            output by a DynamicDecoder instance.
        output_projection: (tuple) (w, b) projection pair; w is reshaped to
            [vocab_size, state_size] for sampled_softmax_loss.
        vocab_size: (int) total number of output classes.
        num_samples: (int) number of sampled classes per timestep.
        name: (str) optional name for the op's name scope.

    Returns:
        loss as a scalar Tensor, computed as the mean over all batches and sequences.
    """
    with tf.name_scope(name, "dynamic_sampled_softmax_loss", [labels, logits, output_projection]):
        seq_len = tf.shape(logits)[1]
        st_size = tf.shape(logits)[2]
        # NOTE(review): reshape reinterprets the [batch, time, ...] layout rather
        # than transposing the batch/time axes — confirm inputs are laid out so
        # that this produces true time-major slices.
        time_major_outputs = tf.reshape(logits, [seq_len, -1, st_size])
        time_major_labels = tf.reshape(labels, [seq_len, -1])
        # Reshape is apparently faster (dynamic) than transpose.
        w_t = tf.reshape(output_projection[0], [vocab_size, -1])
        b = output_projection[1]

        def sampled_loss(elem):
            # elem is one timestep: (logits [batch, state], labels [batch]).
            logits, lab = elem
            lab = tf.reshape(lab, [-1, 1])
            # TODO: Figure out how this accurately gets loss without requiring weights,
            # like sparse_softmax_cross_entropy requires.
            return tf.reduce_mean(
                tf.nn.sampled_softmax_loss(
                    weights=w_t,
                    biases=b,
                    labels=lab,
                    inputs=logits,
                    num_sampled=num_samples,
                    num_classes=vocab_size,
                    partition_strategy='div'))

        # Map the per-timestep loss across the sequence, then average.
        batch_losses = tf.map_fn(sampled_loss,
                                 (time_major_outputs, time_major_labels),
                                 dtype=tf.float32)
        loss = tf.reduce_mean(batch_losses)
        return loss
def _dynamic_sampled_from_scratch(labels, logits, output_projection, vocab_size,
                                  num_samples, name=None):
    """Sampled softmax loss computed "from scratch" over a dynamic sequence.

    Note: I closely follow the notation from Tensorflow's Candidate Sampling reference.
    - Link: https://www.tensorflow.org/extras/candidate_sampling.pdf

    Args:
        output_projection: (tuple) returned by any DynamicDecoder.get_projections_tensors()
            - output_projection[0] == w tensor. [state_size, vocab_size]
            - output_projection[1] == b tensor. [vocab_size]
        labels: 2D Integer tensor. [batch_size, None]
        logits: 3D float Tensor [batch_size, None, state_size].
            - In this project, usually is the decoder batch output sequence (NOT projected).
        num_samples: number of classes out of vocab_size possible to use.
        vocab_size: total number of classes.

    Returns:
        Scalar loss tensor, averaged over timesteps.
    """
    with tf.name_scope(name, "dynamic_sampled_from_scratch", [labels, logits, output_projection]):
        batch_size, seq_len, state_size = tf.unstack(tf.shape(logits))
        # NOTE(review): reshape reinterprets the batch/time layout rather than
        # transposing it — confirm callers provide compatible layouts.
        time_major_outputs = tf.reshape(logits, [seq_len, batch_size, state_size])
        time_major_labels = tf.reshape(labels, [seq_len, batch_size])
        weights = tf.transpose(output_projection[0])
        biases = output_projection[1]

        def sampled_loss_single_timestep(args):
            """Sampled loss for one timestep.

            Args: 2-tuple (because map_fn below)
                logits: 2D tensor of shape [batch_size, state_size].
                targets: 1D tensor of shape [batch_size].
            """
            logits, targets = args
            with tf.name_scope("compute_sampled_logits", [weights, biases, logits, targets]):
                targets = tf.cast(targets, tf.int64)
                sampled_values = tf.nn.log_uniform_candidate_sampler(
                    true_classes=tf.expand_dims(targets, -1),
                    num_true=1,
                    num_sampled=num_samples,
                    unique=True,
                    range_max=vocab_size)
                S, Q_true, Q_samp = (tf.stop_gradient(s) for s in sampled_values)
                # One embedding lookup covering both target rows and sampled rows.
                all_ids = tf.concat([targets, S], 0)
                _W = tf.nn.embedding_lookup(weights, all_ids, partition_strategy='div')
                _b = tf.nn.embedding_lookup(biases, all_ids)
                W = {'targets': tf.slice(_W, begin=[0, 0], size=[batch_size, state_size]),
                     'samples': tf.slice(_W, begin=[batch_size, 0], size=[num_samples, state_size])}
                b = {'targets': tf.slice(_b, begin=[0], size=[batch_size]),
                     'samples': tf.slice(_b, begin=[batch_size], size=[num_samples])}
                # Bug fix: keep true_logits rank-2 ([batch_size, 1]). The previous
                # rank-1 form broadcast incorrectly against Q_true (shape
                # [batch_size, 1]) and could not be concatenated on axis 1 below.
                true_logits = tf.reduce_sum(tf.multiply(logits, W['targets']), 1)
                true_logits = (tf.expand_dims(true_logits, 1)
                               + tf.expand_dims(b['targets'], 1) - tf.log(Q_true))
                sampled_logits = tf.matmul(logits, W['samples'], transpose_b=True)
                sampled_logits += b['samples'] - tf.log(Q_samp)
                F = tf.concat([true_logits, sampled_logits], 1)

                # Mark sampled classes that coincide with the target as positives.
                # Bug fix: use tf.equal — Python `==` on TF1 tensors is identity
                # comparison, not elementwise equality.
                def fn(s_i):
                    return tf.where(tf.equal(targets, s_i),
                                    tf.ones_like(targets), tf.zeros_like(targets))
                sample_labels = tf.transpose(tf.map_fn(fn, S))
                out_targets = tf.concat([tf.ones_like(true_logits, dtype=tf.int64), sample_labels], 1)
                # softmax_cross_entropy expects float label distributions.
                return tf.losses.softmax_cross_entropy(tf.to_float(out_targets), logits=F)

        return tf.reduce_mean(tf.map_fn(sampled_loss_single_timestep,
                                        (time_major_outputs, time_major_labels),
                                        dtype=tf.float32))
def cross_entropy_sequence_loss(logits, labels, weights):
    """Weighted sequence cross-entropy, normalized by total weight.

    Simpler than the various TensorFlow sequence-loss implementations because
    it assumes inputs come from a chatbot.Model subclass. PAD positions carry
    weight 0 and therefore contribute nothing to the loss.
    """
    with tf.name_scope('cross_entropy_sequence_loss'):
        per_step_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
        # Weights are 0 for PAD labels and 1 otherwise, so they double as a mask.
        float_weights = tf.to_float(weights)
        masked_losses = tf.multiply(per_step_losses, float_weights)
        return tf.reduce_sum(masked_losses) / tf.reduce_sum(float_weights)
def dot_prod(x, y):
    """Scalar dot product: sum over all elements of the elementwise product."""
    elementwise = tf.multiply(x, y)
    return tf.reduce_sum(elementwise)
def bahdanau_score(attention_dim, h_j, s_i):
    """Bahdanau-style (additive) attention score sketch.

    Args:
        attention_dim: (int) inner dimension of the attention projections.
        h_j: encoder state tensor; its leading static dimension is the state size.
        s_i: decoder state tensor (currently unused beyond naming; see NOTE).

    Returns:
        Scalar score tensor.
    """
    # Bug fix: tf.get_shape does not exist; static shapes are read from the
    # tensor itself via Tensor.get_shape().
    state_size = h_j.get_shape()[0]
    h_proj = tf.get_variable('W_1',
                             [attention_dim, state_size],
                             dtype=tf.float32)
    s_proj = tf.get_variable('W_2',
                             [attention_dim, state_size],
                             dtype=tf.float32)
    v = tf.get_variable('v',
                        [attention_dim, state_size],
                        dtype=tf.float32)
    # NOTE(review): this scores only the projection variables; h_j and s_i are
    # not multiplied in yet — confirm the intended formula v . tanh(W1 h_j + W2 s_i).
    score = dot_prod(v, tf.tanh(h_proj + s_proj))
    return score
def luong_score(attention_dim, h_j, s_i):
    """Luong-style (multiplicative) attention score sketch.

    Args:
        attention_dim: (int) inner dimension of the attention projections.
        h_j: encoder state tensor; its leading static dimension sizes W_1.
        s_i: decoder state tensor; its leading static dimension sizes W_2.

    Returns:
        Scalar score tensor.
    """
    # Bug fix: tf.get_shape does not exist; use the tensors' static shapes.
    h_proj = tf.get_variable('W_1',
                             [attention_dim, h_j.get_shape()[0]],
                             dtype=tf.float32)
    s_proj = tf.get_variable('W_2',
                             [attention_dim, s_i.get_shape()[0]],
                             dtype=tf.float32)
    # NOTE(review): h_j and s_i values are not used in the score itself —
    # confirm the intended formula h^T W s before relying on this.
    score = dot_prod(h_proj, s_proj)
    return score
def linear_map(args, output_size, biases=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Basically, you pass in a bunch of vectors (ok you got me, 2D tensors because
    batch dimensions) that you want added together but need their dimensions
    to match. This function has you covered.

    Args:
        args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        biases: tensor of shape [output_size] added to all in batch if not None.

    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
    """
    if not nest.is_sequence(args):
        args = [args]

    # Total size of arguments on dimension 1. Bug fix: tf.get_variable requires
    # a fully-defined static shape; the previous code summed dynamic tf.shape
    # tensors with tf.add, which tf.get_variable cannot accept as a shape.
    shapes = [a.get_shape().as_list()[1] for a in args]
    total_arg_size = 0
    for shape in shapes:
        total_arg_size += shape

    dtype = args[0].dtype

    # Now the computation.
    scope = tf.get_variable_scope()
    with tf.variable_scope(scope) as outer_scope:
        weights = tf.get_variable('weights',
                                  [total_arg_size, output_size],
                                  dtype=dtype)
        if len(args) == 1:
            res = tf.matmul(args[0], weights)
        else:
            res = tf.matmul(tf.concat(args, 1), weights)
        # Bug fix: `not biases` on a Tensor raises at graph-build time; test
        # against None explicitly instead of relying on truthiness.
        return res if biases is None else tf.nn.bias_add(res, biases)
| 5,080 |
1,133 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_y_reverse(scale):
    """
    Reverse the y axis direction.

    Examples
    --------
    >>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_y_reverse()
    """
    def __radd__(self, gg):
        # Work on a copy so the original ggplot object is left untouched.
        flipped = deepcopy(gg)
        flipped.scale_y_reverse = True
        return flipped
class scale_x_reverse(scale):
    """
    Reverse the x axis direction.

    Examples
    --------
    >>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_x_reverse()
    """
    def __radd__(self, gg):
        # Work on a copy so the original ggplot object is left untouched.
        flipped = deepcopy(gg)
        flipped.scale_x_reverse = True
        return flipped
| 325 |
8,315 | <filename>epoxy-processortest/src/test/resources/GridSpanCountView.java
package com.airbnb.epoxy;
import android.content.Context;
import android.view.View;
// Annotation-processor test fixture: a @ModelView with fullSpan = false,
// presumably used to verify generated span-size overrides — confirm against
// the processor test that consumes this resource before editing.
@ModelView(defaultLayout = 1, fullSpan = false)
public class GridSpanCountView extends View {

  public GridSpanCountView(Context context) {
    super(context);
  }

  @ModelProp
  public void setClickListener(String title) {

  }
}
1,261 | {
"name": "curtainsjs",
"version": "8.1.3",
"description": "<h2>What is it ?</h2>\r <p>\r Shaders are the next front-end web developpment big thing, with the ability to create very powerful 3D interactions and animations. A lot of very good javascript libraries already handle WebGL but with most of them it's kind of a headache to position your meshes relative to the DOM elements of your web page.\r </p>\r <p>\r curtains.js was created with just that issue in mind. It is a small vanilla WebGL javascript library that converts HTML elements containing images and videos into 3D WebGL textured planes, allowing you to animate them via shaders.<br />\r You can define each plane size and position via CSS, which makes it super easy to add WebGL responsive planes all over your pages.\r </p>\r <p>\r <a href=\"https://www.curtainsjs.com/\" title=\"Documentation\" target=\"_blank\">Documentation</a> - <a href=\"https://github.com/martinlaxenaire/curtainsjs\" title=\"GitHub\" target=\"_blank\">GitHub</a> </p>\r ",
"main": "src/index.mjs",
"directories": {
"example": "examples",
"documentation": "documentation"
},
"keywords": [
"webgl"
],
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"url": "https://twitter.com/webdesign_ml"
},
"homepage": "https://github.com/martinlaxenaire/curtainsjs",
"bugs": {
"url": "https://github.com/martinlaxenaire/curtainsjs/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/martinlaxenaire/curtainsjs.git"
},
"license": "MIT",
"devDependencies": {
"@babel/core": "^7.15.5",
"@babel/preset-env": "^7.15.6",
"@rollup/plugin-babel": "^5.3.0",
"rollup": "^2.56.3",
"rollup-plugin-terser": "^6.1.0"
},
"scripts": {
"build": "rollup -c",
"prepare": "npm run build"
}
}
| 686 |
2,881 | package com.salesmanager.core.business.repositories.user;
import com.salesmanager.core.business.exception.ServiceException;
import com.salesmanager.core.model.common.Criteria;
import com.salesmanager.core.model.common.GenericEntityList;
import com.salesmanager.core.model.user.User;
/**
 * Custom repository operations for {@link User} entities that are not covered
 * by the generated repository methods.
 */
public interface UserRepositoryCustom {

  /**
   * Lists users matching the given filter/paging criteria.
   *
   * @param criteria filter, ordering and paging parameters
   * @return a paged list of matching users
   * @throws ServiceException if the underlying query fails
   */
  GenericEntityList<User> listByCriteria(Criteria criteria) throws ServiceException;

}
| 125 |
1,510 | <filename>android/ReactAndroid/src/main/jni/react/jni/NativeDeltaClient.h
// Copyright (c) Facebook, Inc. and its affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <memory>
#include <cxxreact/JSDeltaBundleClient.h>
#include <fb/fbjni.h>
#include <fb/fbjni/Hybrid.h>
#include <fb/fbjni/ReadableByteChannel.h>
namespace facebook {
namespace react {
// JNI hybrid class backing com.facebook.react.bridge.NativeDeltaClient.
// Owns a JSDeltaBundleClient and applies delta-bundle patches received from
// Java through a ReadableByteChannel.
class NativeDeltaClient : public jni::HybridClass<NativeDeltaClient> {
 public:
  static constexpr auto kJavaDescriptor =
      "Lcom/facebook/react/bridge/NativeDeltaClient;";

  static jni::local_ref<jhybriddata> initHybrid(jni::alias_ref<jclass>);
  static void registerNatives();
  ~NativeDeltaClient() override = default;

  // Shared (const) view of the underlying delta client for native consumers.
  std::shared_ptr<const JSDeltaBundleClient> getDeltaClient() {
    return deltaClient_;
  }

 private:
  friend HybridBase;
  // Reads a serialized delta from the Java channel and applies it.
  void jniProcessDelta(jni::alias_ref<jni::JReadableByteChannel> delta);
  // Discards all state accumulated from previously applied deltas.
  void jniReset();
  const std::shared_ptr<JSDeltaBundleClient> deltaClient_ =
      std::make_shared<JSDeltaBundleClient>();
};
} // namespace react
} // namespace facebook
| 390 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.data.tables.implementation;
import com.azure.core.util.logging.ClientLogger;
/**
 * Type representing a parsed storage connection string. Exposes the storage
 * account name, the authentication settings, and the endpoint for each
 * storage service (blob, file, queue, table).
 */
public final class StorageConnectionString {
    /**
     * The storage account name.
     */
    private final String accountName;

    /**
     * The settings for storage authentication.
     */
    private final StorageAuthenticationSettings storageAuthSettings;

    /**
     * The blob endpoint.
     */
    private final StorageEndpoint blobEndpoint;

    /**
     * The file endpoint.
     */
    private final StorageEndpoint fileEndpoint;

    /**
     * The queue endpoint.
     */
    private final StorageEndpoint queueEndpoint;

    /**
     * The table endpoint.
     */
    private final StorageEndpoint tableEndpoint;

    /**
     * @return The storage account name.
     */
    public String getAccountName() {
        return this.accountName;
    }

    /**
     * @return The {@link StorageAuthenticationSettings} associated with this connection string.
     */
    public StorageAuthenticationSettings getStorageAuthSettings() {
        return this.storageAuthSettings;
    }

    /**
     * Get the {@link StorageEndpoint endpoint} for the storage blob service.
     *
     * @return The {@link StorageEndpoint blob endpoint} associated with this connection string.
     */
    public StorageEndpoint getBlobEndpoint() {
        return this.blobEndpoint;
    }

    /**
     * Get the {@link StorageEndpoint endpoint} for the storage file service.
     *
     * @return The {@link StorageEndpoint file endpoint} associated with this connection string.
     */
    public StorageEndpoint getFileEndpoint() {
        return this.fileEndpoint;
    }

    /**
     * Get the {@link StorageEndpoint endpoint} for the storage queue service.
     *
     * @return The {@link StorageEndpoint queue endpoint} associated with this connection string.
     */
    public StorageEndpoint getQueueEndpoint() {
        return this.queueEndpoint;
    }

    /**
     * Get the {@link StorageEndpoint endpoint} for the storage table service.
     *
     * @return The {@link StorageEndpoint table endpoint} associated with this connection string.
     */
    public StorageEndpoint getTableEndpoint() {
        return this.tableEndpoint;
    }

    /**
     * Create a {@link StorageConnectionString} from the given connection string.
     *
     * @param connectionString The connection string.
     * @param logger The {@link ClientLogger}.
     *
     * @return A {@link StorageConnectionString} based on the provided connection string.
     * @throws IllegalArgumentException if the connection string is null, empty, or
     * cannot be interpreted as either an emulator or a service connection string.
     */
    public static StorageConnectionString create(final String connectionString, final ClientLogger logger) {
        // Idiomatic emptiness check (was: connectionString.length() == 0).
        if (connectionString == null || connectionString.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("Invalid connection string."));
        }

        final ConnectionSettings settings = ConnectionSettings.fromConnectionString(connectionString, logger);

        // Try the known interpretations in order: the emulator shorthand form
        // first, then a full storage service connection string.
        final StorageConnectionString emulatorConnString = StorageEmulatorConnectionString.tryCreate(settings, logger);
        if (emulatorConnString != null) {
            return emulatorConnString;
        }

        final StorageConnectionString serviceConnString = StorageServiceConnectionString.tryCreate(settings, logger);
        if (serviceConnString != null) {
            return serviceConnString;
        }

        // Neither interpretation matched.
        throw logger.logExceptionAsError(new IllegalArgumentException("Invalid connection string."));
    }

    /**
     * Creates a {@link StorageConnectionString}.
     *
     * @param storageAuthSettings The storage authentication settings.
     * @param blobEndpoint The blob service endpoint.
     * @param queueEndpoint The queue service endpoint.
     * @param tableEndpoint The table service endpoint.
     * @param fileEndpoint The file service endpoint.
     * @param accountName The storage account name.
     */
    StorageConnectionString(final StorageAuthenticationSettings storageAuthSettings, final StorageEndpoint blobEndpoint,
                            final StorageEndpoint queueEndpoint, final StorageEndpoint tableEndpoint,
                            final StorageEndpoint fileEndpoint, final String accountName) {
        this.storageAuthSettings = storageAuthSettings;
        this.blobEndpoint = blobEndpoint;
        this.fileEndpoint = fileEndpoint;
        this.queueEndpoint = queueEndpoint;
        this.tableEndpoint = tableEndpoint;
        this.accountName = accountName;
    }
}
| 1,532 |
852 | import FWCore.ParameterSet.Config as cms
# CandidateVertexMerger EDProducer: reads the secondary-vertex collection
# produced by inclusiveCandidateVertexFinder and emits an arbitrated/merged
# collection under this module's label.
candidateVertexMerger = cms.EDProducer("CandidateVertexMerger",
    # input collection of candidate secondary vertices
    secondaryVertices = cms.InputTag("inclusiveCandidateVertexFinder"),
    # NOTE(review): thresholds are consumed by the C++ plugin; presumably
    # maxFraction bounds track sharing between vertices and minSignificance
    # is a flight-significance cut -- confirm against the plugin source.
    maxFraction = cms.double(0.7),
    minSignificance = cms.double(2)
)
| 103 |
1,002 | // Copyright (c) Microsoft Corporation. All rights reserved.
//
// Licensed under the MIT License. See LICENSE.txt in the project root for license information.
#include "pch.h"
#include "CanvasCachedGeometry.h"
using namespace ABI::Microsoft::Graphics::Canvas::Geometry;
using namespace ABI::Microsoft::Graphics::Canvas;
// Creates a cached fill of the geometry using D2D's default flattening
// tolerance; simply forwards to the tolerance-taking overload.
IFACEMETHODIMP CanvasCachedGeometryFactory::CreateFill(
    ICanvasGeometry* geometry,
    ICanvasCachedGeometry** cachedGeometry)
{
    return CreateFillWithFlatteningTolerance(
        geometry,
        D2D1_DEFAULT_FLATTENING_TOLERANCE,
        cachedGeometry);
}
// Creates a cached fill realization of the given geometry using the
// caller-supplied flattening tolerance. The realization is created on the
// same device as the source geometry.
IFACEMETHODIMP CanvasCachedGeometryFactory::CreateFillWithFlatteningTolerance(
    ICanvasGeometry* geometry,
    float flatteningTolerance,
    ICanvasCachedGeometry** cachedGeometry)
{
    return ExceptionBoundary(
        [&]
        {
            CheckInPointer(geometry);
            CheckAndClearOutPointer(cachedGeometry);

            // Resolve the device that owns the source geometry.
            ComPtr<ICanvasDevice> owningDevice;
            ThrowIfFailed(geometry->get_Device(&owningDevice));

            auto realization = CanvasCachedGeometry::CreateNew(owningDevice.Get(), geometry, flatteningTolerance);
            ThrowIfFailed(realization.CopyTo(cachedGeometry));
        });
}
// Creates a cached stroke with the default flattening tolerance and no
// stroke style (CreateStrokeImpl accepts a null style).
IFACEMETHODIMP CanvasCachedGeometryFactory::CreateStroke(
    ICanvasGeometry* geometry,
    float strokeWidth,
    ICanvasCachedGeometry** cachedGeometry)
{
    return ExceptionBoundary(
        [&]
        {
            CreateStrokeImpl(geometry, strokeWidth, nullptr, D2D1_DEFAULT_FLATTENING_TOLERANCE, cachedGeometry);
        });
}
// Creates a cached stroke using an explicit stroke style and the default
// flattening tolerance. Unlike CreateStroke, the style must not be null.
IFACEMETHODIMP CanvasCachedGeometryFactory::CreateStrokeWithStrokeStyle(
    ICanvasGeometry* geometry,
    float strokeWidth,
    ICanvasStrokeStyle* strokeStyle,
    ICanvasCachedGeometry** cachedGeometry)
{
    return ExceptionBoundary(
        [&]
        {
            CheckInPointer(strokeStyle);
            CreateStrokeImpl(geometry, strokeWidth, strokeStyle, D2D1_DEFAULT_FLATTENING_TOLERANCE, cachedGeometry);
        });
}
// Fully-parameterized stroke-caching overload: explicit stroke style
// (must not be null) and caller-supplied flattening tolerance.
IFACEMETHODIMP CanvasCachedGeometryFactory::CreateStrokeWithStrokeStyleAndFlatteningTolerance(
    ICanvasGeometry* geometry,
    float strokeWidth,
    ICanvasStrokeStyle* strokeStyle,
    float flatteningTolerance,
    ICanvasCachedGeometry** cachedGeometry)
{
    return ExceptionBoundary(
        [&]
        {
            CheckInPointer(strokeStyle);
            CreateStrokeImpl(geometry, strokeWidth, strokeStyle, flatteningTolerance, cachedGeometry);
        });
}
// Shared implementation behind the stroke-caching factory overloads.
// Validates the arguments, resolves the device that owns the source
// geometry, and returns the new cached stroke through *cachedGeometry.
// strokeStyle may be null (callers that require a style check it first).
void CanvasCachedGeometryFactory::CreateStrokeImpl(
    ICanvasGeometry* geometry,
    float strokeWidth,
    ICanvasStrokeStyle* strokeStyle,
    float flatteningTolerance,
    ICanvasCachedGeometry** cachedGeometry)
{
    CheckInPointer(geometry);
    CheckAndClearOutPointer(cachedGeometry);

    // The realization lives on the same device as the source geometry.
    ComPtr<ICanvasDevice> owningDevice;
    ThrowIfFailed(geometry->get_Device(&owningDevice));

    auto realization = CanvasCachedGeometry::CreateNew(
        owningDevice.Get(), geometry, strokeWidth, strokeStyle, flatteningTolerance);
    ThrowIfFailed(realization.CopyTo(cachedGeometry));
}
// Wraps an existing D2D geometry realization and remembers the creating
// device so get_Device() can hand it back later.
CanvasCachedGeometry::CanvasCachedGeometry(
    ICanvasDevice* device,
    ID2D1GeometryRealization* d2dGeometryRealization)
    : ResourceWrapper(d2dGeometryRealization)
    , m_canvasDevice(device)
{
}
// Releases the device reference, then closes the wrapped D2D resource.
IFACEMETHODIMP CanvasCachedGeometry::Close()
{
    m_canvasDevice.Close();
    return ResourceWrapper::Close();
}
// Returns the device this cached geometry was created on.
// Fails (via EnsureNotClosed) if the object has already been closed.
IFACEMETHODIMP CanvasCachedGeometry::get_Device(ICanvasDevice** device)
{
    return ExceptionBoundary(
        [&]
        {
            CheckAndClearOutPointer(device);
            auto& canvasDevice = m_canvasDevice.EnsureNotClosed();
            ThrowIfFailed(canvasDevice.CopyTo(device));
        });
}
// Cached fills
// Builds a new cached *fill* realization of the geometry on the given
// device, flattened with the supplied tolerance.
ComPtr<CanvasCachedGeometry> CanvasCachedGeometry::CreateNew(
    ICanvasDevice* device,
    ICanvasGeometry* geometry,
    float flatteningTolerance)
{
    CheckInPointer(device);
    CheckInPointer(geometry);

    // Unwrap the D2D geometry and ask the device for a filled realization.
    auto d2dSource = GetWrappedResource<ID2D1Geometry>(geometry);
    auto realization = As<ICanvasDeviceInternal>(device)->CreateFilledGeometryRealization(
        d2dSource.Get(),
        flatteningTolerance);

    auto wrapper = Make<CanvasCachedGeometry>(device, realization.Get());
    CheckMakeResult(wrapper);
    return wrapper;
}
// Cached strokes
// Builds a new cached *stroke* realization of the geometry on the given
// device. strokeStyle may be null, in which case the default style is used
// (MaybeGetStrokeStyleResource handles the null case).
ComPtr<CanvasCachedGeometry> CanvasCachedGeometry::CreateNew(
    ICanvasDevice* device,
    ICanvasGeometry* geometry,
    float strokeWidth,
    ICanvasStrokeStyle* strokeStyle,
    float flatteningTolerance)
{
    CheckInPointer(device);
    CheckInPointer(geometry);

    auto d2dSource = GetWrappedResource<ID2D1Geometry>(geometry);
    auto d2dStrokeStyle = MaybeGetStrokeStyleResource(d2dSource.Get(), strokeStyle);

    auto realization = As<ICanvasDeviceInternal>(device)->CreateStrokedGeometryRealization(
        d2dSource.Get(),
        strokeWidth,
        d2dStrokeStyle.Get(),
        flatteningTolerance);

    auto wrapper = Make<CanvasCachedGeometry>(device, realization.Get());
    CheckMakeResult(wrapper);
    return wrapper;
}
// Registers CanvasCachedGeometry as an activatable WinRT class, paired with
// its factory type.
ActivatableClassWithFactory(CanvasCachedGeometry, CanvasCachedGeometryFactory);
| 2,089 |
1,338 | <filename>src/apps/processcontroller/TeamBarMenuItem.h
/*
* Copyright 2000, <NAME>. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _TEAM_BAR_MENU_ITEM_H_
#define _TEAM_BAR_MENU_ITEM_H_
#include <MenuItem.h>
class BBitmap;
// Menu item showing a team (process): its icon, name, and a bar visualizing
// CPU usage split into user and kernel time.
class TeamBarMenuItem : public BMenuItem {
public:
	// kill_team is the message to post when the item is invoked; if
	// deleteIcon is true this item takes ownership of icon.
	TeamBarMenuItem(BMenu* menu, BMessage* kill_team, team_id team,
		BBitmap* icon, bool deleteIcon);
	virtual ~TeamBarMenuItem();

	// BMenuItem overrides: custom drawing and sizing for icon + usage bar.
	virtual void DrawContent();
	virtual void GetContentSize(float* width, float* height);

	void DrawIcon();
	// Draws the usage bar; when force is true it redraws unconditionally.
	void DrawBar(bool force);
	// Refreshes fUser/fKernel from the team's usage info.
	void BarUpdate();
	void Init();
	// Re-targets this item at a different team, replacing label and icon.
	void Reset(char* name, team_id team, BBitmap* icon, bool deleteIcon);

	// Most recent user/kernel load values shown by the bar.
	double fUser;
	double fKernel;

private:
	team_id fTeamID;
	BBitmap* fIcon;
	team_usage_info fTeamUsageInfo;
	bigtime_t fLastTime;
	float fGrenze1;   // bar boundary positions ("Grenze" = German for "limit")
	float fGrenze2;
	bool fDeleteIcon; // whether the destructor deletes fIcon
};
#endif // _TEAM_BAR_MENU_ITEM_H_
| 393 |
370 | package com.netflix.ndbench.core;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.config.IConfiguration;
import org.apache.log4j.Level;
import org.junit.Rule;
import org.junit.Test;
import org.libex.test.TestBase;
import org.libex.test.logging.log4j.Log4jCapturer;
import org.libex.test.logging.log4j.Log4jCapturer.LogAssertion;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests that RPSCount logs a WARN message ("Observed Read/Write RPS") exactly
 * when the expected rate (from the rate limiter) exceeds the observed rate
 * (success+failure counts divided by the stats update frequency), and only
 * while the corresponding reads/writes have been started.
 */
public class RPSCountTest extends TestBase {
    // Captures log4j output so each case can assert on what RPSCount logged.
    @Rule
    public Log4jCapturer logCapturer = Log4jCapturer.builder().build();

    @Test
    public void testMessageLogged() {
        // Note: readSuccess+readFail will be divided by stats update frequency of 10,
        // and similarly for writeSuccess+writeFail
        //
        verifyLoggerActivity( // verify no logging if expected rate < observed rate
                "Observed Read RPS",
                false,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        100/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // verify no logging if expected rate == observed rate
                "Observed Read RPS",
                false,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        90/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Read RPS",
                true,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        89/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // expected rate > observed, but reads not started => no logging
                "Observed Read RPS",
                false,
                getRPSCount(
                        false, true, 9/*readRate*/, 1/*writeRate*/,
                        89/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // verify no logging if expected rate < observed rate
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        100/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // verify no logging if expected rate == observed rate
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        0/*readSuccess*/, 0/*readFail*/,
                        90/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Write RPS",
                true,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        89/*writeSuccess*/, 0/*writeFail*/));

        verifyLoggerActivity( // expected rate > observed, but writes not started => no logging
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, false, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        89/*writeSuccess*/, 0/*writeFail*/));
    }

    // Runs updateRPS() on the given counter and asserts that a WARN containing
    // fragmentOfExpectedLoggedMsg was (or was not) emitted. Also asserts that
    // nothing was logged at DEBUG level.
    private void verifyLoggerActivity(String fragmentOfExpectedLoggedMsg,
                                      boolean shouldBeLogged,
                                      RPSCount counter) {
        logCapturer.clearLog();
        counter.updateRPS();
        logCapturer.assertThat(LogAssertion.newLogAssertion()
                .withLevel(Level.DEBUG).isNotLogged());
        LogAssertion assertionTmp = LogAssertion.newLogAssertion()
                .withLevel(Level.WARN).withRenderedMessage(fragmentOfExpectedLoggedMsg);
        LogAssertion assertion;
        if (shouldBeLogged) {
            assertion = assertionTmp.isLogged();
        } else {
            assertion = assertionTmp.isNotLogged();
        }
        logCapturer.assertThat(assertion);
    }

    // Builds an RPSCount wired to mocks: a config with a 10-second stats
    // window, a monitor returning the given success/failure counts, and rate
    // limiters at the given expected read/write rates.
    private RPSCount getRPSCount(boolean readsStarted,
                                 boolean writesStarted,
                                 double readRate,
                                 double writeRate,
                                 long readSuccess,
                                 long readFailure,
                                 long writeSuccess,
                                 long writeFailure) {
        IConfiguration config = mock(IConfiguration.class);
        when(config.getStatsUpdateFreqSeconds()).thenReturn(10);
        when(config.isReadEnabled()).thenReturn(true);
        when(config.isWriteEnabled()).thenReturn(true);

        NdBenchMonitor monitor = mock(NdBenchMonitor.class);
        when(monitor.getReadSuccess()).thenReturn(readSuccess);
        when(monitor.getReadFailure()).thenReturn(readFailure);
        when(monitor.getWriteSuccess()).thenReturn(writeSuccess);
        when(monitor.getWriteFailure()).thenReturn(writeFailure);

        RPSCount counter =
                new RPSCount(
                        new AtomicBoolean(readsStarted),
                        new AtomicBoolean(writesStarted),
                        new AtomicReference(RateLimiter.create(readRate)),
                        new AtomicReference(RateLimiter.create(writeRate)),
                        config,
                        monitor);
        return counter;
    }
}
| 3,293 |
332 | <filename>Lib/getpass.py
"""Utilities to get a password and/or the current user name.
getpass(prompt) - prompt for a password, with echo turned off
getuser() - get the user name from the environment or password database
On Windows, the msvcrt module will be used.
On the Mac EasyDialogs.AskPassword is used, if available.
"""
# From CPython 2.5.1 with a fix to _raw_input (see
# http://bugs.python.org/issue1703 )
# Authors: <NAME> (original)
# <NAME> (Windows support and cleanup)
import os
import sys
__all__ = ["getpass","getuser"]
def jython_getpass(prompt='Password: ', stream=None):
    """Prompt for a password, with echo turned off (Jython console).

    The prompt is written on stream, by default stdout.
    """
    try:
        # Jython exposes the interactive console's JLine reader here.
        reader = sys._jy_console.reader
    except:
        # Not running under the Jython console; fall back to echoing input.
        return default_getpass(prompt)
    if stream is not None:
        # readLine can only print to stdout, so emit the prompt on the
        # caller's stream first and pass an empty prompt below.
        stream.write(prompt)
        prompt = ''
    # '\0' is the echo-mask character -- presumably suppresses echo entirely;
    # TODO confirm against the JLine readLine semantics.
    return reader.readLine(prompt, '\0').encode(sys._jy_console.encoding)
def unix_getpass(prompt='Password: ', stream=None):
    """Prompt for a password, with echo turned off.

    The prompt is written on stream, by default stdout.

    Terminal settings are restored before returning.
    """
    if stream is None:
        stream = sys.stdout
    try:
        fd = sys.stdin.fileno()
    except:
        # stdin is not a real file (e.g. replaced by a pipe/object);
        # fall back to the echoing default.
        return default_getpass(prompt)
    old = termios.tcgetattr(fd)     # a copy to save
    new = old[:]
    new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
    try:
        termios.tcsetattr(fd, termios.TCSADRAIN, new)
        passwd = _raw_input(prompt, stream)
    finally:
        # Always restore the original terminal attributes, even on error.
        termios.tcsetattr(fd, termios.TCSADRAIN, old)
    # The user's Enter was not echoed, so emit the newline ourselves.
    stream.write('\n')
    return passwd
def win_getpass(prompt='Password: ', stream=None):
    """Prompt for password with echo off, using Windows getch()."""
    if sys.stdin is not sys.__stdin__:
        # stdin was replaced; msvcrt.getch() would read the real console,
        # so use the generic fallback instead.
        return default_getpass(prompt, stream)
    import msvcrt
    for c in prompt:
        msvcrt.putch(c)
    pw = ""
    while True:
        c = msvcrt.getch()
        if c == '\r' or c == '\n':
            break
        if c == '\003':
            # Ctrl-C pressed.
            raise KeyboardInterrupt
        if c == '\b':
            # Backspace: drop the last character typed.
            pw = pw[:-1]
        else:
            pw = pw + c
    msvcrt.putch('\r')
    msvcrt.putch('\n')
    return pw
def default_getpass(prompt='Password: ', stream=None):
    # Last-resort fallback: reads with echo ON, warning the user first.
    print >>sys.stderr, "Warning: Problem with getpass. Passwords may be echoed."
    return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None):
# A raw_input() replacement that doesn't save the string in the
# GNU readline history.
if stream is None:
stream = sys.stdout
prompt = str(prompt)
if prompt:
stream.write(prompt)
stream.flush()
line = sys.stdin.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
    """Get the username from the environment or password database.

    First try various environment variables, then the password
    database. This works on Windows as long as USERNAME is set.
    """
    env_vars = ('LOGNAME', 'USER', 'LNAME', 'USERNAME')
    for var in env_vars:
        value = os.environ.get(var)
        if value:
            return value
    # No environment variable was set; consult the password database.
    # If this fails, the exception will "explain" why.
    import pwd
    return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate platform-specific function.
try:
    import termios
    # it's possible there is an incompatible termios from the
    # McMillan Installer, make sure we have a UNIX-compatible termios
    termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
    try:
        import msvcrt
    except ImportError:
        try:
            from EasyDialogs import AskPassword
        except ImportError:
            if os.name == 'java':
                getpass = jython_getpass
            else:
                # EasyDialogs is unavailable here, so fall back to the
                # echoing default. (Previously this branch assigned
                # AskPassword, which had just failed to import and would
                # raise NameError at call time.)
                getpass = default_getpass
        else:
            getpass = AskPassword
    else:
        getpass = win_getpass
else:
    getpass = unix_getpass
| 1,648 |
469 | <reponame>chengkunxf/MeInGame
import logging
import os
import math
from time import time
from glob import glob
import cv2
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.renderer import (DirectionalLights, MeshRasterizer, MeshRenderer,
OpenGLPerspectiveCameras, RasterizationSettings,
TexturedSoftPhongShader, look_at_view_transform)
from pytorch3d.structures import Meshes, Textures
from pytorch3d.transforms import Transform3d
from skimage import io
import utils
# from lib import meshio
from lib.gaussian import gaussian_blur
from lib.loss import AdversarialLoss, PerceptualLoss, StyleLoss
from networks import Generator, ImageDiscriminator, UVMapDiscriminator
class InpaintingModel(nn.Module):
  def __init__(self, config, device, rot_order, debug=False):
    """Builds the UV-texture inpainting model.

    Loads the facial-region masks from disk, loads the face mesh(es),
    constructs the generator (and, in train mode, the discriminators,
    losses and optimizers), then initializes the UV templates and the
    differentiable renderer.

    Args:
      config: experiment configuration object (sizes, weights, paths, mode).
      device: torch device all tensors/modules are placed on.
      rot_order: rotation-order spec stored for later use -- semantics
        defined by callers; not interpreted here.
      debug: enables debug behavior (stored only).
    """
    super(InpaintingModel, self).__init__()
    self.debug = debug
    self.rot_order = rot_order
    self.device = device
    # if torch.cuda.device_count() > 1:
    #   self.device = torch.device('cuda:1')
    # else:
    #   self.device = self.device
    self.config = config
    # self.parser = 'hq' in config.name
    self.parser = False
    self.name = config.name
    self.batch_size = config.batch_size
    self.im_size = config.im_size    # rendered image resolution
    self.uv_size = config.uv_size    # UV texture resolution
    self.log = logging.getLogger('x')
    self.iteration = 0
    # small_image = io.imread(os.path.join(config.root_dir, 'data/uv_param/small_mask.png'))
    # small_mask = small_image[..., :3] == [0, 255, 0]
    # small_mask = np.all(small_mask, axis=-1)
    # self.small_mask = torch.from_numpy(small_mask)
    # self.ds_scale = self.uv_size // small_image.shape[0]
    self.mask_dir = 'data/uv_param/masks'
    # Region masks in UV space; load_mask's second argument presumably
    # dilates (positive) or erodes (negative) the mask -- defined elsewhere
    # in this file, TODO confirm.
    # self.brow_mask = 1 - self.load_mask('data/uv_param/masks/brow_mask.png')
    ear_mask = 1 - self.load_mask(os.path.join(self.mask_dir, 'ear_mask.png'),
                                  -self.uv_size // 16)
    eye_mask = 1 - self.load_mask(os.path.join(self.mask_dir, 'eye_mask.png'))
    self.hair_mask = 1 - self.load_mask(
        os.path.join(self.mask_dir, 'hair_mask.png'))
    self.lip_mask = 1 - self.load_mask(
        os.path.join(self.mask_dir, 'lip_mask.png'), -self.uv_size // 32)
    # self.tone_mask = 1 - self.load_mask('data/uv_param/masks/tone_mask.png')
    self.skin_mask = self.load_mask(
        os.path.join(self.mask_dir, 'skin_mask_for_loss.png'),
        self.uv_size // 32)
    self.skin_ear_mask = torch.clamp(self.skin_mask + ear_mask, min=0, max=1)
    self.face_mask = self.load_mask(
        os.path.join(self.mask_dir, 'face_mask.png'), self.uv_size // 16)
    # Face region excluding eyes and lips.
    self.face_mask = torch.clamp(self.face_mask - eye_mask - self.lip_mask,
                                 min=0, max=1)
    # One mesh per supported face model; extended to 2x batch size because
    # forward() concatenates the image batch with its horizontal flip.
    self.meshes = {}
    for face_model in ['230']:
      mesh_path = os.path.join(config.root_dir, 'data', 'mesh', face_model,
                               'nsh_bfm_face.obj')
      mesh = load_objs_as_meshes([mesh_path], self.device)
      self.meshes[face_model] = mesh.extend(self.batch_size * 2)
    # Checkpoint file name stems (resolution-dependent).
    self.ckpt_dir = os.path.join('checkpoints', self.name)
    os.makedirs(self.ckpt_dir, exist_ok=True)
    self.gen_weights_name = os.path.join(
        self.ckpt_dir, '{}_{}_gen'.format(self.im_size, self.uv_size))
    self.im_dis_weights_name = os.path.join(
        self.ckpt_dir, '{}_{}_im_dis'.format(self.im_size, self.uv_size))
    self.uv_dis_weights_name = os.path.join(
        self.ckpt_dir, '{}_{}_uv_dis'.format(self.im_size, self.uv_size))
    # self.gen_weights_path = os.path.join(self.ckpt_dir, self.name + '_gen.pth')
    # self.im_dis_weights_path = os.path.join(self.ckpt_dir, self.name + '_im_dis.pth')
    # self.uv_dis_weights_path = os.path.join(self.ckpt_dir, self.name + '_uv_dis.pth')
    self.generator = Generator(3, 8, config).to(self.device)
    if self.config.use_cuda:
      self.generator = nn.parallel.DataParallel(self.generator)
    if config.mode == 'train':
      # Discriminators are only needed when the adversarial term is active.
      if config.adv_weight > 0:
        self.image_disc = ImageDiscriminator(3, config).to(self.device)
        self.uvmap_disc = UVMapDiscriminator(3, config).to(self.device)
      # if self.config.use_cuda:
      #   # if torch.cuda.device_count() > 1:
      #   self.generator = nn.parallel.DistributedDataParallel(self.generator)
      #   self.image_disc = nn.parallel.DistributedDataParallel(self.image_disc)
      #   self.uvmap_disc = nn.parallel.DistributedDataParallel(self.uvmap_disc)
      self.l1_loss = nn.L1Loss()
      # self.l1_loss = nn.SmoothL1Loss()
      # self.smooth_l1_loss = nn.SmoothL1Loss()
      # self.l2_loss = nn.MSELoss()
      self.perceptual_loss = PerceptualLoss()
      self.style_loss = StyleLoss()
      # self.facial_loss = FacialLoss()
      # WGAN uses its own mean-based loss in process(); every other GAN
      # type goes through AdversarialLoss.
      if self.config.gan_loss != 'wgan':
        self.adversarial_loss = AdversarialLoss(types=config.gan_loss)
      self.gen_optimizer = optim.Adam(params=self.generator.parameters(),
                                      lr=config.learning_rate,
                                      betas=(config.beta1, config.beta2),
                                      weight_decay=0.0001)
      if config.adv_weight > 0:
        # Discriminators train with a 10x smaller learning rate.
        self.im_dis_optimizer = optim.Adam(params=self.image_disc.parameters(),
                                           lr=config.learning_rate * 0.1,
                                           betas=(config.beta1, config.beta2),
                                           weight_decay=0.001)
        self.uv_dis_optimizer = optim.Adam(params=self.uvmap_disc.parameters(),
                                           lr=config.learning_rate * 0.1,
                                           betas=(config.beta1, config.beta2),
                                           weight_decay=0.001)
    self.load_uvmasks()
    self.init_renderer()
  def load_uvmasks(self):
    """Loads the UV template texture and the blending masks from disk.

    Sets:
      self.uv_tmp:         template UV map, (1, 3, uv, uv), in [-1, 1].
      self.tmp_mean:       mean template color over the skin+ear region,
                           broadcastable to (1, 3, 1, 1).
      self.hair_tone_mask: soft hair/tone mask, (1, 1, uv, uv).
      self.blur_face_mask: Gaussian-feathered face mask, (1, 1, uv, uv).
    """

    def to_torch(x):
      # Resize to the working UV resolution and move onto the model device.
      x = cv2.resize(x, (self.uv_size, self.uv_size),
                     interpolation=cv2.INTER_NEAREST)
      return torch.from_numpy(x).to(self.device)

    uv_tmp = io.imread(os.path.join(self.mask_dir, 'uvmap.png'))[..., :3]
    # Normalize 0..255 -> [-1, 1].
    uv_tmp = to_torch(uv_tmp).float() / 127.5 - 1
    # Masks are stored in the alpha channel as 0/255; // 255 -> 0/1.
    skin_mask = io.imread(os.path.join(self.mask_dir,
                                       'skin_mask.png'))[..., -1] // 255
    ear_mask = 1 - io.imread(os.path.join(self.mask_dir,
                                          'ear_mask.png'))[..., -1] // 255
    skin_mask = np.clip(skin_mask + ear_mask, 0, 1)
    skin_mask = to_torch(skin_mask)
    # Mean template color over the skin region, kept broadcastable.
    self.tmp_mean = torch.mean(uv_tmp[skin_mask == 1], axis=0)[None, :, None,
                                                               None]
    self.uv_tmp = uv_tmp.permute(2, 0, 1)[None]
    hair_mask = 1 - io.imread(os.path.join(self.mask_dir,
                                           'hair_mask.png'))[..., -1] // 255
    tone_mask = 1 - io.imread(os.path.join(self.mask_dir,
                                           'tone_mask.png'))[..., -1] // 255
    # Feather the hair mask so template/generated blending has no hard seam.
    hair_mask = cv2.GaussianBlur(hair_mask.astype(np.float32), (77, 77), 49)
    hair_tone_mask = np.clip(hair_mask + tone_mask, 0, 1)[..., None]
    self.hair_tone_mask = to_torch(hair_tone_mask)[None, None]
    face_mask = io.imread(os.path.join(self.mask_dir,
                                       'face_mask.png'))[..., -1] // 255
    blur_face_mask = cv2.GaussianBlur(face_mask.astype(np.float32), (99, 99),
                                      49)[..., None]
    # blur_face_mask_bt = cv2.GaussianBlur(face_mask.astype(np.float32), (99, 99), 49)[..., None]
    # blur_face_mask[self.uv_size // 2:] = blur_face_mask_bt[self.uv_size // 2:]
    self.blur_face_mask = to_torch(blur_face_mask)[None, None]
  def init_renderer(self):
    """Builds the PyTorch3D camera, rasterizer and textured Phong renderer.

    The camera looks at the origin from distance 10 with a narrow FOV
    (values presumably matched to the face-reconstruction camera used when
    fitting the mesh -- TODO confirm against the fitting code).
    """
    # nsh_face_mesh = meshio.Mesh('data/mesh/nsh_bfm_face.obj')
    # self.nsh_face_tri = torch.from_numpy(nsh_face_mesh.triangles).type(
    #     torch.int64).to(self.device)
    R, T = look_at_view_transform(10, 0, 0)
    cameras = OpenGLPerspectiveCameras(znear=0.001, zfar=30.0, aspect_ratio=1.0,
                                       fov=12.5936, degrees=True, R=R, T=T,
                                       device=self.device)
    raster_settings = RasterizationSettings(image_size=self.im_size,
                                            blur_radius=0.0, faces_per_pixel=1,
                                            bin_size=0, cull_backfaces=True)
    self.rasterizer = MeshRasterizer(cameras=cameras,
                                     raster_settings=raster_settings)
    lights = DirectionalLights(device=self.device)
    shader = TexturedSoftPhongShader(device=self.device, cameras=cameras,
                                     lights=lights)
    self.renderer = MeshRenderer(rasterizer=self.rasterizer, shader=shader)
    # if torch.cuda.device_count() > 1:
    #   self.renderer = nn.parallel.DistributedDataParallel(self.renderer)
  def process(self, images_alpha, uvmaps_alpha, uvmap_gts, vertices, coeffs,
              uv_gt=True):
    """Runs one training step's forward pass and computes all losses.

    Args:
      images_alpha: input images with a skin-mask channel; the first 3
        channels are RGB, channel 3 is the skin mask.
      uvmaps_alpha: partial/unwrapped UV maps with alpha, fed to forward().
      uvmap_gts: ground-truth UV maps (used only when uv_gt is True).
      vertices, coeffs: mesh vertices and per-image coefficients passed
        through to the renderer.
      uv_gt: whether ground-truth UV supervision (L1/perceptual/style on
        the UV map) is available for this batch.

    Returns:
      (gen_uvmaps, gen_loss, im_dis_loss, uv_dis_loss, logs) where logs is
      a dict of scalar loss values for monitoring. Gradients are NOT
      stepped here; optimizers are only zeroed (the caller backprops).

    NOTE(review): the self.imsave(...) calls write debug snapshots to
    tmp/train/ on every invocation.
    """
    # zero optimizers
    self.gen_optimizer.zero_grad()
    if self.config.adv_weight > 0:
      self.im_dis_optimizer.zero_grad()
      self.uv_dis_optimizer.zero_grad()
    # process outputs
    images = images_alpha[:, :3].contiguous()
    im_skins = images_alpha[:, 3:4].contiguous()
    gen_uvmaps, renders_alpha, lights = self(images, uvmaps_alpha, vertices,
                                             coeffs)
    renders = renders_alpha[:, :3]
    renders_mask = renders_alpha[:, 3:]
    # Initialize every loss term to 0 so logging below is unconditional.
    gen_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    im_dis_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    uv_dis_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    im_gen_gan_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    uv_gen_gan_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_uv_std_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_uv_sym_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_rd_l1_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_rd_style_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_uv_style_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    gen_uv_content_loss = torch.tensor(0, dtype=torch.float32,
                                       device=self.device)
    gen_uv_l1_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    # Double the batch with horizontal flips (matches the renderer output,
    # which renders original + mirrored views).
    double_image = torch.cat([images, torch.flip(images, (3,))], dim=0)
    double_skins = torch.cat([im_skins, torch.flip(im_skins, (3,))], dim=0)
    render_mask = renders_mask * double_skins
    # Composite: rendered face over the photograph, limited to skin pixels.
    uv_merged = double_image * (1 - render_mask) + renders * render_mask
    uv_merged = uv_merged.contiguous()
    self.imsave('tmp/train/full_render.png', uv_merged[0, :3])
    self.imsave('tmp/train/full_render_flip.png',
                uv_merged[self.batch_size, :3])
    self.imsave('tmp/train/image_mask.png', double_skins[0, :3])
    self.imsave('tmp/train/image_mask_flip.png',
                double_skins[self.batch_size, :3])
    self.imsave('tmp/train/renders_mask.png', renders_mask[0, 0])
    self.imsave('tmp/train/render_mask.png', render_mask[0, 0])
    self.imsave('tmp/train/image.png', double_image[0, :3])
    self.imsave('tmp/train/image_flip.png', double_image[self.batch_size, :3])
    if self.config.adv_weight > 0:
      # discriminator loss (image discriminator: real photo vs composite)
      im_dis_input_real = images
      im_dis_input_fake = uv_merged[:self.batch_size].detach()
      self.imsave('tmp/train/im_dis_input_real.png', im_dis_input_real[0, :3])
      self.imsave('tmp/train/im_dis_input_fake.png', im_dis_input_fake[0, :3])
      im_dis_real = self.image_disc(im_dis_input_real)
      im_dis_fake = self.image_disc(im_dis_input_fake)
      if self.config.gan_loss == 'wgan':
        # WGAN-GP critic loss.
        im_dis_real_loss = -torch.mean(im_dis_real)
        im_dis_fake_loss = torch.mean(im_dis_fake)
        im_dis_gp = self.calculate_gradient_penalty(self.image_disc,
                                                    im_dis_input_real,
                                                    im_dis_input_fake)
        im_dis_loss += (im_dis_real_loss + im_dis_fake_loss +
                        im_dis_gp) * self.config.adv_weight
      else:
        im_dis_real_loss = self.adversarial_loss(im_dis_real, True, True)
        im_dis_fake_loss = self.adversarial_loss(im_dis_fake, False, True)
        im_dis_loss += (im_dis_real_loss + im_dis_fake_loss) / 2
      # UV-map discriminator: ground-truth UV vs generated UV.
      uv_dis_input_real = uvmap_gts
      if not uv_gt:
        # Without GT supervision, use the flipped GT as the "real" sample.
        uv_dis_input_real = torch.flip(uv_dis_input_real, (3,))
      self.imsave('tmp/train/uv_dis_input_real.png', uv_dis_input_real[0, :3])
      uv_dis_real = self.uvmap_disc(uv_dis_input_real)
      uv_dis_input_fake_1 = gen_uvmaps.detach()
      self.imsave('tmp/train/uv_dis_input_fake_1.png',
                  uv_dis_input_fake_1[0, :3])
      uv_dis_fake_1 = self.uvmap_disc(uv_dis_input_fake_1)
      if self.config.gan_loss == 'wgan':
        uv_dis_real_loss = -torch.mean(uv_dis_real)
        uv_dis_fake_loss_1 = torch.mean(uv_dis_fake_1)
        uv_dis_gp = self.calculate_gradient_penalty(self.uvmap_disc,
                                                    uv_dis_input_real,
                                                    uv_dis_input_fake_1)
        uv_dis_loss += (uv_dis_real_loss + uv_dis_fake_loss_1 +
                        uv_dis_gp) * self.config.adv_weight
      else:
        uv_dis_real_loss = self.adversarial_loss(uv_dis_real, True, True)
        uv_dis_fake_loss_1 = self.adversarial_loss(uv_dis_fake_1, False, True)
        uv_dis_loss += (uv_dis_real_loss + uv_dis_fake_loss_1) / 2
      # generator adversarial loss
      im_gen_input_fake = uv_merged[:self.batch_size]
      self.imsave('tmp/train/im_gen_input_fake.png', im_gen_input_fake[0, :3])
      im_gen_fake = self.image_disc(im_gen_input_fake)
      if self.config.gan_loss == 'wgan':
        im_gen_gan_loss = -torch.mean(im_gen_fake)
      else:
        im_gen_gan_loss = self.adversarial_loss(im_gen_fake, True,
                                                False) * self.config.adv_weight
      gen_loss += im_gen_gan_loss
      uv_gen_input_fake = gen_uvmaps
      self.imsave('tmp/train/uv_gen_input_fake.png', uv_gen_input_fake[0, :3])
      uv_gen_fake = self.uvmap_disc(uv_gen_input_fake)
      if self.config.gan_loss == 'wgan':
        uv_gen_gan_loss = -torch.mean(uv_gen_fake)
      else:
        uv_gen_gan_loss = self.adversarial_loss(uv_gen_fake, True,
                                                False) * self.config.adv_weight
      gen_loss += uv_gen_gan_loss
    #* Other Losses
    if self.config.sym_weight > 0 or self.config.std_weight > 0:
      # Heavy blur so symmetry/variance compare low-frequency color only.
      blur_gen_uvs = gaussian_blur(
          gen_uvmaps, (self.uv_size // 8 + 1, self.uv_size // 8 + 1),
          (self.uv_size // 32, self.uv_size // 32))
      self.imsave('tmp/train/blur_gen_uv.png', blur_gen_uvs[0, :3])
    # generator symmetry loss (UV map should match its mirror image)
    if self.config.sym_weight > 0:
      flipped_uv = torch.flip(blur_gen_uvs, dims=(3,))
      gen_uv_sym_loss = self.l1_loss(blur_gen_uvs, flipped_uv)
      self.imsave('tmp/train/uv_flip.png', flipped_uv[0, :3])
      gen_loss += gen_uv_sym_loss * self.config.sym_weight
    # generator variance loss (penalize color variation over skin/lips)
    if self.config.std_weight > 0:
      blur_uv_hsv = utils.rgb2hsv(blur_gen_uvs)
      gen_uv_std_loss = torch.mean(
          torch.std(blur_uv_hsv[:, :,
                                self.skin_ear_mask.type(torch.bool)], dim=-1))
      blur_gen_uvs_for_lip = gaussian_blur(
          gen_uvmaps, (self.uv_size // 32 + 1, self.uv_size // 32 + 1),
          (self.uv_size // 64, self.uv_size // 64))
      gen_uv_std_loss += torch.mean(
          torch.std(
              blur_gen_uvs_for_lip[:, :, self.lip_mask.type(torch.bool)],
              dim=-1)) * 0.05
      gen_loss += gen_uv_std_loss * self.config.std_weight
    if uv_gt:
      self.imsave('tmp/train/uvmap_gens.png', gen_uvmaps[0, :3])
      self.imsave('tmp/train/uvmap_gts.png', uvmap_gts[0, :3])
      # generator l1 loss uvmap (weighted 3x relative to the render L1)
      if self.config.l1_weight > 0:
        gen_uv_l1_loss = self.l1_loss(gen_uvmaps, uvmap_gts)
        gen_loss += gen_uv_l1_loss * self.config.l1_weight * 3
      # generator perceptual loss
      if self.config.con_weight > 0:
        gen_uv_content_loss = self.perceptual_loss(gen_uvmaps, uvmap_gts)
        gen_loss += gen_uv_content_loss * self.config.con_weight
      # generator style loss
      if self.config.sty_weight > 0:
        gen_uv_style_loss = self.style_loss(gen_uvmaps, uvmap_gts)
        gen_loss += gen_uv_style_loss * self.config.sty_weight
    # rendered L1 loss (composite should reproduce the photograph)
    if self.config.l1_weight > 0:
      gen_rd_l1_loss = self.l1_loss(double_image, uv_merged)
      gen_loss += gen_rd_l1_loss * self.config.l1_weight
    if self.config.sty_weight > 0:
      gen_rd_style_loss = self.style_loss(double_image, uv_merged)
      gen_loss += gen_rd_style_loss * self.config.sty_weight
    # Regularize the predicted light so ambient/diffuse stay near-gray.
    gen_loss += torch.mean(
        torch.std(lights[:, 0:3], dim=-1) +
        torch.std(lights[:, 3:6], dim=-1) * 0.3)
    # create logs
    logs = {
        'im_d': im_dis_loss.item(),
        'uv_d': uv_dis_loss.item(),
        'im_g': im_gen_gan_loss.item(),
        'uv_g': uv_gen_gan_loss.item(),
        'uv_std': gen_uv_std_loss.item(),
        'uv_sym': gen_uv_sym_loss.item(),
        'rd_l1': gen_rd_l1_loss.item(),
        'rd_sty': gen_rd_style_loss.item()
    }
    if uv_gt:
      logs['uv_sty'] = gen_uv_style_loss.item()
      logs['uv_con'] = gen_uv_content_loss.item()
      logs['uv_l1'] = gen_uv_l1_loss.item()
    return gen_uvmaps, gen_loss, im_dis_loss, uv_dis_loss, logs
  def forward(self, images, uvmaps_alpha, vertices, coeffs, fix_uv=False,
              deploy=False, face_model='230'):
    """Generate a full UV texture and (optionally) render it.

    Args:
      images: input face images for the generator (3-channel).
      uvmaps_alpha: partial UV map with alpha channel (4-channel -- per the
        comment below; exact NCHW shape assumed, TODO confirm).
      vertices: mesh vertices passed through to the renderer.
      coeffs: 3DMM coefficient vector (pose/translation split in rendering).
      fix_uv: when True, re-tones the non-face region using the UV template
        so skin color matches the generated face area.
      deploy: when True, return only the generated UV map (no rendering).
      face_model: key selecting the template mesh topology.

    Returns:
      deploy=True: gen_uvmaps.
      train mode: (gen_uvmaps, renders, light_params).
      test mode: (gen_uvmaps, renders, alb_rends) where alb_rends is an
        albedo-style re-render with flattened lighting.
    """
    # the input images should be 3 channel and uvmaps should be 4 channel
    # feed the generator both the UV map and its horizontal mirror
    uvmaps_flip = torch.flip(uvmaps_alpha, (3,))
    uvmaps_input = torch.cat([uvmaps_alpha, uvmaps_flip], dim=1)
    gen_uvmaps, light_params = self.generator(images, uvmaps_input)
    self.imsave('tmp/train/uv_before.png', gen_uvmaps[0, :3])
    if fix_uv:
      # shift the template to the generated face's mean tone, then blend:
      # keep generated pixels inside the (blurred) face mask, template outside
      face_mean = torch.mean(gen_uvmaps[..., self.face_mask == 1], axis=-1)
      new_uv = self.uv_tmp - self.tmp_mean + face_mean[..., None, None]
      new_uv = self.uv_tmp * self.hair_tone_mask + new_uv * (
          1 - self.hair_tone_mask)
      gen_uvmaps = gen_uvmaps * self.blur_face_mask + new_uv * (
          1 - self.blur_face_mask)
      self.imsave('tmp/train/uv_fix.png', gen_uvmaps[0, :3])
    if deploy:
      return gen_uvmaps
    else:
      renders = self.rendering(light_params, coeffs, vertices, gen_uvmaps,
                               face_model)
      if self.config.mode == 'test':
        # flatten lighting: fold diffuse into ambient, disable diff/spec,
        # to obtain an albedo-only rendering for evaluation
        light_params[:, 0:3] = light_params[:, 0:3] + light_params[:, 3:6] + 1.0
        light_params[:, 3:9] = -1
        alb_rends = self.rendering(light_params, coeffs, vertices, gen_uvmaps,
                                   face_model)
      self.imsave('tmp/train/uv_flip.png', uvmaps_flip[0, :4])
      self.imsave('tmp/train/uv_input.png', uvmaps_input[0, :4])
      self.imsave('tmp/train/gen_uvmap.png', gen_uvmaps[0])
      self.imsave('tmp/train/renders0.png', renders[0, :3])
      self.imsave('tmp/train/renders1.png', renders[1, :3])
      self.imsave('tmp/train/rend_mask.png', renders[0, 3])
      if self.config.mode != 'test':
        return gen_uvmaps, renders, light_params
      else:
        return gen_uvmaps, renders, alb_rends
  def rendering(self, light_params, coeffs, vertices, gen_uvmaps, face_model):
    """Render the textured mesh and its horizontally mirrored counterpart.

    The batch is doubled: the second half uses mirrored pose angles and
    translation so symmetric losses can compare the two renderings.

    Args:
      light_params: lighting vector per sample; slices [0:3]=ambient,
        [3:6]=diffuse, [6:9]=specular, [9:12]=light direction.
      coeffs: BFM09 coefficient vector; only angles/translation are used.
      vertices: mesh vertices (batched -- TODO confirm (N, V, 3)).
      gen_uvmaps: generated UV texture in [-1, 1].
      face_model: key into self.meshes for faces/uv topology.

    Returns:
      Tensor (2N, 4, H, W): RGB rescaled back to [-1, 1] plus binary mask.
    """
    # map raw network outputs into valid color ranges
    ambient_color = torch.clamp(0.5 + 0.5 * light_params[:, 0:3], 0, 1)
    diffuse_color = torch.clamp(0.5 + 0.5 * light_params[:, 3:6], 0, 1)
    specular_color = torch.clamp(0.2 + 0.2 * light_params[:, 6:9], 0, 1)
    direction = light_params[:, 9:12]
    # second half of the batch gets the x-mirrored light direction
    directions = torch.cat([
        direction, direction *
        torch.tensor([[-1, 1, 1]], dtype=torch.float, device=self.device)
    ], dim=0)
    lights = DirectionalLights(ambient_color=ambient_color.repeat(2, 1),
                               diffuse_color=diffuse_color.repeat(2, 1),
                               specular_color=specular_color.repeat(2, 1),
                               direction=directions, device=self.device)
    self.renderer.shader.lights = lights
    _, _, _, angles, _, trans = utils.split_bfm09_coeff(coeffs)
    # mirrored pose: flip pitch/roll signs and negate x translation
    reflect_angles = torch.cat([
        angles, angles *
        torch.tensor([[1, -1, -1]], dtype=torch.float, device=self.device)
    ], dim=0)
    reflect_trans = torch.cat([
        trans, trans *
        torch.tensor([[-1, 1, 1]], dtype=torch.float, device=self.device)
    ], dim=0)
    rotated_vert = self.rotate_vert(vertices.repeat(2, 1, 1), reflect_angles,
                                    reflect_trans)
    # texture lookup expects [0, 1] colors and a flipped (H, W) orientation
    fliped_uv = torch.flip(gen_uvmaps / 2 + 0.5,
                           (2, 3)).repeat(2, 1, 1, 1).permute(0, 2, 3, 1)
    texture = Textures(
        maps=fliped_uv,
        faces_uvs=self.meshes[face_model].textures.faces_uvs_padded(),
        verts_uvs=self.meshes[face_model].textures.verts_uvs_padded())
    meshes = Meshes(rotated_vert, self.meshes[face_model].faces_padded(),
                    texture)
    renders = self.renderer(meshes)
    # back to [-1, 1] RGB; binarize the alpha/coverage channel
    renders[..., :3] = renders[..., :3] * 2 - 1
    renders[..., -1] = (renders[..., -1] > 0).float()
    renders = renders.permute(0, 3, 1, 2).contiguous()
    return renders
def rotate_vert(self, vertices, angles, trans):
transformer = Transform3d(device=self.device)
transformer = transformer.rotate_axis_angle(angles[:, 0], self.rot_order[0],
False)
transformer = transformer.rotate_axis_angle(angles[:, 1], self.rot_order[1],
False)
transformer = transformer.rotate_axis_angle(angles[:, 2], self.rot_order[2],
False)
transformer = transformer.translate(trans)
rotate_vert = transformer.transform_points(vertices)
return rotate_vert
def calculate_gradient_penalty(self, discrimiator, real_data, fake_data):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake data
alpha = torch.rand(real_data.size(0), 1, 1, 1).to(self.device)
interpolates = (alpha * real_data +
((1 - alpha) * fake_data)).requires_grad_(True).to(
self.device)
discrimiator_interpolates = discrimiator(interpolates)
fake = torch.ones(
discrimiator_interpolates.size()).requires_grad_(False).to(self.device)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(outputs=discrimiator_interpolates,
inputs=interpolates, grad_outputs=fake,
create_graph=True, retain_graph=True,
only_inputs=True)[0]
# lambda for gradient penalty is set to 10
gradient_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() * 10
return gradient_penalty
def backward(self, gen_loss=None, im_dis_loss=None, uv_dis_loss=None):
if self.config.adv_weight > 0:
if im_dis_loss is not None:
im_dis_loss.backward()
self.im_dis_optimizer.step()
if uv_dis_loss is not None:
uv_dis_loss.backward()
self.uv_dis_optimizer.step()
if gen_loss is not None:
gen_loss.backward()
self.gen_optimizer.step()
def load_pth(self, path):
self.log.info('Loading checkpoint from %s ...', path)
data = torch.load(path, map_location=self.device)
return data
  def load(self):
    """Restore the newest generator checkpoint (and, in train mode, the
    newest discriminator checkpoints) from disk.

    Returns:
      Epoch number parsed from the newest generator checkpoint file name
      (pattern: <name>_<epoch>.pth), or 0 when none exists.
    """
    gen_weights_paths = sorted(
        glob(
            os.path.join(self.config.root_dir,
                         self.gen_weights_name + '_*.pth')))
    epoch = 0
    if gen_weights_paths:
      data = self.load_pth(gen_weights_paths[-1])
      # epoch is the suffix after the last underscore in the file name
      epoch = int(
          os.path.split(gen_weights_paths[-1])[-1].split('.')[0].split('_')[-1])
      if not self.config.use_cuda:
        # presumably strips DataParallel 'module.' prefixes -- TODO confirm
        # against utils.fix_state_dict
        data['generator'] = utils.fix_state_dict(data['generator'])
      self.generator.load_state_dict(data['generator'])
      self.iteration = data['iteration']
    # load discriminator only when training
    if self.config.mode == 'train':
      # NOTE(review): unlike the generator glob above, these patterns are not
      # joined with config.root_dir -- verify the *_weights_name values
      # already embed the directory.
      im_dis_weights_paths = sorted(glob(self.im_dis_weights_name + '_*.pth'))
      if im_dis_weights_paths:
        data = self.load_pth(im_dis_weights_paths[-1])
        if not self.config.use_cuda:
          data['image_disc'] = utils.fix_state_dict(data['image_disc'])
        self.image_disc.load_state_dict(data['image_disc'])
      uv_dis_weights_paths = sorted(glob(self.uv_dis_weights_name + '_*.pth'))
      if uv_dis_weights_paths:
        data = self.load_pth(uv_dis_weights_paths[-1])
        if not self.config.use_cuda:
          data['uvmap_disc'] = utils.fix_state_dict(data['uvmap_disc'])
        self.uvmap_disc.load_state_dict(data['uvmap_disc'])
    return epoch
def save(self, idx):
torch.save(
{
'iteration': self.iteration,
'generator': self.generator.state_dict()
}, '{}_{:>04}.pth'.format(self.gen_weights_name, idx))
if self.config.adv_weight > 0:
torch.save({'image_disc': self.image_disc.state_dict()},
'{}_{:>04}.pth'.format(self.im_dis_weights_name, idx))
torch.save({'uvmap_disc': self.uvmap_disc.state_dict()},
'{}_{:>04}.pth'.format(self.uv_dis_weights_name, idx))
self.log.info('Saved checkpoint to %s.\n', self.name)
def imsave(self, path, image, debug=False):
if debug or self.debug:
io.imsave(path, utils.to_uint8_torch(image.cpu()))
  def load_mask(self, path, erode=0):
    """Load a binary mask from the alpha (last) channel of an image file.

    Args:
      path: image path relative to config.root_dir.
      erode: positive -> shrink the mask, negative -> grow it; the magnitude
        sets the structuring-element size (erode // 4, applied 4 times).

    Returns:
      Int tensor of shape (uv_size, uv_size) with values in {0, 1} on
      self.device.
    """
    mask = io.imread(os.path.join(self.config.root_dir, path))[..., -1]
    # nearest-neighbour resize keeps the mask strictly binary
    mask = cv2.resize(mask, (self.uv_size, self.uv_size),
                      interpolation=cv2.INTER_NEAREST)
    mask = mask // 255
    if erode > 0:
      mask = cv2.erode(mask, np.ones((erode // 4, erode // 4)), iterations=4)
    elif erode < 0:
      mask = cv2.dilate(mask, np.ones((-erode // 4, -erode // 4)), iterations=4)
    mask = torch.from_numpy(mask).to(self.device)
    return mask.int()
def to_tensor(self, array, dtype=torch.float32):
if not isinstance(array, np.ndarray):
array = np.array(array)
return torch.from_numpy(array).type(dtype).to(self.device)
| 13,098 |
839 | <gh_stars>100-1000
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.systest.mtom_bindingtype;
import java.awt.Image;
import java.io.ByteArrayOutputStream;
import java.io.PrintWriter;
import java.net.URL;
import javax.imageio.ImageIO;
import javax.xml.namespace.QName;
import javax.xml.ws.BindingProvider;
import javax.xml.ws.Holder;
import javax.xml.ws.soap.SOAPBinding;
import org.apache.cxf.ext.logging.LoggingInInterceptor;
import org.apache.cxf.ext.logging.LoggingOutInterceptor;
import org.apache.cxf.systest.mtom_feature.Hello;
import org.apache.cxf.systest.mtom_feature.HelloService;
import org.apache.cxf.systest.mtom_feature.ImageHelper;
import org.apache.cxf.testutil.common.AbstractBusClientServerTestBase;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
// System test: verifies that enabling MTOM on the SOAP binding makes binary
// payloads travel as XOP attachments, by grepping the logged wire traffic
// for the xop:Include element in both directions.
public class MTOMBindingTypeTest extends AbstractBusClientServerTestBase {
    public static final String PORT = Server.PORT;

    // Must match the service declared in /wsdl_systest/mtom.wsdl.
    private final QName serviceName = new QName("http://apache.org/cxf/systest/mtom_feature",
                                                "HelloService");

    //private Hello port = getPort();

    @Before
    public void setUp() throws Exception {
        // fresh bus per test so the logging interceptors installed below
        // do not leak between test methods
        createBus();
    }

    @BeforeClass
    public static void startServers() throws Exception {
        assertTrue("server did not launch correctly", launchServer(Server.class, true));
    }

    // Capture inbound wire traffic of this.bus into the returned buffer.
    protected ByteArrayOutputStream setupInLogging() {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        PrintWriter writer = new PrintWriter(bos, true);
        LoggingInInterceptor in = new LoggingInInterceptor(writer);
        this.bus.getInInterceptors().add(in);
        return bos;
    }

    // Capture outbound wire traffic of this.bus into the returned buffer.
    protected ByteArrayOutputStream setupOutLogging() {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        PrintWriter writer = new PrintWriter(bos, true);

        LoggingOutInterceptor out = new LoggingOutInterceptor(writer);
        this.bus.getOutInterceptors().add(out);

        return bos;
    }

    @Test
    public void testDetail() throws Exception {
        ByteArrayOutputStream input = setupInLogging();
        ByteArrayOutputStream output = setupOutLogging();

        Holder<byte[]> photo = new Holder<>("CXF".getBytes());
        Holder<Image> image = new Holder<>(getImage("/java.jpg"));

        Hello port = getPort();
        SOAPBinding binding = (SOAPBinding) ((BindingProvider)port).getBinding();
        binding.setMTOMEnabled(true);

        port.detail(photo, image);

        // both request and response must carry the payload as an XOP attachment
        String expected = "<xop:Include ";
        assertTrue(output.toString().indexOf(expected) != -1);
        assertTrue(input.toString().indexOf(expected) != -1);

        // in/out holders come back with the unchanged photo and a decoded image
        assertEquals("CXF", new String(photo.value));
        assertNotNull(image.value);
    }

    @Test
    @org.junit.Ignore
    public void testEcho() throws Exception {
        byte[] bytes = ImageHelper.getImageBytes(getImage("/java.jpg"), "image/jpeg");
        Holder<byte[]> image = new Holder<>(bytes);
        Hello port = getPort();
        SOAPBinding binding = (SOAPBinding) ((BindingProvider)port).getBinding();
        binding.setMTOMEnabled(true);
        port.echoData(image);
        assertNotNull(image);
    }

    private Image getImage(String name) throws Exception {
        return ImageIO.read(getClass().getResource(name));
    }

    // Build a client proxy from the bundled WSDL and point it at the test port.
    private Hello getPort() {
        URL wsdl = getClass().getResource("/wsdl_systest/mtom.wsdl");
        assertNotNull("WSDL is null", wsdl);

        HelloService service = new HelloService(wsdl, serviceName);
        assertNotNull("Service is null ", service);
        Hello hello = service.getHelloPort();
        try {
            updateAddressPort(hello, PORT);
        } catch (Exception ex) {
            //ignore
        }
        return hello;
    }
}
| 1,712 |
1,853 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dromara.hmily.xa.core;
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
import javax.transaction.InvalidTransactionException;
import javax.transaction.NotSupportedException;
import javax.transaction.RollbackException;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import javax.transaction.UserTransaction;
/**
* UserTransactionImpl .
*
* @author <NAME>
*/
public class UserTransactionImpl implements UserTransaction, TransactionManager {

    // Lazily resolved delegate; every interface method forwards to it.
    private TransactionManager tm;

    /**
     * Resolve the delegate transaction manager on first use.
     * Re-assignment is harmless because INST is a singleton.
     *
     * @return the shared {@link TransactionManagerImpl} instance
     */
    private TransactionManager delegate() {
        if (tm == null) {
            tm = TransactionManagerImpl.INST;
        }
        return tm;
    }

    @Override
    public void begin() throws NotSupportedException, SystemException {
        delegate().begin();
    }

    @Override
    public void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException, SecurityException, IllegalStateException, SystemException {
        delegate().commit();
    }

    @Override
    public void rollback() throws IllegalStateException, SecurityException, SystemException {
        delegate().rollback();
    }

    @Override
    public void setRollbackOnly() throws IllegalStateException, SystemException {
        delegate().setRollbackOnly();
    }

    @Override
    public int getStatus() throws SystemException {
        return delegate().getStatus();
    }

    @Override
    public Transaction getTransaction() throws SystemException {
        return delegate().getTransaction();
    }

    @Override
    public void resume(final Transaction transaction) throws InvalidTransactionException, IllegalStateException, SystemException {
        delegate().resume(transaction);
    }

    @Override
    public void setTransactionTimeout(final int i) throws SystemException {
        delegate().setTransactionTimeout(i);
    }

    @Override
    public Transaction suspend() throws SystemException {
        return delegate().suspend();
    }
}
| 869 |
360 | <gh_stars>100-1000
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* multi_redo_api.cpp
* Defines GUC options for parallel recovery.
*
* IDENTIFICATION
* src/gausskernel/storage/access/transam/multi_redo_api.cpp
*
* -------------------------------------------------------------------------
*/
#include <stdio.h>
#include <unistd.h>
#include "postgres.h"
#include "knl/knl_variable.h"
#include "utils/guc.h"
#include "access/multi_redo_settings.h"
#include "access/multi_redo_api.h"
#include "access/extreme_rto/dispatcher.h"
#include "access/parallel_recovery/dispatcher.h"
#include "access/extreme_rto/page_redo.h"
#include "access/parallel_recovery/page_redo.h"
/* Whether the hot-standby consistency view is available in this build. */
#ifdef ENABLE_MULTIPLE_NODES
bool g_supportHotStandby = false; /* don't support consistency view */
#else
bool g_supportHotStandby = true; /* support consistency view */
#endif
/*
 * Launch the recovery workers of the configured multi-redo engine.
 * No-op when neither extreme RTO nor parallel redo is enabled.
 */
void StartUpMultiRedo(XLogReaderState *xlogreader, uint32 privateLen)
{
    if (IsExtremeRedo()) {
        extreme_rto::StartRecoveryWorkers(xlogreader, privateLen);
    } else if (IsParallelRedo()) {
        parallel_recovery::StartRecoveryWorkers();
    }
}

/* True while either multi-threaded redo dispatcher (extreme or parallel) exists. */
bool IsMultiThreadRedoRunning()
{
    return (get_real_recovery_parallelism() > 1 &&
            (extreme_rto::g_dispatcher != 0 || parallel_recovery::g_dispatcher != 0));
}

/* True only when the extreme-RTO dispatcher is up and owns at least one page line. */
bool IsExtremeRtoRunning()
{
    return (get_real_recovery_parallelism() > 1 && extreme_rto::g_dispatcher != 0 &&
            extreme_rto::g_dispatcher->pageLineNum > 0);
}
/* Report and consume a pending extreme-RTO smart-shutdown request. */
bool IsExtremeRtoSmartShutdown()
{
    if (!IsExtremeRtoRunning()) {
        return false;
    }
    bool requested = extreme_rto::g_dispatcher->smartShutdown;
    if (requested) {
        /* one-shot flag: clear it once observed */
        extreme_rto::g_dispatcher->smartShutdown = false;
    }
    return requested;
}
/* Push the redo-end marker into the extreme-RTO read queue so replay can finish. */
void ExtremeRtoRedoManagerSendEndToStartup()
{
    if (!IsExtremeRtoRunning()) {
        return;
    }

    extreme_rto::g_redoEndMark.record.isDecode = true;
    extreme_rto::PutRecordToReadQueue((XLogReaderState *)&extreme_rto::g_redoEndMark.record);
}

/* Whether the extreme-RTO xlog read worker is neither stopped nor exited. */
bool IsExtremeRtoReadWorkerRunning()
{
    if (!IsExtremeRtoRunning()) {
        return false;
    }

    uint32 readWorkerState = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readWorkerState);
    if (readWorkerState == extreme_rto::WORKER_STATE_STOP || readWorkerState == extreme_rto::WORKER_STATE_EXIT) {
        return false;
    }

    return true;
}
/*
 * Route one decoded XLOG record to the active redo engine, or replay it
 * inline on the current (startup) thread when multi-threaded redo is off.
 */
void DispatchRedoRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime)
{
    if (IsExtremeRedo()) {
        extreme_rto::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime);
    } else if (IsParallelRedo()) {
        parallel_recovery::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime);
    } else {
        /* serial replay path: keep track of the highest term seen in the xlog */
        uint32 term = XLogRecGetTerm(record);

        if (term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) {
            g_instance.comm_cxt.localinfo_cxt.term_from_xlog = term;
        }
        parallel_recovery::ApplyRedoRecord(record, t_thrd.xlog_cxt.redo_oldversion_xlog);
        /* only transaction records advance the latest-xact timestamp */
        if (XLogRecGetRmid(record) == RM_XACT_ID)
            SetLatestXTime(recordXTime);
        SetXLogReplayRecPtr(record->ReadRecPtr, record->EndRecPtr);
        CheckRecoveryConsistency();
    }
}
/* Derive the thread name when running as a page-redo worker; no-op otherwise. */
void GetThreadNameIfMultiRedo(int argc, char *argv[], char **threadNamePtr)
{
    if (IsExtremeRedo()) {
        extreme_rto::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr);
    } else if (IsParallelRedo()) {
        parallel_recovery::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr);
    }
}

/* Map a redo thread id to its PGPROC via whichever engine owns the thread. */
PGPROC *MultiRedoThreadPidGetProc(ThreadId pid)
{
    if (IsExtremeRedo()) {
        return extreme_rto::StartupPidGetProc(pid);
    } else {
        return parallel_recovery::StartupPidGetProc(pid);
    }
}

/* Propagate a hot-standby state change to all redo workers. */
void MultiRedoUpdateStandbyState(HotStandbyState newState)
{
    if (IsExtremeRedo()) {
        extreme_rto::UpdateStandbyState(newState);
    } else if (IsParallelRedo()) {
        parallel_recovery::UpdateStandbyState(newState);
    }
}

/*
 * Current worker id.  Calling this with both engines disabled is a
 * programming error and raises ERROR.
 */
uint32 MultiRedoGetWorkerId()
{
    if (IsExtremeRedo()) {
        return extreme_rto::GetMyPageRedoWorkerIdWithLock();
    } else if (IsParallelRedo()) {
        return parallel_recovery::GetMyPageRedoWorkerOrignId();
    } else {
        ereport(ERROR, (errmsg("MultiRedoGetWorkerId parallel redo and extreme redo is close, should not be here!")));
    }
    return 0;
}
/*
 * Check that every page-redo worker slot has returned to INVALID; once they
 * all have, reset the bookkeeping counter.  Trivially true in serial mode.
 */
bool IsAllPageWorkerExit()
{
    if (get_real_recovery_parallelism() > 1) {
        for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) {
            uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState));
            if (state != PAGE_REDO_WORKER_INVALID) {
                return false;
            }
        }
        g_instance.comm_cxt.predo_cxt.totalNum = 0;
    }
    ereport(LOG,
            (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("page workers all exit or not open parallel redo")));

    return true;
}

/* Record this worker's slot index in the engine-specific worker object. */
void SetPageRedoWorkerIndex(int index)
{
    if (IsExtremeRedo()) {
        extreme_rto::g_redoWorker->index = index;
    } else if (IsParallelRedo()) {
        parallel_recovery::g_redoWorker->index = index;
    }
}
/*
 * Return the index of the current page-redo worker thread, or 0 when
 * neither parallel nor extreme redo is active.
 *
 * The "index" parameter is never read; it is retained only for source
 * compatibility with existing callers.
 */
int GetPageRedoWorkerIndex(int index)
{
    (void)index; /* unused; kept for interface compatibility */
    if (IsExtremeRedo()) {
        return extreme_rto::g_redoWorker->index;
    } else if (IsParallelRedo()) {
        return parallel_recovery::g_redoWorker->index;
    } else {
        return 0;
    }
}
/*
 * Classify a terminating thread: not a redo worker, clean exit, or abnormal
 * exit (its slot was still READY).  Clears the matching slot as a side effect.
 */
PageRedoExitStatus CheckExitPageWorkers(ThreadId pid)
{
    PageRedoExitStatus checkStatus = NOT_PAGE_REDO_THREAD;

    for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) {
        if (g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId == pid) {
            checkStatus = PAGE_REDO_THREAD_EXIT_NORMAL;
            uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState));
            ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG),
                          errmsg("page worker thread %lu exit, state %u", pid, state)));
            if (state == PAGE_REDO_WORKER_READY) {
                /* still READY: the worker died before completing shutdown */
                checkStatus = PAGE_REDO_THREAD_EXIT_ABNORMAL;
            }
            pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState),
                                PAGE_REDO_WORKER_INVALID);
            g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId = 0;
            break;
        }
    }
    return checkStatus;
}

/* Flush pending transaction records; only parallel recovery batches them. */
void ProcTxnWorkLoad(bool force)
{
    if (IsParallelRedo()) {
        parallel_recovery::ProcessTrxnRecords(force);
    }
}
/* Run from the worker thread. */
void SetMyPageRedoWorker(knl_thread_arg *arg)
{
    /* bind the thread-local worker pointer handed over via the thread payload */
    if (IsExtremeRedo()) {
        extreme_rto::g_redoWorker = (extreme_rto::PageRedoWorker *)arg->payload;
    } else if (IsParallelRedo()) {
        parallel_recovery::g_redoWorker = (parallel_recovery::PageRedoWorker *)arg->payload;
    }
}

/* Run from the worker thread. */
uint32 GetMyPageRedoWorkerId()
{
    if (IsExtremeRedo()) {
        return extreme_rto::g_redoWorker->id;
    } else if (IsParallelRedo()) {
        return parallel_recovery::g_redoWorker->id;
    } else {
        return 0;
    }
}

/* Worker main-loop entry; requires one of the multi-redo modes to be active. */
void MultiRedoMain()
{
    if (IsExtremeRedo()) {
        extreme_rto::ParallelRedoThreadMain();
    } else if (IsParallelRedo()) {
        parallel_recovery::PageRedoWorkerMain();
    } else {
        ereport(ERROR, (errmsg("MultiRedoMain parallel redo and extreme redo is close, should not be here!")));
    }
}

/* Restore the memory context that was current before dispatcher setup. */
void EndDispatcherContext()
{
    if (IsExtremeRedo()) {
        (void)MemoryContextSwitchTo(extreme_rto::g_dispatcher->oldCtx);

    } else if (IsParallelRedo()) {
        (void)MemoryContextSwitchTo(parallel_recovery::g_dispatcher->oldCtx);
    }
}

/* Make the shared parallel-redo memory context current. */
void SwitchToDispatcherContext()
{
    (void)MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx);
}
/* Release every redo item allocated by the active engine. */
void FreeAllocatedRedoItem()
{
    if (IsExtremeRedo()) {
        extreme_rto::FreeAllocatedRedoItem();

    } else if (IsParallelRedo()) {
        parallel_recovery::FreeAllocatedRedoItem();
    }
}

/* Number of redo workers in the active engine; 0 when running serially. */
uint32 GetRedoWorkerCount()
{
    if (IsExtremeRedo()) {
        return extreme_rto::GetAllWorkerCount();

    } else if (IsParallelRedo()) {
        return parallel_recovery::GetPageWorkerCount();
    }
    return 0;
}

/* Collect the per-worker invalid-page tables; NULL in serial mode. */
void **GetXLogInvalidPagesFromWorkers()
{
    if (IsExtremeRedo()) {
        return extreme_rto::GetXLogInvalidPagesFromWorkers();

    } else if (IsParallelRedo()) {
        return parallel_recovery::GetXLogInvalidPagesFromWorkers();
    }
    return NULL;
}

/* Ask all workers to finish (with exit code) and block until they do. */
void SendRecoveryEndMarkToWorkersAndWaitForFinish(int code)
{
    if (IsExtremeRedo()) {
        return extreme_rto::SendRecoveryEndMarkToWorkersAndWaitForFinish(code);

    } else if (IsParallelRedo()) {
        return parallel_recovery::SendRecoveryEndMarkToWorkersAndWaitForFinish(code);
    }
}

/* I/O wait statistics for an event, from whichever engine is active. */
RedoWaitInfo GetRedoIoEvent(int32 event_id)
{
    if (IsExtremeRedo()) {
        return extreme_rto::redo_get_io_event(event_id);
    } else {
        return parallel_recovery::redo_get_io_event(event_id);
    }
}

/* Per-worker statistics snapshot.  ("Wroker" typo kept: it is the public name.) */
void GetRedoWrokerStatistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen)
{
    if (IsExtremeRedo()) {
        return extreme_rto::redo_get_wroker_statistic(realNum, worker, workerLen);
    } else {
        return parallel_recovery::redo_get_wroker_statistic(realNum, worker, workerLen);
    }
}
| 4,568 |
852 | #include "FWCore/PluginManager/interface/PluginManager.h"
#include "FWCore/PluginManager/interface/standard.h"
#include "FWCore/ServiceRegistry/interface/ServiceRegistry.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
//local includes
#include "CondCore/Utilities/interface/Utilities.h"
#include <fstream>
#include <iostream>
#include "CondCore/CondDB/interface/Auth.h"
#include <termios.h>
#include <unistd.h>
#include <cstdio>
namespace cond {
int getch() {
int ch;
struct termios t_old, t_new;
tcgetattr(STDIN_FILENO, &t_old);
t_new = t_old;
t_new.c_lflag &= ~(ICANON | ECHO);
tcsetattr(STDIN_FILENO, TCSANOW, &t_new);
ch = getchar();
tcsetattr(STDIN_FILENO, TCSANOW, &t_old);
return ch;
}
std::string getpass(const std::string& prompt, bool show_asterisk) {
const char BACKSPACE = 127;
const char RETURN = 10;
std::string password;
unsigned char ch = 0;
std::cout << prompt;
while ((ch = getch()) != RETURN) {
if (ch == BACKSPACE) {
if (password.length() != 0) {
if (show_asterisk)
std::cout << "\b \b";
password.resize(password.length() - 1);
}
} else {
password += ch;
if (show_asterisk)
std::cout << '*';
}
}
std::cout << std::endl;
return password;
}
std::string getpassForUser(const std::string& userName) {
std::string prompt("Enter password for user ");
prompt += userName;
prompt += ": ";
return getpass(prompt, true);
}
} // namespace cond
// Exception type for command-line utility errors (reported in run()).
cond::UtilitiesError::UtilitiesError(const std::string& message) : Exception(message) {}
cond::UtilitiesError::~UtilitiesError() throw() {}

// Build the option parser.  When positionalParameter is non-empty it is
// registered both as a positional argument and as a named string option.
cond::Utilities::Utilities(const std::string& commandName, std::string positionalParameter)
    : m_name(commandName),
      m_options(std::string("Usage: ") + m_name + std::string(" [options] ") + positionalParameter +
                std::string(" \n")),
      m_positionalOptions(),
      m_values() {
  m_options.add_options()("debug", "switch on debug mode")("help,h", "help message");

  if (!positionalParameter.empty()) {
    m_positionalOptions.add(positionalParameter.c_str(), -1);
    addOption<std::string>(positionalParameter, "", positionalParameter);
  }
}

cond::Utilities::~Utilities() {}

// Default command body; concrete tools presumably override this -- see the
// class declaration for whether it is virtual.
int cond::Utilities::execute() { return 0; }
// Parse the command line, set up a minimal edm service environment
// (SiteLocalConfigService) and invoke execute().  Returns the process exit
// code: 0 on success or --help, 1 on any caught exception.
int cond::Utilities::run(int argc, char** argv) {
  edmplugin::PluginManager::Config config;
  edmplugin::PluginManager::configure(edmplugin::standard::config());

  std::vector<edm::ParameterSet> psets;
  edm::ParameterSet pSet;
  pSet.addParameter("@service_type", std::string("SiteLocalConfigService"));
  psets.push_back(pSet);
  edm::ServiceToken servToken(edm::ServiceRegistry::createSet(psets));
  m_currentToken = &servToken;
  // keep the service set active for the lifetime of parseCommand/execute
  edm::ServiceRegistry::Operate operate(servToken);

  int ret = 0;
  try {
    parseCommand(argc, argv);
    if (m_values.count("help")) {
      std::cout << m_options << std::endl;
      ;
      return 0;
    }
    ret = execute();
  } catch (cond::Exception& err) {
    std::cout << err.what() << std::endl;
    ret = 1;
  } catch (const std::exception& exc) {
    std::cout << exc.what() << std::endl;
    ret = 1;
  }
  m_currentToken = nullptr;
  return ret;
}
// Register a named string option used as a database connection string.
void cond::Utilities::addConnectOption(const std::string& connectionOptionName,
                                       const std::string& shortName,
                                       const std::string& helpEntry) {
  addOption<std::string>(connectionOptionName, shortName, helpEntry);
}

// Register the standard authentication options (key path, user, password).
void cond::Utilities::addAuthenticationOptions() {
  addOption<std::string>("authPath", "P", "path to the authentication key");
  addOption<std::string>("user", "u", "user name");
  addOption<std::string>("pass", "p", "password");
}

// Register the optional configuration-file option consumed by parseCommand().
void cond::Utilities::addConfigFileOption() {
  addOption<std::string>("configFile", "f", "configuration file(optional)");
}
void cond::Utilities::parseCommand(int argc, char** argv) {
boost::program_options::store(
boost::program_options::command_line_parser(argc, argv).options(m_options).positional(m_positionalOptions).run(),
m_values);
if (m_options.find_nothrow("configFile", false)) {
std::string configFileName = getValueIfExists("configFile");
if (!configFileName.empty()) {
std::fstream configFile;
configFile.open(configFileName.c_str(), std::fstream::in);
boost::program_options::store(boost::program_options::parse_config_file(configFile, m_options), m_values);
configFile.close();
}
}
boost::program_options::notify(m_values);
}
// Convenience accessors for common option values; each throws (via the
// templated getOptionValue) if the option was never registered/supplied.
std::string cond::Utilities::getAuthenticationPathValue() { return getOptionValue<std::string>("authPath"); }

std::string cond::Utilities::getUserValue() { return getOptionValue<std::string>("user"); }

std::string cond::Utilities::getPasswordValue() { return getOptionValue<std::string>("pass"); }

std::string cond::Utilities::getConnectValue() { return getOptionValue<std::string>("connect"); }

std::string cond::Utilities::getLogDBValue() { return getOptionValue<std::string>("logDB"); }

std::string cond::Utilities::getDictionaryValue() { return getOptionValue<std::string>("dictionary"); }

std::string cond::Utilities::getConfigFileValue() { return getOptionValue<std::string>("configFile"); }

// True when the option is registered AND present on the command line;
// throws for options this command never declared.
bool cond::Utilities::hasOptionValue(const std::string& fullName) {
  const void* found = m_options.find_nothrow(fullName, false);
  if (!found) {
    std::stringstream message;
    message << "Utilities::hasOptionValue: option \"" << fullName << "\" is not known by the command.";
    sendException(message.str());
  }
  return m_values.count(fullName);
}

// Whether --debug was given on the command line.
bool cond::Utilities::hasDebug() { return m_values.count("debug"); }

void cond::Utilities::initializePluginManager() {
  // dummy, to avoid to adapt non-CondCore clients
}
// Return the parsed value of the option, or an empty string when absent.
std::string cond::Utilities::getValueIfExists(const std::string& fullName) {
  if (m_values.count(fullName)) {
    return m_values[fullName].as<std::string>();
  }
  return std::string();
}
// Raise a utility-level error (caught in run() and reported to the user).
void cond::Utilities::sendError(const std::string& message) { throw cond::UtilitiesError(message); }

// Raise a generic framework exception.
void cond::Utilities::sendException(const std::string& message) { throw cond::Exception(message); }
| 2,301 |
348 | {"nom":"Champigneulles","circ":"6ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":4880,"abs":2721,"votants":2159,"blancs":36,"nuls":15,"exp":2108,"res":[{"nuance":"FN","nom":"M. <NAME>","voix":408},{"nuance":"DVG","nom":"Mme <NAME>","voix":381},{"nuance":"FI","nom":"Mme <NAME>","voix":317},{"nuance":"DIV","nom":"Mme <NAME>","voix":307},{"nuance":"LR","nom":"Mme <NAME>","voix":249},{"nuance":"SOC","nom":"M. <NAME>","voix":176},{"nuance":"DLF","nom":"M. <NAME>","voix":60},{"nuance":"COM","nom":"<NAME>","voix":53},{"nuance":"ECO","nom":"M. <NAME>","voix":53},{"nuance":"ECO","nom":"Mme <NAME>","voix":33},{"nuance":"EXG","nom":"M. <NAME>","voix":23},{"nuance":"DIV","nom":"M. <NAME>","voix":18},{"nuance":"ECO","nom":"M. <NAME>","voix":16},{"nuance":"ECO","nom":"<NAME>","voix":9},{"nuance":"EXG","nom":"M. <NAME>","voix":5},{"nuance":"DVD","nom":"Mme <NAME>","voix":0}]} | 362 |
335 | <gh_stars>100-1000
{
"word": "Debut",
"definitions": [
"Perform in public for the first time.",
"(of a new product) be launched.",
"(of a company) launch (a new product)"
],
"parts-of-speech": "Verb"
} | 106 |
625 | /* Copyright 2013-2021 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CODEGEN_MODULE_H
#define CODEGEN_MODULE_H
#include <string>
#include <vector>
#include <llvm/IR/GlobalValue.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/LLVMContext.h>
#include "AST/Module.h"
#include "AST/Type.h"
#include "AST/Expr.h"
#include "Utils/StringList.h"
namespace llvm {
class Module;
class Type;
class Function;
}
namespace C2 {

class Decl;
class VarDecl;
class Type;
class StringLiteral;

// Optimization levels mirroring the usual -O compiler flags.
enum OptimizationLevel {
    O0,
    O1,
    O2,
    O3,
    Os,
    Oz,
};

// generates LLVM Module from (multiple) Module(s)
class CodeGenModule {
public:
    // name_: object file name without extension; single: merge all C2
    // modules into one LLVM module; mods_: the C2 modules to lower.
    CodeGenModule(const std::string& name_,
                  const std::string& dir_,
                  bool single,
                  const ModuleList& mods_);
    ~CodeGenModule();

    // IR generation / verification / output pipeline.
    void generate();
    bool verify();
    void write();
    bool optimize(OptimizationLevel opt) const;
    bool compile() const;
    static bool link(const std::string& outputDir, const std::string& binary, const StringList& objects);
    void remove_tmp() const;
    void dump();

    // C2 -> LLVM type lowering helpers.
    llvm::Type* ConvertType(BuiltinType::Kind K);
    llvm::Type* ConvertType(QualType Q);
    llvm::PointerType* getVoidPtrType();

    llvm::Function* createExternal(const C2::Module* P, const std::string& name_);
    llvm::GlobalValue::LinkageTypes getLinkage(const Decl* D);

    const std::string& getName() const { return name; }
    llvm::Module* getModule() const { return module; }
    llvm::LLVMContext& getContext() { return context; }
    unsigned getAlignment(QualType Q) const;
private:
    llvm::Type* ConvertStructType(const StructType* S);
    void EmitGlobalVariable(VarDecl* V);
    // Constant-expression emission for global/static initializers.
    llvm::Constant* EvaluateExprAsConstant(const Expr *E);
    llvm::Constant* GetConstantArrayFromStringLiteral(const StringLiteral* E);
    llvm::Constant* EmitDefaultInit(QualType Q);
    llvm::Constant* EmitStructInit(const StructType *AT, Expr** Vals, unsigned numValues);
    llvm::Constant* EmitArrayInit(const ArrayType *AT, Expr** Vals, unsigned numValues);
    llvm::Constant* EmitConstantDecl(const Decl* D);

    const std::string& name; // of object file without .o
    const std::string& outputDir;
    bool single_module; // multiple modules in single module
    const ModuleList mods;

    // TODO only keep context when needed (so at max 1 for each thread)
    llvm::LLVMContext context;
    llvm::Module* module;
    llvm::IRBuilder<> builder;

    CodeGenModule(const CodeGenModule&);
    CodeGenModule& operator= (const CodeGenModule&);
};

}
#endif
| 1,111 |
591 | {
"name": "table2excel",
"title": "jQuery table2excel",
"description": "jQuery plugin to export an .xls file in browser from an HTML table",
"keywords": [
"jquery",
"table2excel",
"plugins",
"Table",
"Excel"
],
"version": "1.1.1",
"author": {
"name": "rainabba",
"email": "<EMAIL>",
"url": "https://github.com/rainabba"
},
"maintainers": [
{
"name": "<NAME>",
"email": "<EMAIL>",
"url": "https://github.com/rainabba"
}
],
"licenses": [
{
"type": "MIT",
"url": "http://rainabba.mit-license.org/"
}
],
"bugs": "https://github.com/rainabba/jquery-table2excel/issues",
"homepage": "https://github.com/rainabba/jquery-table2excel",
"docs": "https://github.com/rainabba/jquery-table2excel#readme",
"download": "https://github.com/rainabba/jquery-table2excel/archive/master.zip",
"dependencies": {
"jquery": ">=1.4"
}
}
| 388 |
311 | /**
* Copyright 2019 The JoyQueue Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joyqueue.repository.mybatis.interceptor;
import org.joyqueue.model.PageResult;
import org.joyqueue.model.QPageQuery;
import org.apache.ibatis.executor.resultset.ResultSetHandler;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.plugin.Intercepts;
import org.apache.ibatis.plugin.Invocation;
import org.apache.ibatis.plugin.Signature;
import org.apache.ibatis.reflection.DefaultReflectorFactory;
import org.apache.ibatis.reflection.MetaObject;
import org.apache.ibatis.reflection.factory.DefaultObjectFactory;
import org.apache.ibatis.reflection.wrapper.DefaultObjectWrapperFactory;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
* 分页查询结果拦截器
*
* @author hujunliang
* @version V1.0
*/
@Intercepts({@Signature(type = ResultSetHandler.class, method = "handleResultSets", args = {Statement.class})})
public class PageResultInterceptor extends PageInterceptor {

    /**
     * Intercepts result-set handling: for statements recognized as paging
     * queries, wraps the raw row list into a single-element list containing a
     * {@link PageResult} that carries both the rows and the pagination info.
     */
    @Override
    public Object intercept(Invocation invocation) throws Throwable {
        // Inspect the intercepted ResultSetHandler reflectively.
        final ResultSetHandler handler = (ResultSetHandler) invocation.getTarget();
        final MetaObject handlerMeta = MetaObject.forObject(handler,
                new DefaultObjectFactory(), new DefaultObjectWrapperFactory(), new DefaultReflectorFactory());

        final MappedStatement mappedStatement = (MappedStatement) handlerMeta.getValue("mappedStatement");
        final Object parameter = handlerMeta.getValue("boundSql.parameterObject");

        // Non-paging statements pass through untouched.
        if (!isPageSql(mappedStatement.getId(), parameter)) {
            return invocation.proceed();
        }

        // Paging query: attach the pagination metadata to the raw result rows.
        final QPageQuery pageQuery = (QPageQuery) parameter;
        final PageResult page = new PageResult();
        page.setPagination(pageQuery.getPagination());
        page.setResult((List) invocation.proceed());

        final List<PageResult> wrapped = new ArrayList<PageResult>(1);
        wrapped.add(page);
        return wrapped;
    }
}
| 903 |
453 | #include <_ansi.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "trap.h"
/*
 * Newlib syscall stub: close the file descriptor `file` by issuing the
 * SYS_close trap to the host/monitor.  The two trailing zeros are the
 * unused second and third trap arguments.  Returns the trap's result
 * (0 on success, -1 on error, following the close(2) convention).
 */
int
_close (int file)
{
  return TRAP0 (SYS_close, file, 0, 0);
}
| 70 |
// Three-level unchecked-exception hierarchy used to exercise catch-clause
// matching below: C extends B extends A extends RuntimeException.
class A extends RuntimeException {}
class B extends A {}
class C extends B {}
public class test {
    // The try bodies only construct exception objects and never throw them,
    // so none of the catch clauses (ordered most- to least-derived) may run;
    // each handler asserts false to fail loudly if it is ever entered.
    public static void main (String args[]) {
        try {
            try {
                C c = new C();
                A a = new A();
            }
            catch(C exc) {
                assert false;
            }
            catch(B exc) {
                assert false;
            }
        }
        catch(A exc) {
            assert false;
        }
    }
}
| 156 |
7,137 | package io.onedev.server.util;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import com.google.common.collect.Lists;
@SuppressWarnings("serial")
public class Path implements Serializable {

    // Ordered path segments. Owned by this instance except when injected
    // directly through Path(List), which deliberately aliases the argument.
    private List<PathNode> nodes = new ArrayList<>();

    public Path(PathNode...nodes) {
        this.nodes = Lists.newArrayList(nodes);
    }

    public Path(List<PathNode> nodes) {
        this.nodes = nodes;
    }

    public Path(Path path) {
        this.nodes = new ArrayList<>(path.getNodes());
    }

    public Path(Path path, PathNode node) {
        this.nodes = new ArrayList<>(path.getNodes());
        this.nodes.add(node);
    }

    public Path(Path path, String namedPathNode) {
        this(path, new PathNode.Named(namedPathNode));
    }

    public Path(Path path, int indexedPathNode) {
        this(path, new PathNode.Indexed(indexedPathNode));
    }

    /** Converts a bean-validation path, keeping each node's index before its name. */
    public Path(javax.validation.Path path) {
        for (javax.validation.Path.Node node: path) {
            if (node.getIndex() != null)
                nodes.add(new PathNode.Indexed(node.getIndex()));
            if (node.getName() != null)
                nodes.add(new PathNode.Named(node.getName()));
        }
    }

    public List<PathNode> getNodes() {
        return nodes;
    }

    /** Removes and returns the first node, or null when the path is exhausted. */
    @Nullable
    public PathNode takeNode() {
        return nodes.isEmpty() ? null : nodes.remove(0);
    }

    @Override
    public String toString() {
        return nodes.stream().map(PathNode::toString).collect(Collectors.joining("->"));
    }

    /** Renders {@code propertyNode} followed by {@code pathInProperty} as one path string. */
    public static String describe(PathNode propertyNode, Path pathInProperty) {
        List<PathNode> combined = Lists.newArrayList(propertyNode);
        combined.addAll(pathInProperty.getNodes());
        return new Path(combined).toString();
    }

}
| 649 |
419 | <gh_stars>100-1000
/*
* Program to encode text using RSA public key.
*
* *** For Demonstration use only *****
*
*/
#include <stdio.h>
#include "miracl.h"
#include <stdlib.h>
#include <string.h>
/* Truncate `name` at the first '.' so any filename extension is removed;
 * names without a dot are left untouched. */
void strip(char *name)
{
    char *dot = strchr(name, '.');
    if (dot != NULL)
        *dot = '\0';
}
miracl *mip;

/*
 * Read one line from stdin into buf (capacity n), stripping the trailing
 * newline.  Bounded replacement for gets(), which was removed from the C
 * standard (C11) because it cannot limit how many bytes it writes: the
 * 13-byte filename buffers below were trivially overflowable.  Overlong
 * input is truncated; EOF yields an empty string.
 */
static void read_line(char *buf, int n)
{
    if (fgets(buf, n, stdin) == NULL)
        buf[0] = '\0';
    else
        buf[strcspn(buf, "\n")] = '\0';
}

int main()
{ /* encode using public key */
    big e,m,y,ke,mn,mx;
    FILE *ifile;
    FILE *ofile;
    static char line[500];
    static char buff[256];
    char ifname[13],ofname[13];
    BOOL fli,last;
    int i,ipt,klen;
    mip=mirsys(100,0);
    e=mirvar(0);
    m=mirvar(0);
    y=mirvar(0);
    ke=mirvar(0);
    mn=mirvar(0);
    mx=mirvar(0);
    if ((ifile=fopen("public.key","rt"))==NULL)
    {
        printf("Unable to open file public.key\n");
        return 0;
    }
    mip->IOBASE=16;
    cinnum(ke,ifile);
    fclose(ifile);
    /* mn = cube root of the modulus; mx = mn^3 - mn^2 bounds the padded
     * plaintext so that m^3 wraps the modulus (public exponent is 3) */
    nroot(ke,3,mn);
    multiply(mn,mn,m);
    multiply(mn,m,mx);
    subtract(mx,m,mx);
    klen=0;
    copy(mx,m);
    while (size(m)>0)
    { /* find key length in characters (base-128 digits of mx) */
        klen++;
        subdiv(m,128,m);
    }
    klen--;
    printf("file to be encoded = ");
    read_line(ifname,sizeof(ifname));    /* was gets(): overflow fix */
    fli=FALSE;
    if (strlen(ifname)>0) fli=TRUE;
    if (fli)
    { /* set up input file */
        strcpy(ofname,ifname);
        strip(ofname);
        strcat(ofname,".rsa");
        if ((ifile=fopen(ifname,"rt"))==NULL)
        {
            printf("Unable to open file %s\n",ifname);
            return 0;
        }
        printf("encoding message\n");
    }
    else
    { /* accept input from keyboard */
        ifile=stdin;
        do
        {
            printf("output filename = ");
            read_line(ofname,sizeof(ofname));    /* was gets(): overflow fix */
        } while (strlen(ofname)==0);
        strip(ofname);
        strcat(ofname,".rsa");
        printf("input message - finish with cntrl z\n");
    }
    ofile=fopen(ofname,"wt");
    ipt=0;
    last=FALSE;
    while (!last)
    { /* encode line by line */
        if (fgets(&line[ipt],132,ifile)==NULL) last=TRUE;
        /* NOTE(review): legacy char-vs-EOF comparison kept as-is; it relies
         * on the buffer contents when fgets reads nothing -- verify intent */
        if (line[ipt]==EOF) last=TRUE;
        ipt=strlen(line);
        if (ipt<klen && !last) continue;
        while (ipt>=klen)
        { /* chop up into klen-sized chunks and encode */
            for (i=0;i<klen;i++)
                buff[i]=line[i];
            buff[klen]='\0';
            for (i=klen;i<=ipt;i++)
                line[i-klen]=line[i];
            ipt-=klen;
            mip->IOBASE=128;
            cinstr(m,buff);
            power(m,3,ke,e);
            mip->IOBASE=16;
            cotnum(e,ofile);
        }
        if (last && ipt>0)
        { /* now deal with left overs */
            mip->IOBASE=128;
            cinstr(m,line);
            if (mr_compare(m,mn)<0)
            { /* pad out with random number if necessary */
                bigrand(mn,y);
                multiply(mn,mn,e);
                subtract(e,y,e);
                multiply(mn,e,y);
                add(m,y,m);
            }
            power(m,3,ke,e);
            mip->IOBASE=16;
            cotnum(e,ofile);
        }
    }
    fclose(ofile);
    if (fli) fclose(ifile);
    return 0;
}
| 1,809 |
678 | /**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/MusicLibrary.framework/MusicLibrary
*/
#import <MusicLibrary/NSCopying.h>
#import <MusicLibrary/XXUnknownSuperclass.h>
#import <MusicLibrary/MusicLibrary-Structs.h>
@class NSData, NSMutableDictionary, NSDictionary, MLMovieProperties, MLContentRating, NSArray, NSString;
// Class-dump-generated declaration: a pending track import for the media
// library, bundling the asset path, artwork, chapters and arbitrary entity
// properties to be written. Offsets/addresses in trailing comments come from
// the original binary.
@interface MLTrackImport : XXUnknownSuperclass <NSCopying> {
@private
	NSData *_artworkData;	// 4 = 0x4
	NSString *_assetFilePath;	// 8 = 0x8
	NSArray *_chapters;	// 12 = 0xc
	NSMutableDictionary *_properties;	// 16 = 0x10
	BOOL _shouldAddToPurchasedPlaylist;	// 20 = 0x14
	BOOL _shouldDeleteExistingArtwork;	// 21 = 0x15
	BOOL _assignArtworkCacheIDFromAlbum;	// 22 = 0x16
}
@property(readonly, assign, nonatomic) NSDictionary *entityProperties;	// G=0x3899d;
@property(assign, nonatomic) BOOL assignArtworkCacheIDFromAlbum;	// G=0x390f5; S=0x39105; @synthesize=_assignArtworkCacheIDFromAlbum
@property(assign, nonatomic) BOOL shouldAddToPurchasedPlaylist;	// G=0x39149; S=0x39159; @synthesize=_shouldAddToPurchasedPlaylist
@property(copy, nonatomic) MLMovieProperties *movieProperties;	// G=0x38a25; S=0x38bbd;
@property(assign, nonatomic) unsigned long mediaType;	// G=0x389d5; S=0x38b35;
@property(copy, nonatomic) MLContentRating *contentRating;	// G=0x38921; S=0x38ae1;
@property(copy, nonatomic) NSArray *chapters;	// G=0x39115; S=0x39125; @synthesize=_chapters
@property(copy, nonatomic) NSString *assetFilePath;	// G=0x390c1; S=0x390d1; @synthesize=_assetFilePath
@property(assign, nonatomic) BOOL shouldDeleteExistingArtwork;	// G=0x39169; S=0x39179; @synthesize=_shouldDeleteExistingArtwork
@property(copy, nonatomic) NSData *artworkData;	// G=0x3908d; S=0x3909d; @synthesize=_artworkData
+ (void)initialize;	// 0x386d1
// declared property setter: - (void)setShouldDeleteExistingArtwork:(BOOL)deleteExistingArtwork;	// 0x39179
// declared property getter: - (BOOL)shouldDeleteExistingArtwork;	// 0x39169
// declared property setter: - (void)setShouldAddToPurchasedPlaylist:(BOOL)addToPurchasedPlaylist;	// 0x39159
// declared property getter: - (BOOL)shouldAddToPurchasedPlaylist;	// 0x39149
// declared property setter: - (void)setChapters:(id)chapters;	// 0x39125
// declared property getter: - (id)chapters;	// 0x39115
// declared property setter: - (void)setAssignArtworkCacheIDFromAlbum:(BOOL)album;	// 0x39105
// declared property getter: - (BOOL)assignArtworkCacheIDFromAlbum;	// 0x390f5
// declared property setter: - (void)setAssetFilePath:(id)path;	// 0x390d1
// declared property getter: - (id)assetFilePath;	// 0x390c1
// declared property setter: - (void)setArtworkData:(id)data;	// 0x3909d
// declared property getter: - (id)artworkData;	// 0x3908d
- (void)_addSortStrings;	// 0x38fe9
- (void)normalizeEntityProperties;	// 0x38d9d
- (id)copyEntityProperties;	// 0x38d7d
- (id)valueForEntityProperty:(id)entityProperty;	// 0x38d35
- (void)setValue:(id)value forEntityProperty:(id)entityProperty;	// 0x38cad
// declared property setter: - (void)setMovieProperties:(id)properties;	// 0x38bbd
// declared property setter: - (void)setMediaType:(unsigned long)type;	// 0x38b35
// declared property setter: - (void)setContentRating:(id)rating;	// 0x38ae1
// declared property getter: - (id)movieProperties;	// 0x38a25
// declared property getter: - (unsigned long)mediaType;	// 0x389d5
// declared property getter: - (id)entityProperties;	// 0x3899d
// declared property getter: - (id)contentRating;	// 0x38921
- (id)copyWithZone:(NSZone *)zone;	// 0x38815
- (void)dealloc;	// 0x38649
- (id)init;	// 0x38609
@end
| 1,358 |
319 | import torch
from torch import nn
from torch_scatter import scatter
from typing import Dict, List, Optional, Tuple, Union
from ptgnn.neuralmodels.gnn.messagepassing.abstractmessagepassing import (
AbstractMessageAggregation,
AbstractMessagePassingLayer,
)
from ptgnn.neuralmodels.mlp import MLP
class PnaMessageAggregation(AbstractMessageAggregation):
    """
    Principal Neighbourhood Aggregation for Graph Nets
    https://arxiv.org/abs/2004.05718

    Combines five aggregators (sum, mean, max, min, std) with three degree
    scalers (identity, amplification, attenuation), so the output width is
    ``message_input_size * 5 * 3``.
    """

    def __init__(
        self,
        delta: float = 1,
    ):
        """:param delta: normalisation constant of the log-degree scalers
        (Eq 5 of the paper)."""
        super().__init__()
        self._delta = delta  # See Eq 5 of paper

    def forward(self, messages: torch.Tensor, message_targets: torch.Tensor, num_nodes):
        """Aggregate per-message features into per-node states.

        :param messages: ``[num_messages, msg_dim]`` message features.
        :param message_targets: ``[num_messages]`` target-node index of each message.
        :param num_nodes: number of nodes in the (batched) graph.
        :return: ``[num_nodes, msg_dim * 15]`` concatenated aggregations.
        """
        # In-degree of each node = number of messages it receives.
        degree = scatter(
            torch.ones_like(message_targets),
            index=message_targets,
            dim_size=num_nodes,
            reduce="sum",
        )
        # Aggregate in float32, restoring the original dtype at the end --
        # presumably to keep the scatter reductions stable under half
        # precision; TODO confirm.
        msg_dtype = messages.dtype
        messages = messages.to(torch.float32)

        sum_agg = scatter(messages, index=message_targets, dim=0, dim_size=num_nodes, reduce="sum")
        # 1e-5 guards the division for nodes that receive no messages.
        mean_agg = sum_agg / (degree.unsqueeze(-1) + 1e-5)
        max_agg = scatter(messages, index=message_targets, dim=0, dim_size=num_nodes, reduce="max")
        min_agg = scatter(messages, index=message_targets, dim=0, dim_size=num_nodes, reduce="min")

        # Per-message squared deviation proxy (x^2 - mean^2): relu clamps small
        # negatives from rounding, 1e-10 keeps sqrt differentiable at zero.
        std_components = torch.relu(messages.pow(2) - mean_agg[message_targets].pow(2)) + 1e-10
        std = torch.sqrt(
            scatter(std_components, index=message_targets, dim=0, dim_size=num_nodes, reduce="sum")
        )

        # Five aggregators side by side, cast back to the input dtype.
        all_aggregations = torch.cat([sum_agg, mean_agg, max_agg, min_agg, std], dim=-1).to(
            msg_dtype
        )

        # Log-degree scalers of Eq 5: amplification (p1) and attenuation (m1);
        # 1e-3 avoids dividing by zero for degree-0 nodes (log(1) == 0).
        scaler_p1 = torch.log(degree.float() + 1).unsqueeze(-1) / self._delta
        scaler_m1 = 1 / (scaler_p1 + 1e-3)
        return torch.cat(
            [all_aggregations, all_aggregations * scaler_p1, all_aggregations * scaler_m1], dim=-1
        )

    def output_state_size(self, message_input_size: int) -> int:
        # 5 aggregators x 3 scalers per input feature.
        return message_input_size * 5 * 3
| 908 |
351 | package com.promegu.xlog.base;
import java.lang.reflect.Constructor;
import java.lang.reflect.Member;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.List;
/**
 * Serializable description of a method or constructor to be logged, matched
 * against reflective {@link Member}s by name, declaring class and parameter
 * types. Created by guyacong on 2015/3/9.
 */
public class MethodToLog {
    private int type;                       // XLogUtils.TYPE_METHOD or TYPE_CONSTRUCTOR
    private String className;               // declaring class, nested classes use '.' separators
    private String methodName;              // "<init>" for constructors
    private List<String> parameterClasses;  // fully qualified parameter type names
    private List<String> parameterNames;

    public MethodToLog(int type, String className, String methodName, List<String> parameterClasses,
            List<String> parameterNames) {
        this.type = type;
        this.className = className;
        this.methodName = methodName;
        this.parameterClasses = parameterClasses;
        this.parameterNames = parameterNames;
    }

    public int getType() {
        return type;
    }

    public String getClassName() {
        return className;
    }

    public String getName() {
        return methodName;
    }

    public List<String> getParameterClasses() {
        return parameterClasses;
    }

    public List<String> getParameterNames() {
        return parameterNames;
    }

    public boolean isMethod() {
        return type == XLogUtils.TYPE_METHOD;
    }

    public boolean isConstructor() {
        return type == XLogUtils.TYPE_CONSTRUCTOR;
    }

    /** Dispatches to the method or constructor matcher depending on the member kind. */
    public boolean matchMethodOrConstructor(Member member) {
        if (member instanceof Method) {
            return matchMethod((Method) member);
        } else if (member instanceof Constructor) {
            return matchMethod((Constructor) member);
        }
        return false;
    }

    private boolean matchMethod(Method method) {
        if (method == null) {
            return false;
        }
        String otherClassName;
        if (method.getDeclaringClass().getEnclosingClass() != null) {
            // nested class: JVM binary names use '$', this model uses '.'
            otherClassName = method.getDeclaringClass().getName().replaceAll("\\$", ".");
        } else {
            otherClassName = method.getDeclaringClass().getName();
        }
        //1. method name
        if (methodName == null || !methodName.equals(method.getName())) {
            return false;
        }
        //2. class name
        if (className == null || !className.equals(otherClassName)) {
            return false;
        }
        //3. parameter count
        if (parameterClasses == null) {
            return false;
        }
        if (parameterClasses.size() != method.getParameterTypes().length) {
            return false;
        }
        //4. parameter types
        for (int i = 0; i < parameterClasses.size(); i++) {
            String str = parameterClasses.get(i);
            if (str == null || !str.equals(method.getParameterTypes()[i].getName())) {
                return false;
            }
        }
        return true;
    }

    private boolean matchMethod(Constructor constructor) {
        if (constructor == null) {
            return false;
        }
        String otherClassName;
        int paramOffset = 0;
        if (constructor.getDeclaringClass().getEnclosingClass() != null) {
            // nested class: JVM binary names use '$', this model uses '.'
            otherClassName = constructor.getDeclaringClass().getName().replaceAll("\\$", ".");
            if (Modifier.isStatic(constructor.getDeclaringClass().getModifiers())) {
                //static nested class
                paramOffset = 0;
            } else {
                // inner class: skip the implicit enclosing-instance parameter
                paramOffset = 1;
            }
        } else {
            otherClassName = constructor.getDeclaringClass().getName();
        }
        //1. method name
        if (!"<init>".equals(methodName)
                || constructor.getName() == null) {
            return false;
        }
        //2. class name
        if (className == null || !className.equals(otherClassName)) {
            return false;
        }
        //3. parameter count
        if (parameterClasses == null) {
            return false;
        }
        if (parameterClasses.size() != (constructor.getParameterTypes().length - paramOffset)) {
            return false;
        }
        //4. parameter types
        for (int i = 0; i < parameterClasses.size(); i++) {
            String str = parameterClasses.get(i);
            if (str == null || !str
                    .equals(constructor.getParameterTypes()[i + paramOffset].getName())) {
                return false;
            }
        }
        return true;
    }

    /**
     * Renders {@code values} as a JSON array literal, e.g. {@code ["a","b"]};
     * a null list yields {@code []}. Shared by {@link #toString()} for both
     * parameter lists (previously duplicated inline with a local variable
     * that shadowed the {@code className} field).
     */
    private static String toJsonArray(List<String> values) {
        StringBuilder sb = new StringBuilder("[");
        if (values != null) {
            for (int i = 0; i < values.size(); i++) {
                sb.append('"').append(values.get(i)).append('"');
                if (i < values.size() - 1) {
                    sb.append(',');
                }
            }
        }
        return sb.append(']').toString();
    }

    /** JSON-like rendering used for persisting/diffing the method description. */
    @Override
    public String toString() {
        return "{"
                + "\"type\":" + type
                + ", \"className\":" + "\"" + className + "\""
                + ", \"methodName\":" + "\"" + methodName + "\""
                + ", \"parameterClasses\":" + toJsonArray(parameterClasses)
                + ", \"parameterNames\":" + toJsonArray(parameterNames)
                + '}';
    }
}
| 2,710 |
308 | #include "init.hpp"
#include <vpp/sharedBuffer.hpp>
#include <ostream>
// Pretty-print a vpp allocation as "(offset, size)" so failing EXPECTs are readable.
template<typename T>
std::ostream& operator<<(std::ostream& os, const vpp::BasicAllocation<T>& alloc)
{
	os << "(" << alloc.offset << ", " << alloc.size << ")";
	return os;
}

// Basic alloc/free bookkeeping of SharedBuffer: first-fit placement, reuse of
// freed ranges, zero-sized result on exhaustion, and SubBuffer ownership/move.
TEST(sharedBuf) {
	auto& dev = *globals.device;
	vpp::SharedBuffer buf(dev.devMemAllocator(),
		{{}, 1024u, vk::BufferUsageBits::uniformBuffer});
	auto alloc1 = buf.alloc(1000u);
	EXPECT(alloc1.offset, 0u);
	EXPECT(alloc1.size, 1000u);

	auto alloc2 = buf.alloc(3u);
	EXPECT(alloc2.size, 3u);
	auto alloc3 = buf.alloc(7u);
	EXPECT(alloc3.size, 7u);
	buf.free(alloc1);
	auto alloc4 = buf.alloc(231u);
	EXPECT(alloc4.size, 231u);
	buf.free(alloc2);
	buf.free(alloc4);
	// larger than the whole buffer: must fail with a zero-sized allocation
	auto alloc5 = buf.alloc(2100u);
	EXPECT(alloc5.size, 0u);
	buf.free(alloc3);

	vpp::SubBuffer bufRange(buf, buf.alloc(100u));
	EXPECT(&bufRange.buffer(), &buf);
	EXPECT(bufRange.size(), 100u);
	EXPECT(bufRange.offset(), 0u);

	// moved-to SubBuffer keeps referring to the same underlying range
	auto bufRange2 = std::move(bufRange);
	EXPECT(&bufRange2.buffer(), &buf);
	EXPECT(bufRange2.size(), 100u);
	EXPECT(bufRange2.offset(), 0u);

	// allocator: reservations alone create no buffers; first SubBuffer does
	vpp::BufferAllocator bufAlloc(dev);
	auto id0 = bufAlloc.reserve(10000u, vk::BufferUsageBits::uniformBuffer);
	auto id1 = bufAlloc.reserve(7u, vk::BufferUsageBits::storageBuffer);
	auto id2 = bufAlloc.reserve(100u, vk::BufferUsageBits::indexBuffer);
	EXPECT(bufAlloc.buffers().size(), 0u);

	auto usage = vk::BufferUsageBits::uniformBuffer |
		vk::BufferUsageBits::storageBuffer;
	for(auto i = 0u; i < 100; ++i) {
		// repeated transient SubBuffers must reuse the one created buffer
		vpp::SubBuffer tmp(bufAlloc, 11000u, usage);
		EXPECT(bufAlloc.buffers().size(), 1u);
	}

	bufAlloc.cancel(id0);
	bufAlloc.cancel(id1);
	bufAlloc.cancel(id2);
}

// Alignment handling: offsets are rounded up to the requested alignment,
// where 0 and 1 mean "no alignment constraint".
TEST(alignment) {
	using Alloc = vpp::SharedBuffer::Allocation;
	auto& dev = *globals.device;

	vpp::SharedBuffer buf(dev.devMemAllocator(),
		{{}, 2048u, vk::BufferUsageBits::uniformBuffer});
	auto alloc1 = buf.alloc(230u, 64u);
	EXPECT(alloc1, (Alloc{0, 230u}));

	auto alloc2 = buf.alloc(200u, 128u);
	EXPECT(alloc2, (Alloc{256u, 200u}));

	auto alloc3 = buf.alloc(44u, 2u);
	EXPECT(alloc3, (Alloc{456u, 44u}));

	auto alloc4 = buf.alloc(2048, 0u);
	EXPECT(alloc4.size, 0u);

	auto alloc5 = buf.alloc(100u, 0u);
	EXPECT(alloc5, (Alloc{500u, 100u}));

	auto alloc6 = buf.alloc(100u, 1u);
	EXPECT(alloc6, (Alloc{600u, 100u}));

	// buffer-sized alignment only satisfiable at offset 0, which is occupied
	auto alloc7 = buf.alloc(100u, 2048u);
	EXPECT(alloc7.size, 0u);

	buf.free(alloc1);
	auto alloc8 = buf.alloc(100u, 2048u);
	EXPECT(alloc8, (Alloc{0u, 100u}));

	auto alloc9 = buf.alloc(99, 0u);
	EXPECT(alloc9, (Alloc{100u, 99u}));

	auto alloc10 = buf.alloc(1, 1);
	EXPECT(alloc10, (Alloc{199u, 1u}));

	buf.free(alloc2);
	buf.free(alloc3);
	buf.free(alloc5);
	buf.free(alloc6);
	buf.free(alloc8);
	buf.free(alloc9);
	buf.free(alloc10);
}

// On host-visible but non-coherent memory, SubBuffer offsets must honor the
// device's nonCoherentAtomSize so flush/invalidate ranges are valid.
TEST(nonCoherentAtomAlign) {
	using Alloc = vpp::SharedBuffer::Allocation;
	auto& dev = *globals.device;
	auto coherentBits = dev.memoryTypeBits(vk::MemoryPropertyBits::hostCoherent);
	auto atomAlign = dev.properties().limits.nonCoherentAtomSize;
	auto hostNonCoherent = dev.hostMemoryTypes() & ~coherentBits;

	// not guaranteed to exist
	if(!hostNonCoherent) {
		return;
	}

	vpp::SharedBuffer buf(dev.devMemAllocator(),
		{{}, 2048u, vk::BufferUsageBits::uniformBuffer}, hostNonCoherent);
	vpp::SubBuffer range1(buf, buf.alloc(10u));
	EXPECT(range1.allocation(), (Alloc{0u, 10u}));

	vpp::SubBuffer range2(buf, buf.alloc(10u));
	auto offset = vpp::align<vk::DeviceSize>(10u, atomAlign);
	EXPECT(range2.allocation(), (Alloc{offset, 10u}));

	vpp::SubBuffer range3(buf, buf.alloc(100u));
	offset = vpp::align<vk::DeviceSize>(end(range2.allocation()), atomAlign);
	EXPECT(range3.allocation(), (Alloc{offset, 100u}));

	// also exercises SubBuffer move construction
	auto range4 = std::move(range3);
}

// Requesting host-visible memory types yields mappable buffers; coherent and
// non-coherent paths are checked separately (alignment only matters for the
// latter), and a buffer may be mapped multiple times concurrently.
TEST(mappable) {
	auto& dev = *globals.device;
	auto& allocator = dev.bufferAllocator();
	auto usage = vk::BufferUsageBits::vertexBuffer;
	auto coherentBits = dev.memoryTypeBits(vk::MemoryPropertyBits::hostCoherent);
	auto atomAlign = dev.properties().limits.nonCoherentAtomSize;
	auto hostBits = dev.hostMemoryTypes();

	// just allocate a mappable buffer
	auto buf1 = vpp::SubBuffer(allocator, 100u, usage, hostBits);
	EXPECT(buf1.allocation().size, 100u);
	EXPECT(buf1.buffer().mappable(), true);
	auto props = buf1.buffer().memory().properties();
	auto coherent = (props & vk::MemoryPropertyBits::hostCoherent);
	EXPECT(buf1.offset() % 128u, 0u);
	if(!coherent) {
		EXPECT(buf1.offset() % atomAlign, 0u);
	}

	// allocate mappable buffer on coherent memory
	auto buf2 = vpp::SubBuffer{allocator, 1000u, usage, coherentBits};
	EXPECT(buf2.allocation().size, 1000u);
	EXPECT(buf2.buffer().mappable(), true);
	auto type = buf2.buffer().memory().type();
	EXPECT((coherentBits & (1 << type)) != 0, true);

	// nested/overlapping maps of the same buffer must coexist
	auto mapView1 = buf2.memoryMap();
	auto mapView2 = buf2.memoryMap();
	auto mapView3 = buf2.memoryMap();

	// allocate mappable buffer on non-coherent memory
	// there might be vulkan implementations where are all hostVisible
	// types are also hostCoherent, we cannot do this test there
	auto hostNonCoherent = hostBits & ~coherentBits;
	if(hostNonCoherent) {
		auto buf3 = vpp::SubBuffer(allocator, 511u, usage, hostNonCoherent);
		EXPECT(buf3.allocation().size, 511u);
		EXPECT(buf3.buffer().mappable(), true);
		type = buf3.buffer().memory().type();
		EXPECT((~coherentBits & (1 << type)) != 0, true);
		EXPECT(buf3.offset() % atomAlign, 0u);
	}
}

// Two-phase (deferred) initialization: reserve several SubBuffers with
// differing usages/memory types first, then init() them and check that
// each landed on a memory type from the requested set with the right size.
TEST(defer) {
	auto& dev = *globals.device;
	auto& allocator = dev.bufferAllocator();
	auto usage = vk::BufferUsageBits::uniformBuffer |
		vk::BufferUsageBits::vertexBuffer;
	auto hostBits = dev.hostMemoryTypes();
	auto devBits = dev.deviceMemoryTypes();

	std::array<vpp::SubBuffer::InitData, 6> data;
	auto buf1 = vpp::SubBuffer(data[0], allocator, 3251u, usage, hostBits);
	usage |= vk::BufferUsageBits::transferSrc;
	auto buf2 = vpp::SubBuffer(data[1], allocator, 6431u, usage, devBits);
	usage = vk::BufferUsageBits::transferDst;
	auto buf3 = vpp::SubBuffer(data[2], allocator, 234u, usage, devBits, 32u);
	usage |= vk::BufferUsageBits::storageBuffer;
	auto buf4 = vpp::SubBuffer(data[3], allocator, 54u, usage, hostBits);
	usage = vk::BufferUsageBits::storageTexelBuffer;
	auto buf5 = vpp::SubBuffer(data[4], allocator, 53221u, usage, devBits);

	buf1.init(data[0]);
	buf2.init(data[1]);
	buf3.init(data[2]);

	EXPECT(buf1.size(), 3251u);
	auto b1m = (1u << buf1.buffer().memory().type());
	EXPECT(((b1m & hostBits) != 0), true);

	EXPECT(buf2.size(), 6431u);
	auto b2m = (1u << buf2.buffer().memory().type());
	EXPECT(((b2m & devBits) != 0), true);

	EXPECT(buf3.size(), 234u);

	// reserving after others were initialized must still work
	auto buf6 = vpp::SubBuffer(data[5], allocator, 2143u, usage, devBits, 2u);
	buf4.init(data[3]);
	buf5.init(data[4]);
	buf6.init(data[5]);

	EXPECT(buf4.size(), 54u);
	EXPECT(buf5.size(), 53221u);
	EXPECT(buf6.size(), 2143u);
}
| 2,732 |
6,557 | {
"ascending": "ascendente",
"ascendingSort": "Ordenar por coluna {columnName} em ordem ascendente",
"descending": "descendente",
"descendingSort": "Ordenar por coluna {columnName} em ordem descendente",
"select": "Selecionar",
"selectAll": "Selecionar tudo",
"sortable": "Coluna ordenável"
}
| 115 |
2,151 | <filename>third_party/blink/renderer/modules/presentation/presentation_connection_close_event.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/presentation/presentation_connection_close_event.h"
#include "third_party/blink/renderer/modules/presentation/presentation_connection_close_event_init.h"
namespace blink {
// Direct construction used by the Presentation API implementation when a
// connection closes: non-bubbling, non-cancelable, carrying reason + message.
PresentationConnectionCloseEvent::PresentationConnectionCloseEvent(
    const AtomicString& event_type,
    const String& reason,
    const String& message)
    : Event(event_type, Bubbles::kNo, Cancelable::kNo),
      reason_(reason),
      message_(message) {}

// Script-visible constructor: reason/message come from the event init dict.
PresentationConnectionCloseEvent::PresentationConnectionCloseEvent(
    const AtomicString& event_type,
    const PresentationConnectionCloseEventInit& initializer)
    : Event(event_type, initializer),
      reason_(initializer.reason()),
      message_(initializer.message()) {}

const AtomicString& PresentationConnectionCloseEvent::InterfaceName() const {
  return EventNames::PresentationConnectionCloseEvent;
}

// No members of our own to trace; delegate to the base class.
void PresentationConnectionCloseEvent::Trace(blink::Visitor* visitor) {
  Event::Trace(visitor);
}
} // namespace blink
| 382 |
4,772 | <gh_stars>1000+
package example.repo;
import example.model.Customer925;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
/**
 * Spring Data CRUD repository for {@link Customer925} entities keyed by Long id.
 * The query for {@code findByLastName} is derived from the method name.
 */
public interface Customer925Repository extends CrudRepository<Customer925, Long> {

	// All customers whose lastName matches exactly.
	List<Customer925> findByLastName(String lastName);
}
| 91 |
3,083 | // Copyright 2011-2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.security.zynamics.binnavi.debug.debugger.synchronizers;
import com.google.security.zynamics.binnavi.Log.NaviLogger;
import com.google.security.zynamics.binnavi.debug.debugger.DebugExceptionWrapper;
import com.google.security.zynamics.binnavi.debug.debugger.interfaces.IDebugger;
import com.google.security.zynamics.binnavi.debug.models.processmanager.ProcessManagerListenerAdapter;
import com.google.security.zynamics.binnavi.debug.models.processmanager.TargetProcessThread;
import com.google.security.zynamics.binnavi.debug.models.processmanager.ThreadListenerAdapter;
import com.google.security.zynamics.binnavi.debug.models.processmanager.ThreadState;
import com.google.security.zynamics.binnavi.debug.models.processmanager.interfaces.ProcessManagerListener;
import com.google.security.zynamics.binnavi.debug.models.processmanager.interfaces.ThreadListener;
/**
 * Synchronizes debug events with modeled thread states: whenever a modeled
 * {@link TargetProcessThread} changes state, the corresponding resume/suspend
 * command is forwarded to the real debugger, and thread listeners are kept
 * registered as threads appear and disappear in the process.
 */
public class ThreadStateSynchronizer {
  /**
   * Debugger used for synchronization.
   */
  private final IDebugger debugger;

  /**
   * Keeps track of changes in thread states.
   */
  private final ThreadListener m_threadListener = new ThreadListenerAdapter() {
    @Override
    public void stateChanged(final TargetProcessThread thread) {
      // Mirror the model state onto the target: RUNNING -> resume,
      // anything else -> suspend. Failures are logged, not rethrown.
      if (thread.getState() == ThreadState.RUNNING) {
        try {
          debugger.resumeThread(thread.getThreadId());
        } catch (final DebugExceptionWrapper exception) {
          NaviLogger.severe("Error: Debugger could not resume thread. Exception %s", exception);
        }
      } else {
        try {
          debugger.suspendThread(thread.getThreadId());
        } catch (final DebugExceptionWrapper exception) {
          NaviLogger.severe("Error: Debugger could not suspend thread. Exception %s", exception);
        }
      }
    }
  };

  /**
   * Keeps track of relevant events in the synchronized process.
   */
  private final ProcessManagerListener m_processListener = new ProcessManagerListenerAdapter() {
    @Override
    public void addedThread(final TargetProcessThread thread) {
      // Newly discovered threads must be observed for state changes too.
      thread.addListener(m_threadListener);
    }

    @Override
    public void removedThread(final TargetProcessThread thread) {
      thread.removeListener(m_threadListener);
    }
  };

  /**
   * Creates a new synchronizer object.
   *
   * @param debugger Debugger used for synchronization.
   */
  public ThreadStateSynchronizer(final IDebugger debugger) {
    this.debugger = debugger;

    // Attach to threads that already exist before listening for new ones.
    for (final TargetProcessThread thread : this.debugger.getProcessManager().getThreads()) {
      thread.addListener(m_threadListener);
    }

    this.debugger.getProcessManager().addListener(m_processListener);
  }
}
| 1,027 |
1,647 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.pinterest.secor.common;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Groups a Kafka topic with a set of its partitions.
 *
 * @author <NAME> (<EMAIL>)
 */
public class TopicPartitionGroup {
    private String mTopic;
    private int[] mPartitions;

    public TopicPartitionGroup(String topic, int[] partitions) {
        mTopic = topic;
        // defensive copy: callers keep ownership of their array
        mPartitions = Arrays.copyOf(partitions, partitions.length);
    }

    public TopicPartitionGroup(TopicPartition tp) {
        this(tp.getTopic(), new int[]{tp.getPartition()});
    }

    public String getTopic() {
        return mTopic;
    }

    public int[] getPartitions() {
        return mPartitions;
    }

    /** Expands this group into one {@link TopicPartition} per partition id. */
    public List<TopicPartition> getTopicPartitions() {
        List<TopicPartition> result = new ArrayList<TopicPartition>();
        for (int partition : mPartitions) {
            result.add(new TopicPartition(mTopic, partition));
        }
        return result;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (other == null || getClass() != other.getClass()) return false;

        TopicPartitionGroup group = (TopicPartitionGroup) other;
        boolean sameTopic = (mTopic == null)
                ? (group.mTopic == null)
                : mTopic.equals(group.mTopic);
        return sameTopic && Arrays.equals(mPartitions, group.mPartitions);
    }

    @Override
    public int hashCode() {
        int hash = (mTopic == null) ? 0 : mTopic.hashCode();
        return 31 * hash + Arrays.hashCode(mPartitions);
    }

    @Override
    public String toString() {
        return new StringBuilder("TopicPartitionGroup{")
                .append("mTopic='").append(mTopic).append('\'')
                .append(", mPartitions=").append(Arrays.toString(mPartitions))
                .append('}')
                .toString();
    }
}
| 945 |
1,178 | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/types.h
* @brief Basic types and typedefs.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by <NAME> and <NAME>.
#ifndef _GLIBCXX_PARALLEL_TYPES_H
#define _GLIBCXX_PARALLEL_TYPES_H 1

#include <cstdlib>
#include <limits>
#include <tr1/cstdint>

namespace __gnu_parallel
{
  // Enumerated types.

  /// Run-time equivalents for the compile-time tags.
  enum _Parallelism
    {
      /// Not parallel.
      sequential,

      /// Parallel unbalanced (equal-sized chunks).
      parallel_unbalanced,

      /// Parallel balanced (work-stealing).
      parallel_balanced,

      /// Parallel with OpenMP dynamic load-balancing.
      parallel_omp_loop,

      /// Parallel with OpenMP static load-balancing.
      parallel_omp_loop_static,

      /// Parallel with OpenMP taskqueue construct.
      parallel_taskqueue
    };

  /// Strategies for run-time algorithm selection:
  // force_sequential, force_parallel, heuristic.
  enum _AlgorithmStrategy
    {
      /// Decide sequential vs. parallel at run time.
      heuristic,
      /// Always run the sequential algorithm.
      force_sequential,
      /// Always run the parallel algorithm.
      force_parallel
    };

  /// Sorting algorithms:
  // multi-way mergesort, quicksort, load-balanced quicksort.
  enum _SortAlgorithm
    {
      /// Multi-way mergesort.
      MWMS,
      /// Quicksort.
      QS,
      /// Load-balanced quicksort.
      QS_BALANCED
    };

  /// Merging algorithms:
  // bubblesort-alike, loser-tree variants, enum __sentinel.
  enum _MultiwayMergeAlgorithm
    {
      /// Loser (tournament) tree based merging.
      LOSER_TREE
    };

  /// Partial sum algorithms: recursive, linear.
  enum _PartialSumAlgorithm
    {
      /// Recursive partial-sum computation.
      RECURSIVE,
      /// Linear-scan partial-sum computation.
      LINEAR
    };

  /// Sorting/merging algorithms: sampling, __exact.
  enum _SplittingAlgorithm
    {
      /// Split positions chosen by sampling the input.
      SAMPLING,
      /// Exact splitting.
      EXACT
    };

  /// Find algorithms:
  // growing blocks, equal-sized blocks, equal splitting.
  enum _FindAlgorithm
    {
      /// Block sizes grow as the search proceeds.
      GROWING_BLOCKS,
      /// All blocks have the same, constant size.
      CONSTANT_SIZE_BLOCKS,
      /// One equally sized part of the sequence per thread.
      EQUAL_SPLIT
    };

  /**
   * @brief Unsigned integer to index __elements.
   * The total number of elements for each algorithm must fit into this type.
   */
  typedef uint64_t _SequenceIndex;

  /**
   * @brief Unsigned integer to index a thread number.
   * The maximum thread number (for each processor) must fit into this type.
   */
  typedef uint16_t _ThreadIndex;

  // XXX atomics interface?
  /// Longest compare-and-swappable integer type on this platform.
  typedef int64_t _CASable;

  /// Number of bits of _CASable.
  // digits of a signed 64-bit type, i.e. 63 (sign bit excluded).
  static const int _CASable_bits = std::numeric_limits<_CASable>::digits;

  /// ::_CASable with the right half of bits set to 1.
  // Presumably used to pack/unpack two half-width values inside one
  // CAS-able word -- confirm at the use sites.
  static const _CASable _CASable_mask =
    ((_CASable(1) << (_CASable_bits / 2)) - 1);
}

#endif /* _GLIBCXX_PARALLEL_TYPES_H */
522 | package algs.example.gui.problems.nearestNeighbor.controller;
import java.awt.event.MouseEvent;
import java.awt.event.MouseMotionListener;
import algs.example.gui.canvas.ElementCanvas;
import algs.example.gui.generator.IOutput;
import algs.example.gui.problems.nearestNeighbor.model.Model;
import algs.model.nd.Hyperpoint;
import algs.model.twod.TwoDPoint;
/**
* Mouse Handler to interact with Element Canvas.
*
* @param <E> Underlying type of element to be drawn on the canvas.
*
* @author <NAME>
* @version 1.0, 6/15/08
* @since 1.0
*/
public class MouseHandler<E> extends java.awt.event.MouseAdapter implements MouseMotionListener {
    /** Point of contact in AWT coordinates. */
    java.awt.Dimension offset;

    /** Sink for interactive output. */
    protected IOutput output;

    /** Canvas whose mouse events are tracked. */
    protected ElementCanvas<E> canvas;

    /** Model of the entities being queried. */
    Model model;

    /**
     * Wires this handler to a canvas, an output sink, and the entity model.
     */
    public MouseHandler (ElementCanvas<E> c, IOutput output, Model model) {
        this.canvas = c;
        this.output = output;
        this.model = model;
    }

    /** Drag events are irrelevant for nearest-neighbor tracking; ignored. */
    public void mouseDragged(MouseEvent event) {}

    /** Track the pointer and refresh the nearest query on every move. */
    public void mouseMoved(MouseEvent event) {
        // AWT places the origin at the top-left with y growing downward,
        // while the model works in Cartesian coordinates; flip y against
        // the canvas height before querying.
        int cartesianY = canvas.getHeight() - event.getY();
        model.computeNearest(new Hyperpoint(new TwoDPoint(event.getX(), cartesianY)));

        if (model.getNearest() == null) {
            return;
        }
        canvas.redrawState();
        canvas.repaint();
    }
}
| 553 |
777 | <filename>chrome/browser/devtools/devtools_protocol.h
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_DEVTOOLS_DEVTOOLS_PROTOCOL_H_
#define CHROME_BROWSER_DEVTOOLS_DEVTOOLS_PROTOCOL_H_
#include <memory>
#include <string>
#include "base/compiler_specific.h"
#include "base/values.h"
// Utility class for processing DevTools remote debugging messages.
// All members are static helpers for parsing and serializing the JSON-based
// protocol; the class is never instantiated (private ctor/dtor below).
class DevToolsProtocol {
 public:
  // Splits an already-decoded |command| dictionary into its id, method name
  // and parameter dictionary.
  // Caller maintains ownership of |command|. |*params| is owned by |command|.
  // Presumably returns false when |command| lacks the expected fields --
  // confirm against the implementation.
  static bool ParseCommand(base::DictionaryValue* command,
                           int* command_id,
                           std::string* method,
                           base::DictionaryValue** params);

  // Parses a JSON-encoded notification into its method name and (owned)
  // parameter dictionary.
  static bool ParseNotification(const std::string& json,
                                std::string* method,
                                std::unique_ptr<base::DictionaryValue>* params);

  // Parses a JSON-encoded response, extracting the id of the command it
  // answers and an error code.
  static bool ParseResponse(const std::string& json,
                            int* command_id,
                            int* error_code);

  // Serializes a command (id + method + params) into its JSON wire form.
  static std::string SerializeCommand(
      int command_id,
      const std::string& method,
      std::unique_ptr<base::DictionaryValue> params);

  // Builds a success response dictionary answering |command_id| with |result|.
  static std::unique_ptr<base::DictionaryValue> CreateSuccessResponse(
      int command_id,
      std::unique_ptr<base::DictionaryValue> result);

  // Builds an "invalid params" error response naming the offending |param|.
  static std::unique_ptr<base::DictionaryValue> CreateInvalidParamsResponse(
      int command_id,
      const std::string& param);

 private:
  // Static-only utility: construction is disallowed.
  DevToolsProtocol() {}
  ~DevToolsProtocol() {}
};
#endif // CHROME_BROWSER_DEVTOOLS_DEVTOOLS_PROTOCOL_H_
| 703 |
335 | {
"word": "Then",
"definitions": [
"At that time; at the time in question.",
"After that; next; afterwards.",
"Also; in addition.",
"In that case; therefore.",
"Used at the end of a sentence to emphasize an inference being drawn.",
"Used to finish off a conversation."
],
"parts-of-speech": "Adverb"
} | 145 |
595 | /******************************************************************************
* Copyright (c) 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
#ifndef XPFW_MOD_RPU_H_
#define XPFW_MOD_RPU_H_

#ifdef __cplusplus
extern "C" {
#endif

/* Macros for RPU_0 Status, Cfg and standby mode masks */
#define RPU_0_CFG_REG 0xFF9A0100U    /* RPU_0 configuration register address */
#define RPU_0_STATUS_REG 0xFF9A0104U /* RPU_0 status register address */
#define RUN_MODE_MASK 0x6U           /* run-mode bits; presumably applied to
                                        RPU_0_STATUS_REG -- confirm in ModRpu */
#define RPU_HALT_MASK 0x1U           /* halt-control bit; presumably in
                                        RPU_0_CFG_REG -- confirm in ModRpu */

/* Mask to know RPU_0 is powered down */
#define RPU_POWER_UP_MASK 0x400U

/* Macros to indicate STL task started on PMU */
#define STL_STARTED 0x20000000U
#define CHECK_STL_STARTED 100U       /* NOTE(review): looks like a poll count or
                                        interval -- confirm at use site */

#define XPFW_RPU_RUNMODE_TIME 100U   /* NOTE(review): presumably a periodic task
                                        interval (ms?) -- confirm at use site */

/* Initializes the RPU module of the PMU firmware. */
void ModRpuInit(void);

#ifdef __cplusplus
}
#endif

#endif /* XPFW_MOD_RPU_H_ */
| 324 |
1,738 | <filename>dev/Gems/EMotionFX/Code/Tests/UI/LY-92269.cpp<gh_stars>1000+
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include <Tests/UI/CommandRunnerFixture.h>
namespace EMotionFX
{
    // Regression test for LY-92269. CommandRunnerFixture presumably replays
    // the command strings below in order and fails if any command errors --
    // confirm against CommandRunnerFixture.h. The scenario:
    //   1. create an anim graph with a blend tree, final node and parameter
    //      (Parameters0) node,
    //   2. create two float-slider parameters (Parameter0, Parameter1),
    //   3. remove Parameter0, then UNDO that removal,
    //   4. point Parameters0 at Parameter1, then remove Parameter1.
    // The -contents payloads are serialized ObjectStream XML for the
    // FloatSliderParameter instances; they are runtime data and must not be
    // reformatted.
    INSTANTIATE_TEST_CASE_P(LY92269, CommandRunnerFixture,
        ::testing::Values(std::vector<std::string> {
            R"str(CreateAnimGraph)str",
            R"str(AnimGraphCreateNode -animGraphID 0 -type {A8B5BB1E-5BA9-4B0A-88E9-21BB7A199ED2} -parentName Root -xPos 240 -yPos 230 -name GENERATE -namePrefix BlendTree)str",
            R"str(AnimGraphCreateNode -animGraphID 0 -type {1A755218-AD9D-48EA-86FC-D571C11ECA4D} -parentName BlendTree0 -xPos 0 -yPos 0 -name GENERATE -namePrefix FinalNode)str",
            R"str(AnimGraphCreateNode -animGraphID 0 -type {4510529A-323F-40F6-B773-9FA8FC4DE53D} -parentName BlendTree0 -xPos -120 -yPos 30 -name GENERATE -namePrefix Parameters)str",
            R"str(AnimGraphCreateParameter -animGraphID 0 -type {2ED6BBAF-5C82-4EAA-8678-B220667254F2} -name Parameter0 -contents <ObjectStream version="3">
<Class name="FloatSliderParameter" version="1" type="{2ED6BBAF-5C82-4EAA-8678-B220667254F2}">
<Class name="FloatParameter" field="BaseClass1" version="1" type="{0F0B8531-0B07-4D9B-A8AC-3A32D15E8762}">
<Class name="(RangedValueParameter&lt;ValueType, Derived>)&lt;float FloatParameter >" field="BaseClass1" version="1" type="{01CABBF8-9500-5ABB-96BD-9989198146C2}">
<Class name="(DefaultValueParameter&lt;ValueType, Derived>)&lt;float (RangedValueParameter&lt;ValueType, Derived>)&lt;float FloatParameter > >" field="BaseClass1" version="1" type="{3221F118-9372-5BA3-BD8B-E88267CB356B}">
<Class name="ValueParameter" field="BaseClass1" version="1" type="{46549C79-6B4C-4DDE-A5E3-E5FBEC455816}">
<Class name="Parameter" field="BaseClass1" version="1" type="{4AF0BAFC-98F8-4EA3-8946-4AD87D7F2A6C}">
<Class name="AZStd::string" field="name" value="Parameter0" type="{03AAAB3F-5C47-5A66-9EBC-D5FA4DB353C9}"/>
<Class name="AZStd::string" field="description" value="" type="{03AAAB3F-5C47-5A66-9EBC-D5FA4DB353C9}"/>
</Class>
</Class>
<Class name="float" field="defaultValue" value="0.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
</Class>
<Class name="bool" field="hasMinValue" value="true" type="{A0CA880C-AFE4-43CB-926C-59AC48496112}"/>
<Class name="float" field="minValue" value="0.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
<Class name="bool" field="hasMaxValue" value="true" type="{A0CA880C-AFE4-43CB-926C-59AC48496112}"/>
<Class name="float" field="maxValue" value="1.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
</Class>
</Class>
</Class>
</ObjectStream>)str",
            R"str(AnimGraphCreateParameter -animGraphID 0 -type {2ED6BBAF-5C82-4EAA-8678-B220667254F2} -name Parameter1 -contents <ObjectStream version="3">
<Class name="FloatSliderParameter" version="1" type="{2ED6BBAF-5C82-4EAA-8678-B220667254F2}">
<Class name="FloatParameter" field="BaseClass1" version="1" type="{0F0B8531-0B07-4D9B-A8AC-3A32D15E8762}">
<Class name="(RangedValueParameter&lt;ValueType, Derived>)&lt;float FloatParameter >" field="BaseClass1" version="1" type="{01CABBF8-9500-5ABB-96BD-9989198146C2}">
<Class name="(DefaultValueParameter&lt;ValueType, Derived>)&lt;float (RangedValueParameter&lt;ValueType, Derived>)&lt;float FloatParameter > >" field="BaseClass1" version="1" type="{3221F118-9372-5BA3-BD8B-E88267CB356B}">
<Class name="ValueParameter" field="BaseClass1" version="1" type="{46549C79-6B4C-4DDE-A5E3-E5FBEC455816}">
<Class name="Parameter" field="BaseClass1" version="1" type="{4AF0BAFC-98F8-4EA3-8946-4AD87D7F2A6C}">
<Class name="AZStd::string" field="name" value="Parameter1" type="{03AAAB3F-5C47-5A66-9EBC-D5FA4DB353C9}"/>
<Class name="AZStd::string" field="description" value="" type="{03AAAB3F-5C47-5A66-9EBC-D5FA4DB353C9}"/>
</Class>
</Class>
<Class name="float" field="defaultValue" value="0.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
</Class>
<Class name="bool" field="hasMinValue" value="true" type="{A0CA880C-AFE4-43CB-926C-59AC48496112}"/>
<Class name="float" field="minValue" value="0.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
<Class name="bool" field="hasMaxValue" value="true" type="{A0CA880C-AFE4-43CB-926C-59AC48496112}"/>
<Class name="float" field="maxValue" value="1.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>
</Class>
</Class>
</Class>
</ObjectStream>)str",
            R"str(AnimGraphRemoveParameter -animGraphID 0 -name Parameter0)str",
            R"str(UNDO)str",
            R"str(AnimGraphAdjustNode -animGraphID 0 -name Parameters0 -attributesString -parameterNames {<ObjectStream version="3">
<Class name="AZStd::vector" type="{99DAD0BC-740E-5E82-826B-8FC7968CC02C}">
<Class name="AZStd::string" field="element" value="Parameter1" type="{03AAAB3F-5C47-5A66-9EBC-D5FA4DB353C9}"/>
</Class>
</ObjectStream>
})str",
            R"str(AnimGraphRemoveParameter -animGraphID 0 -name Parameter1)str"
        }
    ));
} // end namespace EMotionFX
| 3,459 |
2,023 | <filename>recipes/Python/577136_IFS_fractals/recipe-577136.py
# IFS fractals
# FB - 201003221
from PIL import Image
import random

### Fractint IFS definition of Fern
##mat=[[0.0,0.0,0.0,0.16,0.0,0.0,0.01],
##     [0.85,0.04,-0.04,0.85,0.0,1.6,0.85],
##     [0.2,-0.26,0.23,0.22,0.0,1.6,0.07],
##     [-0.15,0.28,0.26,0.24,0.0,0.44,0.07]]

### Fractint IFS definition of Dragon
##mat = [[0.824074, 0.281482, -0.212346, 0.864198, -1.882290, -0.110607, 0.787473],
##       [0.088272, 0.520988, -0.463889, -0.377778, 0.785360, 8.095795, 0.212527]]

### Levy C curve
##mat = [[0.5, -0.5, 0.5, 0.5, 0.0, 0.0, 0.5],
##       [0.5, 0.5, -0.5, 0.5, 0.5, 0.5, 0.5]]

# Levy Dragon. Each row is [a, b, c, d, e, f, p] describing the affine map
# (x, y) -> (a*x + b*y + e, c*x + d*y + f), chosen with probability p.
mat = [[0.5, -0.5, 0.5, 0.5, 0.0, 0.0, 0.5],
       [-0.5, -0.5, 0.5, -0.5, 1.0, 0.0, 0.5]]

# image size
imgx = 512
imgy = 512  # will be auto-re-adjusted to match the attractor's aspect ratio


def _ifs_step(x, y, transforms):
    """Apply one randomly chosen affine map of the IFS to the point (x, y).

    A map is picked with probability proportional to its 7th entry; if the
    probabilities sum to slightly less than 1, the last map is the fallback
    (the loop ends with ``row`` bound to it, exactly as the original code
    relied on).  Returns the transformed (x, y).
    """
    p = random.random()
    psum = 0.0
    for row in transforms:
        psum += row[6]
        if p <= psum:
            break
    return (x * row[0] + y * row[1] + row[4],
            x * row[2] + y * row[3] + row[5])


def _attractor_bounds(transforms, iterations):
    """Estimate the attractor's bounding box by iterating the IFS.

    Starts from the first map's translation (e, f), as the original did.
    Returns (xa, xb, ya, yb) = (min x, max x, min y, max y).
    """
    x = transforms[0][4]
    y = transforms[0][5]
    xa = xb = x
    ya = yb = y
    for _ in range(iterations):
        x, y = _ifs_step(x, y, transforms)
        xa = min(xa, x)
        xb = max(xb, x)
        ya = min(ya, y)
        yb = max(yb, y)
    return xa, xb, ya, yb


def _render(transforms, width, height):
    """Render the IFS attractor into a new grayscale ("L") PIL image.

    The bounding box is estimated with width*height warm-up iterations,
    then the height is re-adjusted to preserve the aspect ratio before
    plotting width*height points.
    """
    xa, xb, ya, yb = _attractor_bounds(transforms, width * height)
    # int() guards against round() returning a float on Python 2, which
    # Image.new would reject.
    height = int(round(height * (yb - ya) / (xb - xa)))
    image = Image.new("L", (width, height))
    x = 0.0
    y = 0.0
    for _ in range(width * height):
        x, y = _ifs_step(x, y, transforms)
        # Map the point from attractor coordinates to pixel coordinates,
        # flipping y because image row 0 is at the top.
        jx = int((x - xa) / (xb - xa) * (width - 1))
        jy = (height - 1) - int((y - ya) / (yb - ya) * (height - 1))
        image.putpixel((jx, jy), 255)
    return image


_render(mat, imgx, imgy).save("IFS_.png", "PNG")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.