max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
5,305 |
class I<T> {}
class J {
<T> I<T> foo(T x) {
return new I<T>();
}
<T> I<T>[] bar(T x) {
Object[] r = new Object[]{new I<T>()};
return (I<T>[]) r;
}
}
| 95 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Murlin","circ":"1ère circonscription","dpt":"Nièvre","inscrits":61,"abs":27,"votants":34,"blancs":3,"nuls":0,"exp":31,"res":[{"nuance":"FN","nom":"<NAME>","voix":19},{"nuance":"REM","nom":"<NAME>","voix":12}]} | 106 |
3,428 | {"id":"02059","group":"easy-ham-1","checksum":{"type":"MD5","value":"e7cb2f48474335029d7eb951c82722f4"},"text":"From <EMAIL> Mon Sep 30 13:43:38 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy<EMAIL>assassin.taint.org\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id 5697C16F16\n\tfor <jm@localhost>; Mon, 30 Sep 2002 13:43:38 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Mon, 30 Sep 2002 13:43:38 +0100 (IST)\nReceived: from dogma.slashnull.org (localhost [127.0.0.1]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g8U80Xg21180 for\n <<EMAIL>>; Mon, 30 Sep 2002 09:00:33 +0100\nMessage-Id: <<EMAIL>>\nTo: [email protected]\nFrom: boingboing <<EMAIL>>\nSubject: Turkey City Lexicon\nDate: Mon, 30 Sep 2002 08:00:32 -0000\nContent-Type: text/plain; encoding=utf-8\n\nURL: http://boingboing.net/#85506002\nDate: Not supplied\n\nAfter the talk at UT Austin, I spent Saturday at the Turkey City science \nfiction writers' workshop at Bruce Sterling's place. Turkey City is a venerable \nscience fiction workshop that has spawned many good writers and a lexicon of \nscience fiction critical terms that is the de facto standard for understanding \nwhat works and what doesn't in a work of science fiction: \n\n Squid on the Mantelpiece \n\n Chekhov said that if there are dueling pistols over the mantelpiece in the \n first act, they should be fired in the third. In other words, a plot \n element should be deployed in a timely fashion and with proper dramatic \n emphasis. However, in SF plotting the MacGuffins are often so overwhelming \n that they cause conventional plot structures to collapse. It's hard to \n properly dramatize, say, the domestic effects of Dad's bank overdraft when \n a giant writhing kraken is levelling the city. 
This mismatch between the \n conventional dramatic proprieties and SF's extreme, grotesque, or visionary \n thematics is known as the \"squid on the mantelpiece.\" \n\n Card Tricks in the Dark \n\n Elaborately contrived plot which arrives at (a) the punchline of a private \n joke no reader will get or (b) the display of some bit of learned trivia \n relevant only to the author. This stunt may be intensely ingenious, and \n very gratifying to the author, but it serves no visible fictional purpose. \n (Attr. <NAME>) \n\nI had the cold from hell all weekend and I'm jetlagged, but I wanted to get \nsome links up before I hit the sack. Until tomorrow! Link[1] Discuss[2]\n\n[1] http://www.sfwa.org/writing/turkeycity.html\n[2] http://www.quicktopic.com/boing/H/cgivZf3AAhKkk\n\n\n"} | 915 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-wx77-rp39-c6vg",
"modified": "2022-03-24T22:10:13Z",
"published": "2020-09-04T15:11:03Z",
"aliases": [
],
"summary": "Regular Expression Denial of Service in markdown",
"details": "All versions of `markdown` are vulnerable to Regular Expression Denial of Service (ReDoS). The `markdown.toHTML()` function has significantly degraded performance when parsing long strings containing underscores. This may lead to Denial of Service if the parser accepts user input.\n\n\n## Recommendation\n\nNo fix is currently available. Consider using an alternative package until a fix is made available.",
"severity": [
],
"affected": [
{
"package": {
"ecosystem": "npm",
"name": "markdown"
},
"ranges": [
{
"type": "ECOSYSTEM",
"events": [
{
"introduced": "0.0.0"
}
]
}
]
}
],
"references": [
{
"type": "WEB",
"url": "https://www.npmjs.com/advisories/1330"
},
{
"type": "PACKAGE",
"url": "https://github.com/evilstreak/markdown-js"
}
],
"database_specific": {
"cwe_ids": [
"CWE-400"
],
"severity": "LOW",
"github_reviewed": true
}
} | 585 |
373 | <filename>cas/src/main/java/com/dianrong/common/uniauth/cas/filter/ThreadLocalTagCleanFilter.java
package com.dianrong.common.uniauth.cas.filter;
import com.dianrong.common.uniauth.cas.helper.StaffNoPersistTagHolder;
import com.dianrong.common.uniauth.cas.model.vo.ApiResponse;
import com.dianrong.common.uniauth.cas.model.vo.ResponseCode;
import com.dianrong.common.uniauth.cas.util.UniBundleUtil;
import com.dianrong.common.uniauth.cas.util.WebScopeUtil;
import com.dianrong.common.uniauth.common.util.JsonUtil;
import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.MessageSource;
/**
 * Servlet filter that clears thread-local state once a request has finished.
 *
 * <p>(Original Chinese javadoc: "clears various ThreadLocal information".)
 * Container worker threads are pooled, so any {@code ThreadLocal} written while
 * serving one request must be removed before the thread serves the next;
 * otherwise stale per-request data leaks across requests.
 *
 * @author wanglin
 */
@Slf4j
public class ThreadLocalTagCleanFilter implements Filter {
  @Override
  public void init(FilterConfig filterConfig) throws ServletException {
    // No initialization required.
  }
  @Override
  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
      throws IOException, ServletException {
    try {
      chain.doFilter(request, response);
    } finally {
      // Runs even when a downstream filter/servlet throws, so the
      // thread-local tag can never outlive the request.
      StaffNoPersistTagHolder.remove();
    }
  }
  @Override
  public void destroy() {
    // Nothing to release.
  }
}
| 514 |
435 | package datawave.audit;
import com.google.common.collect.Lists;
import java.util.List;
import datawave.webservice.query.QueryImpl;
import org.apache.commons.lang.math.IntRange;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for {@code SplitSelectorExtractor}: selector extraction under various
 * separator configurations, and parsing/matching of "use split" ranges.
 */
public class SplitSelectorExtractorTest {
    @Test
    public void extractSelectorsLuceneQuery1() {
        // Default configuration: the whole query string is one selector.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        QueryImpl query = new QueryImpl();
        query.setQuery("selector1");
        Assert.assertEquals(Lists.newArrayList("selector1"), sut.extractSelectors(query));
    }
    @Test
    public void extractSelectorsLuceneQuery2() {
        // Selectors split on an explicitly configured ';' separator.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        sut.setSeparatorCharacter(";");
        QueryImpl query = new QueryImpl();
        query.setQuery("selector1;selector2;selector3");
        Assert.assertEquals(Lists.newArrayList("selector1", "selector2", "selector3"),
                        sut.extractSelectors(query));
    }
    @Test
    public void extractSelectorsLuceneQuery3() {
        // A NUL character works as a separator as well.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        sut.setSeparatorCharacter("\0");
        QueryImpl query = new QueryImpl();
        query.setQuery("selector1\0selector2\0selector3");
        Assert.assertEquals(Lists.newArrayList("selector1", "selector2", "selector3"),
                        sut.extractSelectors(query));
    }
    @Test
    public void extractSelectorsLuceneQuery4() {
        // The separator may be supplied through a named query parameter.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        sut.setSeparatorParameter("delimiter");
        QueryImpl query = new QueryImpl();
        query.addParameter("delimiter", ",");
        query.setQuery("selector1,selector2,selector3");
        Assert.assertEquals(Lists.newArrayList("selector1", "selector2", "selector3"),
                        sut.extractSelectors(query));
    }
    @Test
    public void rangeTest1() {
        // Closed range "0-2": splits 0..2 accepted, 3 rejected.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges("0-2");
        Assert.assertTrue(sut.useSplit(ranges, 0));
        Assert.assertTrue(sut.useSplit(ranges, 1));
        Assert.assertTrue(sut.useSplit(ranges, 2));
        Assert.assertFalse(sut.useSplit(ranges, 3));
    }
    @Test
    public void rangeTest2() {
        // Range plus single value: "0-2,4".
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges("0-2,4");
        Assert.assertTrue(sut.useSplit(ranges, 2));
        Assert.assertFalse(sut.useSplit(ranges, 3));
        Assert.assertTrue(sut.useSplit(ranges, 4));
    }
    @Test
    public void rangeTest3() {
        // Two single values: "2,4".
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges("2,4");
        Assert.assertTrue(sut.useSplit(ranges, 2));
        Assert.assertFalse(sut.useSplit(ranges, 3));
        Assert.assertTrue(sut.useSplit(ranges, 4));
    }
    @Test
    public void rangeTest4() {
        // Open-ended tail "6-" matches every split from 6 upward.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges("2,4,6-");
        Assert.assertTrue(sut.useSplit(ranges, 2));
        Assert.assertFalse(sut.useSplit(ranges, 3));
        Assert.assertTrue(sut.useSplit(ranges, 4));
        Assert.assertFalse(sut.useSplit(ranges, 5));
        Assert.assertTrue(sut.useSplit(ranges, 6));
        Assert.assertTrue(sut.useSplit(ranges, 100));
        Assert.assertTrue(sut.useSplit(ranges, 1000));
    }
    @Test
    public void rangeTest5() {
        // Same as rangeTest4 but with surrounding whitespace in the spec.
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges(" 2, 4 , 6- ");
        Assert.assertTrue(sut.useSplit(ranges, 2));
        Assert.assertFalse(sut.useSplit(ranges, 3));
        Assert.assertTrue(sut.useSplit(ranges, 4));
        Assert.assertFalse(sut.useSplit(ranges, 5));
        Assert.assertTrue(sut.useSplit(ranges, 6));
        Assert.assertTrue(sut.useSplit(ranges, 100));
        Assert.assertTrue(sut.useSplit(ranges, 1000));
    }
    @Test
    public void rangeTest6() {
        // Degenerate single-value spec "0".
        SplitSelectorExtractor sut = new SplitSelectorExtractor();
        List<IntRange> ranges = sut.parseUseSplitsRanges("0");
        Assert.assertTrue(sut.useSplit(ranges, 0));
        Assert.assertFalse(sut.useSplit(ranges, 1));
    }
}
| 2,093 |
2,151 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef HEADLESS_LIB_BROWSER_HEADLESS_CONTENT_BROWSER_CLIENT_H_
#define HEADLESS_LIB_BROWSER_HEADLESS_CONTENT_BROWSER_CLIENT_H_
#include "content/public/browser/content_browser_client.h"
#include "headless/lib/browser/headless_resource_dispatcher_host_delegate.h"
#include "headless/public/headless_browser.h"
namespace headless {
class HeadlessBrowserImpl;
// Headless embedder hook into the content layer: supplies browser main parts,
// devtools, quota, certificate/cookie policy, service manifests and
// command-line tweaks on behalf of HeadlessBrowserImpl.
class HeadlessContentBrowserClient : public content::ContentBrowserClient {
 public:
  explicit HeadlessContentBrowserClient(HeadlessBrowserImpl* browser);
  ~HeadlessContentBrowserClient() override;
  // content::ContentBrowserClient implementation:
  content::BrowserMainParts* CreateBrowserMainParts(
      const content::MainFunctionParams&) override;
  void OverrideWebkitPrefs(content::RenderViewHost* render_view_host,
                           content::WebPreferences* prefs) override;
  content::DevToolsManagerDelegate* GetDevToolsManagerDelegate() override;
  std::unique_ptr<base::Value> GetServiceManifestOverlay(
      base::StringPiece name) override;
  void RegisterOutOfProcessServices(OutOfProcessServiceMap* services) override;
  content::QuotaPermissionContext* CreateQuotaPermissionContext() override;
  void GetQuotaSettings(
      content::BrowserContext* context,
      content::StoragePartition* partition,
      storage::OptionalQuotaSettingsCallback callback) override;
#if defined(OS_POSIX) && !defined(OS_MACOSX)
  void GetAdditionalMappedFilesForChildProcess(
      const base::CommandLine& command_line,
      int child_process_id,
      content::PosixFileDescriptorInfo* mappings) override;
#endif
  void AppendExtraCommandLineSwitches(base::CommandLine* command_line,
                                     int child_process_id) override;
  // SSL/certificate decisions for headless page loads.
  void AllowCertificateError(
      content::WebContents* web_contents,
      int cert_error,
      const net::SSLInfo& ssl_info,
      const GURL& request_url,
      content::ResourceType resource_type,
      bool strict_enforcement,
      bool expired_previous_decision,
      const base::Callback<void(content::CertificateRequestResultType)>&
          callback) override;
  void SelectClientCertificate(
      content::WebContents* web_contents,
      net::SSLCertRequestInfo* cert_request_info,
      net::ClientCertIdentityList client_certs,
      std::unique_ptr<content::ClientCertificateDelegate> delegate) override;
  void ResourceDispatcherHostCreated() override;
  net::NetLog* GetNetLog() override;
  // Cookie access policy; NOTE(review): may be invoked on the I/O thread
  // (takes a ResourceContext) — confirm threading before touching state here.
  bool AllowGetCookie(const GURL& url,
                      const GURL& first_party,
                      const net::CookieList& cookie_list,
                      content::ResourceContext* context,
                      int render_process_id,
                      int render_frame_id) override;
  bool AllowSetCookie(const GURL& url,
                      const GURL& first_party,
                      const net::CanonicalCookie& cookie,
                      content::ResourceContext* context,
                      int render_process_id,
                      int render_frame_id,
                      const net::CookieOptions& options) override;
  bool DoesSiteRequireDedicatedProcess(content::BrowserContext* browser_context,
                                       const GURL& effective_site_url) override;
 private:
  // Per-process-type manifest overlays returned by GetServiceManifestOverlay().
  std::unique_ptr<base::Value> GetBrowserServiceManifestOverlay();
  std::unique_ptr<base::Value> GetRendererServiceManifestOverlay();
  std::unique_ptr<base::Value> GetPackagedServicesServiceManifestOverlay();
  HeadlessBrowserImpl* browser_;  // Not owned.
  // We store the callback here because we may call it from the I/O thread.
  HeadlessBrowser::Options::AppendCommandLineFlagsCallback
      append_command_line_flags_callback_;
  std::unique_ptr<HeadlessResourceDispatcherHostDelegate>
      resource_dispatcher_host_delegate_;
  DISALLOW_COPY_AND_ASSIGN(HeadlessContentBrowserClient);
};
} // namespace headless
#endif // HEADLESS_LIB_BROWSER_HEADLESS_CONTENT_BROWSER_CLIENT_H_
| 1,535 |
1,155 | <reponame>alexhenrie/poedit
/********************************************************************
* COPYRIGHT:
* Copyright (c) 2000-2016, International Business Machines Corporation and
* others. All Rights Reserved.
********************************************************************/
/*
* File stdnmtst.c
*
* Modification History:
*
* Date Name Description
* 08/05/2000 Yves Creation
******************************************************************************
*/
#include "unicode/ucnv.h"
#include "unicode/ustring.h"
#include "cmemory.h"
#include "cstring.h"
#include "cintltst.h"
static void TestStandardName(void);
static void TestStandardNames(void);
static void TestCanonicalName(void);
void addStandardNamesTest(TestNode** root);
/* Registers the standard-name tests with the cintltst test framework. */
void
addStandardNamesTest(TestNode** root)
{
    addTest(root, &TestStandardName, "tsconv/stdnmtst/TestStandardName");
    addTest(root, &TestStandardNames, "tsconv/stdnmtst/TestStandardNames");
    addTest(root, &TestCanonicalName, "tsconv/stdnmtst/TestCanonicalName");
}
/*
 * Checks ucnv_getStandardName(name, standard) against an expected alias.
 * expected == NULL means no standard name should exist for the pair.
 * Returns 1 on success, 0 on failure (failures are logged).
 */
static int dotestname(const char *name, const char *standard, const char *expected) {
    int res = 1;
    UErrorCode error;
    const char *tag;
    error = U_ZERO_ERROR;
    tag = ucnv_getStandardName(name, standard, &error);
    if (!tag && expected) {
        log_err_status(error, "FAIL: could not find %s standard name for %s\n", standard, name);
        res = 0;
    } else if (tag && !expected) {
        /* Consistency fix: dotestconv() treats an unexpected hit as a failure,
         * so this helper now does the same instead of silently passing. */
        log_err("FAIL: unexpectedly found %s standard name for %s, got %s\n", standard, name, tag);
        res = 0;
    } else if (expected && (name == tag || uprv_strcmp(expected, tag))) {
        /* name == tag would mean the API echoed our input pointer rather than
         * returning the alias-table entry. */
        log_err("FAIL: expected %s for %s standard name for %s, got %s\n", expected, standard, name, tag);
        res = 0;
    }
    return res;
}
/*
 * Iterates over all registered converter standards (the list must end with
 * the empty string) and spot-checks IANA/MIME lookups via dotestname().
 */
static void TestStandardName()
{
    int res = 1;
    uint16_t i, count;
    UErrorCode err;
    /* Iterate over all standards. */
    for (i = 0, count = ucnv_countStandards(); i < count-1; ++i) {
        const char *standard;
        err = U_ZERO_ERROR;
        standard = ucnv_getStandard(i, &err);
        if (U_FAILURE(err)) {
            log_err("FAIL: ucnv_getStandard(%d), error=%s\n", i, u_errorName(err));
            res = 0;
        } else if (!standard || !*standard) {
            log_err("FAIL: %s standard name at index %d\n", (standard ? "empty" :
                "null"), i);
            res = 0;
        }
    }
    err = U_ZERO_ERROR;
    /* "" must be last */
    if(!count) {
        log_data_err("No standards. You probably have no data.\n");
    } else if (*ucnv_getStandard((uint16_t)(count-1), &err) != 0) {
        /* Bug fix: the original adjacent "" literals concatenated to nothing,
         * so the message printed "should return \n". Escape the quotes so the
         * expected empty-string standard is actually visible. */
        log_err("FAIL: ucnv_getStandard(%d) should return \"\"\n", count-1);
        res = 0;
    }
    err = U_ZERO_ERROR;
    /* One past the last index must yield NULL. */
    if (ucnv_getStandard(++i, &err)) {
        log_err("FAIL: ucnv_getStandard(%d) should return NULL\n", i);
        res = 0;
    }
    if (res) {
        log_verbose("PASS: iterating over standard names works\n");
    }
    /* Test for some expected results. */
    if (dotestname("ibm-1208", "MIME", "UTF-8") &&
        /*dotestname("cp1252", "MIME", "windows-1252") &&*/
        dotestname("ascii", "MIME", "US-ASCII") &&
        dotestname("csiso2022jp2", "MIME", "ISO-2022-JP-2") &&
        dotestname("Iso20-22__cN", "IANA", "ISO-2022-CN") &&
        dotestname("ascii", "IANA", "ANSI_X3.4-1968") &&
        dotestname("cp850", "IANA", "IBM850") &&
        dotestname("crazy", "MIME", NULL) &&
        dotestname("ASCII", "crazy", NULL) &&
        dotestname("LMBCS-1", "MIME", NULL))
    {
        log_verbose("PASS: getting IANA and MIME standard names works\n");
    }
}
/*
 * Checks ucnv_getCanonicalName(name, standard) against an expected canonical
 * converter name; expected == NULL means no mapping should exist.
 * Returns 1 on success, 0 on failure (failures are logged).
 */
static int dotestconv(const char *name, const char *standard, const char *expected) {
    UErrorCode status = U_ZERO_ERROR;
    const char *canonical = ucnv_getCanonicalName(name, standard, &status);

    if (canonical && !expected) {
        log_err("FAIL: Unexpectedly found %s canonical name for %s, got %s\n", standard, name, canonical);
        return 0;
    }
    if (!canonical && expected) {
        log_err_status(status, "FAIL: could not find %s canonical name for %s\n", (standard ? "\"\"" : standard), name);
        return 0;
    }
    if (expected && (name == canonical || uprv_strcmp(expected, canonical) != 0)) {
        log_err("FAIL: expected %s for %s canonical name for %s, got %s\n", expected, standard, name, canonical);
        return 0;
    }
    log_verbose("PASS: (\"%s\", \"%s\") -> %s == %s \n", name, standard, canonical, expected);
    return 1;
}
/*
 * Spot-checks ucnv_getCanonicalName() via dotestconv(): default names,
 * non-default names, absent mappings and ambiguous aliases whose resolution
 * depends on the requested standard.
 */
static void TestCanonicalName()
{
    /* Test for some expected results. */
    if (dotestconv("UTF-8", "IANA", "UTF-8") && /* default name */
        dotestconv("UTF-8", "MIME", "UTF-8") && /* default name */
        dotestconv("ibm-1208", "IBM", "UTF-8") && /* default name */
        dotestconv("ibm-5305", "IBM", "UTF-8") && /* non-default name */
        dotestconv("ibm-5305", "MIME", NULL) && /* mapping does not exist */
        dotestconv("ascii", "MIME", NULL) && /* mapping does not exist */
        dotestconv("ibm-1208", "IANA", NULL) && /* mapping does not exist */
        dotestconv("ibm-5305", "IANA", NULL) && /* mapping does not exist */
        dotestconv("cp1208", "", "UTF-8") && /* default name due to ordering */
        dotestconv("UTF16_BigEndian", "", "UTF-16BE") && /* non-default name due to ordering */
        dotestconv("ISO-2022-CN", "IANA", "ISO_2022,locale=zh,version=0") &&/* default name */
        dotestconv("Shift_JIS", "MIME", "ibm-943_P15A-2003") &&/* ambiguous alias */
        dotestconv("Shift_JIS", "", "ibm-943_P130-1999") &&/* ambiguous alias */
        dotestconv("ibm-943", "", "ibm-943_P15A-2003") &&/* ambiguous alias */
        dotestconv("ibm-943", "IBM", "ibm-943_P130-1999") &&/* ambiguous alias */
        dotestconv("ibm-1363", "", "ibm-1363_P11B-1998") &&/* ambiguous alias */
        dotestconv("ibm-1363", "IBM", "ibm-1363_P110-1997") &&/* ambiguous alias */
        dotestconv("crazy", "MIME", NULL) &&
        dotestconv("ASCII", "crazy", NULL))
    {
        log_verbose("PASS: getting IANA and MIME canonical names works\n");
    }
}
/*
 * Verifies that ucnv_openStandardNames(name, standard) enumerates exactly
 * expected[0..size-1], in order; size < 0 means the open is expected to fail.
 * The enumeration is walked three times with uenum_reset() in between to
 * exercise reset behavior. Returns 1 on success, 0 on early failure.
 */
static UBool doTestNames(const char *name, const char *standard, const char **expected, int32_t size) {
    UErrorCode err = U_ZERO_ERROR;
    UEnumeration *myEnum = ucnv_openStandardNames(name, standard, &err);
    const char *enumName, *testName;
    /* uenum_count tolerates a NULL enumeration here because err is already a
       failure code when the open failed. */
    int32_t enumCount = uenum_count(myEnum, &err);
    int32_t idx, len, repeatTimes = 3;
    if (err == U_FILE_ACCESS_ERROR) {
        log_data_err("Unable to open standard names for %s of standard: %s\n", name, standard);
        return 0;
    }
    if (size != enumCount) {
        log_err("FAIL: different size arrays for %s. Got %d. Expected %d\n", name, enumCount, size);
        return 0;
    }
    if (size < 0 && myEnum) {
        /* Typo fix in message: "recieved" -> "received". */
        log_err("FAIL: size < 0, but received an actual object\n");
        return 0;
    }
    log_verbose("\n%s %s\n", name, standard);
    while (repeatTimes-- > 0) {
        for (idx = 0; idx < enumCount; idx++) {
            enumName = uenum_next(myEnum, &len, &err);
            testName = expected[idx];
            if (uprv_strcmp(enumName, testName) != 0 || U_FAILURE(err)
                || len != (int32_t)uprv_strlen(expected[idx]))
            {
                log_err("FAIL: uenum_next(%d) == \"%s\". expected \"%s\", len=%d, error=%s\n",
                    idx, enumName, testName, len, u_errorName(err));
            }
            log_verbose("%s\n", enumName);
            err = U_ZERO_ERROR;
        }
        if (enumCount >= 0) {
            /* one past the list of all names must return NULL */
            enumName = uenum_next(myEnum, &len, &err);
            if (enumName != NULL || len != 0 || U_FAILURE(err)) {
                log_err("FAIL: uenum_next(past the list) did not return NULL[0] with U_SUCCESS(). name=%s standard=%s len=%d err=%s\n", name, standard, len, u_errorName(err));
            }
        }
        log_verbose("\n reset\n");
        uenum_reset(myEnum, &err);
        if (U_FAILURE(err)) {
            log_err("FAIL: uenum_reset() for %s{%s} failed with %s\n",
                name, standard, u_errorName(err));
            err = U_ZERO_ERROR;
        }
    }
    uenum_close(myEnum);
    return 1;
}
/*
 * UChar-based counterpart of doTestNames(): walks the same enumeration with
 * uenum_unext() and compares against expected[] converted to UChar.
 * size < 0 means the open is expected to fail. Returns 1 on success.
 */
static UBool doTestUCharNames(const char *name, const char *standard, const char **expected, int32_t size) {
    UErrorCode err = U_ZERO_ERROR;
    UEnumeration *myEnum = ucnv_openStandardNames(name, standard, &err);
    int32_t enumCount = uenum_count(myEnum, &err);
    int32_t idx, repeatTimes = 3;
    if (err == U_FILE_ACCESS_ERROR) {
        log_data_err("Unable to open standard names for %s of standard: %s\n", name, standard);
        return 0;
    }
    if (size != enumCount) {
        log_err("FAIL: different size arrays. Got %d. Expected %d\n", enumCount, size);
        return 0;
    }
    if (size < 0 && myEnum) {
        /* Typo fix in message: "recieved" -> "received". */
        log_err("FAIL: size < 0, but received an actual object\n");
        return 0;
    }
    log_verbose("\n%s %s\n", name, standard);
    while (repeatTimes-- > 0) {
        for (idx = 0; idx < enumCount; idx++) {
            UChar testName[256];
            int32_t len;
            const UChar *enumName = uenum_unext(myEnum, &len, &err);
            u_uastrncpy(testName, expected[idx], UPRV_LENGTHOF(testName));
            if (u_strcmp(enumName, testName) != 0 || U_FAILURE(err)
                || len != (int32_t)uprv_strlen(expected[idx]))
            {
                log_err("FAIL: uenum_next(%d) == \"%s\". expected \"%s\", len=%d, error=%s\n",
                    idx, enumName, testName, len, u_errorName(err));
            }
            log_verbose("%s\n", expected[idx]);
            err = U_ZERO_ERROR;
        }
        log_verbose("\n reset\n");
        uenum_reset(myEnum, &err);
        if (U_FAILURE(err)) {
            log_err("FAIL: uenum_reset() for %s{%s} failed with %s\n",
                name, standard, u_errorName(err));
            err = U_ZERO_ERROR;
        }
    }
    uenum_close(myEnum);
    return 1;
}
/*
 * Drives doTestNames()/doTestUCharNames() with known alias lists for ASCII
 * and ISO-2022-KR, plus negative cases (bad standard, bad converter, and a
 * converter with no MIME names). The arrays list the exact expected
 * enumeration order.
 */
static void TestStandardNames()
{
    static const char *asciiIANA[] = {
        "ANSI_X3.4-1968",
        "US-ASCII",
        "ASCII",
        "ANSI_X3.4-1986",
        "ISO_646.irv:1991",
        "ISO646-US",
        "us",
        "csASCII",
        "iso-ir-6",
        "cp367",
        "IBM367",
    };
    static const char *asciiMIME[] = {
        "US-ASCII"
    };
    static const char *iso2022MIME[] = {
        "ISO-2022-KR",
    };
    doTestNames("ASCII", "IANA", asciiIANA, UPRV_LENGTHOF(asciiIANA));
    doTestNames("US-ASCII", "IANA", asciiIANA, UPRV_LENGTHOF(asciiIANA));
    doTestNames("ASCII", "MIME", asciiMIME, UPRV_LENGTHOF(asciiMIME));
    doTestNames("ascii", "mime", asciiMIME, UPRV_LENGTHOF(asciiMIME));
    /* Negative cases: size -1 means the open itself must fail. */
    doTestNames("ASCII", "crazy", asciiMIME, -1);
    doTestNames("crazy", "MIME", asciiMIME, -1);
    /* LMBCS-1 exists but has no MIME names: expect an empty enumeration. */
    doTestNames("LMBCS-1", "MIME", asciiMIME, 0);
    doTestNames("ISO_2022,locale=ko,version=0", "MIME", iso2022MIME, UPRV_LENGTHOF(iso2022MIME));
    doTestNames("csiso2022kr", "MIME", iso2022MIME, UPRV_LENGTHOF(iso2022MIME));
    log_verbose(" Testing unext()\n");
    doTestUCharNames("ASCII", "IANA", asciiIANA, UPRV_LENGTHOF(asciiIANA));
}
| 5,193 |
369 | <filename>products/BellHybrid/apps/application-bell-onboarding/windows/OnBoardingInstructionPromptWindow.hpp
// Copyright (c) 2017-2021, Mudita Sp. z o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include <common/windows/BellFinishedWindow.hpp>
namespace gui
{
    namespace OnBoarding::Information
    {
        // Layout constants for the on-boarding instruction prompt (pixels).
        inline constexpr auto icon_top_margin = 40U;
        inline constexpr auto text_w = 380U;
    } // namespace OnBoarding::Information
    // Prompt window shown during on-boarding; specializes the generic
    // BellFinishedWindow with its own input handling and interface build.
    // NOTE(review): presumably buildInterface() applies the constants above —
    // confirm in the corresponding .cpp.
    class OnBoardingInstructionPromptWindow : public BellFinishedWindow
    {
      public:
        OnBoardingInstructionPromptWindow(app::ApplicationCommon *app, const std::string &name);
        bool onInput(const InputEvent &inputEvent) override;
        void buildInterface() override;
    };
} // namespace gui
| 297 |
5,133 | /*
* Copyright MapStruct Authors.
*
* Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package org.mapstruct.ap.test.bugs._537;
import org.mapstruct.MapperConfig;
/**
 * Shared MapStruct configuration for the issue #537 regression tests.
 *
 * <p>Declares {@code ReferenceMapper} in {@code uses} so mappers adopting this
 * config inherit it for delegated mappings.
 *
 * @author <NAME>
 */
@MapperConfig(uses = ReferenceMapper.class)
public interface Issue537MapperConfig {
}
| 110 |
# Re-export the bridge class at package level.
# NOTE(review): implicit relative import (Python 2 style). Under Python 3 this
# resolves to a *top-level* module named "server"; if this file lives inside a
# package, it should read "from .server import WSSHBridge" — confirm.
from server import WSSHBridge

# Package version string.
__version__ = '0.1.0'
| 21 |
1,209 | <gh_stars>1000+
/*
u8g_com_linux_ssd_i2c.c
com interface for linux i2c-dev and the SSDxxxx chip (SOLOMON) variant
I2C protocol
Universal 8bit Graphics Library
Copyright (c) 2012, <EMAIL>
Copyright (c) 2015, <EMAIL>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "u8g.h"
#if defined(U8G_LINUX)
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/i2c-dev.h>
#define I2C_SLA 0x3c
#define I2C_CMD_MODE 0x80
#define I2C_DATA_MODE 0x40
#define MAX_PACKET 64
#ifndef U8G_WITH_PINLIST
#error U8G_WITH_PINLIST is mandatory for this driver
#endif
/* Records whether subsequent bursts are command (true) or data (false) bytes.
 * The flag is stashed in the pin list slot normally used for the A0/DC line. */
static void set_cmd_mode(u8g_t *u8g, bool cmd_mode)
{
  u8g->pin_list[U8G_PI_A0_STATE] = cmd_mode;
}
/* Returns the command/data mode previously stored by set_cmd_mode(). */
static bool get_cmd_mode(u8g_t *u8g)
{
  return u8g->pin_list[U8G_PI_A0_STATE];
}
/*
 * Writes one burst to the display over i2c-dev.
 * In command mode each payload byte is prefixed with I2C_CMD_MODE (so the
 * on-wire burst is twice buflen); in data mode a single I2C_DATA_MODE byte
 * precedes the payload. Returns nonzero iff the whole packet was written.
 *
 * NOTE(review): i2cbuf holds 2*MAX_PACKET bytes, so buflen must not exceed
 * MAX_PACKET — the WRITE_SEQ handler in u8g_com_linux_ssd_i2c_fn() chunks
 * accordingly; no other caller may pass more.
 */
static uint8_t send_data_burst(u8g_t *u8g, int fd, uint8_t *buf, size_t buflen)
{
  uint8_t i2cbuf[2*MAX_PACKET];
  uint8_t i2clen;
  int res;
  /* ignore bursts when there is no file open */
  if (fd < 0)
    return 0;
  if (get_cmd_mode(u8g)) {
    /* Command mode: interleave a control byte before every payload byte. */
    i2clen = 0;
    while (buflen > 0) {
      i2cbuf[i2clen++] = I2C_CMD_MODE;
      i2cbuf[i2clen++] = *buf++;
      buflen--;
    }
  } else {
    /* Data mode: one control byte, then the raw payload. */
    i2cbuf[0] = I2C_DATA_MODE;
    memcpy(i2cbuf+1, buf, buflen);
    i2clen = buflen + 1;
  }
  res = write(fd, i2cbuf, i2clen);
  if (res < 0)
    fprintf(stderr, "I2C write failed (%s)\n", strerror(errno));
  else if (res != i2clen)
    fprintf(stderr, "Incomplete I2C write (%d of %d packet)\n", res, i2clen);
  return res == i2clen;
}
/*
 * u8g communication callback for SSD-type controllers behind Linux i2c-dev.
 * INIT opens /dev/i2c-<N> (N from U8G_PI_I2C_OPTION) and binds the slave
 * address; WRITE_* messages forward bytes via send_data_burst(), chunked to
 * MAX_PACKET; ADDRESS toggles command/data mode. Returns 1 on success,
 * 0 when INIT fails.
 */
uint8_t u8g_com_linux_ssd_i2c_fn(u8g_t *u8g, uint8_t msg, uint8_t arg_val, void *arg_ptr)
{
  static int fd = -1;
  char dev[24];
  switch(msg)
  {
    case U8G_COM_MSG_INIT:
      /* snprintf guards against any future growth of the device-name format. */
      snprintf(dev, sizeof(dev), "/dev/i2c-%d", u8g->pin_list[U8G_PI_I2C_OPTION]);
      fd = open(dev, O_RDWR);
      if (fd < 0) {
        fprintf(stderr, "cannot open %s (%s)\n", dev, strerror(errno));
        return 0;
      }
      if (ioctl(fd, I2C_SLAVE, I2C_SLA) < 0) {
        fprintf(stderr, "cannot set slave address (%s)\n", strerror(errno));
        /* Bug fix: don't leak the descriptor on a half-finished init; an
         * invalid fd also keeps later bursts ignored (fd < 0 check). */
        close(fd);
        fd = -1;
        return 0;
      }
      break;
    case U8G_COM_MSG_STOP:
      /* ignored - i2c-dev will automatically stop between writes */
      break;
    case U8G_COM_MSG_RESET:
      /* ignored - no obvious means to reset an SSD via I2C */
      break;
    case U8G_COM_MSG_CHIP_SELECT:
      set_cmd_mode(u8g, true);
      break;
    case U8G_COM_MSG_WRITE_BYTE:
      send_data_burst(u8g, fd, &arg_val, 1);
      break;
    case U8G_COM_MSG_WRITE_SEQ:
    case U8G_COM_MSG_WRITE_SEQ_P: /* no progmem in Linux */
    {
      /* Use a typed pointer: arithmetic on void* is a GNU extension. */
      uint8_t *p = (uint8_t *)arg_ptr;
      while (arg_val > MAX_PACKET) {
        send_data_burst(u8g, fd, p, MAX_PACKET);
        p += MAX_PACKET;
        arg_val -= MAX_PACKET;
      }
      send_data_burst(u8g, fd, p, arg_val);
      break;
    }
    case U8G_COM_MSG_ADDRESS:
      /* choose cmd (arg_val = 0) or data mode (arg_val = 1) */
      set_cmd_mode(u8g, !arg_val);
      break;
  }
  return 1;
}
#endif /* U8G_LINUX */
| 1,912 |
1,963 | <filename>variants/STM32U5xx/U575Z(G-I)TxQ_U585ZETxQ/variant_NUCLEO_U575ZI_Q.cpp
/*
*******************************************************************************
* Copyright (c) 2021, STMicroelectronics
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*******************************************************************************
*/
#if defined(ARDUINO_NUCLEO_U575ZI_Q)
#include "pins_arduino.h"
// Digital PinName array
// Digital pin map: index = Arduino digital pin number Dn, value = STM32 port
// pin. NOTE(review): PA_4 (D20/D24), PB_4 (D21/D25), PA_2 (D26/D74), PB_0
// (D29/D76), PB_10 (D27/D36), PE_12 (D30/D39), PE_14 (D31/D38) and PE_15
// (D28/D37) each appear twice — this looks like the usual Zio/Arduino
// connector aliasing on Nucleo boards; confirm against the NUCLEO-U575ZI-Q
// schematic before relying on the duplicates.
const PinName digitalPin[] = {
  PG_8, // D0
  PG_7, // D1
  PF_15, // D2/A9
  PE_13, // D3
  PF_14, // D4/A10
  PE_11, // D5
  PE_9, // D6
  PF_13, // D7
  PF_12, // D8
  PD_15, // D9
  PD_14, // D10
  PA_7, // D11/A11
  PA_6, // D12/A12
  PA_5, // D13/A13
  PB_9, // D14
  PB_8, // D15
  PC_6, // D16
  PD_11, // D17/A14
  PB_13, // D18
  PD_12, // D19/A15
  PA_4, // D20/A16
  PB_4, // D21
  PB_5, // D22
  PB_3, // D23
  PA_4, // D24
  PB_4, // D25
  PA_2, // D26
  PB_10, // D27
  PE_15, // D28
  PB_0, // D29
  PE_12, // D30
  PE_14, // D31
  PA_0, // D32/A17
  PA_8, // D33
  PE_0, // D34
  PB_11, // D35
  PB_10, // D36
  PE_15, // D37
  PE_14, // D38
  PE_12, // D39
  PE_10, // D40
  PE_7, // D41
  PE_8, // D42
  PC_8, // D43
  PC_9, // D44
  PC_10, // D45
  PC_11, // D46
  PC_12, // D47
  PD_2, // D48
  PF_3, // D49
  PF_5, // D50
  PD_7, // D51
  PD_6, // D52
  PD_5, // D53
  PD_4, // D54
  PD_3, // D55
  PE_2, // D56
  PE_4, // D57
  PE_5, // D58
  PE_6, // D59
  PE_3, // D60
  PF_8, // D61
  PF_7, // D62
  PF_9, // D63
  PG_1, // D64/A18
  PG_0, // D65/A19
  PD_1, // D66
  PD_0, // D67
  PF_0, // D68
  PF_1, // D69
  PF_2, // D70
  PB_6, // D71
  PB_2, // D72/A20
  PA_3, // D73/A0
  PA_2, // D74/A1
  PC_3, // D75/A2
  PB_0, // D76/A3
  PC_1, // D77/A4
  PC_0, // D78/A5
  PB_1, // D79/A6
  PC_2, // D80/A7
  PA_1, // D81/A8
  PA_9, // D82
  PA_10, // D83
  PA_11, // D84
  PA_12, // D85
  PA_13, // D86
  PA_14, // D87
  PA_15, // D88
  PB_7, // D89
  PB_14, // D90
  PB_15, // D91
  PC_7, // D92
  PC_13, // D93
  PC_14, // D94
  PC_15, // D95
  PD_8, // D96
  PD_9, // D97
  PD_10, // D98
  PD_13, // D99/A21
  PE_1, // D100
  PF_4, // D101
  PF_6, // D102
  PF_10, // D103
  PF_11, // D104
  PG_2, // D105
  PG_3, // D106
  PG_4, // D107
  PG_5, // D108
  PG_6, // D109
  PG_9, // D110
  PG_10, // D111
  PG_12, // D112
  PG_13, // D113
  PG_14, // D114
  PG_15, // D115
  PH_0, // D116
  PH_1, // D117
  PH_3 // D118
};
// Analog (Ax) pin number array
// Analog channel map: index = Ax number, value = Arduino digital pin number
// (i.e. an index into digitalPin[]); the trailing comment names the port pin.
const uint32_t analogInputPin[] = {
  73, // A0, PA3
  74, // A1, PA2
  75, // A2, PC3
  76, // A3, PB0
  77, // A4, PC1
  78, // A5, PC0
  79, // A6, PB1
  80, // A7, PC2
  81, // A8, PA1
  2,  // A9, PF15
  4,  // A10, PF14
  11, // A11, PA7
  12, // A12, PA6
  13, // A13, PA5
  17, // A14, PD11
  19, // A15, PD12
  20, // A16, PA4
  32, // A17, PA0
  64, // A18, PG1
  65, // A19, PG0
  72, // A20, PB2
  99  // A21, PD13
};
// ----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
/** System Clock Configuration
*/
WEAK void SystemClock_Config(void)
{
  RCC_OscInitTypeDef RCC_OscInitStruct = {};
  RCC_ClkInitTypeDef RCC_ClkInitStruct = {};
  RCC_PeriphCLKInitTypeDef PeriphClkInit = {};
  RCC_CRSInitTypeDef RCC_CRSInitStruct = {};
  /** Configure the main internal regulator output voltage
      (scale 1 = highest-performance voltage range, required before raising
      the system clock)
  */
  if (HAL_PWREx_ControlVoltageScaling(PWR_REGULATOR_VOLTAGE_SCALE1) != HAL_OK) {
    Error_Handler();
  }
  /** Configure LSE Drive Capability
      (backup-domain access must be enabled before touching LSE settings)
  */
  HAL_PWR_EnableBkUpAccess();
  __HAL_RCC_LSEDRIVE_CONFIG(RCC_LSEDRIVE_LOW);
  /** Initializes the CPU, AHB and APB busses clocks.
      Oscillators enabled: HSI48 (USB), HSI (ADC/DAC kernel clock), LSE (DAC),
      and MSI, which feeds PLL1.
      NOTE(review): with MSI range 0 the PLL math is MSI / PLLM * PLLN / PLLR
      = 48 MHz / 3 * 10 / 1 = 160 MHz SYSCLK on STM32U5 — confirm against the
      reference manual for this MSI range.
  */
  RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI48 | RCC_OSCILLATORTYPE_HSI
                                     | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_MSI;
  RCC_OscInitStruct.LSEState = RCC_LSE_ON;
  RCC_OscInitStruct.HSIState = RCC_HSI_ON;
  RCC_OscInitStruct.HSI48State = RCC_HSI48_ON;
  RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT;
  RCC_OscInitStruct.MSIState = RCC_MSI_ON;
  RCC_OscInitStruct.MSICalibrationValue = RCC_MSICALIBRATION_DEFAULT;
  RCC_OscInitStruct.MSIClockRange = RCC_MSIRANGE_0;
  RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON;
  RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_MSI;
  RCC_OscInitStruct.PLL.PLLMBOOST = RCC_PLLMBOOST_DIV4;
  RCC_OscInitStruct.PLL.PLLM = 3;
  RCC_OscInitStruct.PLL.PLLN = 10;
  RCC_OscInitStruct.PLL.PLLP = 2;
  RCC_OscInitStruct.PLL.PLLQ = 2;
  RCC_OscInitStruct.PLL.PLLR = 1;
  RCC_OscInitStruct.PLL.PLLRGE = RCC_PLLVCIRANGE_1;
  RCC_OscInitStruct.PLL.PLLFRACN = 0;
  if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) {
    Error_Handler();
  }
  /** Initializes the CPU, AHB and APB busses clocks:
      SYSCLK from PLL1, all bus dividers at /1, flash wait states = 4.
  */
  RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK
                                | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2
                                | RCC_CLOCKTYPE_PCLK3;
  RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
  RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
  RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1;
  RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1;
  RCC_ClkInitStruct.APB3CLKDivider = RCC_HCLK_DIV1;
  if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_4) != HAL_OK) {
    Error_Handler();
  }
  /* Peripheral kernel clocks: ADC/DAC from HSI, DAC1 sample-and-hold from
   * LSE, 48 MHz domain (USB) from HSI48. */
  PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_ADCDAC | RCC_PERIPHCLK_DAC1
                                       | RCC_PERIPHCLK_CLK48;
  PeriphClkInit.AdcDacClockSelection = RCC_ADCDACCLKSOURCE_HSI;
  PeriphClkInit.Dac1ClockSelection = RCC_DAC1CLKSOURCE_LSE;
  PeriphClkInit.Clk48ClockSelection = RCC_CLK48CLKSOURCE_HSI48;
  if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK) {
    Error_Handler();
  }
  /** Enable the SYSCFG APB clock
  */
  __HAL_RCC_CRS_CLK_ENABLE();
  /** Configures CRS: trims HSI48 against the USB SOF signal so the 48 MHz
      clock stays accurate enough for USB without a crystal.
  */
  RCC_CRSInitStruct.Prescaler = RCC_CRS_SYNC_DIV1;
  RCC_CRSInitStruct.Source = RCC_CRS_SYNC_SOURCE_USB;
  RCC_CRSInitStruct.Polarity = RCC_CRS_SYNC_POLARITY_RISING;
  RCC_CRSInitStruct.ReloadValue = __HAL_RCC_CRS_RELOADVALUE_CALCULATE(48000000, 1000);
  RCC_CRSInitStruct.ErrorLimitValue = 34;
  RCC_CRSInitStruct.HSI48CalibrationValue = 32;
  HAL_RCCEx_CRSConfig(&RCC_CRSInitStruct);
}
#ifdef __cplusplus
}
#endif
#endif /* ARDUINO_NUCLEO_U575ZI_Q */
| 3,320 |
700 | /*
File: HelperTool.h
Abstract: The main object in the helper tool.
Version: 1.0
Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple
Inc. ("Apple") in consideration of your agreement to the following
terms, and your use, installation, modification or redistribution of
this Apple software constitutes acceptance of these terms. If you do
not agree with these terms, please do not use, install, modify or
redistribute this Apple software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, Apple grants you a personal, non-exclusive
license, under Apple's copyrights in this original Apple software (the
"Apple Software"), to use, reproduce, modify and redistribute the Apple
Software, with or without modifications, in source and/or binary forms;
provided that if you redistribute the Apple Software in its entirety and
without modifications, you must retain this notice and the following
text and disclaimers in all such redistributions of the Apple Software.
Neither the name, trademarks, service marks or logos of Apple Inc. may
be used to endorse or promote products derived from the Apple Software
without specific prior written permission from Apple. Except as
expressly stated in this notice, no other rights or licenses, express or
implied, are granted by Apple herein, including but not limited to any
patent rights that may be infringed by your derivative works or by other
works in which the Apple Software may be incorporated.
The Apple Software is provided by Apple on an "AS IS" basis. APPLE
MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Copyright (C) 2013 Apple Inc. All Rights Reserved.
*/
#import <Foundation/Foundation.h>
// kHelperToolMachServiceName is the Mach service name of the helper tool. Note that the value
// here has to match the value in the MachServices dictionary in "HelperTool-Launchd.plist".
#define kHelperToolMachServiceName @"com.example.apple-samplecode.EBAS.HelperTool"
// HelperToolProtocol is the NSXPCConnection-based protocol implemented by the helper tool
// and called by the app.
@protocol HelperToolProtocol

@required

// Not used by the standard app (it's part of the sandboxed XPC service support).
- (void)connectWithEndpointReply:(void(^)(NSXPCListenerEndpoint * endpoint))reply;

// This command simply returns the version number of the tool.  It's a good idea to include a
// command like this so you can handle app upgrades cleanly.
- (void)getVersionWithReply:(void(^)(NSString * version))reply;

// The next two commands imagine an app that needs to store a license key in some global location
// that's not writable by all users; thus, setting the license key requires elevated privileges.
// To manage this there's a 'read' command--which by default can be used by everyone--to return
// the key and a 'write' command--which requires admin authentication--to set the key.

// Reads the current license key.  authData must be an AuthorizationExternalForm embedded
// in an NSData.
- (void)readLicenseKeyAuthorization:(NSData *)authData withReply:(void(^)(NSError * error, NSString * licenseKey))reply;

// Writes a new license key.  licenseKey is the new license key string.  authData must be
// an AuthorizationExternalForm embedded in an NSData.
- (void)writeLicenseKey:(NSString *)licenseKey authorization:(NSData *)authData withReply:(void(^)(NSError * error))reply;

// This command imagines an app that contains an embedded web server.  A web server has to
// bind to port 80, which is a privileged operation.  This command lets the app request that
// the privileged helper tool create sockets bound to port 80 and then pass them back to the
// app, thereby minimising the amount of code that has to run with elevated privileges.
// authData must be an AuthorizationExternalForm embedded in an NSData and the sockets are
// returned wrapped up in NSFileHandles.
- (void)bindToLowNumberPortAuthorization:(NSData *)authData withReply:(void(^)(NSError * error, NSFileHandle * ipv4Handle, NSFileHandle * ipv6Handle))reply;

@end
// The following is the interface to the class that implements the helper tool.
// It's called by the helper tool's main() function, but not by the app directly.
@interface HelperTool : NSObject
// Initializes the helper tool object.
- (id)init;
// Starts the helper tool's service.  NOTE(review): behavior inferred from the
// declaration only — confirm against the implementation file.
- (void)run;
@end
| 1,377 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_forms.hxx"
#include "clipboarddispatcher.hxx"
#include <editeng/editview.hxx>
/** === begin UNO includes === **/
#include <com/sun/star/lang/DisposedException.hpp>
/** === end UNO includes === **/
#include <svtools/cliplistener.hxx>
#include <svtools/transfer.hxx>
//........................................................................
namespace frm
{
//........................................................................
using namespace ::com::sun::star::uno;
using namespace ::com::sun::star::frame;
using namespace ::com::sun::star::lang;
using namespace ::com::sun::star::util;
using namespace ::com::sun::star::beans;
//====================================================================
namespace
{
static URL createClipboardURL( OClipboardDispatcher::ClipboardFunc _eFunc )
{
URL aURL;
switch ( _eFunc )
{
case OClipboardDispatcher::eCut:
aURL.Complete = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( ".uno:Cut" ) );
break;
case OClipboardDispatcher::eCopy:
aURL.Complete = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( ".uno:Copy" ) );
break;
case OClipboardDispatcher::ePaste:
aURL.Complete = ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( ".uno:Paste" ) );
break;
}
return aURL;
}
}
//====================================================================
//= OClipboardDispatcher
//====================================================================
//--------------------------------------------------------------------
    /** ctor
        @param _rView   the EditView upon which this dispatcher operates
        @param _eFunc   the clipboard functionality (cut/copy/paste) which this
                        instance implements; also determines the feature URL
    */
    OClipboardDispatcher::OClipboardDispatcher( EditView& _rView, ClipboardFunc _eFunc )
        :ORichTextFeatureDispatcher( _rView, createClipboardURL( _eFunc ) )
        ,m_eFunc( _eFunc )
        ,m_bLastKnownEnabled( sal_True )
    {
    }
//--------------------------------------------------------------------
sal_Bool OClipboardDispatcher::implIsEnabled( ) const
{
sal_Bool bEnabled = sal_False;
switch ( m_eFunc )
{
case eCut:
bEnabled = !getEditView()->IsReadOnly() && getEditView()->HasSelection();
break;
case eCopy:
bEnabled = getEditView()->HasSelection();
break;
case ePaste:
bEnabled = !getEditView()->IsReadOnly();
break;
}
return bEnabled;
}
//--------------------------------------------------------------------
FeatureStateEvent OClipboardDispatcher::buildStatusEvent() const
{
FeatureStateEvent aEvent( ORichTextFeatureDispatcher::buildStatusEvent() );
aEvent.IsEnabled = implIsEnabled();
return aEvent;
}
//--------------------------------------------------------------------
void OClipboardDispatcher::invalidateFeatureState_Broadcast()
{
sal_Bool bEnabled = implIsEnabled();
if ( m_bLastKnownEnabled == bEnabled )
// nothing changed -> no notification
return;
m_bLastKnownEnabled = bEnabled;
ORichTextFeatureDispatcher::invalidateFeatureState_Broadcast();
}
//--------------------------------------------------------------------
void SAL_CALL OClipboardDispatcher::dispatch( const URL& /*_rURL*/, const Sequence< PropertyValue >& /*Arguments*/ ) throw (RuntimeException)
{
::osl::MutexGuard aGuard( m_aMutex );
if ( !getEditView() )
throw DisposedException();
switch ( m_eFunc )
{
case eCut:
getEditView()->Cut();
break;
case eCopy:
getEditView()->Copy();
break;
case ePaste:
getEditView()->Paste();
break;
}
}
//====================================================================
//= OPasteClipboardDispatcher
//====================================================================
//--------------------------------------------------------------------
    /** ctor: registers us as clipboard listener at the view's window, and
        determines the initial "paste possible" state from the current
        system clipboard content
    */
    OPasteClipboardDispatcher::OPasteClipboardDispatcher( EditView& _rView )
        :OClipboardDispatcher( _rView, ePaste )
        ,m_pClipListener( NULL )
        ,m_bPastePossible( sal_False )
    {
        // the listener is ref-counted: acquire it for the lifetime of this instance
        m_pClipListener = new TransferableClipboardListener( LINK( this, OPasteClipboardDispatcher, OnClipboardChanged ) );
        m_pClipListener->acquire();
        m_pClipListener->AddRemoveListener( _rView.GetWindow(), sal_True );
        // initial state: paste is possible if the clipboard carries plain text or RTF
        TransferableDataHelper aDataHelper( TransferableDataHelper::CreateFromSystemClipboard( _rView.GetWindow() ) );
        m_bPastePossible = ( aDataHelper.HasFormat( SOT_FORMAT_STRING ) || aDataHelper.HasFormat( SOT_FORMAT_RTF ) );
    }
//--------------------------------------------------------------------
    OPasteClipboardDispatcher::~OPasteClipboardDispatcher()
    {
        // ensure disposal happened; acquire() keeps our ref count alive while
        // dispose() runs from within the destructor
        if ( !isDisposed() )
        {
            acquire();
            dispose();
        }
    }
//--------------------------------------------------------------------
    /** link handler, called by the TransferableClipboardListener whenever the
        system clipboard content changes: re-evaluates whether paste is
        possible, and triggers a (conditional) state broadcast
    */
    IMPL_LINK( OPasteClipboardDispatcher, OnClipboardChanged, TransferableDataHelper*, _pDataHelper )
    {
        OSL_ENSURE( _pDataHelper, "OPasteClipboardDispatcher::OnClipboardChanged: ooops!" );
        m_bPastePossible = _pDataHelper->HasFormat( SOT_FORMAT_STRING )
                        || _pDataHelper->HasFormat( SOT_FORMAT_RTF );
        invalidate();
        return 0L;
    }
//--------------------------------------------------------------------
void OPasteClipboardDispatcher::disposing( ::osl::ClearableMutexGuard& _rClearBeforeNotify )
{
OSL_ENSURE( getEditView() && getEditView()->GetWindow(), "OPasteClipboardDispatcher::disposing: EditView should not (yet) be disfunctional here!" );
if ( getEditView() && getEditView()->GetWindow() && m_pClipListener )
m_pClipListener->AddRemoveListener( getEditView()->GetWindow(), sal_False );
m_pClipListener->release();
m_pClipListener = NULL;
OClipboardDispatcher::disposing( _rClearBeforeNotify );
}
//--------------------------------------------------------------------
sal_Bool OPasteClipboardDispatcher::implIsEnabled( ) const
{
return m_bPastePossible && OClipboardDispatcher::implIsEnabled();
}
//........................................................................
} // namespace frm
//........................................................................
| 2,690 |
2,962 | #!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import config
import thread_cert
from pktverify.packet_filter import PacketFilter
# Test description:
# The purpose of this test is to verify a MED will inform its previous parent when re-attaches to another parent.
#
# Initial Topology:
#
# LEADER ----- ROUTER
# |
# MED
#
# Node identifiers, used as keys into TOPOLOGY and self.nodes.
LEADER = 1
ROUTER = 2
MED = 3
# Initial MED child timeout (seconds): long enough that the MED would stay in
# its parent's child table during test setup without re-registering.
LONG_CHILD_TIMEOUT = 120
class TestReset(thread_cert.TestCase):
    """Verify that a MED informs its previous parent when it re-attaches to a new parent.

    Topology (initially): LEADER -- ROUTER, with MED attached to LEADER.
    The allowlists are then flipped so the MED can only reach ROUTER,
    forcing a re-attach.
    """
    SUPPORT_NCP = False
    USE_MESSAGE_FACTORY = False
    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'allowlist': [ROUTER, MED]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'allowlist': [LEADER]
        },
        MED: {
            'name': 'MED',
            'is_mtd': True,
            'mode': 'rn',
            'allowlist': [LEADER],
            'timeout': LONG_CHILD_TIMEOUT,
        },
    }
    def test(self):
        """Drive the simulation: bring up the network, then force MED to re-attach to ROUTER."""
        if 'posix' in os.getenv('OT_CLI_PATH', ''):
            self.skipTest("skip for posix tests")
        # Bring up the initial topology: LEADER, then ROUTER, then MED as LEADER's child.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER].start()
        self.simulator.go(7)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        self.nodes[MED].start()
        self.simulator.go(7)
        self.assertEqual(self.nodes[MED].get_state(), 'child')
        self.assertIsChildOf(MED, LEADER)
        # Flip the allowlists so MED can only communicate with ROUTER.
        self.nodes[LEADER].remove_allowlist(self.nodes[MED].get_addr64())
        self.nodes[MED].remove_allowlist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].add_allowlist(self.nodes[MED].get_addr64())
        self.nodes[MED].add_allowlist(self.nodes[ROUTER].get_addr64())
        # Shorten the child timeout and wait long enough for MED to re-attach.
        self.nodes[MED].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
        self.simulator.go(config.DEFAULT_CHILD_TIMEOUT * 2)
        self.assertIsChildOf(MED, ROUTER)
        # Verify MED is not in the LEADER's Child Table.
        med_extaddr = self.nodes[MED].get_addr64()
        self.assertFalse(any(info['extaddr'] == med_extaddr for info in self.nodes[LEADER].get_child_table().values()))
        self.collect_ipaddrs()
        self.collect_rlocs()
    def verify(self, pv):
        """Verify the captured packets: both attaches, plus the notification to the old parent."""
        pkts: PacketFilter = pv.pkts
        pv.summary.show()
        MED = pv.vars['MED']
        LEADER_RLOC = pv.vars['LEADER_RLOC']
        # MED should attach to LEADER first.
        pv.verify_attached('MED', 'LEADER', child_type='MTD')
        # MED should re-attach to ROUTER.
        pv.verify_attached('MED', 'ROUTER', child_type='MTD')
        # MED should send empty IPv6 message to inform previous parent (LEADER).
        # (lowpan.next == 0x3b is the IPv6 No Next Header value.)
        pkts.filter_wpan_src64(MED).filter('lowpan.dst == {LEADER_RLOC} and lowpan.next == 0x3b',
                                           LEADER_RLOC=LEADER_RLOC).must_next()
    def assertIsChildOf(self, childid, parentid):
        """Assert that node `childid` is currently a child of node `parentid`."""
        childRloc16 = self.nodes[childid].get_addr16()
        parentRloc16 = self.nodes[parentid].get_addr16()
        # Parent must be a router (low 10 bits of its RLOC16 are zero), and the
        # child's RLOC16 must carry the parent's router ID in its upper bits.
        self.assertEqual(parentRloc16 & 0xfc00, parentRloc16)
        self.assertEqual(childRloc16 & 0xfc00, parentRloc16)
        # The child's extended address must appear in the parent's child table.
        child_extaddr = self.nodes[childid].get_addr64()
        self.assertTrue(
            any(info['extaddr'] == child_extaddr for info in self.nodes[parentid].get_child_table().values()))
if __name__ == '__main__':
unittest.main()
| 2,112 |
483 | <filename>nitrite-replication/src/test/java/org/dizitart/no2/sync/crdt/LastWriteWinStateTest.java
package org.dizitart.no2.sync.crdt;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
/**
 * Unit test for {@link LastWriteWinState}.
 */
public class LastWriteWinStateTest {

    /**
     * A freshly constructed state must report empty changes and tombstones
     * through its {@code toString()} representation.
     */
    @Test
    public void testConstructor() {
        LastWriteWinState freshState = new LastWriteWinState();
        String expected = "LastWriteWinState(changes=[], tombstones={})";
        assertEquals(expected, freshState.toString());
    }
}
| 153 |
678 | /**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/OfficeImport.framework/OfficeImport
*/
#import <OfficeImport/OfficeImport-Structs.h>
#import <OfficeImport/WBTable.h>
#import <OfficeImport/XXUnknownSuperclass.h>
__attribute__((visibility("hidden")))
@interface WBTable : XXUnknownSuperclass {
}
// NOTE(review): this header is class-dump output; the comments below are
// inferred from the selector names only — confirm against the binary/implementation.
// Reads table content from `from` into `table`, consuming `runs`.
+ (void)readFrom:(id)from textRuns:(id)runs table:(id)table; // 0x13ee55
// Attempts to read a single table row; returns whether a row was read.
+ (BOOL)tryToReadRowFrom:(id)from textRuns:(id)runs to:(id)to; // 0x14126d
@end
// Private helpers (class-dump output; semantics inferred from names — verify
// against the implementation).
@interface WBTable (Private)
+ (void)initPropertiesFrom:(id)from to:(id)to in:(id)anIn; // 0x13f095
+ (void)readRowFrom:(id)from textRuns:(id)runs to:(id)to; // 0x13f845
+ (BOOL)isTableFloating:(const WrdTableProperties *)floating tracked:(const WrdTableProperties *)tracked; // 0x1417c1
@end
| 296 |
407 | <filename>src/plugin_system/plugin_interface_base.cpp
#include "hal_core/plugin_system/plugin_interface_base.h"
#include "hal_core/plugin_system/plugin_interface_cli.h"
#include "hal_core/plugin_system/plugin_interface_gui.h"
#include "hal_core/plugin_system/plugin_interface_ui.h"
#include "hal_core/utilities/log.h"
namespace hal
{
    // Default (no-op) lifecycle hooks; concrete plugins override as needed.
    void BasePluginInterface::initialize()
    {
    }

    void BasePluginInterface::on_load()
    {
    }

    void BasePluginInterface::on_unload()
    {
    }

    // By default a plugin depends on no other plugins.
    std::set<std::string> BasePluginInterface::get_dependencies() const
    {
        return {};
    }

    // Registers a log channel named after the plugin, writing to stdout, a
    // file, and the GUI sink, with "info" as the default level.
    void BasePluginInterface::initialize_logging()
    {
        LogManager& l = LogManager::get_instance();
        l.add_channel(get_name(), {LogManager::create_stdout_sink(), LogManager::create_file_sink(), LogManager::create_gui_sink()}, "info");
    }

    // Returns true if this plugin implements the given interface type, using
    // dynamic_cast to probe the optional interfaces.
    // Fix: removed the unreachable `return false;` that followed the return
    // statement below.
    bool BasePluginInterface::has_type(PluginInterfaceType t) const
    {
        return (t == PluginInterfaceType::base) || (t == PluginInterfaceType::cli && dynamic_cast<const CLIPluginInterface*>(this)) || (t == PluginInterfaceType::interactive_ui && dynamic_cast<const UIPluginInterface*>(this))
               || (t == PluginInterfaceType::gui && dynamic_cast<const GUIPluginInterface*>(this));
    }
}    // namespace hal
| 481 |
795 | <filename>tests/test_di_nocopy.c
/* Copyright (c) 2017 - 2021 LiteSpeed Technologies Inc. See LICENSE. */
/*
* Test the "nocopy" data in stream
*/
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#ifdef WIN32
#include "getopt.h"
#else
#include <unistd.h>
#endif
#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sfcw.h"
#include "lsquic_rtt.h"
#include "lsquic_conn_flow.h"
#include "lsquic_varint.h"
#include "lsquic_hq.h"
#include "lsquic_hash.h"
#include "lsquic_stream.h"
#include "lsquic_conn.h"
#include "lsquic_conn_public.h"
#include "lsquic_malo.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_mm.h"
#include "lsquic_logger.h"
#include "lsquic_data_in_if.h"
/* One table-driven test case for the "nocopy" data-in module: insert the
 * initial frames, read the stream up to `read_until`, then insert
 * `data_frame` and check the returned insert result against `ins`.
 */
struct nocopy_test
{
    int lineno;     /* __LINE__ of the entry, for diagnostics */
    /* Setup: initial set of frames to insert and read until some offset */
    unsigned n_init_frames;
    struct data_frame initial_frames[5];
    unsigned read_until;
    /* Test: data frame to insert and expected insert result */
    struct data_frame data_frame;
    enum ins_frame ins;
};
/* F(offset, size, fin): shorthand initializer for a struct data_frame */
#define F(off, size, fin) { .df_offset = (off), .df_fin = (fin), .df_size = (size), }
static const struct nocopy_test tests[] =
{
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 300, 0), },
.read_until = 300,
.data_frame = F(200, 100, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 300, 0), F(300, 100, 0), },
.read_until = 300,
.data_frame = F(200, 100, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 300, 0), F(300, 0, 1), },
.read_until = 300,
.data_frame = F(200, 100, 1),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 301, 0), },
.read_until = 301,
.data_frame = F(200, 100, 1),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 400, 0), },
.read_until = 301,
.data_frame = F(200, 100, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(200, 100, 1), },
.read_until = 0,
.data_frame = F(200, 50, 1),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(200, 100, 1), },
.read_until = 0,
.data_frame = F(200, 150, 1),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(200, 100, 1), },
.read_until = 0,
.data_frame = F(200, 101, 0),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(200, 100, 1), },
.read_until = 0,
.data_frame = F(500, 1, 0),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 0), },
.read_until = 100,
.data_frame = F(0, 100, 1),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 1), },
.read_until = 100,
.data_frame = F(0, 100, 1),
.ins = INS_FRAME_DUP,
},
/* TODO: Case 'F' and 'L' -- remove "case 'F'" */
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 0), },
.read_until = 100,
.data_frame = F(0, 100, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 1), },
.read_until = 10,
.data_frame = F(0, 100, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 0), },
.read_until = 10,
.data_frame = F(0, 100, 1),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 0), },
.read_until = 100,
.data_frame = F(100, 0, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 0), },
.read_until = 0,
.data_frame = F(50, 100, 0),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 1), },
.read_until = 0,
.data_frame = F(50, 100, 0),
.ins = INS_FRAME_ERR,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(100, 100, 0), },
.read_until = 0,
.data_frame = F(50, 100, 0),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(100, 100, 0), },
.read_until = 0,
.data_frame = F(50, 100, 1),
.ins = INS_FRAME_OVERLAP, /* This is really an error,
* but we ignore it.
*/
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(100, 100, 1), },
.read_until = 0,
.data_frame = F(50, 100, 0),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 1,
.initial_frames = { F(0, 100, 1), },
.read_until = 60,
.data_frame = F(50, 2, 0),
.ins = INS_FRAME_DUP,
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 100, 0), F(200, 100, 0), },
.read_until = 0,
.data_frame = F(50, 200, 0),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 100, 0), F(200, 100, 0), },
.read_until = 0,
.data_frame = F(100, 100, 0),
.ins = INS_FRAME_OK,
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 100, 0), F(200, 100, 0), },
.read_until = 0,
.data_frame = F(100, 100, 1),
.ins = INS_FRAME_OK, /* Ignore another error */
},
{ .lineno = __LINE__,
.n_init_frames = 2,
.initial_frames = { F(0, 60, 0), F(60, 60, 0), },
.read_until = 120,
.data_frame = F(0, 180, 0),
.ins = INS_FRAME_OVERLAP,
},
{ .lineno = __LINE__,
.n_init_frames = 3,
.initial_frames = { F(0, 60, 0), F(60, 60, 0), F(180, 60, 0), },
.read_until = 120,
.data_frame = F(0, 180, 0),
.ins = INS_FRAME_OVERLAP,
},
};
/* Executes a single test case: builds a fresh data-in instance, replays the
 * setup (initial frame inserts + reads up to test->read_until), then inserts
 * test->data_frame and asserts the insert result matches test->ins.
 */
static void
run_di_nocopy_test (const struct nocopy_test *test)
{
    struct lsquic_mm mm;
    struct lsquic_conn_public conn_pub;
    struct lsquic_conn conn;
    struct stream_frame *frame;
    struct data_in *di;
    struct data_frame *data_frame;
    enum ins_frame ins;
    unsigned i;
    unsigned nread, n_to_read;
    LSQ_NOTICE("running test on line %d", test->lineno);
    /* Minimal connection scaffolding needed by the data-in module: */
    lsquic_mm_init(&mm);
    memset(&conn, 0, sizeof(conn));
    conn_pub.lconn = &conn;
    conn_pub.mm = &mm;
    di = lsquic_data_in_nocopy_new(&conn_pub, 3);
    /* Setup phase: insert the initial frames; these must all succeed. */
    for (i = 0; i < test->n_init_frames; ++i)
    {
        frame = lsquic_malo_get(mm.malo.stream_frame);
        frame->packet_in = lsquic_mm_get_packet_in(&mm);
        frame->packet_in->pi_refcnt = 1;
        frame->data_frame = test->initial_frames[i];
        ins = di->di_if->di_insert_frame(di, frame, 0);
        assert(INS_FRAME_OK == ins); /* Self-test */
    }
    /* Read phase: consume the stream up to test->read_until bytes, returning
     * fully-read frames to the module as we go. */
    nread = 0;
    while (nread < test->read_until)
    {
        data_frame = di->di_if->di_get_frame(di, nread);
        assert(data_frame); /* Self-check */
        /* Read the smaller of "what's left in this frame" and "what's left
         * until read_until": */
        n_to_read = test->read_until - nread > (unsigned) data_frame->df_size - data_frame->df_read_off
            ? (unsigned) data_frame->df_size - data_frame->df_read_off : test->read_until - nread;
        data_frame->df_read_off += n_to_read;
        nread += n_to_read;
        if (data_frame->df_read_off == data_frame->df_size)
            di->di_if->di_frame_done(di, data_frame);
        else
        {
            /* Frame only partially consumed: we must be exactly at the goal. */
            assert(nread == test->read_until);
            break;
        }
    }
    /* Test phase: insert the frame under test and check the verdict. */
    frame = lsquic_malo_get(mm.malo.stream_frame);
    frame->packet_in = lsquic_mm_get_packet_in(&mm);
    frame->packet_in->pi_refcnt = 1;
    frame->data_frame = test->data_frame;
    ins = di->di_if->di_insert_frame(di, frame, test->read_until);
    assert(test->ins == ins);
    di->di_if->di_destroy(di);
    lsquic_mm_cleanup(&mm);
}
int
main (int argc, char **argv)
{
const struct nocopy_test *test;
int opt;
lsquic_log_to_fstream(stderr, LLTS_NONE);
while (-1 != (opt = getopt(argc, argv, "l:")))
{
switch (opt)
{
case 'l':
lsquic_logger_lopt(optarg);
break;
default:
return 1;
}
}
for (test = tests; test < tests + sizeof(tests) / sizeof(tests[0]); ++test)
run_di_nocopy_test(test);
return 0;
}
| 5,692 |
579 | #include "HdrHistogram_c/src/hdr_histogram.h"
#include <gtest/gtest.h>
#include <util/rand.h>
#include <iostream>
static int64_t kLatencyMinMicroseconds = 1;
static int64_t kLatencyMaxMicroseconds = 1000 * 1000 * 100; // 100 seconds
// Sanity-checks hdr_histogram with microsecond latency values: memory
// footprint, two-digit precision, small inputs, and a realistic mixed
// workload with a long tail.
TEST(HdrHistogramTest, Basic) {
  hdr_histogram* hist;
  // Two significant figures of precision over [1 us, 100 s].
  int ret =
      hdr_init(kLatencyMinMicroseconds, kLatencyMaxMicroseconds, 2, &hist);
  EXPECT_EQ(ret, 0);
  size_t hist_memory_sz = hdr_get_memory_size(hist);
  printf("Histogram memory size = %zu bytes\n", hist_memory_sz);
  EXPECT_LE(hist_memory_sz, 1024 * 32); // 32 KB
  // Check histogram's precision to two digits
  for (size_t i = 0; i < 99; i++)
    hdr_record_value(hist, static_cast<int64_t>(i));
  EXPECT_EQ(hdr_value_at_percentile(hist, 50.0), 49);
  hdr_reset(hist);
  // Few entries
  hdr_record_value(hist, 1);
  hdr_record_value(hist, 2);
  hdr_record_value(hist, 2);
  EXPECT_EQ(hdr_value_at_percentile(hist, 0.0), 1);
  EXPECT_EQ(hdr_value_at_percentile(hist, 33.0), 1);
  EXPECT_EQ(hdr_value_at_percentile(hist, 99.999), 2);
  hdr_reset(hist);
  // A more realistic workload:
  // * A million latency samples between 1 and 32 microseconds
  // * A thousand latency samples around 100 ms
  // * One latency sample at 1 second
  const int64_t k_low_latency = 32; // 32 microseconds
  const int64_t k_high_latency = (1000 * 100); // 100 ms
  const int64_t k_max_latency = (1000 * 1000); // 1 second
  erpc::FastRand fast_rand;
  for (size_t i = 0; i < 1000 * 1000; i++) {
    const int64_t latency_sample = 1 + fast_rand.next_u32() % k_low_latency;
    hdr_record_value(hist, latency_sample);
  }
  for (size_t i = 1; i <= 1000; i++) {
    const size_t latency_sample = k_high_latency + i; // 100 ms + i us
    hdr_record_value(hist, static_cast<int64_t>(latency_sample));
  }
  hdr_record_value(hist, k_max_latency);
  // Low percentiles must land in the low-latency bulk; the 99.99th and above
  // must land in the 100 ms tail.
  const int64_t perc_50 = hdr_value_at_percentile(hist, 50);
  const int64_t perc_99 = hdr_value_at_percentile(hist, 99);
  const int64_t perc_998 = hdr_value_at_percentile(hist, 99.8);
  const int64_t perc_9999 = hdr_value_at_percentile(hist, 99.99);
  const int64_t perc_99999 = hdr_value_at_percentile(hist, 99.999);
  const int64_t max_lat = hdr_max(hist);
  printf("50%% 99%% 99.8%% 99.99%% 99.999%% max\n");
  std::cout << perc_50 << " " << perc_99 << " " << perc_998 << " " << perc_9999
            << " " << perc_99999 << " " << max_lat << std::endl;
  EXPECT_LE(perc_50, k_low_latency);
  EXPECT_LE(perc_99, k_low_latency);
  EXPECT_LE(perc_998, k_low_latency);
  EXPECT_GE(perc_9999, k_high_latency);
  EXPECT_GE(perc_99999, k_high_latency);
  // hdr_max() does not give exact max
  // (values are bucketed, so the reported max is within ~1% but not equal)
  EXPECT_LE((max_lat - k_max_latency) * 1.0 / k_max_latency, 0.01);
  EXPECT_NE(max_lat, k_max_latency);
  hdr_close(hist);
}
// Standard GoogleTest entry point: consume gtest flags, then run every test.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 1,206 |
348 | {"nom":"Anthé","circ":"3ème circonscription","dpt":"Lot-et-Garonne","inscrits":142,"abs":69,"votants":73,"blancs":4,"nuls":4,"exp":65,"res":[{"nuance":"REM","nom":"<NAME>","voix":39},{"nuance":"FN","nom":"M. <NAME>","voix":26}]} | 94 |
679 | <gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _FTNIDX_HXX
#define _FTNIDX_HXX
#define _SVSTDARR_USHORTS
#include <svl/svstdarr.hxx>
class SwTxtFtn;
class SwNodeIndex;
class SwSectionNode;
// Wherever the NodeIndex is needed, the header files missing here are
// already included. Therefore this is only a define and not an
// inline method (saves compile time).
#define _SwTxtFtn_GetIndex( pFIdx ) (pFIdx->GetTxtNode().GetIndex())
typedef SwTxtFtn* SwTxtFtnPtr;
SV_DECL_PTRARR_SORT( _SwFtnIdxs, SwTxtFtnPtr, 0, 10 )
// Sorted array of all footnote text attributes of a document
// (ordering provided by the _SwFtnIdxs sort array base).
class SwFtnIdxs : public _SwFtnIdxs
{
public:
    SwFtnIdxs() {}

    void UpdateFtn( const SwNodeIndex& rStt ); // update all footnotes from this position on
    void UpdateAllFtn();                       // update all footnotes

    // Seek the entry for rIdx; optionally reports its array position via pPos.
    SwTxtFtn* SeekEntry( const SwNodeIndex& rIdx, sal_uInt16* pPos = 0 ) const;
};
// Helper used while updating footnotes/endnotes that are collected at the
// end of a section (section node carrying the corresponding end attribute).
// Keeps parallel arrays: the sections seen so far and a number list per kind.
class SwUpdFtnEndNtAtEnd
{
    SvPtrarr aFtnSects, aEndSects;   // sections with footnotes / endnotes seen so far
    SvUShorts aFtnNums, aEndNums;    // numbering state per section array slot

public:
    SwUpdFtnEndNtAtEnd() : aFtnSects( 0, 4 ), aEndSects( 0, 4 ),
                           aFtnNums( 0, 4 ), aEndNums( 0, 4 )
    {}

    // Returns the enclosing section node that carries the "collect at end"
    // attribute for the given footnote (presumably 0 if there is none —
    // TODO confirm against the implementation).
    static const SwSectionNode* FindSectNdWithEndAttr(
                                            const SwTxtFtn& rTxtFtn );

    // Next number for rTxtFtn inside section rNd.
    sal_uInt16 GetNumber( const SwTxtFtn& rTxtFtn, const SwSectionNode& rNd );
    // NOTE(review): appears to be the "check only" variant of GetNumber —
    // verify semantics in the .cxx before relying on it.
    sal_uInt16 ChkNumber( const SwTxtFtn& rTxtFtn );
};
#endif // _FTNIDX_HXX
| 791 |
654 | <gh_stars>100-1000
package qz.printer.status.printer;
import org.apache.log4j.Level;
import qz.printer.status.NativeStatus;
import static org.apache.log4j.Level.*;
/**
 * Native printer states, each mapped to the log4j severity {@link Level}
 * used when the status is reported.
 * Created by kyle on 7/7/17.
 */
public enum NativePrinterStatus implements NativeStatus {
    OK(INFO),
    PAUSED(WARN),
    ERROR(FATAL),
    PENDING_DELETION(WARN),
    PAPER_JAM(FATAL),
    PAPER_OUT(WARN),
    MANUAL_FEED(INFO),
    PAPER_PROBLEM(WARN),
    OFFLINE(FATAL),
    IO_ACTIVE(INFO),
    BUSY(INFO),
    PRINTING(INFO),
    OUTPUT_BIN_FULL(WARN),
    NOT_AVAILABLE(FATAL),
    WAITING(INFO),
    PROCESSING(INFO),
    INITIALIZING(INFO),
    WARMING_UP(INFO),
    TONER_LOW(WARN),
    NO_TONER(FATAL),
    PAGE_PUNT(FATAL),
    USER_INTERVENTION(WARN),
    OUT_OF_MEMORY(FATAL),
    DOOR_OPEN(WARN),
    SERVER_UNKNOWN(WARN),
    POWER_SAVE(INFO),
    UNKNOWN(INFO),
    UNMAPPED(FATAL); // should never make it to the user

    // Severity for reporting this status; enum constants are immutable,
    // so the field is final.
    private final Level level;

    NativePrinterStatus(Level level) {
        this.level = level;
    }

    /** Fallback status used when a native value cannot be mapped. */
    @Override
    public NativeStatus getDefault() {
        return UNKNOWN;
    }

    @Override
    public Level getLevel() {
        return level;
    }
}
| 546 |
3,212 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.email;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import javax.mail.MessagingException;
import javax.mail.internet.MimeMessage;
import org.apache.commons.mail.Email;
import org.apache.commons.mail.EmailAttachment;
import org.apache.commons.mail.EmailException;
import org.apache.commons.mail.MultiPartEmail;
import org.apache.commons.mail.SimpleEmail;
/**
 * Test helper that builds MIME e-mail messages (optionally with attachments)
 * and serializes them to raw bytes for processor tests.
 */
public class GenerateAttachment {
    String from;
    String to;
    String subject;
    String message;
    String hostName;

    /**
     * @param from     sender address
     * @param to       recipient address
     * @param subject  message subject
     * @param message  plain-text body
     * @param hostName SMTP host name used when building the message
     */
    public GenerateAttachment(String from, String to, String subject, String message, String hostName) {
        this.from = from;
        this.to = to;
        this.subject = subject;
        this.message = message;
        this.hostName = hostName;
    }

    /** @return the attachment-less message serialized to raw RFC-822 bytes */
    public byte[] SimpleEmail() {
        return toBytes(SimpleEmailMimeMessage());
    }

    /** Builds a plain-text {@link MimeMessage} from the configured fields. */
    public MimeMessage SimpleEmailMimeMessage() {
        Email email = new SimpleEmail();
        try {
            email.setFrom(from);
            email.addTo(to);
            email.setSubject(subject);
            email.setMsg(message);
            email.setHostName(hostName);
            email.buildMimeMessage();
        } catch (EmailException e) {
            e.printStackTrace();
        }
        return email.getMimeMessage();
    }

    /**
     * Builds a multipart message carrying {@code amount} copies of the build's
     * pom.xml as attachments, serialized to raw bytes.
     *
     * @param amount number of attachments to add (names suffixed 1..amount)
     */
    public byte[] WithAttachments(int amount) {
        MultiPartEmail email = new MultiPartEmail();
        try {
            email.setFrom(from);
            email.addTo(to);
            email.setSubject(subject);
            email.setMsg(message);
            email.setHostName(hostName);
            for (int x = 1; x <= amount; x++) {
                // Create an attachment with the pom.xml being used to compile (yay!!!)
                EmailAttachment attachment = new EmailAttachment();
                attachment.setPath("pom.xml");
                attachment.setDisposition(EmailAttachment.ATTACHMENT);
                attachment.setDescription("pom.xml");
                attachment.setName("pom.xml" + String.valueOf(x));
                email.attach(attachment);
            }
            email.buildMimeMessage();
        } catch (EmailException e) {
            e.printStackTrace();
        }
        return toBytes(email.getMimeMessage());
    }

    // Serializes a MimeMessage; on failure prints the stack trace and returns
    // whatever bytes were written so far (possibly empty), matching the
    // original best-effort behavior.
    private byte[] toBytes(MimeMessage mimeMessage) {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        try {
            mimeMessage.writeTo(output);
        } catch (IOException | MessagingException e) {
            e.printStackTrace();
        }
        return output.toByteArray();
    }
}
| 1,543 |
389 | /*
* Copyright 2014 Guidewire Software, Inc.
*/
package gw.lang.parser.expressions;
import gw.lang.parser.IExpression;
import gw.lang.reflect.IFeatureInfo;
import gw.lang.reflect.IType;
import java.util.List;
/**
 * A literal expression that refers to a feature ({@link IFeatureInfo}),
 * optionally rooted in a chain of literals and optionally bound to
 * argument expressions.
 */
public interface IFeatureLiteralExpression extends ILiteralExpression, Cloneable
{
  /**
   * @return the feature associated with this literal
   */
  IFeatureInfo getFeature();

  /**
   * @return the root type of the entire literal chain (if one exists)
   */
  public IType getFinalRootType();

  /**
   * @return the type that the feature that this literal refers to is on
   */
  IType getRootType();

  /**
   * @return bound args of this feature, or null if there are no bound args
   */
  List<IExpression> getBoundArgs();

  /**
   * @return true if this literal is bound at its root
   */
  boolean isBound();

  /**
   * @return the final root expression of the feature literal
   */
  public IExpression getFinalRoot();
}
| 295 |
769 | <gh_stars>100-1000
#include "catch.hpp"
#include "scene/filters.h"
#include "scene/sceneLoader.h"
#include "scene/styleContext.h"
#include "util/builders.h"
#include "yaml-cpp/yaml.h"
using namespace Tangram;
// Smoke test: constructing a StyleContext (and its Duktape VM) must not throw.
TEST_CASE( "", "[Duktape][init]") {
    StyleContext();
}
// JS filter functions read the current feature's properties via `feature`;
// comparisons are strict (===), so 42 must not match '42'.
TEST_CASE( "Test evalFilterFn with feature", "[Duktape][evalFilterFn]") {
    Feature feature;
    feature.props.set("a", "A");
    feature.props.set("b", "B");
    feature.props.set("n", 42);

    StyleContext ctx;
    ctx.setFeature(feature);

    REQUIRE(ctx.setFunctions({R"(function() { return feature.a === 'A' })"}));
    REQUIRE(ctx.evalFilter(0) == true);

    REQUIRE(ctx.setFunctions({ R"(function() { return feature.b === 'B' })"}));
    REQUIRE(ctx.evalFilter(0) == true);

    REQUIRE(ctx.setFunctions({ R"(function() { return feature.n === 42 })"}));
    REQUIRE(ctx.evalFilter(0) == true);

    REQUIRE(ctx.setFunctions({ R"(function() { return feature.n === 43 })"}));
    REQUIRE(ctx.evalFilter(0) == false);

    // Number vs. string: strict equality fails.
    REQUIRE(ctx.setFunctions({ R"(function() { return feature.n === '42' })"}));
    REQUIRE(ctx.evalFilter(0) == false);
}
// The $zoom keyword is exposed to filter functions and tracks setZoom().
TEST_CASE( "Test evalFilterFn with feature and keywords", "[Duktape][evalFilterFn]") {
    Feature feature;
    feature.props.set("scalerank", 2);

    StyleContext ctx;
    ctx.setFeature(feature);

    ctx.setZoom(5);
    REQUIRE(ctx.setFunctions({ R"(function() { return (feature.scalerank * .5) <= ($zoom - 4); })"}));
    REQUIRE(ctx.evalFilter(0) == true);

    // Same filter, lower zoom: 1.0 <= 0 is false.
    ctx.setZoom(4);
    REQUIRE(ctx.evalFilter(0) == false);
}
// $meters_per_pixel is derived from the zoom level and halves per zoom step.
TEST_CASE( "Test $meters_per_pixel keyword in JS function", "[Duktape]") {
    StyleContext ctx;
    REQUIRE(ctx.setFunctions({ R"(function() { return $meters_per_pixel <= 100; })"}));

    ctx.setZoom(10); // $meters_per_pixel should be 152.9
    REQUIRE(ctx.evalFilter(0) == false);

    ctx.setZoom(11); // $meters_per_pixel should be 76.4
    REQUIRE(ctx.evalFilter(0) == true);
}
// The $geometry keyword reflects the feature's geometry type; exactly one of
// the three filters matches for each of point/line/polygon features.
TEST_CASE( "Test evalFilterFn with feature and keyword geometry", "[Duktape][evalFilterFn]") {
    Feature points;
    points.geometryType = GeometryType::points;
    Feature lines;
    lines.geometryType = GeometryType::lines;
    Feature polygons;
    polygons.geometryType = GeometryType::polygons;

    StyleContext ctx;

    // Test $geometry keyword
    REQUIRE(ctx.setFunctions({
        R"(function() { return $geometry === 'point'; })",
        R"(function() { return $geometry === 'line'; })",
        R"(function() { return $geometry === 'polygon'; })"}));

    ctx.setFeature(points);
    REQUIRE(ctx.evalFilter(0) == true);
    REQUIRE(ctx.evalFilter(1) == false);
    REQUIRE(ctx.evalFilter(2) == false);

    ctx.setFeature(lines);
    REQUIRE(ctx.evalFilter(0) == false);
    REQUIRE(ctx.evalFilter(1) == true);
    REQUIRE(ctx.evalFilter(2) == false);

    ctx.setFeature(polygons);
    REQUIRE(ctx.evalFilter(0) == false);
    REQUIRE(ctx.evalFilter(1) == false);
    REQUIRE(ctx.evalFilter(2) == true);
}
// setFeature() swaps the feature a previously-installed filter sees.
TEST_CASE( "Test evalFilterFn with different features", "[Duktape][evalFilterFn]") {
    StyleContext ctx;
    REQUIRE(ctx.setFunctions({ R"(function() { return feature.scalerank === 2; })"}));

    Feature feat1;
    feat1.props.set("scalerank", 2);
    ctx.setFeature(feat1);
    REQUIRE(ctx.evalFilter(0) == true);

    // A feature without the property must not match ...
    Feature feat2;
    ctx.setFeature(feat2);
    REQUIRE(ctx.evalFilter(0) == false);

    // ... and switching back restores the match.
    ctx.setFeature(feat1);
    REQUIRE(ctx.evalFilter(0) == true);
}
// Style functions for 'order' coerce the JS result to uint32_t.
TEST_CASE( "Test evalStyleFn - StyleParamKey::order", "[Duktape][evalStyleFn]") {
    Feature feat;
    feat.props.set("sort_key", 2);

    StyleContext ctx;
    ctx.setFeature(feat);
    REQUIRE(ctx.setFunctions({ R"(function () { return feature.sort_key + 5 })"}));

    StyleParam::Value value;
    REQUIRE(ctx.evalStyle(0, StyleParamKey::order, value) == true);
    REQUIRE(value.is<uint32_t>() == true);
    REQUIRE(value.get<uint32_t>() == 7);
}
// Color results accept CSS hex strings, packed ABGR integers, and float
// arrays ([r,g,b,a] or [r,g,b]); all normalize to a packed uint32_t.
TEST_CASE( "Test evalStyleFn - StyleParamKey::color", "[Duktape][evalStyleFn]") {
    StyleContext ctx;
    StyleParam::Value value;

    REQUIRE(ctx.setFunctions({ R"(function () { return '#f0f'; })"}));
    REQUIRE(ctx.evalStyle(0, StyleParamKey::color, value) == true);
    REQUIRE(value.is<uint32_t>() == true);
    REQUIRE(value.get<uint32_t>() == 0xffff00ff);

    REQUIRE(ctx.setFunctions({ R"(function () { return 0xff00ffff; })"}));
    REQUIRE(ctx.evalStyle(0, StyleParamKey::color, value) == true);
    REQUIRE(value.is<uint32_t>() == true);
    REQUIRE(value.get<uint32_t>() == 0xff00ffff);

    REQUIRE(ctx.setFunctions({ R"(function () { return [1.0, 1.0, 0.0, 1.0] })"}));
    REQUIRE(ctx.evalStyle(0, StyleParamKey::color, value) == true);
    REQUIRE(value.is<uint32_t>() == true);
    REQUIRE(value.get<uint32_t>() == 0xff00ffff);

    // Three components: alpha defaults to opaque.
    REQUIRE(ctx.setFunctions({ R"(function () { return [0.0, 1.0, 0.0] })"}));
    REQUIRE(ctx.evalStyle(0, StyleParamKey::color, value) == true);
    REQUIRE(value.is<uint32_t>() == true);
    REQUIRE(value.get<uint32_t>() == 0xff00ff00);
}
// Width results wrap the numeric JS value in StyleParam::Width.
TEST_CASE( "Test evalStyleFn - StyleParamKey::width", "[Duktape][evalStyleFn]") {
    Feature feat;
    feat.props.set("width", 2.0);

    StyleContext ctx;
    ctx.setFeature(feat);
    REQUIRE(ctx.setFunctions({ R"(function () { return feature.width * 2.3; })"}));

    StyleParam::Value value;
    REQUIRE(ctx.evalStyle(0, StyleParamKey::width, value) == true);
    REQUIRE(value.is<StyleParam::Width>() == true);
    REQUIRE(value.get<StyleParam::Width>().value == 4.6f);
}
// Extrude results: true -> (NaN, NaN) meaning "use feature heights",
// false -> (0, 0) meaning "no extrusion", array -> explicit (min, max).
TEST_CASE( "Test evalStyleFn - StyleParamKey::extrude", "[Duktape][evalStyleFn]") {
    Feature feat;
    feat.props.set("width", 2.0);

    StyleContext ctx;
    ctx.setFeature(feat);
    REQUIRE(ctx.setFunctions({
        R"(function () { return true; })",
        R"(function () { return false; })",
        R"(function () { return [1.1, 2.2]; })"}));

    StyleParam::Value value;

    REQUIRE(ctx.evalStyle(0, StyleParamKey::extrude, value) == true);
    REQUIRE(value.is<glm::vec2>() == true);
    StyleParam::Value e1(glm::vec2(NAN, NAN));
    // NaN != NaN, so compare via isnan instead of operator==.
    REQUIRE(std::isnan(value.get<glm::vec2>()[0]) == true);

    REQUIRE(ctx.evalStyle(1, StyleParamKey::extrude, value) == true);
    REQUIRE(value.is<glm::vec2>() == true);
    StyleParam::Value e2(glm::vec2(0.0f, 0.0f));
    REQUIRE(value == e2);

    REQUIRE(ctx.evalStyle(2, StyleParamKey::extrude, value) == true);
    REQUIRE(value.is<glm::vec2>() == true);
    StyleParam::Value e3(glm::vec2(1.1f, 2.2f));
    REQUIRE(value == e3);
}
// text_source results are plain strings; they may be constants or read from
// the current feature's properties.
TEST_CASE( "Test evalStyleFn - StyleParamKey::text_source", "[Duktape][evalStyleFn]") {
    Feature feat;
    feat.props.set("name", "my name is my name");

    StyleContext ctx;
    ctx.setFeature(feat);
    REQUIRE(ctx.setFunctions({
        R"(function () { return 'hello!'; })",
        R"(function () { return feature.name; })"}));

    StyleParam::Value value;

    REQUIRE(ctx.evalStyle(0, StyleParamKey::text_source, value) == true);
    REQUIRE(value.is<std::string>());
    REQUIRE(value.get<std::string>() == "hello!");

    REQUIRE(ctx.evalStyle(1, StyleParamKey::text_source, value) == true);
    REQUIRE(value.is<std::string>());
    REQUIRE(value.get<std::string>() == "my name is my name");
}
// End-to-end: filter functions parsed from YAML register in SceneFunctions
// and evaluate against the feature currently set on the context.
TEST_CASE( "Test evalFilter - Init filter function from yaml", "[Duktape][evalFilter]") {
    SceneFunctions fns;
    YAML::Node n0 = YAML::Load(R"(filter: function() { return feature.sort_key === 2; })");
    YAML::Node n1 = YAML::Load(R"(filter: function() { return feature.name === 'test'; })");

    Filter filter0 = SceneLoader::generateFilter(fns, n0["filter"]);
    Filter filter1 = SceneLoader::generateFilter(fns, n1["filter"]);
    REQUIRE(fns.size() == 2);
    REQUIRE(filter0.data.is<Filter::Function>());
    REQUIRE(filter1.data.is<Filter::Function>());

    StyleContext ctx;
    ctx.setFunctions(fns);

    Feature feat1;
    feat1.props.set("sort_key", 2);
    feat1.props.set("name", "test");
    ctx.setFeature(feat1);

    // NB: feature parameter is ignored for Function evaluation
    REQUIRE(filter0.eval(feat1, ctx) == true);
    REQUIRE(filter1.eval(feat1, ctx) == true);

    // This is what happens in the above 'eval' internally
    REQUIRE(ctx.evalFilter(filter0.data.get<Filter::Function>().id) == true);
    REQUIRE(ctx.evalFilter(filter1.data.get<Filter::Function>().id) == true);

    // ... Also check that setFeature updates the ctx
    Feature feat2;
    feat2.props.set("name", "nope");
    ctx.setFeature(feat2);
    REQUIRE(filter0.eval(feat2, ctx) == false);
    REQUIRE(filter1.eval(feat2, ctx) == false);
    REQUIRE(ctx.evalFilter(filter0.data.get<Filter::Function>().id) == false);
    REQUIRE(ctx.evalFilter(filter1.data.get<Filter::Function>().id) == false);
}
// Draw-rule functions parsed from YAML evaluate to the typed StyleParam
// values (color/width/cap); any unexpected key fails the test.
// NOTE(review): the YAML raw-string indentation below was reconstructed —
// confirm it matches the original fixture.
TEST_CASE("Test evalStyle - Init StyleParam function from yaml", "[Duktape][evalStyle]") {
    SceneStops stops;
    SceneFunctions fns;
    YAML::Node n0 = YAML::Load(R"(
        draw:
            color: function() { return '#ffff00ff'; }
            width: function() { return 2; }
            cap: function() { return 'round'; }
        )");

    auto params = SceneLoader::parseStyleParams(n0["draw"], stops, fns);
    REQUIRE(fns.size() == 3);

    StyleContext ctx;
    ctx.setFunctions(fns);

    for (auto& style : params) {
        if (style.key == StyleParamKey::color) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<uint32_t>() == true);
            REQUIRE(value.get<uint32_t>() == 0xffff00ff);
        } else if (style.key == StyleParamKey::width) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<StyleParam::Width>() == true);
            REQUIRE(value.get<StyleParam::Width>().value == 2);
        } else if (style.key == StyleParamKey::cap) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<uint32_t>() == true);
            REQUIRE(static_cast<CapTypes>(value.get<uint32_t>()) == CapTypes::round);
        } else {
            // Unexpected style key: fail loudly.
            REQUIRE(true == false);
        }
    }
}
// Style functions can read scene-wide values through the `global` object set
// via setSceneGlobals(), including nested maps and calling JS functions
// defined inside the globals.
// NOTE(review): the YAML raw-string indentation below was reconstructed —
// confirm it matches the original fixture.
TEST_CASE( "Test evalFunction explicit", "[Duktape][evalFunction]") {
    YAML::Node n0 = YAML::Load(R"(
        global:
            width: 2
            mapNode:
                color: function(c) { return c; }
                caps:
                    cap: round
                test: function
        draw:
            color: function() { return global.mapNode.color("blue"); }
            width: function() { return global.width; }
            cap: function() { return global.mapNode.caps.cap; }
            text_source: function() { return global.mapNode.test; }
        )");

    SceneStops stops;
    SceneFunctions fns;
    auto params = SceneLoader::parseStyleParams(n0["draw"], stops, fns);
    REQUIRE(fns.size() == 4);

    StyleContext ctx;
    ctx.setSceneGlobals(n0["global"]);
    ctx.setFunctions(fns);

    for (auto& style : params) {
        if (style.key == StyleParamKey::color) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<uint32_t>() == true);
            REQUIRE(value.get<uint32_t>() == 0xffff0000);
        } else if (style.key == StyleParamKey::width) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<StyleParam::Width>() == true);
            REQUIRE(value.get<StyleParam::Width>().value == 2);
        } else if (style.key == StyleParamKey::cap) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<uint32_t>() == true);
            REQUIRE(static_cast<CapTypes>(value.get<uint32_t>()) == CapTypes::round);
        } else if(style.key == StyleParamKey::text_source) {
            StyleParam::Value value;
            REQUIRE(ctx.evalStyle(style.function, style.key, value) == true);
            REQUIRE(value.is<std::string>() == true);
            REQUIRE(value.get<std::string>() == "function");
        } else {
            // Unexpected style key: fail loudly.
            REQUIRE(true == false);
        }
    }
}
| 5,442 |
2,542 | <filename>src/prod/src/ServiceModel/reliability/failover/ReconfigurationType.cpp<gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace Common;
namespace Reliability
{
namespace ReconfigurationType
{
void WriteToTextWriter(TextWriter & w, Enum const& value)
{
switch (value)
{
case Other:
w << "Other";
return;
case SwapPrimary:
w << "SwapPrimary";
return;
case Failover:
w << L"Failover";
return;
case None:
w << L"None";
return;
default:
Assert::CodingError("unknown value for enum {0}", static_cast<int>(value));
return;
}
}
ENUM_STRUCTURED_TRACE(ReconfigurationType, Other, LastValidEnum);
        // Maps the internal reconfiguration enum onto the public
        // FABRIC_RECONFIGURATION_TYPE constants; asserts on unknown values.
        ::FABRIC_RECONFIGURATION_TYPE Reliability::ReconfigurationType::ConvertToPublicReconfigurationType(ReconfigurationType::Enum toConvert)
        {
            switch (toConvert)
            {
            case Other:
                return ::FABRIC_RECONFIGURATION_TYPE_OTHER;
            case SwapPrimary:
                return ::FABRIC_RECONFIGURATION_TYPE_SWAPPRIMARY;
            case Failover:
                return ::FABRIC_RECONFIGURATION_TYPE_FAILOVER;
            case None:
                return ::FABRIC_RECONFIGURATION_TYPE_NONE;
            default:
                Common::Assert::CodingError("Unknown Reconfiguration Type");
            }
        }
}
}
| 741 |
452 | <reponame>MatteoManzoni/kadalu<filename>cli/kubectl_kadalu/utils.py
"""
Utility methods for the CLI tool
"""
from __future__ import print_function
import subprocess
import sys
KUBECTL_CMD = "kubectl"
# noqa # pylint: disable=too-many-instance-attributes
# noqa # pylint: disable=useless-object-inheritance
# noqa # pylint: disable=too-few-public-methods
# noqa # pylint: disable=bad-option-value
class CmdResponse(object):
    """Outcome of an executed command: return code plus captured stdout/stderr."""

    def __init__(self, returncode, out, err):
        # Attribute names mirror subprocess conventions.
        self.returncode, self.stdout, self.stderr = returncode, out, err
class CommandError(Exception):
    """Raised when an executed command exits with a non-zero return code."""

    def __init__(self, returncode, err):
        self.returncode = returncode
        self.stderr = err
        # Exception text carries the failing code and stderr for quick triage.
        super().__init__(u"error %d %s" % (returncode, err))
def execute(cmd):
    """Run *cmd*, returning a CmdResponse on success.

    Raises CommandError when the command exits non-zero.
    """
    proc = subprocess.run(cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          universal_newlines=True)
    if proc.returncode != 0:
        raise CommandError(proc.returncode, proc.stderr)
    return CmdResponse(proc.returncode, proc.stdout, proc.stderr)
def add_global_flags(parser):
    """Register the flags shared by every kadalu subcommand on *parser*."""
    parser.add_argument("--kubectl-cmd", default=KUBECTL_CMD,
                        help="Kubectl Command Path")
    # Boolean switches share the same store_true shape.
    for flag, help_text in (
            ("--verbose", "Verbose output"),
            ("--dry-run", "Skip execution only preview"),
            ("--script-mode", "Script mode, bypass Prompts"),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    parser.add_argument("--kubectl-context", default=None,
                        help="Kubectl Context")
def command_error(cmd, msg):
    """Report a failed command on stderr and terminate with exit status 1."""
    for line in ("Error while running the following command",
                 "$ " + " ".join(cmd),
                 "",
                 msg):
        print(line, file=sys.stderr)
    sys.exit(1)
def kubectl_cmd_help(cmd):
    """Explain that *cmd* could not be executed, then exit with status 1."""
    message = ('Failed to execute the command: "%s"\n' % cmd
               + "Use `--kubectl-cmd` option if kubectl is installed "
                 "in custom path")
    print(message, file=sys.stderr)
    sys.exit(1)
def kubectl_cmd(args):
    """Build the kubectl argv prefix from parsed CLI *args*.

    Supports wrappers where kubectl is a subcommand of another binary
    (e.g. ``k3s kubectl``) by splitting on whitespace, and appends
    ``--context`` when one was given.
    """
    parts = args.kubectl_cmd.split()
    context = args.kubectl_context
    return parts if context is None else parts + ["--context", context]
| 1,208 |
348 | {"nom":"Perpezac-le-Noir","circ":"1ère circonscription","dpt":"Corrèze","inscrits":884,"abs":417,"votants":467,"blancs":47,"nuls":30,"exp":390,"res":[{"nuance":"REM","nom":"<NAME>","voix":246},{"nuance":"SOC","nom":"<NAME>","voix":144}]} | 95 |
317 | package com.googlecode.totallylazy.numbers;
import com.googlecode.totallylazy.functions.CurriedMonoid;
import com.googlecode.totallylazy.comparators.NullComparator;
/**
 * Maximum of two {@code Number}s as a monoid: nulls sort down and negative
 * infinity is the identity element.
 */
public class Maximum implements com.googlecode.totallylazy.comparators.Maximum<Number>, CurriedMonoid<Number> {
    @Override
    public Number call(Number a, Number b) throws Exception {
        final int comparison =
                NullComparator.compare(a, b, NullComparator.Direction.Down, Numbers.ascending());
        if (comparison > 0) {
            return a;
        }
        return b;
    }

    @Override
    public Number identity() {
        // max(-inf, x) == x for every x, so -inf is the neutral element.
        return Numbers.NEGATIVE_INFINITY;
    }
}
| 198 |
619 | <reponame>moredu/upm
/*
* Author: <NAME> <<EMAIL>>
* Copyright (c) 2015 Intel Corporation.
*
* This program and the accompanying materials are made available under the
* terms of the The MIT License which is available at
* https://opensource.org/licenses/MIT.
*
* SPDX-License-Identifier: MIT
*/
#pragma once
#include <string>
#include <mraa/aio.h>
#include <interfaces/iGas.hpp>
namespace upm {
/**
* @brief Oxygen Gas Sensor
* @defgroup o2 libupm-o2
* @ingroup seeed analog gaseous
*/
/**
* @library o2
* @sensor o2
* @comname Oxygen (O2) Concentration Sensor
* @altname Grove O2 Sensor
* @type gaseous
* @man seeed
* @con analog
* @web http://wiki.seeed.cc/Grove-Gas_Sensor-O2/
*
* @brief API for the Grove O2 Oxygen Gas Sensor
*
* The Grove O2 Oxygen Gas sensor measures the oxygen concentration in the air
*
* @image html o2.jpg
* @snippet o2.cxx Interesting
*/
class O2: virtual public iGas {
    public:
        /**
         * Grove O2 Oxygen Gas sensor constructor
         *
         * @param pin Analog pin to use
         */
        O2(int pin);
        /**
         * O2 destructor
         */
        ~O2();
        /**
         * Measures O2 from the sensor
         *
         * @return Oxygen concentration as voltage
         */
        float voltageValue();
        /**
         * Measures O2 from the sensor
         *
         * @return Oxygen concentration as PPM
         */
        float getConcentration();

    private:
        // Raw mraa analog I/O context; opened in the ctor, closed in the dtor.
        mraa_aio_context m_aio;
};
}
| 583 |
379 | package tellh.com.recyclerstickyheaderview;
import tellh.com.stickyheaderview_rv.adapter.DataBean;
import tellh.com.stickyheaderview_rv.adapter.StickyHeaderViewAdapter;
/**
 * Sticky header row bean for the StickyHeaderView RecyclerView adapter:
 * renders {@code R.layout.header} and stays pinned while its section scrolls.
 * Created by tlh on 2017/1/22 :)
 */
public class ItemHeader extends DataBean {
    // Header text; immutable once constructed, hence final.
    private final String prefix;

    public ItemHeader(String prefix) {
        this.prefix = prefix;
    }

    public String getPrefix() {
        return prefix;
    }

    @Override
    public int getItemLayoutId(StickyHeaderViewAdapter adapter) {
        return R.layout.header;
    }

    @Override
    public boolean shouldSticky() {
        // Header rows are always pinned at the top.
        return true;
    }
}
| 241 |
1,428 | import java.util.*;
import java.lang.*;
import java.io.*;
/**
 * Binary search over a row-major sorted 2D int array, treating the matrix as
 * one flat sorted sequence.
 */
class BinarySearch2DArray
{
    /**
     * Returns the element at the given linear index (row-major order).
     *
     * @param in    non-empty 2D array with equal-length rows
     * @param index linear index in [0, rows * cols)
     */
    public static int get(int in[][], int index)
    {
        int r = index / in[0].length;
        int c = index % in[0].length;
        return in[r][c];
    }

    /**
     * Binary-searches the flattened array for {@code val}.
     *
     * @return true iff {@code val} occurs in {@code in}
     */
    public static boolean contains(int in[][], int val)
    {
        int start = 0;
        int end = in.length * in[0].length - 1;
        while (start <= end)
        {
            // Overflow-safe midpoint: (start + end) / 2 can wrap negative
            // for large indices.
            int mid = start + (end - start) / 2;
            int candidate = get(in, mid); // fetch once per iteration
            if (candidate > val)
            {
                end = mid - 1;
            }
            else if (candidate < val)
            {
                start = mid + 1;
            }
            else
            {
                return true;
            }
        }
        return false;
    }

    /** Demo driver: probes values 1..10 against a small sorted matrix. */
    public static void main (String[] args) throws java.lang.Exception
    {
        int in[][] = new int[][]{{1, 2, 3}, {5, 6, 9}};
        System.out.println("1: " + contains(in, 1));
        System.out.println("2: " + contains(in, 2));
        System.out.println("3: " + contains(in, 3));
        System.out.println("4: " + contains(in, 4));
        System.out.println("5: " + contains(in, 5));
        System.out.println("6: " + contains(in, 6));
        System.out.println("7: " + contains(in, 7));
        System.out.println("8: " + contains(in, 8));
        System.out.println("9: " + contains(in, 9));
        System.out.println("10: " + contains(in, 10));
    }
}
679 | <gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _SDR_ATTRIBUTE_SDRALLFILLATTRIBUTESHELPER_HXX
#define _SDR_ATTRIBUTE_SDRALLFILLATTRIBUTESHELPER_HXX
#include "svx/svxdllapi.h"
#include <drawinglayer/attribute/fillgradientattribute.hxx>
#include <drawinglayer/attribute/sdrfillattribute.hxx>
#include <drawinglayer/primitive2d/baseprimitive2d.hxx>
#include <boost/shared_ptr.hpp>
#include <tools/color.hxx>
#include <svl/itemset.hxx>
//////////////////////////////////////////////////////////////////////////////
namespace drawinglayer
{
namespace attribute
{
        // Bundles the fill-related attributes (fill + optional gradient) of an
        // object and caches the drawinglayer primitives created from them for
        // a given paint/define range pair.
        class SVX_DLLPUBLIC SdrAllFillAttributesHelper
        {
        private:
            // Ranges the cached primitives were last created for; compared to
            // detect when maPrimitives must be rebuilt.
            basegfx::B2DRange maLastPaintRange;
            basegfx::B2DRange maLastDefineRange;

            boost::shared_ptr< drawinglayer::attribute::SdrFillAttribute > maFillAttribute;
            boost::shared_ptr< drawinglayer::attribute::FillGradientAttribute > maFillGradientAttribute;

            // Cached primitive representation of the fill.
            drawinglayer::primitive2d::Primitive2DSequence maPrimitives;

            // (Re)creates maPrimitives for the given ranges.
            void createPrimitive2DSequence(
                const basegfx::B2DRange& rPaintRange,
                const basegfx::B2DRange& rDefineRange);

        protected:
        public:
            SdrAllFillAttributesHelper();
            SdrAllFillAttributesHelper(const Color& rColor);
            SdrAllFillAttributesHelper(const SfxItemSet& rSet);
            ~SdrAllFillAttributesHelper();

            bool isUsed() const;
            bool hasSdrFillAttribute() const { return maFillAttribute.get(); }
            bool hasFillGradientAttribute() const { return maFillGradientAttribute.get(); }
            bool isTransparent() const;

            const drawinglayer::attribute::SdrFillAttribute& getFillAttribute() const;
            const drawinglayer::attribute::FillGradientAttribute& getFillGradientAttribute() const;
            const drawinglayer::primitive2d::Primitive2DSequence& getPrimitive2DSequence(
                const basegfx::B2DRange& rPaintRange,
                const basegfx::B2DRange& rDefineRange) const;

            // get average fill color; tries to calculate a 'medium' color
            // which e.g. may be used as comparison to decide if other
            // colors are visible
            basegfx::BColor getAverageColor(const basegfx::BColor& rFallback) const;

            // return if a repaint of this content needs a complete repaint. This
            // is e.g. not needed for no fill or color fill (a partial repaint
            // will do the trick), but necessary for everything that is not top-left
            // oriented
            bool needCompleteRepaint() const;
        };
} // end of namespace attribute
} // end of namespace drawinglayer
//////////////////////////////////////////////////////////////////////////////
namespace drawinglayer
{
namespace attribute
{
typedef boost::shared_ptr< SdrAllFillAttributesHelper > SdrAllFillAttributesHelperPtr;
} // end of namespace attribute
} // end of namespace drawinglayer
//////////////////////////////////////////////////////////////////////////////
#endif // _SDR_ATTRIBUTE_SDRALLFILLATTRIBUTESHELPER_HXX
// eof
| 1,549 |
893 | <gh_stars>100-1000
package com.bytedance.android.aabresguard.executors;
import com.android.tools.build.bundletool.model.AppBundle;
import com.android.tools.build.bundletool.model.BundleModule;
import com.bytedance.android.aabresguard.BaseTest;
import com.bytedance.android.aabresguard.bundle.AppBundleAnalyzer;
import com.google.common.collect.ImmutableSet;
import org.junit.Test;
import java.io.IOException;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;
/**
 * Created by YangJing on 2019/10/14 .
 * Email: <EMAIL>
 */
public class ResourcesObfuscatorTest extends BaseTest {

    /**
     * Obfuscates the bundled demo .aab with a resource white list and a
     * pre-existing mapping file, then verifies that obfuscation preserves
     * the module set and the entry count of every module.
     */
    @Test
    public void test() throws IOException {
        // Resources matching these patterns keep their original names.
        Set<String> whiteList = new HashSet<>(
                ImmutableSet.of(
                        "com.bytedance.android.ugc.aweme.R.raw.*",
                        "*.R.drawable.icon",
                        "*.R.anim.ab*"
                )
        );
        Path bundlePath = loadResourceFile("demo/demo.aab").toPath();
        Path outputDir = getTempDirPath();
        AppBundleAnalyzer analyzer = new AppBundleAnalyzer(bundlePath);
        AppBundle appBundle = analyzer.analyze();
        ResourcesObfuscator obfuscator = new ResourcesObfuscator(bundlePath, appBundle, whiteList, outputDir, loadResourceFile("demo/mapping.txt").toPath());
        AppBundle obfuscateAppBundle = obfuscator.obfuscate();
        assert obfuscateAppBundle != null;
        // Obfuscation must neither add nor drop modules or entries.
        assert obfuscateAppBundle.getModules().size() == appBundle.getModules().size();
        appBundle.getModules().forEach((bundleModuleName, bundleModule) -> {
            BundleModule obfuscatedModule = obfuscateAppBundle.getModule(bundleModuleName);
            assert obfuscatedModule.getEntries().size() == bundleModule.getEntries().size();
        });
    }
}
| 736 |
3,400 | /*
* Copyright 2014-2021 Real Logic Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cinttypes>
#include "aeron_driver_conductor_test.h"
using testing::_;
using testing::Eq;
using testing::Ne;
using testing::Args;
using testing::Mock;
using testing::AnyNumber;
// Abstract parameterization for driver-conductor tests: carries the channel
// URI under test plus hooks a concrete transport variant implements to
// inspect conductor state. Fix: added a virtual destructor — the class is a
// polymorphic base (NetworkTestParam derives from it), so deleting through a
// base pointer was previously undefined behavior.
class ConductorTestParam
{
public:
    const char *m_name;
    const char *m_channel;
    char m_initial_separator_char;

    ConductorTestParam(const char *name, const char *channel, char initial_separator_char) :
        m_name(name), m_channel(channel), m_initial_separator_char(initial_separator_char)
    {
    }

    virtual ~ConductorTestParam() = default;

    virtual bool publicationExists(aeron_driver_conductor_t *conductor, int64_t publication_id) = 0;
    virtual size_t numSubscriptions(aeron_driver_conductor_t *conductor) = 0;
    virtual size_t numPublications(aeron_driver_conductor_t *conductor) = 0;
    virtual bool publicationHasRefCnt(aeron_driver_conductor_t *conductor, int64_t publication_id, int32_t refcnt) = 0;

    // The endpoint checks below default to 'true' so transport variants
    // without endpoints trivially pass them.
    virtual bool sendEndpointExists(aeron_driver_conductor_t *conductor, const char *channel)
    {
        return true;
    }

    virtual bool receiveEndpointExists(aeron_driver_conductor_t *conductor, const char *channel)
    {
        return true;
    }

    virtual bool receiveEndpointHasRefCnt(
        aeron_driver_conductor_t *conductor, const char *channel, int32_t stream_id, int32_t session_id, int32_t refcnt)
    {
        return true;
    }

    virtual bool receiveEndpointHasStatus(
        aeron_driver_conductor_t *conductor, const char *channel, aeron_receive_channel_endpoint_status_t status)
    {
        return true;
    }

    virtual bool hasReceiveEndpointCount(aeron_driver_conductor_t *conductor, size_t count)
    {
        return true;
    }

    virtual bool hasSendEndpointCount(aeron_driver_conductor_t *conductor, size_t count)
    {
        return true;
    }

    // Appends "session-id=<id>" (and optional "|mtu=..." / "|term-length=...")
    // to m_channel, using m_initial_separator_char before the first param.
    virtual void channelWithParams(char *path, size_t len, int32_t session_id, int32_t mtu = 0, int32_t term_length = 0)
    {
        int offset = snprintf(path, len - 1, "%s%csession-id=%" PRId32, m_channel, m_initial_separator_char, session_id);

        if (0 != mtu)
        {
            offset += snprintf(&path[offset], len - (offset + 1), "|mtu=%" PRId32, mtu);
        }

        if (0 != term_length)
        {
            offset += snprintf(&path[offset], len - (offset + 1), "|term-length=%" PRId32, term_length);
        }
    }
};
// UDP (network) flavour of the test parameterisation: every hook is answered
// via the conductor's network publication / send- and receive-endpoint APIs.
class NetworkTestParam : public ConductorTestParam
{
public:
    explicit NetworkTestParam(const char *channel) : ConductorTestParam("UDP", channel, '|') {}
    // Shared singleton so INSTANTIATE_TEST_SUITE_P can hold a stable pointer.
    static NetworkTestParam *instance()
    {
        static NetworkTestParam instance{ CHANNEL_1 };
        return &instance;
    }
    bool publicationExists(aeron_driver_conductor_t *conductor, int64_t publication_id) override
    {
        return nullptr != aeron_driver_conductor_find_network_publication(conductor, publication_id);
    }
    size_t numPublications(aeron_driver_conductor_t *conductor) override
    {
        return aeron_driver_conductor_num_network_publications(conductor);
    }
    bool publicationHasRefCnt(aeron_driver_conductor_t *conductor, int64_t publication_id, int32_t refcnt) override
    {
        aeron_network_publication_t *pub = aeron_driver_conductor_find_network_publication(conductor, publication_id);
        return nullptr != pub && refcnt == pub->conductor_fields.refcnt;
    }
    bool sendEndpointExists(aeron_driver_conductor_t *conductor, const char *channel) override
    {
        return nullptr != aeron_driver_conductor_find_send_channel_endpoint(conductor, channel);
    }
    bool receiveEndpointExists(aeron_driver_conductor_t *conductor, const char *channel) override
    {
        return nullptr != aeron_driver_conductor_find_receive_channel_endpoint(conductor, channel);
    }
    bool hasReceiveEndpointCount(aeron_driver_conductor_t *conductor, size_t count) override
    {
        return count == aeron_driver_conductor_num_receive_channel_endpoints(conductor);
    }
    bool receiveEndpointHasRefCnt(
        aeron_driver_conductor_t *conductor, const char *channel, int32_t stream_id, int32_t session_id, int32_t refcnt)
    override
    {
        aeron_receive_channel_endpoint_t *receive_endpoint =
            aeron_driver_conductor_find_receive_channel_endpoint(conductor, channel);
        // Fix: use the stream_id/session_id arguments rather than the hardcoded
        // STREAM_ID_1/SESSION_ID_1 constants the original looked up, so the
        // check honours whatever the caller asked for.  (All existing callers
        // pass STREAM_ID_1/SESSION_ID_1, so current behaviour is unchanged.)
        return nullptr != receive_endpoint && refcnt == aeron_int64_counter_map_get(
            &receive_endpoint->stream_and_session_id_to_refcnt_map,
            aeron_map_compound_key(stream_id, session_id));
    }
    bool receiveEndpointHasStatus(
        aeron_driver_conductor_t *conductor, const char *channel, aeron_receive_channel_endpoint_status_t status)
    override
    {
        aeron_receive_channel_endpoint_t *receive_endpoint =
            aeron_driver_conductor_find_receive_channel_endpoint(conductor, channel);
        return nullptr != receive_endpoint && status == receive_endpoint->conductor_fields.status;
    }
    size_t numSubscriptions(aeron_driver_conductor_t *conductor) override
    {
        return aeron_driver_conductor_num_network_subscriptions(conductor);
    }
    bool hasSendEndpointCount(aeron_driver_conductor_t *conductor, size_t count) override
    {
        return count == aeron_driver_conductor_num_send_channel_endpoints(conductor);
    }
};
// IPC flavour of the test parameterisation.  Endpoint-related hooks keep the
// base-class "always true" defaults since IPC has no channel endpoints.
class IpcTestParam : public ConductorTestParam
{
public:
    explicit IpcTestParam(const char *channel) : ConductorTestParam("IPC", channel, '?') {}
    // Shared singleton so INSTANTIATE_TEST_SUITE_P can hold a stable pointer.
    // (Also drops the stray ';' that followed the original method body.)
    static IpcTestParam *instance()
    {
        static IpcTestParam instance{AERON_IPC_CHANNEL};
        return &instance;
    }
    bool publicationExists(aeron_driver_conductor_t *conductor, int64_t publication_id) override
    {
        return nullptr != aeron_driver_conductor_find_ipc_publication(conductor, publication_id);
    }
    size_t numPublications(aeron_driver_conductor_t *conductor) override
    {
        return aeron_driver_conductor_num_ipc_publications(conductor);
    }
    bool publicationHasRefCnt(aeron_driver_conductor_t *conductor, int64_t publication_id, int32_t refcnt) override
    {
        aeron_ipc_publication_t *pub = aeron_driver_conductor_find_ipc_publication(conductor, publication_id);
        return nullptr != pub && refcnt == pub->conductor_fields.refcnt;
    }
    size_t numSubscriptions(aeron_driver_conductor_t *conductor) override
    {
        return aeron_driver_conductor_num_ipc_subscriptions(conductor);
    }
};
// Value-parameterised fixture: each TEST_P below runs once per transport
// (UDP and IPC) via the ConductorTestParam instances registered further down.
class DriverConductorPubSubTest :
    public DriverConductorTest,
    public testing::TestWithParam<ConductorTestParam *>
{
};
// Instantiate the suite for both transports; the lambda names each
// instantiation after the transport ("UDP" / "IPC") in test output.
INSTANTIATE_TEST_SUITE_P(
    DriverConductorPubSubParameterisedTest,
    DriverConductorPubSubTest,
    testing::Values(NetworkTestParam::instance(), IpcTestParam::instance()),
    [](const testing::TestParamInfo<ConductorTestParam *> &info)
    {
        return std::string(info.param->m_name);
    });
// Adds a single publication, verifies endpoint/publication creation and the
// counter-ready + publication-ready broadcasts, then removes the publication
// and expects an operation-success broadcast.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddAndRemoveSingleNetworkPublication)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    int32_t counter_id;
    ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->sendEndpointExists(&m_conductor.m_conductor, channel));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id));
    // Capture the client-heartbeat counter id from the counter-ready broadcast
    // so it can be matched against the counters buffer below.
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_COUNTER_READY, _, _))
        .WillOnce(CaptureCounterId(&counter_id));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id, Eq(STREAM_ID_1), _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    EXPECT_CALL(m_mockCallbacks, onCounter(_, _, _, _, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, onCounter(counter_id, AERON_COUNTER_CLIENT_HEARTBEAT_TIMESTAMP_TYPE_ID, _, _, _, _)).
        With(IsIdCounter(client_id, std::string("client-heartbeat: 0")));
    readCounters(mock_counter_handler);
    ASSERT_EQ(removePublication(client_id, remove_correlation_id, pub_id), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_OPERATION_SUCCESS, _, _))
        .With(IsOperationSuccess(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Adds a single subscription, verifies the receive endpoint and the
// subscription-ready broadcast, then removes it and expects success.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddAndRemoveSingleNetworkSubscription)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id, channel, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
    ASSERT_TRUE(GetParam()->receiveEndpointExists(&m_conductor.m_conductor, channel))
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_COUNTER_READY, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_SUBSCRIPTION_READY, _, _))
        .With(IsSubscriptionReady(sub_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(removeSubscription(client_id, remove_correlation_id, sub_id), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 0u);
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_OPERATION_SUCCESS, _, _))
        .With(IsOperationSuccess(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Adds a subscription on a channel with an explicit session-id, checks the
// endpoint refcount goes to 1, removes it, and checks the refcount returns to
// 0 and the endpoint is closed.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddAndRemoveSingleNetworkSubscriptionBySession)
{
    char channel_with_session[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel_with_session, AERON_MAX_PATH, SESSION_ID_1);
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id, channel_with_session, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_SUBSCRIPTION_READY, _, _))
        .With(IsSubscriptionReady(sub_id));
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    ASSERT_TRUE(GetParam()->receiveEndpointHasRefCnt(&m_conductor.m_conductor, channel_with_session, STREAM_ID_1, SESSION_ID_1, 1));
    ASSERT_EQ(removeSubscription(client_id, remove_correlation_id, sub_id), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 0u);
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_OPERATION_SUCCESS, _, _))
        .With(IsOperationSuccess(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    ASSERT_TRUE(GetParam()->receiveEndpointHasRefCnt(&m_conductor.m_conductor, channel_with_session, STREAM_ID_1, SESSION_ID_1, 0));
    ASSERT_TRUE(GetParam()->receiveEndpointHasStatus(&m_conductor.m_conductor, channel_with_session, AERON_RECEIVE_CHANNEL_ENDPOINT_STATUS_CLOSED));
}
// Adds four publications on different stream ids over the same channel and
// verifies a single send endpoint is shared while four distinct publications
// exist, each announced with its own publication-ready broadcast.
// (Removes a dangling empty "// TODO:" marker from the original.)
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddMultipleNetworkPublications)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    int64_t pub_id_3 = nextCorrelationId();
    int64_t pub_id_4 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel, STREAM_ID_1, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel, STREAM_ID_2, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_3, channel, STREAM_ID_3, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_4, channel, STREAM_ID_4, false), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->sendEndpointExists(&m_conductor.m_conductor, channel));
    // All four publications share one endpoint for the channel.
    ASSERT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 1u));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_1));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_2));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_3));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_4));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id_1, Eq(STREAM_ID_1), _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id_2, Eq(STREAM_ID_2), _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id_3, Eq(STREAM_ID_3), _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id_4, Eq(STREAM_ID_4), _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Four shared (non-exclusive) publications on the same channel/stream collapse
// to one publication with refcount 4; removing one drops the refcount to 3.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddAndRemoveMultipleNetworkPublicationsToSameChannelSameStreamId)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    int64_t pub_id_3 = nextCorrelationId();
    int64_t pub_id_4 = nextCorrelationId();
    int64_t remove_correlation_id_1 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel, STREAM_ID_1, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel, STREAM_ID_1, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_3, channel, STREAM_ID_1, false), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_4, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_1));
    ASSERT_TRUE(GetParam()->publicationHasRefCnt(&m_conductor.m_conductor, pub_id_1, 4));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    // Each of the four adds still produces its own publication-ready response.
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(_, Eq(STREAM_ID_1), _))
        .Times(4);
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(removePublication(client_id, remove_correlation_id_1, pub_id_2), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->publicationHasRefCnt(&m_conductor.m_conductor, pub_id_1, 3));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_OPERATION_SUCCESS, _, _))
        .With(IsOperationSuccess(remove_correlation_id_1));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Unlike shared publications, four *exclusive* adds on the same channel/stream
// create four distinct publications (still sharing one send endpoint).
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddMultipleExclusiveNetworkPublicationsWithSameChannelSameStreamId)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    int64_t pub_id_3 = nextCorrelationId();
    int64_t pub_id_4 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_3, channel, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(client_id, pub_id_4, channel, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->sendEndpointExists(&m_conductor.m_conductor, channel));
    ASSERT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 1u));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_1));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_2));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_3));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id_4));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_EXCLUSIVE_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(_, Eq(STREAM_ID_1), _))
        .Times(4);
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Adds a publication on a channel with an explicit session-id, verifies
// creation, then removes it and expects an operation-success broadcast.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddAndRemoveSingleNetworkPublicationWithExplicitSessionId)
{
    char channel_with_session_id[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel_with_session_id, AERON_MAX_PATH, SESSION_ID_1);
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id, channel_with_session_id, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    // Fix: the original discarded the results of these two checks, making them
    // no-ops; assert them as the sibling tests do.
    ASSERT_TRUE(GetParam()->sendEndpointExists(&m_conductor.m_conductor, channel_with_session_id));
    ASSERT_TRUE(GetParam()->publicationExists(&m_conductor.m_conductor, pub_id));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(_, Eq(STREAM_ID_1), _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(removePublication(client_id, remove_correlation_id, pub_id), 0);
    // NOTE(review): sibling tests call doWorkUntilDone() after removal; this one
    // uses a single doWork() pass — confirm whether that is intentional.
    doWork();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_OPERATION_SUCCESS, _, _))
        .With(IsOperationSuccess(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Two clients adding publications with the same session-id and the same MTU
// should both receive publication-ready (parameters are compatible).
TEST_P(DriverConductorPubSubTest, shouldAddSecondNetworkPublicationWithSpecifiedSessionIdAndSameMtu)
{
    char channel[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel, AERON_MAX_PATH, SESSION_ID_1, MTU_1);
    int64_t client_id1 = nextCorrelationId();
    int64_t pub_id1 = nextCorrelationId();
    int64_t client_id2 = nextCorrelationId();
    int64_t pub_id2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id1, pub_id1, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(addPublication(client_id2, pub_id2, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// A second publication with the same session-id but a *different* MTU is
// rejected with an error broadcast.
TEST_P(DriverConductorPubSubTest, shouldFailToAddSecondNetworkPublicationWithSpecifiedSessionIdAndDifferentMtu)
{
    char channel1[AERON_MAX_PATH];
    char channel2[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel1, AERON_MAX_PATH, SESSION_ID_1, MTU_1);
    GetParam()->channelWithParams(channel2, AERON_MAX_PATH, SESSION_ID_1, MTU_2);
    int64_t client_id1 = nextCorrelationId();
    int64_t pub_id1 = nextCorrelationId();
    int64_t client_id2 = nextCorrelationId();
    int64_t pub_id2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id1, pub_id1, channel1, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(addPublication(client_id2, pub_id2, channel2, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Two publications with the same session-id and the same term-length are
// compatible; the second one also gets publication-ready.
TEST_P(DriverConductorPubSubTest, shouldAddSecondNetworkPublicationWithSpecifiedSessionIdAndSameTermLength)
{
    char channel[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel, AERON_MAX_PATH, SESSION_ID_1, MTU_1, TERM_LENGTH);
    int64_t client_id1 = nextCorrelationId();
    int64_t pub_id1 = nextCorrelationId();
    int64_t client_id2 = nextCorrelationId();
    int64_t pub_id2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id1, pub_id1, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    readAllBroadcastsFromConductor(mock_broadcast_handler);
    testing::Mock::VerifyAndClear(&m_mockCallbacks);
    ASSERT_EQ(addPublication(client_id2, pub_id2, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// A second publication with the same session-id but a different term-length is
// rejected with an error broadcast targeted at the second registration.
TEST_P(DriverConductorPubSubTest, shouldFailToAddSecondNetworkPublicationWithSpecifiedSessionIdAndDifferentTermLength)
{
    char channel1[AERON_MAX_PATH];
    char channel2[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel1, AERON_MAX_PATH, SESSION_ID_1, MTU_1, TERM_LENGTH);
    GetParam()->channelWithParams(channel2, AERON_MAX_PATH, SESSION_ID_1, MTU_1, TERM_LENGTH * 2);
    int64_t client_id1 = nextCorrelationId();
    int64_t pub_id1 = nextCorrelationId();
    int64_t client_id2 = nextCorrelationId();
    int64_t pub_id2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id1, pub_id1, channel1, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    readAllBroadcastsFromConductor(null_broadcast_handler);
    ASSERT_EQ(addPublication(client_id2, pub_id2, channel2, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id2));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Pins the conductor's next auto-assigned session-id to a value already taken
// by an explicit-session publication, then verifies the second (auto-session)
// publication is assigned a *different* session-id.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddSingleNetworkPublicationThatAvoidCollisionWithSpecifiedSessionId)
{
    char channel_with_session_id[AERON_MAX_PATH];
    const char *channel = GetParam()->m_channel;
    int32_t next_session_id = SESSION_ID_1;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    GetParam()->channelWithParams(channel_with_session_id, AERON_MAX_PATH, next_session_id);
    // Force the auto-assignment counter to collide with the explicit session-id.
    m_conductor.manuallySetNextSessionId(next_session_id);
    ASSERT_EQ(addPublication(client_id, pub_id, channel_with_session_id, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 2u);
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _)).Times(testing::AnyNumber());
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_EXCLUSIVE_PUBLICATION_READY, _, _))
        .With(IsPublicationReady(pub_id, STREAM_ID_1, Ne(next_session_id)));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// A second *exclusive* publication claiming an already-used explicit
// session-id must be rejected with an error broadcast.
TEST_P(DriverConductorPubSubTest, shouldErrorOnDuplicateExclusivePublicationWithSameSessionId)
{
    char channel_with_session_id[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel_with_session_id, AERON_MAX_PATH, SESSION_ID_1);
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel_with_session_id, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    doWorkUntilDone();
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel_with_session_id, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id_2));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Two shared publications on the same channel/stream but with *different*
// explicit session-ids conflict: the second is rejected.
TEST_P(DriverConductorPubSubTest, shouldErrorOnDuplicateSharedPublicationWithDifferentSessionId)
{
    char channel1[AERON_MAX_PATH];
    char channel2[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel1, AERON_MAX_PATH, SESSION_ID_1);
    GetParam()->channelWithParams(channel2, AERON_MAX_PATH, SESSION_ID_3);
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel1, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    doWorkUntilDone();
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel2, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id_2));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// A shared publication may not reuse the session-id of an existing exclusive
// publication on the same channel/stream: the second add is rejected.
TEST_P(DriverConductorPubSubTest, shouldErrorOnDuplicateSharedPublicationWithExclusivePublicationWithSameSessionId)
{
    char channel_with_session_id[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel_with_session_id, AERON_MAX_PATH, SESSION_ID_1);
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel_with_session_id, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    doWorkUntilDone();
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel_with_session_id, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id_2));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// The mirror case: an exclusive publication may not reuse the session-id of an
// existing shared publication on the same channel/stream.
TEST_P(DriverConductorPubSubTest, shouldErrorOnDuplicateExclusivePublicationWithSharedPublicationWithSameSessionId)
{
    char channel_with_session_id[AERON_MAX_PATH];
    GetParam()->channelWithParams(channel_with_session_id, AERON_MAX_PATH, SESSION_ID_1);
    int64_t client_id = nextCorrelationId();
    int64_t pub_id_1 = nextCorrelationId();
    int64_t pub_id_2 = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id_1, channel_with_session_id, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    doWorkUntilDone();
    ASSERT_EQ(addPublication(client_id, pub_id_2, channel_with_session_id, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id_2));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Four subscriptions on the same channel/stream share one receive endpoint
// while remaining four distinct subscriptions.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddMultipleNetworkSubscriptionsWithSameChannelSameStreamId)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t sub_id_1 = nextCorrelationId();
    int64_t sub_id_2 = nextCorrelationId();
    int64_t sub_id_3 = nextCorrelationId();
    int64_t sub_id_4 = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id_1, channel, STREAM_ID_1), 0);
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id_2, channel, STREAM_ID_1), 0);
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id_3, channel, STREAM_ID_1), 0);
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id_4, channel, STREAM_ID_1), 0);
    doWorkUntilDone();
    ASSERT_TRUE(GetParam()->receiveEndpointExists(&m_conductor.m_conductor, channel))
    ASSERT_TRUE(GetParam()->hasReceiveEndpointCount(&m_conductor.m_conductor, 1u));
    ASSERT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 4u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
}
// Removing a publication with an unknown registration id produces an error.
// NOTE(review): this uses TEST_F on a TestWithParam fixture, so it runs once
// unparameterised (it never calls GetParam()) — confirm this is intentional.
TEST_F(DriverConductorPubSubTest, shouldErrorOnRemovePublicationOnUnknownRegistrationId)
{
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(removePublication(client_id, remove_correlation_id, pub_id), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Removing a subscription with an unknown registration id produces an error.
// NOTE(review): TEST_F on a parameterised fixture — runs once, no GetParam().
TEST_F(DriverConductorPubSubTest, shouldErrorOnRemoveSubscriptionOnUnknownRegistrationId)
{
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(removeSubscription(client_id, remove_correlation_id, sub_id), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(remove_correlation_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// An unparseable channel URI causes the add-publication command to be
// rejected with an error broadcast for that registration.
TEST_F(DriverConductorPubSubTest, shouldErrorOnAddPublicationWithInvalidUri)
{
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id, INVALID_URI, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(pub_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// An unparseable channel URI causes the add-subscription command to be
// rejected with an error broadcast for that registration.
TEST_F(DriverConductorPubSubTest, shouldErrorOnAddSubscriptionWithInvalidUri)
{
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id, INVALID_URI, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_ERROR, _, _)).With(IsError(sub_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// With no client keepalives, advancing time past the linger + liveness window
// reaps the client, its publication, and the send endpoint, and emits a
// client-timeout broadcast.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToTimeoutNetworkPublication)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    EXPECT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 1u));
    readAllBroadcastsFromConductor(null_broadcast_handler);
    // Advance far enough that both the publication linger and the client
    // liveness timeout expire.
    doWorkForNs(
        m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2));
    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 0u);
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 0u);
    EXPECT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 0u));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_CLIENT_TIMEOUT, _, _)).With(IsTimeout(client_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Periodic client keepalives across the same timeout window keep both the
// client and its publication alive.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToNotTimeoutNetworkPublicationOnKeepalive)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    int64_t timeout =
        m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2);
    // Send a keepalive every 100 work iterations while time advances.
    doWorkForNs(
        timeout,
        100,
        [&]()
        {
            clientKeepalive(client_id);
        });
    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
}
// With no keepalives, the client timeout reaps the subscription and its
// receive endpoint, and emits a client-timeout broadcast.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToTimeoutNetworkSubscription)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id, channel, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_TRUE(GetParam()->hasReceiveEndpointCount(&m_conductor.m_conductor, 1u));
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    doWorkForNs(
        m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2));
    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 0u);
    EXPECT_TRUE(GetParam()->hasReceiveEndpointCount(&m_conductor.m_conductor, 0u));
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 0u);
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_CLIENT_TIMEOUT, _, _)).With(IsTimeout(client_id));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
// Periodic keepalives across the timeout window keep the client and its
// subscription alive.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToNotTimeoutNetworkSubscriptionOnKeepalive)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t sub_id = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(client_id, sub_id, channel, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
    readAllBroadcastsFromConductor(null_broadcast_handler);
    int64_t timeout =
        m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2);
    // Send a keepalive every 100 work iterations while time advances.
    doWorkForNs(
        timeout,
        100,
        [&]()
        {
            clientKeepalive(client_id);
        });
    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
}
// After an explicit remove, the publication and send endpoint are lingered
// away even though the client itself stays alive via keepalives.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToTimeoutSendChannelEndpointWithClientKeepaliveAfterRemovePublication)
{
    const char *channel = GetParam()->m_channel;
    int64_t client_id = nextCorrelationId();
    int64_t pub_id = nextCorrelationId();
    int64_t remove_correlation_id = nextCorrelationId();
    ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, false), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
    ASSERT_EQ(removePublication(client_id, remove_correlation_id, pub_id), 0);
    doWork();
    readAllBroadcastsFromConductor(null_broadcast_handler);
    int64_t timeout =
        m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2);
    // Keep the client alive while the removed publication lingers out.
    doWorkForNs(
        timeout,
        100,
        [&]()
        {
            clientKeepalive(client_id);
        });
    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
    EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 0u);
    EXPECT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 0u));
}
// Verifies that when freeing a removed publication's resources initially fails
// (simulated via the free_map_raw_log test hook), the conductor retries on a
// later resource-check pass and eventually releases the publication and its
// send channel endpoint, while the client stays alive through keepalives.
TEST_P(DriverConductorPubSubTest, shouldBeAbleToTimeoutSendChannelEndpointWithClientKeepaliveAfterRemovePublicationAfterRetryingFreeOperation)
{
const char *channel = GetParam()->m_channel;
int64_t client_id = nextCorrelationId();
int64_t pub_id = nextCorrelationId();
int64_t remove_correlation_id = nextCorrelationId();
// Create a publication and then remove it so it enters its linger period.
ASSERT_EQ(addPublication(client_id, pub_id, channel, STREAM_ID_1, false), 0);
doWorkUntilDone();
EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
ASSERT_EQ(removePublication(client_id, remove_correlation_id, pub_id), 0);
doWorkUntilDone();
readAllBroadcastsFromConductor(null_broadcast_handler);
const int64_t timeout =
m_context.m_context->publication_linger_timeout_ns + (m_context.m_context->client_liveness_timeout_ns * 2);
// Force raw-log free operations to fail so cleanup cannot complete yet.
free_map_raw_log = false;
int64_t *free_fails_counter = aeron_system_counter_addr(
&m_conductor.m_conductor.system_counters, AERON_SYSTEM_COUNTER_FREE_FAILS);
const int64_t free_fails = aeron_counter_get(free_fails_counter);
EXPECT_EQ(free_fails, 0);
// Run past linger + liveness with keepalives; every free attempt fails.
doWorkForNs(
timeout,
100,
[&]()
{
clientKeepalive(client_id);
});
const int64_t free_fails_new = aeron_counter_get(free_fails_counter);
EXPECT_GE(free_fails_new, 1);
// Free failed, so the publication and endpoint must still be held.
EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 1u);
EXPECT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 1u));
const int64_t resource_check_interval = m_context.m_context->timer_interval_ns * 2;
// Let frees succeed again and run through another resource-check cycle.
free_map_raw_log = true;
doWorkForNs(
resource_check_interval,
100,
[&]()
{
clientKeepalive(client_id);
});
// The retried free succeeded: resources released, failure count unchanged
// or only marginally grown; the client itself remains registered.
EXPECT_GE(aeron_counter_get(free_fails_counter), free_fails_new);
EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
EXPECT_EQ(GetParam()->numPublications(&m_conductor.m_conductor), 0u);
EXPECT_TRUE(GetParam()->hasSendEndpointCount(&m_conductor.m_conductor, 0u));
}
TEST_P(DriverConductorPubSubTest, shouldBeAbleToTimeoutReceiveChannelEndpointWithClientKeepaliveAfterRemoveSubscription)
{
    // Removing a subscription should release its receive channel endpoint
    // within a liveness interval, while the client stays registered thanks
    // to keepalives.
    const char *uri = GetParam()->m_channel;
    const int64_t clientId = nextCorrelationId();
    const int64_t subId = nextCorrelationId();
    const int64_t removeId = nextCorrelationId();

    ASSERT_EQ(addNetworkSubscription(clientId, subId, uri, STREAM_ID_1), 0);
    doWorkUntilDone();
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);

    ASSERT_EQ(removeSubscription(clientId, removeId, subId), 0);
    doWorkUntilDone();
    readAllBroadcastsFromConductor(null_broadcast_handler);

    // One liveness window is enough for the endpoint cleanup.
    const int64_t runForNs = m_context.m_context->client_liveness_timeout_ns;
    doWorkForNs(runForNs, 100, [&]() { clientKeepalive(clientId); });

    EXPECT_EQ(aeron_driver_conductor_num_clients(&m_conductor.m_conductor), 1u);
    EXPECT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 0u);
    EXPECT_TRUE(GetParam()->hasReceiveEndpointCount(&m_conductor.m_conductor, 0u));
}
TEST_P(DriverConductorPubSubTest, shouldNotAddDynamicSessionIdInReservedRange)
{
    // With the dynamic session-id allocator positioned at the low end of the
    // reserved range, a new publication must receive an id strictly outside
    // [reserved_low, reserved_high].
    const char *uri = GetParam()->m_channel;
    m_conductor.manuallySetNextSessionId(m_conductor.m_conductor.publication_reserved_session_id_low);

    const int64_t clientId = nextCorrelationId();
    const int64_t pubId = nextCorrelationId();
    ASSERT_EQ(addPublication(clientId, pubId, uri, STREAM_ID_1, false), 0);
    doWorkUntilDone();

    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_PUBLICATION_READY, _, _))
        .WillRepeatedly(
            [&](std::int32_t, uint8_t *buffer, size_t)
            {
                const auto *msg = reinterpret_cast<aeron_publication_buffers_ready_t *>(buffer);
                const auto low = m_conductor.m_conductor.publication_reserved_session_id_low;
                const auto high = m_conductor.m_conductor.publication_reserved_session_id_high;
                // Assigned id must fall below the reserved band or above it.
                EXPECT_TRUE(msg->session_id < low || high < msg->session_id)
                    << "Session Id [" << msg->session_id << "] should not be in the range: "
                    << low
                    << " to "
                    << high;
            });
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
TEST_P(DriverConductorPubSubTest, shouldNotAccidentallyBumpIntoExistingSessionId)
{
    // Pre-claim SESSION_ID_3..5 via explicit-session publications, position
    // the dynamic allocator at SESSION_ID_3, and verify that the next
    // dynamically-assigned publication skips every id already in use.
    const char *uri = GetParam()->m_channel;
    char uriSession3[AERON_MAX_PATH];
    char uriSession4[AERON_MAX_PATH];
    char uriSession5[AERON_MAX_PATH];
    GetParam()->channelWithParams(uriSession3, AERON_MAX_PATH, SESSION_ID_3);
    GetParam()->channelWithParams(uriSession4, AERON_MAX_PATH, SESSION_ID_4);
    GetParam()->channelWithParams(uriSession5, AERON_MAX_PATH, SESSION_ID_5);

    m_conductor.manuallySetNextSessionId(SESSION_ID_3);

    const int64_t clientId = nextCorrelationId();
    const int64_t pubId1 = nextCorrelationId();
    const int64_t pubId2 = nextCorrelationId();
    const int64_t pubId3 = nextCorrelationId();
    const int64_t pubId4 = nextCorrelationId();

    ASSERT_EQ(addPublication(clientId, pubId1, uriSession3, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(clientId, pubId2, uriSession4, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(clientId, pubId3, uriSession5, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    readAllBroadcastsFromConductor(null_broadcast_handler);

    // Now request a publication with no explicit session id.
    ASSERT_EQ(addPublication(clientId, pubId4, uri, STREAM_ID_1, true), 0);
    doWork();

    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_EXCLUSIVE_PUBLICATION_READY, _, _))
        .WillRepeatedly(
            [&](std::int32_t, uint8_t *buffer, size_t)
            {
                const auto *msg = reinterpret_cast<aeron_publication_buffers_ready_t *>(buffer);
                EXPECT_EQ(msg->correlation_id, pubId4);
                EXPECT_NE(msg->session_id, SESSION_ID_3);
                EXPECT_NE(msg->session_id, SESSION_ID_4);
                EXPECT_NE(msg->session_id, SESSION_ID_5);
            });
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
TEST_P(DriverConductorPubSubTest, shouldNotAccidentallyBumpIntoExistingSessionIdWithSessionIdWrapping)
{
    // Occupy a run of session ids straddling the int32 wrap point
    // (INT32_MAX - 1 through INT32_MIN + 1) and check that dynamic
    // assignment, started just before the wrap, skips every taken id.
    const int32_t takenId1 = INT32_MAX - 1;
    const int32_t takenId2 = takenId1 + 1;
    const int32_t takenId3 = INT32_MIN;
    const int32_t takenId4 = takenId3 + 1;

    const char *uri = GetParam()->m_channel;
    char uriTaken1[AERON_MAX_PATH];
    char uriTaken2[AERON_MAX_PATH];
    char uriTaken3[AERON_MAX_PATH];
    char uriTaken4[AERON_MAX_PATH];
    GetParam()->channelWithParams(uriTaken1, AERON_MAX_PATH, takenId1);
    GetParam()->channelWithParams(uriTaken2, AERON_MAX_PATH, takenId2);
    GetParam()->channelWithParams(uriTaken3, AERON_MAX_PATH, takenId3);
    GetParam()->channelWithParams(uriTaken4, AERON_MAX_PATH, takenId4);

    m_conductor.manuallySetNextSessionId(takenId1);

    const int64_t clientId = nextCorrelationId();
    const int64_t pubId1 = nextCorrelationId();
    const int64_t pubId2 = nextCorrelationId();
    const int64_t pubId3 = nextCorrelationId();
    const int64_t pubId4 = nextCorrelationId();
    const int64_t pubId5 = nextCorrelationId();

    ASSERT_EQ(addPublication(clientId, pubId1, uriTaken1, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(clientId, pubId2, uriTaken2, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(clientId, pubId3, uriTaken3, STREAM_ID_1, true), 0);
    ASSERT_EQ(addPublication(clientId, pubId4, uriTaken4, STREAM_ID_1, true), 0);
    doWorkUntilDone();
    readAllBroadcastsFromConductor(null_broadcast_handler);

    // Request a publication without an explicit session id.
    ASSERT_EQ(addPublication(clientId, pubId5, uri, STREAM_ID_1, true), 0);
    doWork();

    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_EXCLUSIVE_PUBLICATION_READY, _, _))
        .WillRepeatedly(
            [&](std::int32_t, uint8_t *buffer, size_t)
            {
                const auto *msg = reinterpret_cast<aeron_publication_buffers_ready_t *>(buffer);
                EXPECT_EQ(msg->correlation_id, pubId5);
                EXPECT_NE(msg->session_id, takenId1);
                EXPECT_NE(msg->session_id, takenId2);
                EXPECT_NE(msg->session_id, takenId3);
                EXPECT_NE(msg->session_id, takenId4);
            });
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
TEST_P(DriverConductorPubSubTest, shouldBeAbleToAddSingleNetworkSubscriptionWithSpecifiedSessionId)
{
    // A subscription URI carrying an explicit session id should create one
    // subscription plus a receive endpoint keyed by that exact URI, and
    // acknowledge the client with a subscription-ready broadcast.
    char uriWithSessionId[AERON_MAX_PATH];
    GetParam()->channelWithParams(uriWithSessionId, AERON_MAX_PATH, SESSION_ID_1);

    const int64_t clientId = nextCorrelationId();
    const int64_t subId = nextCorrelationId();
    ASSERT_EQ(addNetworkSubscription(clientId, subId, uriWithSessionId, STREAM_ID_1), 0);
    doWorkUntilDone();

    ASSERT_EQ(GetParam()->numSubscriptions(&m_conductor.m_conductor), 1u);
    ASSERT_TRUE(GetParam()->receiveEndpointExists(&m_conductor.m_conductor, uriWithSessionId));

    EXPECT_CALL(m_mockCallbacks, broadcastToClient(_, _, _));
    EXPECT_CALL(m_mockCallbacks, broadcastToClient(AERON_RESPONSE_ON_SUBSCRIPTION_READY, _, _))
        .With(IsSubscriptionReady(subId));
    readAllBroadcastsFromConductor(mock_broadcast_handler);
}
| 18,899 |
884 | /*
* Copyright 2014 - 2021 Blazebit.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.blazebit.persistence.impl;
import com.blazebit.persistence.ConfigurationProperties;
import com.blazebit.persistence.CriteriaBuilderFactory;
import com.blazebit.persistence.impl.dialect.CockroachSQLDbmsDialect;
import com.blazebit.persistence.impl.dialect.DB2DbmsDialect;
import com.blazebit.persistence.impl.dialect.DefaultDbmsDialect;
import com.blazebit.persistence.impl.dialect.H2DbmsDialect;
import com.blazebit.persistence.impl.dialect.MSSQLDbmsDialect;
import com.blazebit.persistence.impl.dialect.MySQL8DbmsDialect;
import com.blazebit.persistence.impl.dialect.MySQLDbmsDialect;
import com.blazebit.persistence.impl.dialect.OracleDbmsDialect;
import com.blazebit.persistence.impl.dialect.PostgreSQLDbmsDialect;
import com.blazebit.persistence.impl.function.alias.AliasFunction;
import com.blazebit.persistence.impl.function.base64.Base64Function;
import com.blazebit.persistence.impl.function.base64.PostgreSQLBase64Function;
import com.blazebit.persistence.impl.function.cast.CastFunction;
import com.blazebit.persistence.impl.function.cast.DB2CastFunction;
import com.blazebit.persistence.impl.function.chr.CharChrFunction;
import com.blazebit.persistence.impl.function.chr.ChrFunction;
import com.blazebit.persistence.impl.function.colldml.CollectionDmlSupportFunction;
import com.blazebit.persistence.impl.function.coltrunc.ColumnTruncFunction;
import com.blazebit.persistence.impl.function.concat.ConcatFunction;
import com.blazebit.persistence.impl.function.concat.PipeBasedConcatFunction;
import com.blazebit.persistence.impl.function.concat.PlusBasedConcatFunction;
import com.blazebit.persistence.impl.function.count.AbstractCountFunction;
import com.blazebit.persistence.impl.function.count.CountTupleEmulationFunction;
import com.blazebit.persistence.impl.function.count.CountTupleFunction;
import com.blazebit.persistence.impl.function.count.MySQLCountTupleFunction;
import com.blazebit.persistence.impl.function.countwrapper.CountWrapperFunction;
import com.blazebit.persistence.impl.function.dateadd.day.DB2DayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.DayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.H2DayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.MSSQLDayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.MySQLDayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.OracleDayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.day.PostgreSQLDayAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.DB2HourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.H2HourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.HourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.MSSQLHourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.MySQLHourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.OracleHourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.hour.PostgreSQLHourAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.DB2MicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.H2MicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.MSSQLMicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.MicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.MySQLMicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.OracleMicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.microseconds.PostgreSQLMicrosecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.DB2MillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.H2MillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.MSSQLMillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.MillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.MySQLMillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.OracleMillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.milliseconds.PostgreSQLMillisecondsAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.DB2MinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.H2MinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.MSSQLMinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.MinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.MySQLMinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.OracleMinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.minute.PostgreSQLMinuteAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.DB2MonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.H2MonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.MSSQLMonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.MonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.MySQLMonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.OracleMonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.month.PostgreSQLMonthAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.DB2QuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.H2QuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.MSSQLQuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.MySQLQuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.OracleQuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.PostgreSQLQuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.quarter.QuarterAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.DB2SecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.H2SecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.MSSQLSecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.MySQLSecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.OracleSecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.PostgreSQLSecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.second.SecondAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.DB2WeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.H2WeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.MSSQLWeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.MySQLWeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.OracleWeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.PostgreSQLWeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.week.WeekAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.DB2YearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.H2YearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.MSSQLYearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.MySQLYearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.OracleYearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.PostgreSQLYearAddFunction;
import com.blazebit.persistence.impl.function.dateadd.year.YearAddFunction;
import com.blazebit.persistence.impl.function.datediff.day.AccessDayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.day.DB2DayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.day.DefaultDayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.day.MySQLDayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.day.OracleDayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.day.PostgreSQLDayDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.AccessHourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.DB2HourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.DefaultHourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.MySQLHourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.OracleHourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.hour.PostgreSQLHourDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.AccessMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.DB2MicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.DefaultMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.MSSQLMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.MySQLMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.OracleMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.microsecond.PostgreSQLMicrosecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.AccessMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.DB2MillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.DefaultMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.MSSQLMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.MySQLMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.OracleMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.millisecond.PostgreSQLMillisecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.AccessMinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.DB2MinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.DefaultMinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.MySQLMinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.OracleMinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.minute.PostgreSQLMinuteDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.AccessMonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.DB2MonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.DefaultMonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.MySQLMonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.OracleMonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.month.PostgreSQLMonthDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.AccessQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.DB2QuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.DefaultQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.H2QuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.MSSQLQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.MySQLQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.OracleQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.quarter.PostgreSQLQuarterDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.AccessSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.DB2SecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.DefaultSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.MSSQLSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.MySQLSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.OracleSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.second.PostgreSQLSecondDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.DB2WeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.DefaultWeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.H2WeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.MSSQLWeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.MySQLWeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.OracleWeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.week.PostgreSQLWeekDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.AccessYearDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.DB2YearDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.DefaultYearDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.MySQLYearDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.OracleYearDiffFunction;
import com.blazebit.persistence.impl.function.datediff.year.PostgreSQLYearDiffFunction;
import com.blazebit.persistence.impl.function.datetime.day.AccessDayFunction;
import com.blazebit.persistence.impl.function.datetime.day.DB2DayFunction;
import com.blazebit.persistence.impl.function.datetime.day.DayFunction;
import com.blazebit.persistence.impl.function.datetime.day.DerbyDayFunction;
import com.blazebit.persistence.impl.function.datetime.day.MSSQLDayFunction;
import com.blazebit.persistence.impl.function.datetime.day.PostgreSQLDayFunction;
import com.blazebit.persistence.impl.function.datetime.day.SqliteDayFunction;
import com.blazebit.persistence.impl.function.datetime.day.SybaseDayFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.AccessDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.DB2DayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.DayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.MSSQLDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.MySQLDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.OracleDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.PostgreSQLDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.SqliteDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofweek.SybaseDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.AccessDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.DB2DayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.DayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.MSSQLDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.MySQLDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.OracleDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.PostgreSQLDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.SqliteDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.dayofyear.SybaseDayOfYearFunction;
import com.blazebit.persistence.impl.function.datetime.epoch.DB2EpochFunction;
import com.blazebit.persistence.impl.function.datetime.epoch.DefaultEpochFunction;
import com.blazebit.persistence.impl.function.datetime.epoch.MySQLEpochFunction;
import com.blazebit.persistence.impl.function.datetime.epoch.OracleEpochFunction;
import com.blazebit.persistence.impl.function.datetime.epoch.PostgreSQLEpochFunction;
import com.blazebit.persistence.impl.function.datetime.epochday.DB2EpochDayFunction;
import com.blazebit.persistence.impl.function.datetime.epochday.DefaultEpochDayFunction;
import com.blazebit.persistence.impl.function.datetime.epochday.MySQLEpochDayFunction;
import com.blazebit.persistence.impl.function.datetime.epochday.OracleEpochDayFunction;
import com.blazebit.persistence.impl.function.datetime.epochday.PostgreSQLEpochDayFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.DB2EpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.DefaultEpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.MSSQLEpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.MySQLEpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.OracleEpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmicro.PostgreSQLEpochMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.DB2EpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.DefaultEpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.MSSQLEpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.MySQLEpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.OracleEpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.epochmilli.PostgreSQLEpochMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.hour.AccessHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.DB2HourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.DerbyHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.HourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.MSSQLHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.OracleHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.PostgreSQLHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.SqliteHourFunction;
import com.blazebit.persistence.impl.function.datetime.hour.SybaseHourFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.AccessIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.DB2IsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.IsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.MSSQLIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.MySQLIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.OracleIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.PostgreSQLIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.SqliteIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isodayofweek.SybaseIsoDayOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.AccessIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.DB2IsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.H2IsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.IsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.MSSQLIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.MySQLIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.OracleIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.PostgreSQLIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.SqliteIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.isoweek.SybaseIsoWeekFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.DB2MicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.MSSQLMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.MicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.MySQLMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.OracleMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.PostgreSQLMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.microsecond.SybaseMicrosecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.DB2MillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.MSSQLMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.MillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.MySQLMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.OracleMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.PostgreSQLMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.millisecond.SybaseMillisecondFunction;
import com.blazebit.persistence.impl.function.datetime.minute.AccessMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.DB2MinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.DerbyMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.MSSQLMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.MinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.OracleMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.PostgreSQLMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.SqliteMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.minute.SybaseMinuteFunction;
import com.blazebit.persistence.impl.function.datetime.month.AccessMonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.DB2MonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.DerbyMonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.MSSQLMonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.MonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.PostgreSQLMonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.SqliteMonthFunction;
import com.blazebit.persistence.impl.function.datetime.month.SybaseMonthFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.AccessQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.DB2QuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.MSSQLQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.OracleQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.PostgreSQLQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.QuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.SqliteQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.quarter.SybaseQuarterFunction;
import com.blazebit.persistence.impl.function.datetime.second.AccessSecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.DB2SecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.DerbySecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.MSSQLSecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.OracleSecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.PostgreSQLSecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.SecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.SqliteSecondFunction;
import com.blazebit.persistence.impl.function.datetime.second.SybaseSecondFunction;
import com.blazebit.persistence.impl.function.datetime.week.DB2WeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.week.MSSQLWeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.week.MySQLWeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.week.OracleWeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.week.PostgreSQLWeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.week.WeekInYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.AccessYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.DB2YearFunction;
import com.blazebit.persistence.impl.function.datetime.year.DerbyYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.MSSQLYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.PostgreSQLYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.SqliteYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.SybaseYearFunction;
import com.blazebit.persistence.impl.function.datetime.year.YearFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.DB2YearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.MSSQLYearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.MySQLYearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.OracleYearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.PostgreSQLYearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearofweek.YearOfWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.DB2YearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.H2YearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.MSSQLYearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.MySQLYearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.OracleYearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.PostgreSQLYearWeekFunction;
import com.blazebit.persistence.impl.function.datetime.yearweek.YearWeekFunction;
import com.blazebit.persistence.impl.function.entity.EntityFunction;
import com.blazebit.persistence.impl.function.every.EveryFunction;
import com.blazebit.persistence.impl.function.every.FallbackEveryFunction;
import com.blazebit.persistence.impl.function.exist.ExistFunction;
import com.blazebit.persistence.impl.function.greatest.AbstractGreatestFunction;
import com.blazebit.persistence.impl.function.greatest.DefaultGreatestFunction;
import com.blazebit.persistence.impl.function.greatest.MaxGreatestFunction;
import com.blazebit.persistence.impl.function.greatest.SelectMaxUnionGreatestFunction;
import com.blazebit.persistence.impl.function.groupconcat.AbstractGroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.DB2GroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.H2GroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.MSSQLGroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.MySQLGroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.OracleListaggGroupConcatFunction;
import com.blazebit.persistence.impl.function.groupconcat.PostgreSQLGroupConcatFunction;
import com.blazebit.persistence.impl.function.grouping.GroupingFunction;
import com.blazebit.persistence.impl.function.groupingsets.CubeFunction;
import com.blazebit.persistence.impl.function.groupingsets.GroupingSetFunction;
import com.blazebit.persistence.impl.function.groupingsets.GroupingSetsFunction;
import com.blazebit.persistence.impl.function.groupingsets.RollupFunction;
import com.blazebit.persistence.impl.function.jsonget.AbstractJsonGetFunction;
import com.blazebit.persistence.impl.function.jsonget.DB2JsonGetFunction;
import com.blazebit.persistence.impl.function.jsonget.MSSQLJsonGetFunction;
import com.blazebit.persistence.impl.function.jsonget.MySQL8JsonGetFunction;
import com.blazebit.persistence.impl.function.jsonget.OracleJsonGetFunction;
import com.blazebit.persistence.impl.function.jsonget.PostgreSQLJsonGetFunction;
import com.blazebit.persistence.impl.function.jsonset.AbstractJsonSetFunction;
import com.blazebit.persistence.impl.function.jsonset.DB2JsonSetFunction;
import com.blazebit.persistence.impl.function.jsonset.MSSQLJsonSetFunction;
import com.blazebit.persistence.impl.function.jsonset.MySQL8JsonSetFunction;
import com.blazebit.persistence.impl.function.jsonset.OracleJsonSetFunction;
import com.blazebit.persistence.impl.function.jsonset.PostgreSQLJsonSetFunction;
import com.blazebit.persistence.impl.function.least.AbstractLeastFunction;
import com.blazebit.persistence.impl.function.least.DefaultLeastFunction;
import com.blazebit.persistence.impl.function.least.MinLeastFunction;
import com.blazebit.persistence.impl.function.least.SelectMinUnionLeastFunction;
import com.blazebit.persistence.impl.function.limit.LimitFunction;
import com.blazebit.persistence.impl.function.literal.LiteralCalendarFunction;
import com.blazebit.persistence.impl.function.literal.LiteralDateFunction;
import com.blazebit.persistence.impl.function.literal.LiteralDateTimestampFunction;
import com.blazebit.persistence.impl.function.literal.LiteralInstantFunction;
import com.blazebit.persistence.impl.function.literal.LiteralLocalDateFunction;
import com.blazebit.persistence.impl.function.literal.LiteralLocalDateTimeFunction;
import com.blazebit.persistence.impl.function.literal.LiteralLocalTimeFunction;
import com.blazebit.persistence.impl.function.literal.LiteralOffsetDateTimeFunction;
import com.blazebit.persistence.impl.function.literal.LiteralOffsetTimeFunction;
import com.blazebit.persistence.impl.function.literal.LiteralTimeFunction;
import com.blazebit.persistence.impl.function.literal.LiteralTimestampFunction;
import com.blazebit.persistence.impl.function.literal.LiteralZonedDateTimeFunction;
import com.blazebit.persistence.impl.function.nullfn.NullfnFunction;
import com.blazebit.persistence.impl.function.nullsubquery.NullSubqueryFunction;
import com.blazebit.persistence.impl.function.oragg.FallbackOrAggFunction;
import com.blazebit.persistence.impl.function.oragg.OrAggFunction;
import com.blazebit.persistence.impl.function.pageposition.MySQLPagePositionFunction;
import com.blazebit.persistence.impl.function.pageposition.OraclePagePositionFunction;
import com.blazebit.persistence.impl.function.pageposition.PagePositionFunction;
import com.blazebit.persistence.impl.function.pageposition.TransactSQLPagePositionFunction;
import com.blazebit.persistence.impl.function.param.ParamFunction;
import com.blazebit.persistence.impl.function.querywrapper.QueryWrapperFunction;
import com.blazebit.persistence.impl.function.repeat.AbstractRepeatFunction;
import com.blazebit.persistence.impl.function.repeat.DefaultRepeatFunction;
import com.blazebit.persistence.impl.function.repeat.LpadRepeatFunction;
import com.blazebit.persistence.impl.function.repeat.ReplicateRepeatFunction;
import com.blazebit.persistence.impl.function.replace.ReplaceFunction;
import com.blazebit.persistence.impl.function.rowvalue.RowValueComparisonFunction;
import com.blazebit.persistence.impl.function.rowvalue.RowValueSubqueryComparisonFunction;
import com.blazebit.persistence.impl.function.set.SetFunction;
import com.blazebit.persistence.impl.function.stringjsonagg.AbstractStringJsonAggFunction;
import com.blazebit.persistence.impl.function.stringjsonagg.GroupConcatBasedStringJsonAggFunction;
import com.blazebit.persistence.impl.function.stringjsonagg.MySQLStringJsonAggFunction;
import com.blazebit.persistence.impl.function.stringjsonagg.OracleStringJsonAggFunction;
import com.blazebit.persistence.impl.function.stringjsonagg.PostgreSQLStringJsonAggFunction;
import com.blazebit.persistence.impl.function.stringxmlagg.AbstractStringXmlAggFunction;
import com.blazebit.persistence.impl.function.stringxmlagg.GroupConcatBasedStringXmlAggFunction;
import com.blazebit.persistence.impl.function.stringxmlagg.OracleGroupConcatBasedStringXmlAggFunction;
import com.blazebit.persistence.impl.function.stringxmlagg.PostgreSQLStringXmlAggFunction;
import com.blazebit.persistence.impl.function.subquery.SubqueryFunction;
import com.blazebit.persistence.impl.function.tomultiset.ToMultisetFunction;
import com.blazebit.persistence.impl.function.tostringjson.AbstractToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringjson.ForJsonPathToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringjson.GroupConcatBasedToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringjson.MySQLToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringjson.OracleToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringjson.PostgreSQLToStringJsonFunction;
import com.blazebit.persistence.impl.function.tostringxml.AbstractToStringXmlFunction;
import com.blazebit.persistence.impl.function.tostringxml.ForXmlPathToStringXmlFunction;
import com.blazebit.persistence.impl.function.tostringxml.GroupConcatBasedToStringXmlFunction;
import com.blazebit.persistence.impl.function.tostringxml.OracleGroupConcatBasedToStringXmlFunction;
import com.blazebit.persistence.impl.function.tostringxml.PostgreSQLToStringXmlFunction;
import com.blazebit.persistence.impl.function.treat.TreatFunction;
import com.blazebit.persistence.impl.function.trunc.day.DB2TruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.H2TruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.MSSQLTruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.MySQLTruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.OracleTruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.PostgreSQLTruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.day.TruncDayFunction;
import com.blazebit.persistence.impl.function.trunc.hour.DB2TruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.H2TruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.MSSQLTruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.MySQLTruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.OracleTruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.PostgreSQLTruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.hour.TruncHourFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.DB2TruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.H2TruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.MSSQLTruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.MySQLTruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.OracleTruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.PostgreSQLTruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.microseconds.TruncMicrosecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.DB2TruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.H2TruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.MSSQLTruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.MySQLTruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.OracleTruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.PostgreSQLTruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.milliseconds.TruncMillisecondsFunction;
import com.blazebit.persistence.impl.function.trunc.minute.DB2TruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.H2TruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.MSSQLTruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.MySQLTruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.OracleTruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.PostgreSQLTruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.minute.TruncMinuteFunction;
import com.blazebit.persistence.impl.function.trunc.month.DB2TruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.H2TruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.MSSQLTruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.MySQLTruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.OracleTruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.PostgreSQLTruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.month.TruncMonthFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.DB2TruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.H2TruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.MSSQLTruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.MySQLTruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.OracleTruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.PostgreSQLTruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.quarter.TruncQuarterFunction;
import com.blazebit.persistence.impl.function.trunc.second.DB2TruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.H2TruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.MSSQLTruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.MySQLTruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.OracleTruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.PostgreSQLTruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.second.TruncSecondFunction;
import com.blazebit.persistence.impl.function.trunc.week.MSSQLTruncWeekFunction;
import com.blazebit.persistence.impl.function.trunc.week.MySQLTruncWeekFunction;
import com.blazebit.persistence.impl.function.trunc.week.OracleTruncWeekFunction;
import com.blazebit.persistence.impl.function.trunc.week.TruncWeekFunction;
import com.blazebit.persistence.impl.function.trunc.year.DB2TruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.H2TruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.MSSQLTruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.MySQLTruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.OracleTruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.PostgreSQLTruncYearFunction;
import com.blazebit.persistence.impl.function.trunc.year.TruncYearFunction;
import com.blazebit.persistence.impl.function.window.avg.AvgFunction;
import com.blazebit.persistence.impl.function.window.count.CountFunction;
import com.blazebit.persistence.impl.function.window.cumedist.CumeDistFunction;
import com.blazebit.persistence.impl.function.window.denserank.DenseRankFunction;
import com.blazebit.persistence.impl.function.window.every.FallbackWindowEveryFunction;
import com.blazebit.persistence.impl.function.window.every.WindowEveryFunction;
import com.blazebit.persistence.impl.function.window.first.FirstValueFunction;
import com.blazebit.persistence.impl.function.window.groupconcat.DB2GroupConcatWindowFunction;
import com.blazebit.persistence.impl.function.window.groupconcat.H2GroupConcatWindowFunction;
import com.blazebit.persistence.impl.function.window.groupconcat.MySQLGroupConcatWindowFunction;
import com.blazebit.persistence.impl.function.window.groupconcat.OracleListaggGroupConcatWindowFunction;
import com.blazebit.persistence.impl.function.window.groupconcat.PostgreSQLGroupConcatWindowFunction;
import com.blazebit.persistence.impl.function.window.lag.LagFunction;
import com.blazebit.persistence.impl.function.window.last.LastValueFunction;
import com.blazebit.persistence.impl.function.window.lead.LeadFunction;
import com.blazebit.persistence.impl.function.window.max.MaxFunction;
import com.blazebit.persistence.impl.function.window.min.MinFunction;
import com.blazebit.persistence.impl.function.window.nth.NthValueFunction;
import com.blazebit.persistence.impl.function.window.ntile.NtileFunction;
import com.blazebit.persistence.impl.function.window.oragg.FallbackWindowOrAggFunction;
import com.blazebit.persistence.impl.function.window.oragg.WindowOrAggFunction;
import com.blazebit.persistence.impl.function.window.percentrank.PercentRankFunction;
import com.blazebit.persistence.impl.function.window.rank.RankFunction;
import com.blazebit.persistence.impl.function.window.row.RowNumberFunction;
import com.blazebit.persistence.impl.function.window.sum.SumFunction;
import com.blazebit.persistence.impl.util.CriteriaBuilderConfigurationContributorComparator;
import com.blazebit.persistence.parser.expression.ConcurrentHashMapExpressionCache;
import com.blazebit.persistence.spi.CriteriaBuilderConfiguration;
import com.blazebit.persistence.spi.CriteriaBuilderConfigurationContributor;
import com.blazebit.persistence.spi.DbmsDialect;
import com.blazebit.persistence.spi.EntityManagerFactoryIntegrator;
import com.blazebit.persistence.spi.ExtendedQuerySupport;
import com.blazebit.persistence.spi.JpqlFunction;
import com.blazebit.persistence.spi.JpqlFunctionGroup;
import com.blazebit.persistence.spi.JpqlFunctionKind;
import com.blazebit.persistence.spi.JpqlMacro;
import com.blazebit.persistence.spi.LateralStyle;
import com.blazebit.persistence.spi.PackageOpener;
import com.blazebit.persistence.spi.SetOperationType;
import javax.persistence.EntityManagerFactory;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.TimeZone;
/**
*
* @author <NAME>
* @author <NAME>
* @since 1.0.0
*/
public class CriteriaBuilderConfigurationImpl implements CriteriaBuilderConfiguration {
    // Registered DBMS dialects keyed by DBMS name; the null key is used for the default dialect
    // (see e.g. the limit function registration, which resolves dbmsDialects.get(null)).
    private final Map<String, DbmsDialect> dbmsDialects = new HashMap<String, DbmsDialect>();
    // Registered JPQL function groups keyed by function name.
    private final Map<String, JpqlFunctionGroup> functions = new HashMap<String, JpqlFunctionGroup>();
    // Named types usable in TREAT expressions, keyed by their registration name (e.g. "Integer").
    private final Map<String, Class<?>> treatTypes = new HashMap<String, Class<?>>();
    // Registered JPQL macros keyed by macro name.
    private final Map<String, JpqlMacro> macros = new HashMap<String, JpqlMacro>();
    // Entity manager factory integrators registered with this configuration.
    private final List<EntityManagerFactoryIntegrator> entityManagerIntegrators = new ArrayList<EntityManagerFactoryIntegrator>();
    // Opens packages for reflective access on behalf of the integration; supplied at construction time.
    private PackageOpener packageOpener;
    // Configuration properties, pre-populated with defaults by the constructor.
    private Properties properties = new Properties();
    // Query support implementation discovered at construction time; may remain null if none is available.
    // NOTE(review): nullability is assumed from the lazy discovery pattern — confirm against loadExtendedQuerySupport().
    private ExtendedQuerySupport extendedQuerySupport;
public CriteriaBuilderConfigurationImpl(PackageOpener packageOpener) {
this.packageOpener = packageOpener;
loadDefaultProperties();
loadExtendedQuerySupport();
loadEntityManagerIntegrator();
loadDbmsDialects();
loadFunctions();
loadExtensions();
}
// NOTE: When adding a function here, you might want to also add it in AbstractCoreTest so it is recognized
@SuppressWarnings("checkstyle:methodlength")
private void loadFunctions() {
JpqlFunctionGroup jpqlFunctionGroup;
// limit
jpqlFunctionGroup = new JpqlFunctionGroup(LimitFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new LimitFunction(dbmsDialects.get(null)));
jpqlFunctionGroup.add("mysql", new LimitFunction(dbmsDialects.get("mysql")));
jpqlFunctionGroup.add("mysql8", new LimitFunction(dbmsDialects.get("mysql8")));
jpqlFunctionGroup.add("oracle", new LimitFunction(dbmsDialects.get("oracle")));
jpqlFunctionGroup.add("db2", new LimitFunction(dbmsDialects.get("db2")));
jpqlFunctionGroup.add("sybase", null); // Does not support limit
jpqlFunctionGroup.add("microsoft", new LimitFunction(dbmsDialects.get("microsoft")));
registerFunction(jpqlFunctionGroup);
// page_position
jpqlFunctionGroup = new JpqlFunctionGroup(PagePositionFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new PagePositionFunction());
jpqlFunctionGroup.add("mysql", new MySQLPagePositionFunction());
jpqlFunctionGroup.add("mysql8", new MySQLPagePositionFunction());
jpqlFunctionGroup.add("oracle", new OraclePagePositionFunction());
jpqlFunctionGroup.add("sybase", new TransactSQLPagePositionFunction());
jpqlFunctionGroup.add("microsoft", new TransactSQLPagePositionFunction());
registerFunction(jpqlFunctionGroup);
// entity_function
registerFunction(EntityFunction.FUNCTION_NAME, new EntityFunction());
// nullfn
registerFunction(NullfnFunction.FUNCTION_NAME, new NullfnFunction());
// collection_dml_support
registerFunction(CollectionDmlSupportFunction.FUNCTION_NAME, new CollectionDmlSupportFunction());
// param
registerFunction(ParamFunction.FUNCTION_NAME, new ParamFunction());
// exist
registerFunction(ExistFunction.FUNCTION_NAME, new ExistFunction());
// replace
registerFunction(ReplaceFunction.FUNCTION_NAME, new ReplaceFunction());
// chr
jpqlFunctionGroup = new JpqlFunctionGroup(ChrFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new ChrFunction());
jpqlFunctionGroup.add("mysql", new CharChrFunction());
jpqlFunctionGroup.add("mysql8", new CharChrFunction());
jpqlFunctionGroup.add("microsoft", new CharChrFunction());
jpqlFunctionGroup.add("sybase", new CharChrFunction());
registerFunction(jpqlFunctionGroup);
// base64
jpqlFunctionGroup = new JpqlFunctionGroup(Base64Function.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new Base64Function());
jpqlFunctionGroup.add("postgresql", new PostgreSQLBase64Function());
registerFunction(jpqlFunctionGroup);
// set operations
for (SetOperationType setType : SetOperationType.values()) {
// Use a prefix because hibernate uses UNION as keyword
jpqlFunctionGroup = new JpqlFunctionGroup("set_" + setType.name().toLowerCase(), false);
for (Map.Entry<String, DbmsDialect> dbmsDialectEntry : dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dbmsDialectEntry.getKey(), new SetFunction(setType, dbmsDialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
}
// temporal literals
registerFunction(LiteralTimeFunction.FUNCTION_NAME, new LiteralTimeFunction());
registerFunction(LiteralDateFunction.FUNCTION_NAME, new LiteralDateFunction());
registerFunction(LiteralTimestampFunction.FUNCTION_NAME, new LiteralTimestampFunction());
registerFunction(LiteralDateTimestampFunction.FUNCTION_NAME, new LiteralDateTimestampFunction());
registerFunction(LiteralCalendarFunction.FUNCTION_NAME, new LiteralCalendarFunction());
// treat
registerNamedType("Boolean", Boolean.class);
registerNamedType("Byte", Byte.class);
registerNamedType("Short", Short.class);
registerNamedType("Integer", Integer.class);
registerNamedType("Long", Long.class);
registerNamedType("Float", Float.class);
registerNamedType("Double", Double.class);
registerNamedType("Character", Character.class);
registerNamedType("String", String.class);
registerNamedType("BigInteger", BigInteger.class);
registerNamedType("BigDecimal", BigDecimal.class);
registerNamedType("Time", Time.class);
registerNamedType("Date", java.sql.Date.class);
registerNamedType("Timestamp", Timestamp.class);
registerNamedType("TimeZone", TimeZone.class);
registerNamedType("Calendar", Calendar.class);
registerNamedType("GregorianCalendar", GregorianCalendar.class);
registerNamedType("Class", java.lang.Class.class);
registerNamedType("Currency", java.util.Currency.class);
registerNamedType("Locale", java.util.Locale.class);
registerNamedType("UUID", java.util.UUID.class);
registerNamedType("URL", java.net.URL.class);
// Java 8 time types
try {
registerNamedType("LocalDate", Class.forName("java.time.LocalDate"));
registerNamedType("LocalTime", Class.forName("java.time.LocalTime"));
registerNamedType("LocalDateTime", Class.forName("java.time.LocalDateTime"));
registerNamedType("OffsetTime", Class.forName("java.time.OffsetTime"));
registerNamedType("OffsetDateTime", Class.forName("java.time.OffsetDateTime"));
registerNamedType("ZonedDateTime", Class.forName("java.time.ZonedDateTime"));
registerNamedType("Duration", Class.forName("java.time.Duration"));
registerNamedType("Instant", Class.forName("java.time.Instant"));
registerNamedType("MonthDay", Class.forName("java.time.MonthDay"));
registerNamedType("Year", Class.forName("java.time.Year"));
registerNamedType("YearMonth", Class.forName("java.time.YearMonth"));
registerNamedType("Period", Class.forName("java.time.Period"));
registerNamedType("ZoneId", Class.forName("java.time.ZoneId"));
registerNamedType("ZoneOffset", Class.forName("java.time.ZoneOffset"));
registerFunction(LiteralLocalDateFunction.FUNCTION_NAME, new LiteralLocalDateFunction());
registerFunction(LiteralLocalTimeFunction.FUNCTION_NAME, new LiteralLocalTimeFunction());
registerFunction(LiteralLocalDateTimeFunction.FUNCTION_NAME, new LiteralLocalDateTimeFunction());
registerFunction(LiteralInstantFunction.FUNCTION_NAME, new LiteralInstantFunction());
registerFunction(LiteralZonedDateTimeFunction.FUNCTION_NAME, new LiteralZonedDateTimeFunction());
registerFunction(LiteralOffsetTimeFunction.FUNCTION_NAME, new LiteralOffsetTimeFunction());
registerFunction(LiteralOffsetDateTimeFunction.FUNCTION_NAME, new LiteralOffsetDateTimeFunction());
} catch (ClassNotFoundException ex) {
// If they aren't found, we ignore them
}
// cast
registerFunction(new JpqlFunctionGroup("cast_boolean"));
registerFunction(new JpqlFunctionGroup("cast_byte"));
registerFunction(new JpqlFunctionGroup("cast_short"));
registerFunction(new JpqlFunctionGroup("cast_integer"));
registerFunction(new JpqlFunctionGroup("cast_long"));
registerFunction(new JpqlFunctionGroup("cast_float"));
registerFunction(new JpqlFunctionGroup("cast_double"));
registerFunction(new JpqlFunctionGroup("cast_character"));
registerFunction(new JpqlFunctionGroup("cast_string"));
registerFunction(new JpqlFunctionGroup("cast_biginteger"));
registerFunction(new JpqlFunctionGroup("cast_bigdecimal"));
registerFunction(new JpqlFunctionGroup("cast_time"));
registerFunction(new JpqlFunctionGroup("cast_date"));
registerFunction(new JpqlFunctionGroup("cast_timestamp"));
registerFunction(new JpqlFunctionGroup("cast_calendar"));
for (Map.Entry<String, DbmsDialect> dbmsDialectEntry : dbmsDialects.entrySet()) {
for (Class<?> type : BasicCastTypes.TYPES) {
CastFunction castFunction;
if ("db2".equals(dbmsDialectEntry.getKey())) {
castFunction = new DB2CastFunction(type, dbmsDialectEntry.getValue());
} else {
castFunction = new CastFunction(type, dbmsDialectEntry.getValue());
}
functions.get("cast_" + type.getSimpleName().toLowerCase()).add(dbmsDialectEntry.getKey(), castFunction);
}
}
// concat
jpqlFunctionGroup = new JpqlFunctionGroup(ConcatFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, PipeBasedConcatFunction.INSTANCE);
jpqlFunctionGroup.add("mysql", ConcatFunction.INSTANCE);
jpqlFunctionGroup.add("mysql8", ConcatFunction.INSTANCE);
jpqlFunctionGroup.add("microsoft", PlusBasedConcatFunction.INSTANCE);
jpqlFunctionGroup.add("sybase", PlusBasedConcatFunction.INSTANCE);
registerFunction(jpqlFunctionGroup);
// group_concat
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractGroupConcatFunction.FUNCTION_NAME, true);
jpqlFunctionGroup.add("db2", new DB2GroupConcatFunction());
jpqlFunctionGroup.add("oracle", new OracleListaggGroupConcatFunction());
jpqlFunctionGroup.add("h2", new H2GroupConcatFunction());
jpqlFunctionGroup.add("mysql", new MySQLGroupConcatFunction());
jpqlFunctionGroup.add("mysql8", new MySQLGroupConcatFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLGroupConcatFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLGroupConcatFunction());
registerFunction(jpqlFunctionGroup);
// window_group_concat
jpqlFunctionGroup = new JpqlFunctionGroup("window_group_concat", JpqlFunctionKind.WINDOW);
jpqlFunctionGroup.add("db2", new DB2GroupConcatWindowFunction(dbmsDialects.get("db2")));
jpqlFunctionGroup.add("oracle", new OracleListaggGroupConcatWindowFunction(dbmsDialects.get("oracle")));
jpqlFunctionGroup.add("h2", new H2GroupConcatWindowFunction(dbmsDialects.get("h2")));
jpqlFunctionGroup.add("mysql", new MySQLGroupConcatWindowFunction(dbmsDialects.get("mysql")));
jpqlFunctionGroup.add("mysql8", new MySQLGroupConcatWindowFunction(dbmsDialects.get("mysql8")));
jpqlFunctionGroup.add("postgresql", new PostgreSQLGroupConcatWindowFunction(dbmsDialects.get("postgresql")));
registerFunction(jpqlFunctionGroup);
// datetime: field-extraction functions. Pattern throughout: the null key is the
// generic/default implementation, dialect keys override it.
// year: extract the year from a date/timestamp.
jpqlFunctionGroup = new JpqlFunctionGroup("year", false);
jpqlFunctionGroup.add(null, new YearFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLYearFunction());
jpqlFunctionGroup.add("access", new AccessYearFunction());
jpqlFunctionGroup.add("db2", new DB2YearFunction());
jpqlFunctionGroup.add("derby", new DerbyYearFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLYearFunction());
jpqlFunctionGroup.add("sybase", new SybaseYearFunction());
jpqlFunctionGroup.add("sqlite", new SqliteYearFunction());
registerFunction(jpqlFunctionGroup);
// year_of_week: the year that the (ISO) week of the date belongs to.
jpqlFunctionGroup = new JpqlFunctionGroup("year_of_week", false);
jpqlFunctionGroup.add(null, new YearOfWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLYearOfWeekFunction());
jpqlFunctionGroup.add("db2", new DB2YearOfWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLYearOfWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLYearOfWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLYearOfWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleYearOfWeekFunction());
registerFunction(jpqlFunctionGroup);
// year_week: combined year + week value.
jpqlFunctionGroup = new JpqlFunctionGroup("year_week", false);
jpqlFunctionGroup.add(null, new YearWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLYearWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLYearWeekFunction());
jpqlFunctionGroup.add("db2", new DB2YearWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleYearWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLYearWeekFunction());
jpqlFunctionGroup.add("h2", new H2YearWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLYearWeekFunction());
registerFunction(jpqlFunctionGroup);
// month: extract the month from a date/timestamp.
jpqlFunctionGroup = new JpqlFunctionGroup("month", false);
jpqlFunctionGroup.add(null, new MonthFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMonthFunction());
jpqlFunctionGroup.add("access", new AccessMonthFunction());
jpqlFunctionGroup.add("db2", new DB2MonthFunction());
jpqlFunctionGroup.add("derby", new DerbyMonthFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMonthFunction());
jpqlFunctionGroup.add("sybase", new SybaseMonthFunction());
jpqlFunctionGroup.add("sqlite", new SqliteMonthFunction());
registerFunction(jpqlFunctionGroup);
// week: registered with the same ISO-week implementations as "iso_week" below,
// i.e. "week" is an alias for the ISO 8601 week number.
// Fix: the original added the "sqlite" entry twice; the second add merely
// overwrote the first with a fresh, identical instance — duplicate removed.
jpqlFunctionGroup = new JpqlFunctionGroup("week", false);
jpqlFunctionGroup.add(null, new IsoWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLIsoWeekFunction());
jpqlFunctionGroup.add("access", new AccessIsoWeekFunction());
jpqlFunctionGroup.add("db2", new DB2IsoWeekFunction());
jpqlFunctionGroup.add("h2", new H2IsoWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLIsoWeekFunction());
jpqlFunctionGroup.add("sybase", new SybaseIsoWeekFunction());
jpqlFunctionGroup.add("sqlite", new SqliteIsoWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLIsoWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLIsoWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleIsoWeekFunction());
registerFunction(jpqlFunctionGroup);
// iso_week: ISO 8601 week number of the year.
// Fix: the original added the "sqlite" entry twice; the second add merely
// overwrote the first with a fresh, identical instance — duplicate removed.
jpqlFunctionGroup = new JpqlFunctionGroup("iso_week", false);
jpqlFunctionGroup.add(null, new IsoWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLIsoWeekFunction());
jpqlFunctionGroup.add("access", new AccessIsoWeekFunction());
jpqlFunctionGroup.add("db2", new DB2IsoWeekFunction());
jpqlFunctionGroup.add("h2", new H2IsoWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLIsoWeekFunction());
jpqlFunctionGroup.add("sybase", new SybaseIsoWeekFunction());
jpqlFunctionGroup.add("sqlite", new SqliteIsoWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLIsoWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLIsoWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleIsoWeekFunction());
registerFunction(jpqlFunctionGroup);
// week_in_year: week number within the calendar year (distinct from the
// ISO-week based "week"/"iso_week" groups above).
jpqlFunctionGroup = new JpqlFunctionGroup("week_in_year", false);
jpqlFunctionGroup.add(null, new WeekInYearFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLWeekInYearFunction());
jpqlFunctionGroup.add("db2", new DB2WeekInYearFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLWeekInYearFunction());
jpqlFunctionGroup.add("mysql", new MySQLWeekInYearFunction());
jpqlFunctionGroup.add("mysql8", new MySQLWeekInYearFunction());
jpqlFunctionGroup.add("oracle", new OracleWeekInYearFunction());
registerFunction(jpqlFunctionGroup);
// quarter: extract the quarter (1-4) from a date/timestamp.
// Fix: the original added the "sqlite" entry twice; the second add merely
// overwrote the first with a fresh, identical instance — duplicate removed.
jpqlFunctionGroup = new JpqlFunctionGroup("quarter", false);
jpqlFunctionGroup.add(null, new QuarterFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLQuarterFunction());
jpqlFunctionGroup.add("access", new AccessQuarterFunction());
jpqlFunctionGroup.add("db2", new DB2QuarterFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLQuarterFunction());
jpqlFunctionGroup.add("sybase", new SybaseQuarterFunction());
jpqlFunctionGroup.add("sqlite", new SqliteQuarterFunction());
jpqlFunctionGroup.add("oracle", new OracleQuarterFunction());
registerFunction(jpqlFunctionGroup);
// day: day of month.
jpqlFunctionGroup = new JpqlFunctionGroup("day", false);
jpqlFunctionGroup.add(null, new DayFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLDayFunction());
jpqlFunctionGroup.add("access", new AccessDayFunction());
jpqlFunctionGroup.add("db2", new DB2DayFunction());
jpqlFunctionGroup.add("derby", new DerbyDayFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLDayFunction());
jpqlFunctionGroup.add("sybase", new SybaseDayFunction());
jpqlFunctionGroup.add("sqlite", new SqliteDayFunction());
registerFunction(jpqlFunctionGroup);
// dayofyear: day number within the year.
jpqlFunctionGroup = new JpqlFunctionGroup("dayofyear", false);
jpqlFunctionGroup.add(null, new DayOfYearFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLDayOfYearFunction());
jpqlFunctionGroup.add("access", new AccessDayOfYearFunction());
jpqlFunctionGroup.add("db2", new DB2DayOfYearFunction());
jpqlFunctionGroup.add("mysql", new MySQLDayOfYearFunction());
jpqlFunctionGroup.add("mysql8", new MySQLDayOfYearFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLDayOfYearFunction());
jpqlFunctionGroup.add("sybase", new SybaseDayOfYearFunction());
jpqlFunctionGroup.add("oracle", new OracleDayOfYearFunction());
jpqlFunctionGroup.add("sqlite", new SqliteDayOfYearFunction());
registerFunction(jpqlFunctionGroup);
// dayofweek: day of the week (non-ISO numbering; see isodayofweek below).
jpqlFunctionGroup = new JpqlFunctionGroup("dayofweek", false);
jpqlFunctionGroup.add(null, new DayOfWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLDayOfWeekFunction());
jpqlFunctionGroup.add("access", new AccessDayOfWeekFunction());
jpqlFunctionGroup.add("db2", new DB2DayOfWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLDayOfWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLDayOfWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLDayOfWeekFunction());
jpqlFunctionGroup.add("sybase", new SybaseDayOfWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleDayOfWeekFunction());
jpqlFunctionGroup.add("sqlite", new SqliteDayOfWeekFunction());
registerFunction(jpqlFunctionGroup);
// isodayofweek: day of the week with ISO 8601 numbering.
jpqlFunctionGroup = new JpqlFunctionGroup("isodayofweek", false);
jpqlFunctionGroup.add(null, new IsoDayOfWeekFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLIsoDayOfWeekFunction());
jpqlFunctionGroup.add("access", new AccessIsoDayOfWeekFunction());
jpqlFunctionGroup.add("db2", new DB2IsoDayOfWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLIsoDayOfWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLIsoDayOfWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLIsoDayOfWeekFunction());
jpqlFunctionGroup.add("sybase", new SybaseIsoDayOfWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleIsoDayOfWeekFunction());
jpqlFunctionGroup.add("sqlite", new SqliteIsoDayOfWeekFunction());
registerFunction(jpqlFunctionGroup);
// hour: extract the hour component.
jpqlFunctionGroup = new JpqlFunctionGroup("hour", false);
jpqlFunctionGroup.add(null, new HourFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLHourFunction());
jpqlFunctionGroup.add("access", new AccessHourFunction());
jpqlFunctionGroup.add("db2", new DB2HourFunction());
jpqlFunctionGroup.add("derby", new DerbyHourFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLHourFunction());
jpqlFunctionGroup.add("sybase", new SybaseHourFunction());
jpqlFunctionGroup.add("oracle", new OracleHourFunction());
jpqlFunctionGroup.add("sqlite", new SqliteHourFunction());
registerFunction(jpqlFunctionGroup);
// minute: extract the minute component.
jpqlFunctionGroup = new JpqlFunctionGroup("minute", false);
jpqlFunctionGroup.add(null, new MinuteFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMinuteFunction());
jpqlFunctionGroup.add("access", new AccessMinuteFunction());
jpqlFunctionGroup.add("db2", new DB2MinuteFunction());
jpqlFunctionGroup.add("derby", new DerbyMinuteFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMinuteFunction());
jpqlFunctionGroup.add("sybase", new SybaseMinuteFunction());
jpqlFunctionGroup.add("oracle", new OracleMinuteFunction());
jpqlFunctionGroup.add("sqlite", new SqliteMinuteFunction());
registerFunction(jpqlFunctionGroup);
// second: extract the second component.
jpqlFunctionGroup = new JpqlFunctionGroup("second", false);
jpqlFunctionGroup.add(null, new SecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLSecondFunction());
jpqlFunctionGroup.add("access", new AccessSecondFunction());
jpqlFunctionGroup.add("db2", new DB2SecondFunction());
jpqlFunctionGroup.add("derby", new DerbySecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLSecondFunction());
jpqlFunctionGroup.add("sybase", new SybaseSecondFunction());
jpqlFunctionGroup.add("oracle", new OracleSecondFunction());
jpqlFunctionGroup.add("sqlite", new SqliteSecondFunction());
registerFunction(jpqlFunctionGroup);
// millisecond: extract the millisecond component. Note the narrower dialect
// coverage (no access/derby/sqlite entries) compared to the groups above.
jpqlFunctionGroup = new JpqlFunctionGroup("millisecond", false);
jpqlFunctionGroup.add(null, new MillisecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMillisecondFunction());
jpqlFunctionGroup.add("db2", new DB2MillisecondFunction());
jpqlFunctionGroup.add("mysql", new MySQLMillisecondFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMillisecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMillisecondFunction());
jpqlFunctionGroup.add("sybase", new SybaseMillisecondFunction());
jpqlFunctionGroup.add("oracle", new OracleMillisecondFunction());
registerFunction(jpqlFunctionGroup);
// microsecond: extract the microsecond component.
jpqlFunctionGroup = new JpqlFunctionGroup("microsecond", false);
jpqlFunctionGroup.add(null, new MicrosecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMicrosecondFunction());
jpqlFunctionGroup.add("db2", new DB2MicrosecondFunction());
jpqlFunctionGroup.add("mysql", new MySQLMicrosecondFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMicrosecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMicrosecondFunction());
jpqlFunctionGroup.add("sybase", new SybaseMicrosecondFunction());
jpqlFunctionGroup.add("oracle", new OracleMicrosecondFunction());
registerFunction(jpqlFunctionGroup);
// epoch: convert a timestamp to an epoch-based numeric value.
jpqlFunctionGroup = new JpqlFunctionGroup("epoch", false);
jpqlFunctionGroup.add(null, new DefaultEpochFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLEpochFunction());
jpqlFunctionGroup.add("oracle", new OracleEpochFunction());
jpqlFunctionGroup.add("db2", new DB2EpochFunction());
jpqlFunctionGroup.add("mysql", new MySQLEpochFunction());
jpqlFunctionGroup.add("mysql8", new MySQLEpochFunction());
registerFunction(jpqlFunctionGroup);
// epoch_seconds: registered with registrations identical to "epoch" above,
// i.e. it is an alias of "epoch".
jpqlFunctionGroup = new JpqlFunctionGroup("epoch_seconds", false);
jpqlFunctionGroup.add(null, new DefaultEpochFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLEpochFunction());
jpqlFunctionGroup.add("oracle", new OracleEpochFunction());
jpqlFunctionGroup.add("db2", new DB2EpochFunction());
jpqlFunctionGroup.add("mysql", new MySQLEpochFunction());
jpqlFunctionGroup.add("mysql8", new MySQLEpochFunction());
registerFunction(jpqlFunctionGroup);
// epoch_days: epoch value expressed in days.
jpqlFunctionGroup = new JpqlFunctionGroup("epoch_days", false);
jpqlFunctionGroup.add(null, new DefaultEpochDayFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLEpochDayFunction());
jpqlFunctionGroup.add("oracle", new OracleEpochDayFunction());
jpqlFunctionGroup.add("db2", new DB2EpochDayFunction());
jpqlFunctionGroup.add("mysql", new MySQLEpochDayFunction());
jpqlFunctionGroup.add("mysql8", new MySQLEpochDayFunction());
registerFunction(jpqlFunctionGroup);
// epoch_milliseconds: epoch value expressed in milliseconds (adds MSSQL support).
jpqlFunctionGroup = new JpqlFunctionGroup("epoch_milliseconds", false);
jpqlFunctionGroup.add(null, new DefaultEpochMillisecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLEpochMillisecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLEpochMillisecondFunction());
jpqlFunctionGroup.add("oracle", new OracleEpochMillisecondFunction());
jpqlFunctionGroup.add("db2", new DB2EpochMillisecondFunction());
jpqlFunctionGroup.add("mysql", new MySQLEpochMillisecondFunction());
jpqlFunctionGroup.add("mysql8", new MySQLEpochMillisecondFunction());
registerFunction(jpqlFunctionGroup);
// epoch_microseconds: epoch value expressed in microseconds.
jpqlFunctionGroup = new JpqlFunctionGroup("epoch_microseconds", false);
jpqlFunctionGroup.add(null, new DefaultEpochMicrosecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLEpochMicrosecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLEpochMicrosecondFunction());
jpqlFunctionGroup.add("oracle", new OracleEpochMicrosecondFunction());
jpqlFunctionGroup.add("db2", new DB2EpochMicrosecondFunction());
jpqlFunctionGroup.add("mysql", new MySQLEpochMicrosecondFunction());
jpqlFunctionGroup.add("mysql8", new MySQLEpochMicrosecondFunction());
registerFunction(jpqlFunctionGroup);
// dateadd: add a signed amount of a given time unit to a date/timestamp.
// Every group follows the same dialect matrix: generic default, then
// db2/h2/microsoft/mysql/mysql8/postgresql/oracle overrides.
jpqlFunctionGroup = new JpqlFunctionGroup(DayAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new DayAddFunction());
jpqlFunctionGroup.add("db2", new DB2DayAddFunction());
jpqlFunctionGroup.add("h2", new H2DayAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLDayAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLDayAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLDayAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLDayAddFunction());
jpqlFunctionGroup.add("oracle", new OracleDayAddFunction());
registerFunction(jpqlFunctionGroup);
// add hours
jpqlFunctionGroup = new JpqlFunctionGroup(HourAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new HourAddFunction());
jpqlFunctionGroup.add("db2", new DB2HourAddFunction());
jpqlFunctionGroup.add("h2", new H2HourAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLHourAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLHourAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLHourAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLHourAddFunction());
jpqlFunctionGroup.add("oracle", new OracleHourAddFunction());
registerFunction(jpqlFunctionGroup);
// add microseconds
jpqlFunctionGroup = new JpqlFunctionGroup(MicrosecondsAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new MicrosecondsAddFunction());
jpqlFunctionGroup.add("db2", new DB2MicrosecondsAddFunction());
jpqlFunctionGroup.add("h2", new H2MicrosecondsAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMicrosecondsAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLMicrosecondsAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMicrosecondsAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMicrosecondsAddFunction());
jpqlFunctionGroup.add("oracle", new OracleMicrosecondsAddFunction());
registerFunction(jpqlFunctionGroup);
// add milliseconds
jpqlFunctionGroup = new JpqlFunctionGroup(MillisecondsAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new MillisecondsAddFunction());
jpqlFunctionGroup.add("db2", new DB2MillisecondsAddFunction());
jpqlFunctionGroup.add("h2", new H2MillisecondsAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMillisecondsAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLMillisecondsAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMillisecondsAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMillisecondsAddFunction());
jpqlFunctionGroup.add("oracle", new OracleMillisecondsAddFunction());
registerFunction(jpqlFunctionGroup);
// add minutes
jpqlFunctionGroup = new JpqlFunctionGroup(MinuteAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new MinuteAddFunction());
jpqlFunctionGroup.add("db2", new DB2MinuteAddFunction());
jpqlFunctionGroup.add("h2", new H2MinuteAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMinuteAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLMinuteAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMinuteAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMinuteAddFunction());
jpqlFunctionGroup.add("oracle", new OracleMinuteAddFunction());
registerFunction(jpqlFunctionGroup);
// dateadd continued: month/quarter/second/week/year, same dialect matrix
// (generic default plus db2/h2/microsoft/mysql/mysql8/postgresql/oracle).
jpqlFunctionGroup = new JpqlFunctionGroup(MonthAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new MonthAddFunction());
jpqlFunctionGroup.add("db2", new DB2MonthAddFunction());
jpqlFunctionGroup.add("h2", new H2MonthAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMonthAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLMonthAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMonthAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMonthAddFunction());
jpqlFunctionGroup.add("oracle", new OracleMonthAddFunction());
registerFunction(jpqlFunctionGroup);
// add quarters
jpqlFunctionGroup = new JpqlFunctionGroup(QuarterAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new QuarterAddFunction());
jpqlFunctionGroup.add("db2", new DB2QuarterAddFunction());
jpqlFunctionGroup.add("h2", new H2QuarterAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLQuarterAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLQuarterAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLQuarterAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLQuarterAddFunction());
jpqlFunctionGroup.add("oracle", new OracleQuarterAddFunction());
registerFunction(jpqlFunctionGroup);
// add seconds
jpqlFunctionGroup = new JpqlFunctionGroup(SecondAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new SecondAddFunction());
jpqlFunctionGroup.add("db2", new DB2SecondAddFunction());
jpqlFunctionGroup.add("h2", new H2SecondAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLSecondAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLSecondAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLSecondAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLSecondAddFunction());
jpqlFunctionGroup.add("oracle", new OracleSecondAddFunction());
registerFunction(jpqlFunctionGroup);
// add weeks
jpqlFunctionGroup = new JpqlFunctionGroup(WeekAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new WeekAddFunction());
jpqlFunctionGroup.add("db2", new DB2WeekAddFunction());
jpqlFunctionGroup.add("h2", new H2WeekAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLWeekAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLWeekAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLWeekAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLWeekAddFunction());
jpqlFunctionGroup.add("oracle", new OracleWeekAddFunction());
registerFunction(jpqlFunctionGroup);
// add years
jpqlFunctionGroup = new JpqlFunctionGroup(YearAddFunction.NAME, false);
jpqlFunctionGroup.add(null, new YearAddFunction());
jpqlFunctionGroup.add("db2", new DB2YearAddFunction());
jpqlFunctionGroup.add("h2", new H2YearAddFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLYearAddFunction());
jpqlFunctionGroup.add("mysql", new MySQLYearAddFunction());
jpqlFunctionGroup.add("mysql8", new MySQLYearAddFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLYearAddFunction());
jpqlFunctionGroup.add("oracle", new OracleYearAddFunction());
registerFunction(jpqlFunctionGroup);
// datediff: difference between two date/timestamps in a given unit.
// NOTE(review): year_diff through minute_diff register no null (default)
// implementation — unsupported dialects presumably fail at lookup time;
// confirm whether that is intentional (second_diff below does have a default).
jpqlFunctionGroup = new JpqlFunctionGroup("year_diff", false);
jpqlFunctionGroup.add("access", new AccessYearDiffFunction());
jpqlFunctionGroup.add("db2", new DB2YearDiffFunction());
jpqlFunctionGroup.add("h2", new DefaultYearDiffFunction());
jpqlFunctionGroup.add("microsoft", new DefaultYearDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLYearDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLYearDiffFunction());
jpqlFunctionGroup.add("sybase", new DefaultYearDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLYearDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleYearDiffFunction());
registerFunction(jpqlFunctionGroup);
// month difference
jpqlFunctionGroup = new JpqlFunctionGroup("month_diff", false);
jpqlFunctionGroup.add("access", new AccessMonthDiffFunction());
jpqlFunctionGroup.add("db2", new DB2MonthDiffFunction());
jpqlFunctionGroup.add("h2", new DefaultMonthDiffFunction());
jpqlFunctionGroup.add("microsoft", new DefaultMonthDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLMonthDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMonthDiffFunction());
jpqlFunctionGroup.add("sybase", new DefaultMonthDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMonthDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleMonthDiffFunction());
registerFunction(jpqlFunctionGroup);
// day difference
jpqlFunctionGroup = new JpqlFunctionGroup("day_diff", false);
jpqlFunctionGroup.add("access", new AccessDayDiffFunction());
jpqlFunctionGroup.add("db2", new DB2DayDiffFunction());
jpqlFunctionGroup.add("h2", new DefaultDayDiffFunction());
jpqlFunctionGroup.add("microsoft", new DefaultDayDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLDayDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLDayDiffFunction());
jpqlFunctionGroup.add("sybase", new DefaultDayDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLDayDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleDayDiffFunction());
registerFunction(jpqlFunctionGroup);
// hour difference
jpqlFunctionGroup = new JpqlFunctionGroup("hour_diff", false);
jpqlFunctionGroup.add("access", new AccessHourDiffFunction());
jpqlFunctionGroup.add("db2", new DB2HourDiffFunction());
jpqlFunctionGroup.add("h2", new DefaultHourDiffFunction());
jpqlFunctionGroup.add("microsoft", new DefaultHourDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLHourDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLHourDiffFunction());
jpqlFunctionGroup.add("sybase", new DefaultHourDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLHourDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleHourDiffFunction());
registerFunction(jpqlFunctionGroup);
// minute difference
jpqlFunctionGroup = new JpqlFunctionGroup("minute_diff", false);
jpqlFunctionGroup.add("access", new AccessMinuteDiffFunction());
jpqlFunctionGroup.add("db2", new DB2MinuteDiffFunction());
jpqlFunctionGroup.add("h2", new DefaultMinuteDiffFunction());
jpqlFunctionGroup.add("microsoft", new DefaultMinuteDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLMinuteDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMinuteDiffFunction());
jpqlFunctionGroup.add("sybase", new DefaultMinuteDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMinuteDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleMinuteDiffFunction());
registerFunction(jpqlFunctionGroup);
// second difference (this and the remaining diff groups do register a default).
jpqlFunctionGroup = new JpqlFunctionGroup("second_diff", false);
jpqlFunctionGroup.add(null, new DefaultSecondDiffFunction());
jpqlFunctionGroup.add("access", new AccessSecondDiffFunction());
jpqlFunctionGroup.add("db2", new DB2SecondDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLSecondDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLSecondDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLSecondDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLSecondDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleSecondDiffFunction());
registerFunction(jpqlFunctionGroup);
// epoch_diff: registered with registrations identical to "second_diff" above,
// i.e. it is an alias of second-granularity difference.
jpqlFunctionGroup = new JpqlFunctionGroup("epoch_diff", false);
jpqlFunctionGroup.add(null, new DefaultSecondDiffFunction());
jpqlFunctionGroup.add("access", new AccessSecondDiffFunction());
jpqlFunctionGroup.add("db2", new DB2SecondDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLSecondDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLSecondDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLSecondDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLSecondDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleSecondDiffFunction());
registerFunction(jpqlFunctionGroup);
// millisecond difference
jpqlFunctionGroup = new JpqlFunctionGroup("millisecond_diff", false);
jpqlFunctionGroup.add(null, new DefaultMillisecondDiffFunction());
jpqlFunctionGroup.add("access", new AccessMillisecondDiffFunction());
jpqlFunctionGroup.add("db2", new DB2MillisecondDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMillisecondDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLMillisecondDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMillisecondDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMillisecondDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleMillisecondDiffFunction());
registerFunction(jpqlFunctionGroup);
// microsecond difference
jpqlFunctionGroup = new JpqlFunctionGroup("microsecond_diff", false);
jpqlFunctionGroup.add(null, new DefaultMicrosecondDiffFunction());
jpqlFunctionGroup.add("access", new AccessMicrosecondDiffFunction());
jpqlFunctionGroup.add("db2", new DB2MicrosecondDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLMicrosecondDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLMicrosecondDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLMicrosecondDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLMicrosecondDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleMicrosecondDiffFunction());
registerFunction(jpqlFunctionGroup);
// week difference
jpqlFunctionGroup = new JpqlFunctionGroup("week_diff", false);
jpqlFunctionGroup.add(null, new DefaultWeekDiffFunction());
jpqlFunctionGroup.add("h2", new H2WeekDiffFunction());
jpqlFunctionGroup.add("db2", new DB2WeekDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLWeekDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLWeekDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLWeekDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleWeekDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLWeekDiffFunction());
registerFunction(jpqlFunctionGroup);
// quarter difference
jpqlFunctionGroup = new JpqlFunctionGroup("quarter_diff", false);
jpqlFunctionGroup.add(null, new DefaultQuarterDiffFunction());
jpqlFunctionGroup.add("access", new AccessQuarterDiffFunction());
jpqlFunctionGroup.add("h2", new H2QuarterDiffFunction());
jpqlFunctionGroup.add("db2", new DB2QuarterDiffFunction());
jpqlFunctionGroup.add("mysql", new MySQLQuarterDiffFunction());
jpqlFunctionGroup.add("mysql8", new MySQLQuarterDiffFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLQuarterDiffFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLQuarterDiffFunction());
jpqlFunctionGroup.add("oracle", new OracleQuarterDiffFunction());
registerFunction(jpqlFunctionGroup);
// date trunc: truncate a timestamp down to a given unit. The PostgreSQL
// implementation doubles as the generic default (null key) in each group.
jpqlFunctionGroup = new JpqlFunctionGroup(TruncDayFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncDayFunction());
jpqlFunctionGroup.add("db2", new DB2TruncDayFunction());
jpqlFunctionGroup.add("h2", new H2TruncDayFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncDayFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncDayFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncDayFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncDayFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncDayFunction());
registerFunction(jpqlFunctionGroup);
// truncate to hour
jpqlFunctionGroup = new JpqlFunctionGroup(TruncHourFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncHourFunction());
jpqlFunctionGroup.add("db2", new DB2TruncHourFunction());
jpqlFunctionGroup.add("h2", new H2TruncHourFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncHourFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncHourFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncHourFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncHourFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncHourFunction());
registerFunction(jpqlFunctionGroup);
// truncate to microseconds
jpqlFunctionGroup = new JpqlFunctionGroup(TruncMicrosecondsFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncMicrosecondsFunction());
jpqlFunctionGroup.add("db2", new DB2TruncMicrosecondsFunction());
jpqlFunctionGroup.add("h2", new H2TruncMicrosecondsFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncMicrosecondsFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncMicrosecondsFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncMicrosecondsFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncMicrosecondsFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncMicrosecondsFunction());
registerFunction(jpqlFunctionGroup);
// truncate to milliseconds
jpqlFunctionGroup = new JpqlFunctionGroup(TruncMillisecondsFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncMillisecondsFunction());
jpqlFunctionGroup.add("db2", new DB2TruncMillisecondsFunction());
jpqlFunctionGroup.add("h2", new H2TruncMillisecondsFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncMillisecondsFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncMillisecondsFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncMillisecondsFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncMillisecondsFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncMillisecondsFunction());
registerFunction(jpqlFunctionGroup);
// truncate to minute
jpqlFunctionGroup = new JpqlFunctionGroup(TruncMinuteFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncMinuteFunction());
jpqlFunctionGroup.add("db2", new DB2TruncMinuteFunction());
jpqlFunctionGroup.add("h2", new H2TruncMinuteFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncMinuteFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncMinuteFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncMinuteFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncMinuteFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncMinuteFunction());
registerFunction(jpqlFunctionGroup);
// date trunc continued: month/quarter/second/week/year.
jpqlFunctionGroup = new JpqlFunctionGroup(TruncMonthFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncMonthFunction());
jpqlFunctionGroup.add("db2", new DB2TruncMonthFunction());
jpqlFunctionGroup.add("h2", new H2TruncMonthFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncMonthFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncMonthFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncMonthFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncMonthFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncMonthFunction());
registerFunction(jpqlFunctionGroup);
// truncate to quarter
jpqlFunctionGroup = new JpqlFunctionGroup(TruncQuarterFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncQuarterFunction());
jpqlFunctionGroup.add("db2", new DB2TruncQuarterFunction());
jpqlFunctionGroup.add("h2", new H2TruncQuarterFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncQuarterFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncQuarterFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncQuarterFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncQuarterFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncQuarterFunction());
registerFunction(jpqlFunctionGroup);
// truncate to second
jpqlFunctionGroup = new JpqlFunctionGroup(TruncSecondFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncSecondFunction());
jpqlFunctionGroup.add("db2", new DB2TruncSecondFunction());
jpqlFunctionGroup.add("h2", new H2TruncSecondFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncSecondFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncSecondFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncSecondFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncSecondFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncSecondFunction());
registerFunction(jpqlFunctionGroup);
// truncate to week — note the narrower coverage (no db2/h2/postgresql
// overrides; the generic TruncWeekFunction is the default).
jpqlFunctionGroup = new JpqlFunctionGroup(TruncWeekFunction.NAME, false);
jpqlFunctionGroup.add(null, new TruncWeekFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncWeekFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncWeekFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncWeekFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncWeekFunction());
registerFunction(jpqlFunctionGroup);
// truncate to year
jpqlFunctionGroup = new JpqlFunctionGroup(TruncYearFunction.NAME, false);
jpqlFunctionGroup.add(null, new PostgreSQLTruncYearFunction());
jpqlFunctionGroup.add("db2", new DB2TruncYearFunction());
jpqlFunctionGroup.add("h2", new H2TruncYearFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLTruncYearFunction());
jpqlFunctionGroup.add("mysql", new MySQLTruncYearFunction());
jpqlFunctionGroup.add("mysql8", new MySQLTruncYearFunction());
jpqlFunctionGroup.add("oracle", new OracleTruncYearFunction());
jpqlFunctionGroup.add("postgresql", new PostgreSQLTruncYearFunction());
registerFunction(jpqlFunctionGroup);
// count
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractCountFunction.FUNCTION_NAME, true);
jpqlFunctionGroup.add(null, new CountTupleFunction());
jpqlFunctionGroup.add("mysql", new MySQLCountTupleFunction());
jpqlFunctionGroup.add("mysql8", new MySQLCountTupleFunction());
jpqlFunctionGroup.add("db2", new CountTupleEmulationFunction());
jpqlFunctionGroup.add("microsoft", new CountTupleEmulationFunction("+", "varchar(max)"));
jpqlFunctionGroup.add("oracle", new CountTupleEmulationFunction());
jpqlFunctionGroup.add("hsql", new CountTupleEmulationFunction());
registerFunction(jpqlFunctionGroup);
// row values
jpqlFunctionGroup = new JpqlFunctionGroup(RowValueComparisonFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new RowValueComparisonFunction());
registerFunction(jpqlFunctionGroup);
// row values subquery
jpqlFunctionGroup = new JpqlFunctionGroup(RowValueSubqueryComparisonFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new RowValueSubqueryComparisonFunction());
registerFunction(jpqlFunctionGroup);
// alias function
jpqlFunctionGroup = new JpqlFunctionGroup(AliasFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new AliasFunction());
registerFunction(jpqlFunctionGroup);
// column trunc function
jpqlFunctionGroup = new JpqlFunctionGroup(ColumnTruncFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new ColumnTruncFunction());
registerFunction(jpqlFunctionGroup);
// count wrapper function
jpqlFunctionGroup = new JpqlFunctionGroup(CountWrapperFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new CountWrapperFunction());
registerFunction(jpqlFunctionGroup);
// query wrapper function
jpqlFunctionGroup = new JpqlFunctionGroup(QueryWrapperFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new QueryWrapperFunction());
registerFunction(jpqlFunctionGroup);
// null subquery function
jpqlFunctionGroup = new JpqlFunctionGroup(NullSubqueryFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new NullSubqueryFunction());
registerFunction(jpqlFunctionGroup);
// greatest
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractGreatestFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new DefaultGreatestFunction());
jpqlFunctionGroup.add("db2", new MaxGreatestFunction());
jpqlFunctionGroup.add("microsoft", new SelectMaxUnionGreatestFunction());
registerFunction(jpqlFunctionGroup);
// least
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractLeastFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new DefaultLeastFunction());
jpqlFunctionGroup.add("db2", new MinLeastFunction());
jpqlFunctionGroup.add("microsoft", new SelectMinUnionLeastFunction());
registerFunction(jpqlFunctionGroup);
// repeat
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractRepeatFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new DefaultRepeatFunction());
jpqlFunctionGroup.add("oracle", new LpadRepeatFunction());
jpqlFunctionGroup.add("microsoft", new ReplicateRepeatFunction());
registerFunction(jpqlFunctionGroup);
// subquery
jpqlFunctionGroup = new JpqlFunctionGroup(SubqueryFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add(null, new SubqueryFunction());
registerFunction(jpqlFunctionGroup);
// every
jpqlFunctionGroup = new JpqlFunctionGroup(EveryFunction.FUNCTION_NAME, true);
jpqlFunctionGroup.add(null, EveryFunction.INSTANCE);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
EveryFunction.INSTANCE :
FallbackEveryFunction.INSTANCE);
}
registerFunction(jpqlFunctionGroup);
// andagg
jpqlFunctionGroup = new JpqlFunctionGroup("AND_AGG", true);
jpqlFunctionGroup.add(null, EveryFunction.INSTANCE);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
EveryFunction.INSTANCE :
FallbackEveryFunction.INSTANCE);
}
registerFunction(jpqlFunctionGroup);
// oragg
jpqlFunctionGroup = new JpqlFunctionGroup(OrAggFunction.FUNCTION_NAME, true);
jpqlFunctionGroup.add(null, OrAggFunction.INSTANCE);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
OrAggFunction.INSTANCE :
FallbackOrAggFunction.INSTANCE);
}
registerFunction(jpqlFunctionGroup);
jpqlFunctionGroup = new JpqlFunctionGroup("OR_AGG", true);
jpqlFunctionGroup.add(null, OrAggFunction.INSTANCE);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
OrAggFunction.INSTANCE :
FallbackOrAggFunction.INSTANCE);
}
registerFunction(jpqlFunctionGroup);
// string_json_agg
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractStringJsonAggFunction.FUNCTION_NAME, true);
{
JpqlFunctionGroup chrFunctionGroup = functions.get(ChrFunction.FUNCTION_NAME);
JpqlFunctionGroup replaceFunctionGroup = functions.get(ReplaceFunction.FUNCTION_NAME);
JpqlFunctionGroup concatFunctionGroup = functions.get(ConcatFunction.FUNCTION_NAME);
JpqlFunctionGroup groupConcatFunctionGroup = functions.get(AbstractGroupConcatFunction.FUNCTION_NAME);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
ChrFunction chrFunction = (ChrFunction) chrFunctionGroup.get(dialectEntry.getKey());
if (chrFunction == null) {
chrFunction = (ChrFunction) chrFunctionGroup.get(null);
}
ReplaceFunction replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(dialectEntry.getKey());
if (replaceFunction == null) {
replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(null);
}
ConcatFunction concatFunction = (ConcatFunction) concatFunctionGroup.get(dialectEntry.getKey());
if (concatFunction == null) {
concatFunction = (ConcatFunction) concatFunctionGroup.get(null);
}
jpqlFunctionGroup.add(dialectEntry.getKey(), new GroupConcatBasedStringJsonAggFunction((AbstractGroupConcatFunction) groupConcatFunctionGroup.get(dialectEntry.getKey()), chrFunction, replaceFunction, concatFunction));
}
}
jpqlFunctionGroup.add("postgresql", new PostgreSQLStringJsonAggFunction());
jpqlFunctionGroup.add("oracle", new OracleStringJsonAggFunction((AbstractGroupConcatFunction) findFunction(AbstractGroupConcatFunction.FUNCTION_NAME, "oracle"), (ChrFunction) findFunction(ChrFunction.FUNCTION_NAME, "oracle"), (ReplaceFunction) findFunction(ReplaceFunction.FUNCTION_NAME, "oracle"), (ConcatFunction) findFunction(ConcatFunction.FUNCTION_NAME, "oracle")));
jpqlFunctionGroup.add("mysql", new MySQLStringJsonAggFunction());
jpqlFunctionGroup.add("mysql8", new MySQLStringJsonAggFunction());
registerFunction(jpqlFunctionGroup);
// string_xml_agg
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractStringXmlAggFunction.FUNCTION_NAME, true);
{
JpqlFunctionGroup replaceFunctionGroup = functions.get(ReplaceFunction.FUNCTION_NAME);
JpqlFunctionGroup concatFunctionGroup = functions.get(ConcatFunction.FUNCTION_NAME);
JpqlFunctionGroup groupConcatFunctionGroup = functions.get(AbstractGroupConcatFunction.FUNCTION_NAME);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
ReplaceFunction replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(dialectEntry.getKey());
if (replaceFunction == null) {
replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(null);
}
ConcatFunction concatFunction = (ConcatFunction) concatFunctionGroup.get(dialectEntry.getKey());
if (concatFunction == null) {
concatFunction = (ConcatFunction) concatFunctionGroup.get(null);
}
jpqlFunctionGroup.add(dialectEntry.getKey(), new GroupConcatBasedStringXmlAggFunction((AbstractGroupConcatFunction) groupConcatFunctionGroup.get(dialectEntry.getKey()), replaceFunction, concatFunction));
}
}
jpqlFunctionGroup.add("postgresql", new PostgreSQLStringXmlAggFunction());
jpqlFunctionGroup.add("oracle", new OracleGroupConcatBasedStringXmlAggFunction((AbstractGroupConcatFunction) findFunction(AbstractGroupConcatFunction.FUNCTION_NAME, "oracle"), (ReplaceFunction) findFunction(ReplaceFunction.FUNCTION_NAME, "oracle"), (ConcatFunction) findFunction(ConcatFunction.FUNCTION_NAME, "oracle")));
registerFunction(jpqlFunctionGroup);
// to_string_json
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractToStringJsonFunction.FUNCTION_NAME, false);
{
JpqlFunctionGroup chrFunctionGroup = functions.get(ChrFunction.FUNCTION_NAME);
JpqlFunctionGroup replaceFunctionGroup = functions.get(ReplaceFunction.FUNCTION_NAME);
JpqlFunctionGroup concatFunctionGroup = functions.get(ConcatFunction.FUNCTION_NAME);
JpqlFunctionGroup groupConcatFunctionGroup = functions.get(AbstractGroupConcatFunction.FUNCTION_NAME);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
ChrFunction chrFunction = (ChrFunction) chrFunctionGroup.get(dialectEntry.getKey());
if (chrFunction == null) {
chrFunction = (ChrFunction) chrFunctionGroup.get(null);
}
ReplaceFunction replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(dialectEntry.getKey());
if (replaceFunction == null) {
replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(null);
}
ConcatFunction concatFunction = (ConcatFunction) concatFunctionGroup.get(dialectEntry.getKey());
if (concatFunction == null) {
concatFunction = (ConcatFunction) concatFunctionGroup.get(null);
}
jpqlFunctionGroup.add(dialectEntry.getKey(), new GroupConcatBasedToStringJsonFunction((AbstractGroupConcatFunction) groupConcatFunctionGroup.get(dialectEntry.getKey()), chrFunction, replaceFunction, concatFunction, dialectEntry.getValue().getLateralStyle()));
}
}
jpqlFunctionGroup.add("postgresql", new PostgreSQLToStringJsonFunction());
jpqlFunctionGroup.add("microsoft", new ForJsonPathToStringJsonFunction((CastFunction) findFunction("cast_string", "microsoft")));
jpqlFunctionGroup.add("oracle", new OracleToStringJsonFunction(
(AbstractGroupConcatFunction) findFunction(AbstractGroupConcatFunction.FUNCTION_NAME, "oracle"),
(ChrFunction) findFunction(ChrFunction.FUNCTION_NAME, "oracle"),
(ReplaceFunction) findFunction(ReplaceFunction.FUNCTION_NAME, "oracle"),
(ConcatFunction) findFunction(ConcatFunction.FUNCTION_NAME, "oracle")
));
jpqlFunctionGroup.add("mysql", new MySQLToStringJsonFunction());
jpqlFunctionGroup.add("mysql8", new MySQLToStringJsonFunction());
registerFunction(jpqlFunctionGroup);
// to_string_xml
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractToStringXmlFunction.FUNCTION_NAME, false);
{
JpqlFunctionGroup replaceFunctionGroup = functions.get(ReplaceFunction.FUNCTION_NAME);
JpqlFunctionGroup concatFunctionGroup = functions.get(ConcatFunction.FUNCTION_NAME);
JpqlFunctionGroup groupConcatFunctionGroup = functions.get(AbstractGroupConcatFunction.FUNCTION_NAME);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
ReplaceFunction replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(dialectEntry.getKey());
if (replaceFunction == null) {
replaceFunction = (ReplaceFunction) replaceFunctionGroup.get(null);
}
ConcatFunction concatFunction = (ConcatFunction) concatFunctionGroup.get(dialectEntry.getKey());
if (concatFunction == null) {
concatFunction = (ConcatFunction) concatFunctionGroup.get(null);
}
jpqlFunctionGroup.add(dialectEntry.getKey(), new GroupConcatBasedToStringXmlFunction((AbstractGroupConcatFunction) groupConcatFunctionGroup.get(dialectEntry.getKey()), replaceFunction, concatFunction, dialectEntry.getValue().getLateralStyle()));
}
}
jpqlFunctionGroup.add("postgresql", new PostgreSQLToStringXmlFunction());
jpqlFunctionGroup.add("microsoft", new ForXmlPathToStringXmlFunction((CastFunction) findFunction("cast_string", "microsoft")));
jpqlFunctionGroup.add("oracle", new OracleGroupConcatBasedToStringXmlFunction(
(AbstractGroupConcatFunction) findFunction(AbstractGroupConcatFunction.FUNCTION_NAME, "oracle"),
(ReplaceFunction) findFunction(ReplaceFunction.FUNCTION_NAME, "oracle"),
(ConcatFunction) findFunction(ConcatFunction.FUNCTION_NAME, "oracle"),
LateralStyle.LATERAL
));
registerFunction(jpqlFunctionGroup);
// to_multiset
jpqlFunctionGroup = new JpqlFunctionGroup(ToMultisetFunction.FUNCTION_NAME, false);
{
JpqlFunctionGroup jsonFunctionGroup = functions.get(AbstractToStringJsonFunction.FUNCTION_NAME);
JpqlFunctionGroup xmlFunctionGroup = functions.get(AbstractToStringXmlFunction.FUNCTION_NAME);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
AbstractToStringJsonFunction jsonFunction = (AbstractToStringJsonFunction) jsonFunctionGroup.get(dialectEntry.getKey());
AbstractToStringXmlFunction xmlFunction = (AbstractToStringXmlFunction) xmlFunctionGroup.get(dialectEntry.getKey());
jpqlFunctionGroup.add(dialectEntry.getKey(), new ToMultisetFunction(jsonFunction, xmlFunction));
}
}
registerFunction(jpqlFunctionGroup);
// window every
jpqlFunctionGroup = new JpqlFunctionGroup(WindowEveryFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
new WindowEveryFunction(dialectEntry.getValue()) :
new FallbackWindowEveryFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window andagg
jpqlFunctionGroup = new JpqlFunctionGroup("AND_AGG", JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
new WindowEveryFunction(dialectEntry.getValue()) :
new FallbackWindowEveryFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window oragg
jpqlFunctionGroup = new JpqlFunctionGroup(WindowOrAggFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(),
dialectEntry.getValue().supportsBooleanAggregation() ?
new WindowOrAggFunction(dialectEntry.getValue()) :
new FallbackWindowOrAggFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window sum
jpqlFunctionGroup = new JpqlFunctionGroup(SumFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new SumFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window avg
jpqlFunctionGroup = new JpqlFunctionGroup(AvgFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new AvgFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window min
jpqlFunctionGroup = new JpqlFunctionGroup(MinFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new MinFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window max
jpqlFunctionGroup = new JpqlFunctionGroup(MaxFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new MaxFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// window count
jpqlFunctionGroup = new JpqlFunctionGroup(CountFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new CountFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// row number
jpqlFunctionGroup = new JpqlFunctionGroup(RowNumberFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new RowNumberFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// rank
jpqlFunctionGroup = new JpqlFunctionGroup(RankFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new RankFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// dense_rank
jpqlFunctionGroup = new JpqlFunctionGroup(DenseRankFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new DenseRankFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// PERCENT_RANK
jpqlFunctionGroup = new JpqlFunctionGroup(PercentRankFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new PercentRankFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// CUME_DIST
jpqlFunctionGroup = new JpqlFunctionGroup(CumeDistFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new CumeDistFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// NTILE
jpqlFunctionGroup = new JpqlFunctionGroup(NtileFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new NtileFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// LAG
jpqlFunctionGroup = new JpqlFunctionGroup(LagFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new LagFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// LEAD
jpqlFunctionGroup = new JpqlFunctionGroup(LeadFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new LeadFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// FIRST_VALUE
jpqlFunctionGroup = new JpqlFunctionGroup(FirstValueFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new FirstValueFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// LAST_VALUE
jpqlFunctionGroup = new JpqlFunctionGroup(LastValueFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new LastValueFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// NTH_VALUE
jpqlFunctionGroup = new JpqlFunctionGroup(NthValueFunction.FUNCTION_NAME, JpqlFunctionKind.WINDOW);
for (Map.Entry<String, DbmsDialect> dialectEntry : this.dbmsDialects.entrySet()) {
jpqlFunctionGroup.add(dialectEntry.getKey(), new NthValueFunction(dialectEntry.getValue()));
}
registerFunction(jpqlFunctionGroup);
// JSON_GET
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractJsonGetFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add("postgresql", new PostgreSQLJsonGetFunction());
jpqlFunctionGroup.add("mysql8", new MySQL8JsonGetFunction());
jpqlFunctionGroup.add("oracle", new OracleJsonGetFunction());
jpqlFunctionGroup.add("db2", new DB2JsonGetFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLJsonGetFunction());
registerFunction(jpqlFunctionGroup);
// JSON_SET
jpqlFunctionGroup = new JpqlFunctionGroup(AbstractJsonSetFunction.FUNCTION_NAME, false);
jpqlFunctionGroup.add("postgresql", new PostgreSQLJsonSetFunction());
jpqlFunctionGroup.add("mysql8", new MySQL8JsonSetFunction());
jpqlFunctionGroup.add("oracle", new OracleJsonSetFunction());
jpqlFunctionGroup.add("db2", new DB2JsonSetFunction());
jpqlFunctionGroup.add("microsoft", new MSSQLJsonSetFunction());
registerFunction(jpqlFunctionGroup);
// grouping
registerFunction(GroupingFunction.FUNCTION_NAME, new GroupingFunction());
registerFunction(GroupingSetFunction.FUNCTION_NAME, new GroupingSetFunction());
registerFunction(GroupingSetsFunction.FUNCTION_NAME, new GroupingSetsFunction());
registerFunction(CubeFunction.FUNCTION_NAME, new CubeFunction());
registerFunction(RollupFunction.FUNCTION_NAME, new RollupFunction());
}
private void registerFunction(String name, JpqlFunction function) {
String functionName = name.toLowerCase();
JpqlFunctionGroup jpqlFunctionGroup = new JpqlFunctionGroup(name, false);
functions.put(functionName, jpqlFunctionGroup);
jpqlFunctionGroup.add(null, function);
}
private <T extends JpqlFunction> T findFunction(String name, String dbms) {
JpqlFunctionGroup jpqlFunctionGroup = functions.get(name);
JpqlFunction jpqlFunction = jpqlFunctionGroup.get(dbms);
if (jpqlFunction == null) {
jpqlFunction = jpqlFunctionGroup.get(null);
}
return (T) jpqlFunction;
}
    /**
     * Registers the built-in {@link DbmsDialect} implementations.
     * The {@code null} key is the fallback dialect used for any DBMS
     * that has no dedicated entry.
     */
    private void loadDbmsDialects() {
        registerDialect(null, new DefaultDbmsDialect());
        registerDialect("mysql", new MySQLDbmsDialect());
        registerDialect("mysql8", new MySQL8DbmsDialect());
        registerDialect("h2", new H2DbmsDialect());
        registerDialect("db2", new DB2DbmsDialect());
        registerDialect("postgresql", new PostgreSQLDbmsDialect());
        registerDialect("oracle", new OracleDbmsDialect());
        registerDialect("microsoft", new MSSQLDbmsDialect());
        registerDialect("cockroach", new CockroachSQLDbmsDialect());
    }
    /**
     * Seeds the configuration with its built-in default property values.
     * Anything registered here can be overridden later via
     * {@link #setProperty(String, String)} or the other property mutators.
     */
    private void loadDefaultProperties() {
        properties.put(ConfigurationProperties.COMPATIBLE_MODE, "false");
        properties.put(ConfigurationProperties.RETURNING_CLAUSE_CASE_SENSITIVE, "true");
        properties.put(ConfigurationProperties.EXPRESSION_CACHE_CLASS, ConcurrentHashMapExpressionCache.class.getName());
        properties.put(ConfigurationProperties.OPTIMIZED_KEYSET_PREDICATE_RENDERING, "true");
        properties.put(ConfigurationProperties.INLINE_ID_QUERY, "auto");
        properties.put(ConfigurationProperties.INLINE_COUNT_QUERY, "auto");
        properties.put(ConfigurationProperties.INLINE_CTES, "true");
    }
private void loadExtendedQuerySupport() {
ServiceLoader<ExtendedQuerySupport> serviceLoader = ServiceLoader.load(ExtendedQuerySupport.class);
Iterator<ExtendedQuerySupport> iterator = serviceLoader.iterator();
if (iterator.hasNext()) {
extendedQuerySupport = iterator.next();
}
}
private void loadEntityManagerIntegrator() {
ServiceLoader<EntityManagerFactoryIntegrator> serviceLoader = ServiceLoader.load(EntityManagerFactoryIntegrator.class);
Iterator<EntityManagerFactoryIntegrator> iterator = serviceLoader.iterator();
if (iterator.hasNext()) {
EntityManagerFactoryIntegrator enricher = iterator.next();
entityManagerIntegrators.add(enricher);
}
}
private void loadExtensions() {
List<CriteriaBuilderConfigurationContributor> contributors = new ArrayList<>();
for (CriteriaBuilderConfigurationContributor contributor : ServiceLoader.load(CriteriaBuilderConfigurationContributor.class)) {
contributors.add(contributor);
}
Collections.sort(contributors, new CriteriaBuilderConfigurationContributorComparator());
for (CriteriaBuilderConfigurationContributor contributor : contributors) {
contributor.contribute(this);
}
}
    /**
     * Sets the {@link PackageOpener} used by the runtime.
     *
     * @param packageOpener the package opener to use
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration withPackageOpener(PackageOpener packageOpener) {
        this.packageOpener = packageOpener;
        return this;
    }
    /** Package-private accessor for the currently configured {@link PackageOpener}. */
    PackageOpener getPackageOpener() {
        return packageOpener;
    }
    /**
     * Registers the given function group under its lower-cased name, replacing
     * any previously registered group with the same name.
     * <p>
     * NOTE(review): {@code toLowerCase()} is locale sensitive; for identifier-like
     * names {@code toLowerCase(Locale.ROOT)} would avoid surprises in e.g. Turkish
     * locales - confirm whether default-locale behavior is intended.
     *
     * @param jpqlFunctionGroup the dialect-aware group of function implementations
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration registerFunction(JpqlFunctionGroup jpqlFunctionGroup) {
        String functionName = jpqlFunctionGroup.getName().toLowerCase();
        functions.put(functionName, jpqlFunctionGroup);
        return this;
    }
    /**
     * Registers a JPQL macro under the upper-cased name, replacing any existing
     * macro with that name.
     * <p>
     * NOTE(review): {@code toUpperCase()} uses the default locale; see the locale
     * note on {@link #registerFunction(JpqlFunctionGroup)}.
     *
     * @param macroName the (case-insensitive) macro name
     * @param jpqlMacro the macro implementation
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration registerMacro(String macroName, JpqlMacro jpqlMacro) {
        macros.put(macroName.toUpperCase(), jpqlMacro);
        return this;
    }
    /** Returns the live, mutable registry of function groups keyed by lower-cased name. */
    public Map<String, JpqlFunctionGroup> getFunctions() {
        return functions;
    }
    /**
     * Returns the function group registered under the given (case-insensitive)
     * name, or {@code null} if none exists.
     */
    @Override
    public JpqlFunctionGroup getFunction(String name) {
        return functions.get(name.toLowerCase());
    }
    /** Returns a live view of the lower-cased names of all registered functions. */
    @Override
    public Set<String> getFunctionNames() {
        return functions.keySet();
    }
    /** Returns the live, mutable registry of macros keyed by upper-cased name. */
    public Map<String, JpqlMacro> getMacros() {
        return macros;
    }
    /** Returns a live view of the upper-cased names of all registered macros. */
    @Override
    public Set<String> getMacroNames() {
        return macros.keySet();
    }
    /**
     * Registers a named type and the matching {@code treat_<name>} function for it.
     *
     * @param name the name under which the type is referable
     * @param type the class the name maps to
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration registerNamedType(String name, Class<?> type) {
        treatTypes.put(name, type);
        registerFunction(new JpqlFunctionGroup("treat_" + name.toLowerCase(), new TreatFunction(type)));
        return this;
    }
    /** Returns the live, mutable map of registered named types. */
    @Override
    public Map<String, Class<?>> getNamedTypes() {
        return treatTypes;
    }
    /**
     * Registers a DBMS dialect under the given key, replacing any existing entry.
     * The {@code null} key represents the default dialect.
     *
     * @param dbms the DBMS identifier, or {@code null} for the default dialect
     * @param dialect the dialect implementation
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration registerDialect(String dbms, DbmsDialect dialect) {
        dbmsDialects.put(dbms, dialect);
        return this;
    }
    /** Returns the live, mutable registry of DBMS dialects ({@code null} key = default). */
    public Map<String, DbmsDialect> getDbmsDialects() {
        return dbmsDialects;
    }
    /**
     * Returns the {@link ExtendedQuerySupport} discovered via ServiceLoader,
     * or {@code null} if none was found on the classpath.
     */
    public ExtendedQuerySupport getExtendedQuerySupport() {
        return extendedQuerySupport;
    }
    /**
     * Registers an additional {@link EntityManagerFactoryIntegrator} beyond the
     * ones discovered via ServiceLoader.
     *
     * @param entityManagerEnricher the integrator to add
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration registerEntityManagerIntegrator(EntityManagerFactoryIntegrator entityManagerEnricher) {
        entityManagerIntegrators.add(entityManagerEnricher);
        return this;
    }
    /** Returns the live list of registered entity manager factory integrators. */
    @Override
    public List<EntityManagerFactoryIntegrator> getEntityManagerIntegrators() {
        return entityManagerIntegrators;
    }
    /**
     * Creates a new {@link CriteriaBuilderFactory} backed by this configuration
     * and the given entity manager factory.
     */
    @Override
    public CriteriaBuilderFactory createCriteriaBuilderFactory(EntityManagerFactory emf) {
        return new CriteriaBuilderFactoryImpl(this, emf);
    }
    /** Returns the live, mutable configuration properties. */
    @Override
    public Properties getProperties() {
        return properties;
    }
    /** Returns the value of the given property, or {@code null} if it is not set. */
    @Override
    public String getProperty(String propertyName) {
        return properties.getProperty(propertyName);
    }
    /**
     * Replaces the whole property set with the given instance (no defensive copy
     * is made; later mutations of the argument are visible here).
     *
     * @param properties the new property set
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration setProperties(Properties properties) {
        this.properties = properties;
        return this;
    }
    /**
     * Adds all given properties, overriding existing entries with the same key.
     *
     * @param extraProperties the properties to add
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration addProperties(Properties extraProperties) {
        this.properties.putAll(extraProperties);
        return this;
    }
@Override
public CriteriaBuilderConfiguration mergeProperties(Properties properties) {
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
if (this.properties.containsKey(entry.getKey())) {
continue;
}
this.properties.setProperty((String) entry.getKey(), (String) entry.getValue());
}
return this;
}
    /**
     * Sets a single configuration property, overriding any previous value.
     *
     * @param propertyName the property key
     * @param value the new value
     * @return this configuration for method chaining
     */
    @Override
    public CriteriaBuilderConfiguration setProperty(String propertyName, String value) {
        properties.setProperty(propertyName, value);
        return this;
    }
}
| 45,106 |
852 | <filename>Alignment/LaserAlignment/python/LaserAlignmentConstants_cff.py
import FWCore.ParameterSet.Config as cms
# Calibration constants for the CMS Laser Alignment System (LAS):
# beamsplitter kink angles, beam radii and detector z positions.
# NOTE(review): numeric values are measured calibration data - do not edit
# without a corresponding re-measurement.
LaserAlignmentConstants = cms.untracked.VPSet(
  # All beamsplitter kinks (beam0, ..., beam7) in radians.
  # The global offsets are systematic offsets observed in the lab
  # measurements with respect to data (provenance: lab measurement expert).
  cms.PSet(
    PSetName = cms.string( "BeamsplitterKinks" ),
    LASTecPlusRing4BsKinks = cms.vdouble( -0.00140, -0.00080, 0.00040, -0.00126, 0.00016, 0.00007, -0.00063, 0.00056 ),
    LASTecPlusRing6BsKinks = cms.vdouble( -0.00253, -0.00027, -0.00207, -0.00120, -0.00198, 0.00082, 0.00069, 0.00001 ),
    TecPlusGlobalOffset = cms.double( 0.0007 ), # global syst. offset added to all kinks in TEC+
    LASTecMinusRing4BsKinks = cms.vdouble( 0.00101, 0.00035, -0.00212, 0.00015, 0.00121, -0.00278, 0.00031, -0.00140 ),
    LASTecMinusRing6BsKinks = cms.vdouble( -0.00047, 0.00036, -0.00235, -0.00043, 0.00025, -0.00159, -0.00258, -0.00048 ),
    TecMinusGlobalOffset = cms.double( 0.0 ),# global syst. offset added to all kinks in TEC-
    LASAlignmentTubeBsKinks = cms.vdouble( 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000 ) # yet unknown, so set to zero
  ),
  # The beam radii in mm.
  cms.PSet(
    PSetName = cms.string( "Radii" ),
    LASTecRadius = cms.vdouble( 564., 840. ),
    LASAtRadius = cms.double( 564. )
  ),
  # z positions in mm.
  cms.PSet(
    PSetName = cms.string( "ZPositions" ),
    LASTecZPositions = cms.vdouble( 1322.5, 1462.5, 1602.5, 1742.5, 1882.5, 2057.5, 2247.5, 2452.5, 2667.5 ),
    LASTibZPositions = cms.vdouble( 620., 380., 180., -100., -340., -540 ),
    LASTobZPositions = cms.vdouble( 1040., 580., 220., -140., -500., -860 ),
    LASTecBeamSplitterZPosition = cms.double( 2057.5 ),
    LASAtBeamsplitterZPosition = cms.double( 1123. )
  )
)
| 838 |
430 | <reponame>NVIDIA/Torch-TensorRT
#include <string>
#include "core/compiler.h"
#include "gtest/gtest.h"
#include "tests/util/util.h"
#include "torch/csrc/jit/ir/irparser.h"
// Builds the TorchScript IR for a one-op graph that applies aten::bitwise_not
// to its single tensor input. Shared by all test instantiations below.
std::string gen_test_graph() {
  static const std::string kGraphIR = R"IR(
      graph(%0: Tensor):
        %3 : Tensor = aten::bitwise_not(%0)
        return (%3))IR";
  return kGraphIR;
}
// Expands to a gtest TEST case named ATenBitwiseNot<dtype>ConvertsCorrectly.
// It parses the bitwise_not IR, runs it through the TorchScript interpreter
// (RunGraph) and through TensorRT (RunGraphEngine) on a random input of the
// requested dtype, then asserts the two integer-cast results are exactly equal.
// The dtype token ("Integer" or "Boolean") is dispatched at runtime via strcmp
// on the stringized macro argument.
#define test_bitwise_not(dtype) \
  TEST(Converters, ATenBitwiseNot##dtype##ConvertsCorrectly) { \
    const auto graph = gen_test_graph(); \
 \
    auto g = std::make_shared<torch::jit::Graph>(); \
    torch::jit::parseIR(graph, g.get()); \
 \
    at::Tensor in; \
    if (strcmp(#dtype, "Integer") == 0) \
      in = at::randint(-128, 128, {10}, {at::kCUDA}).toType(at::kInt); \
    if (strcmp(#dtype, "Boolean") == 0) \
      in = at::randint(0, 1, {10}, {at::kCUDA}).toType(at::kBool); \
    auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
    auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); \
 \
    in = at::clone(in); \
    params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
    auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); \
 \
    auto jit_int = jit_results[0].toType(at::kInt); \
    auto trt_int = trt_results[0].toType(at::kInt); \
 \
    ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_int, trt_int)); \
  }
// Instantiate the test for the two supported input dtypes.
test_bitwise_not(Integer);
test_bitwise_not(Boolean);
#undef test_bitwise_not
| 1,598 |
2,338 | #include <stdio.h>
#include <unistd.h>
#include <string.h>
/* Sleeps for one second per call; used by main() to keep the inferior
   alive so an external debugger has time to attach. */
void
call_me()
{
  sleep(1);
}
/*
 * Test inferior for debugger test suites: prints a greeting (the breakpoint
 * marker comment below is matched by the test harness - do not change it)
 * and, when invoked as "prog keep_waiting", loops forever calling call_me()
 * so a debugger can attach.
 */
int
main (int argc, char **argv)
{
  printf ("Hello there!\n"); // Set break point at this line.
  /* Spin until killed/detached when explicitly asked to keep waiting. */
  if (argc == 2 && strcmp(argv[1], "keep_waiting") == 0)
    while (1)
      {
        call_me();
      }
  return 0;
}
| 147 |
#!/usr/bin/env python
# Start netplugin and netmaster
#
# Provisions a Docker swarm cluster across the given nodes over ssh, either in
# native "swarm_mode" (docker swarm init/join + contiv v2plugin check) or via
# the classic start-swarm.sh scripts.
# NOTE(review): written for Python 2 (print statements); will not run under Python 3.
import api.tnode
import argparse
import os
import re
import time
# Parse command line args
# Create the parser and sub parser
parser = argparse.ArgumentParser()
parser.add_argument("-nodes", required=True, help="list of nodes(comma separated)")
parser.add_argument("-user", default='vagrant', help="User id for ssh")
# NOTE(review): the default below looks like a scrubbed placeholder; pass -password explicitly.
parser.add_argument("-password", default='<PASSWORD>', help="password for ssh")
parser.add_argument("-binpath", default='/opt/gopath/bin', help="netplugin/netmaster binary path")
parser.add_argument("-swarm", default='classic_mode', help="classic_mode or swarm_mode")
# Parse the args
args = parser.parse_args()
addrList = args.nodes.split(",")
# Use tnode class object to gather information for all nodes.
nodes = []
for addr in addrList:
    node = api.tnode.Node(addr,args.user,args.password,args.binpath)
    nodes.append(node)
gopath = "/opt/gopath"
scriptPath = gopath + "/src/github.com/contiv/netplugin/scripts"
if args.swarm == "swarm_mode":
    # Nodes leave the swarm
    for node in nodes:
        node.runCmd("docker swarm leave --force")
    # Create a swarm with Node0 as manager
    nodes[0].runCmd("docker swarm init --advertise-addr " + nodes[0].addr + ":2377")
    # Get the token for joining swarm
    out, x, y = nodes[0].runCmd("docker swarm join-token worker -q")
    token = out[0][:-1] #remove newline
    # Make all workers join the swarm
    for node in nodes[1:]:
        command = "docker swarm join --token "+ token + " " + nodes[0].addr + ":2377"
        node.runCmdThread(command)
    # Give the asynchronous join commands time to complete before checking state.
    time.sleep(15)
    print "Check netplugin is installed and enabled"
    out, _, _ = nodes[0].runCmd("docker plugin ls")
    # 'docker plugin ls' output: out[0] is the header row, out[1] the first plugin.
    installed = re.search('contiv/v2plugin', out[1])
    if installed == None:
        print "Make target failed: Contiv plugin is not installed"
        os._exit(1)
    # The ENABLED column reads true/false; 'false' means installed but disabled.
    enabled = re.search('false', out[1])
    if enabled != None:
        print "Make target failed: Contiv plugin is installed but disabled"
        os._exit(1)
    print "################### Swarm Mode is up #####################"
else:
    swarmScript= scriptPath + "/start-swarm.sh"
    print "SwarmScript is : " + swarmScript
    print "Stopping and removing swarm containers from all Nodes"
    for node in nodes:
        command = swarmScript + " stop " + node.addr + " > /tmp/swarmStop.log 2>&1"
        node.runCmdThread(command)
    print "Pulling and starting swarm containers from all Nodes"
    for node in nodes:
        command = swarmScript + " start " + node.addr + " > /tmp/startSwarm.log 2>&1"
        node.runCmdThread(command)
    # Allow the asynchronous start commands to finish before declaring success.
    time.sleep(15)
    print "################### Classic Swarm cluster is up #####################"
os._exit(0)
| 968 |
308 | <gh_stars>100-1000
//
// ZXNavHistoryStackView.h
// ZXNavigationBar
//
// Created by 李兆祥 on 2020/12/22.
// Copyright © 2020 ZXLee. All rights reserved.
// https://github.com/SmileZXLee/ZXNavigationBar
// V1.4.1
#import <UIKit/UIKit.h>
NS_ASSUME_NONNULL_BEGIN
/// Collection view presenting the navigation history stack of ZXNavigationBar.
/// Exposes no API beyond UICollectionView here; presumably configured entirely
/// in the implementation file — see the repository link above.
@interface ZXNavHistoryStackView : UICollectionView

@end
NS_ASSUME_NONNULL_END
| 151 |
/*
*
* Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.speedment.common.injector.internal.execution;
import com.speedment.common.injector.MissingArgumentStrategy;
import com.speedment.common.injector.State;
import com.speedment.common.injector.dependency.Dependency;
import com.speedment.common.injector.dependency.DependencyGraph;
import com.speedment.common.injector.dependency.DependencyNode;
import com.speedment.common.injector.execution.Execution;
import com.speedment.common.injector.execution.ExecutionBuilder;
import com.speedment.common.injector.execution.ExecutionOneParamBuilder;
import com.speedment.common.injector.execution.ExecutionTwoParamBuilder;
import com.speedment.common.injector.internal.dependency.DependencyImpl;
import java.util.function.BiConsumer;
import static java.util.Collections.singleton;
import static java.util.Objects.requireNonNull;
/**
* First step of an {@link ExecutionBuilder}-chain.
*
* @param <T> the component to withExecute on
* @param <P0> the first parameter type
*
* @author <NAME>
* @since 1.2.0
*/
public final class ExecutionOneParamBuilderImpl<T, P0>
extends AbstractExecutionBuilder<T>
implements ExecutionOneParamBuilder<T, P0> {
private final Class<P0> param0;
private final State state0;
private BiConsumer<T, P0> executeAction;
ExecutionOneParamBuilderImpl(
Class<T> component, State state,
Class<P0> param0, State state0) {
super(component, state);
this.param0 = requireNonNull(param0);
this.state0 = requireNonNull(state0);
}
@Override
public <P1> ExecutionTwoParamBuilder<T, P0, P1> withState(
State state1, Class<P1> param1) {
return new ExecutionTwoParamBuilderImpl<>(
getComponent(), getState(),
param0, state0,
param1, state1
);
}
@Override
public ExecutionBuilder<T> withExecute(BiConsumer<T, P0> executeAction) {
this.executeAction = requireNonNull(executeAction);
return this;
}
@Override
public Execution<T> build(DependencyGraph graph) {
requireNonNull(executeAction, "No execution has been specified.");
final DependencyNode node0 = graph.get(param0);
final Dependency dep0 = new DependencyImpl(node0, state0);
return new AbstractExecution<T>(
getComponent(), getState(), singleton(dep0),
MissingArgumentStrategy.THROW_EXCEPTION) {
@Override
public boolean invoke(T component, ClassMapper classMapper) {
final P0 arg0 = classMapper.apply(param0);
executeAction.accept(component, arg0);
return true;
}
};
}
} | 1,294 |
# <filename>src/0503.next-greater-element-ii/next-greater-element-ii.py<gh_stars>100-1000
class Solution:
    def nextGreaterElements(self, nums: List[int]) -> List[int]:
        """Return, for each element of the circular array ``nums``, the first
        strictly greater element found scanning forward (with wrap-around),
        or -1 if none exists.

        Uses a monotonic decreasing stack of *indices* and two passes via
        index arithmetic, avoiding the doubled ``nums + nums`` copy:
        O(n) time, O(n) extra space.
        """
        n = len(nums)
        res = [-1] * n
        stack = []  # indices still waiting for their next-greater element
        for i in range(2 * n):
            val = nums[i % n]
            # Resolve every pending index whose value is smaller than val.
            while stack and nums[stack[-1]] < val:
                res[stack.pop()] = val
            # Only first-pass indices need an answer recorded.
            if i < n:
                stack.append(i)
        return res
#include "Core/Utilities/Compiler/QProgToQuil.h"
#include <iostream>
#include "Core/Core.h"
#include "Core/Utilities/QProgTransform/TransformDecomposition.h"
#include "Core/Utilities/QProgInfo/MetadataValidity.h"
USING_QPANDA
using namespace QGATE_SPACE;
using namespace std;
/*
 * Build the QPanda-gate-type -> Quil-mnemonic table used during emission.
 * Gate types absent from this map (e.g. SQISWAP) are rejected later by
 * dealWithQuilGate(). The machine pointer is stored, not owned.
 */
QProgToQuil::QProgToQuil(QuantumMachine * quantum_machine)
{
    m_gate_type_map.insert(pair<int, string>(PAULI_X_GATE, "X"));
    m_gate_type_map.insert(pair<int, string>(PAULI_Y_GATE, "Y"));
    m_gate_type_map.insert(pair<int, string>(PAULI_Z_GATE, "Z"));
    m_gate_type_map.insert(pair<int, string>(HADAMARD_GATE, "H"));
    m_gate_type_map.insert(pair<int, string>(T_GATE, "T"));
    m_gate_type_map.insert(pair<int, string>(S_GATE, "S"));
    m_gate_type_map.insert(pair<int, string>(RX_GATE, "RX"));
    m_gate_type_map.insert(pair<int, string>(RY_GATE, "RY"));
    m_gate_type_map.insert(pair<int, string>(RZ_GATE, "RZ"));
    m_gate_type_map.insert(pair<int, string>(U1_GATE, "PHASE")); /* U1 --> PHASE */
    m_gate_type_map.insert(pair<int, string>(CU_GATE, "CU"));
    m_gate_type_map.insert(pair<int, string>(CNOT_GATE, "CNOT"));
    m_gate_type_map.insert(pair<int, string>(CZ_GATE, "CZ"));
    m_gate_type_map.insert(pair<int, string>(CPHASE_GATE, "CPHASE"));
    m_gate_type_map.insert(pair<int, string>(ISWAP_GATE, "ISWAP"));
    m_instructs.clear();
    m_quantum_machine = quantum_machine;
}
/* No owned resources: m_quantum_machine is a raw pointer never freed here. */
QProgToQuil::~QProgToQuil()
{
}
/*
 * Top-level conversion entry point: decompose `prog` into the Quil-
 * compatible basis gate set, then traverse the result and emit instructions
 * into m_instructs.
 */
void QProgToQuil::transform(QProg &prog)
{
    if (nullptr == m_quantum_machine)
    {
        QCERR("Quantum machine is nullptr");
        throw std::invalid_argument("Quantum machine is nullptr");
    }
    /* Two metadata rows: [0] single-qubit gates, [1] double-qubit gates. */
    const int kMetadata_gate_type_count = 2;
    vector<vector<string>> valid_gate_matrix(kMetadata_gate_type_count, vector<string>(0));
    vector<vector<string>> gate_matrix(kMetadata_gate_type_count, vector<string>(0));
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[PAULI_X_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[PAULI_Y_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[PAULI_Z_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[HADAMARD_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[T_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[S_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[RX_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[RY_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back(m_gate_type_map[RZ_GATE]);
    gate_matrix[METADATA_SINGLE_GATE].emplace_back("U1"); /* QPanda U1 Gate Name */
    gate_matrix[METADATA_DOUBLE_GATE].emplace_back(m_gate_type_map[CU_GATE]);
    gate_matrix[METADATA_DOUBLE_GATE].emplace_back(m_gate_type_map[CNOT_GATE]);
    gate_matrix[METADATA_DOUBLE_GATE].emplace_back(m_gate_type_map[CZ_GATE]);
    gate_matrix[METADATA_DOUBLE_GATE].emplace_back(m_gate_type_map[CPHASE_GATE]);
    gate_matrix[METADATA_DOUBLE_GATE].emplace_back(m_gate_type_map[ISWAP_GATE]);
    /* Validate the declared gate sets, then rewrite `prog` so it only
       contains gates from the validated basis before emission. */
    SingleGateTypeValidator::GateType(gate_matrix[METADATA_SINGLE_GATE],
        valid_gate_matrix[METADATA_SINGLE_GATE]); /* single gate data MetadataValidity */
    DoubleGateTypeValidator::GateType(gate_matrix[METADATA_DOUBLE_GATE],
        valid_gate_matrix[METADATA_DOUBLE_GATE]); /* double gate data MetadataValidity */
    TransformDecomposition traversal_vec(valid_gate_matrix,gate_matrix,m_quantum_machine);
    traversal_vec.TraversalOptimizationMerge(prog);
    transformQProgByTraversalAlg(&prog);
}
/*
 * Walk the (already decomposed) program tree and emit Quil for every node.
 * @param prog program to traverse; must not be null.
 * @throws runtime_error if prog is null.
 */
void QProgToQuil::transformQProgByTraversalAlg(QProg *prog)
{
    if (nullptr == prog)
    {
        QCERR("p_prog is null");
        throw runtime_error("p_prog is null");
    }
    /* Traversal starts outside of any dagger scope. */
    bool isDagger = false;
    execute(prog->getImplementationPtr(), nullptr, isDagger);
}
/*
 * Render the collected Quil instructions as one newline-separated string
 * (no trailing newline). Returns an empty string when nothing has been
 * emitted — the previous unconditional erase(size() - 1) threw
 * std::out_of_range on an empty instruction list.
 */
string QProgToQuil::getInsturctions()
{
    string instructions;
    for (auto &sInstruct : m_instructs)
    {
        instructions.append(sInstruct).append("\n");
    }
    /* Drop the trailing '\n' only if at least one instruction was emitted. */
    if (!instructions.empty())
    {
        instructions.pop_back();
    }
    return instructions;
}
/* Gate leaf: translate the gate into one or more Quil instructions. */
void QProgToQuil::execute(std::shared_ptr<AbstractQGateNode> cur_node, std::shared_ptr<QNode> parent_node, bool &is_dagger)
{
    transformQGate(cur_node.get(), is_dagger);
}
/* Measurement leaf: emit a MEASURE instruction. */
void QProgToQuil::execute(std::shared_ptr<AbstractQuantumMeasure> cur_node, std::shared_ptr<QNode> parent_node, bool &is_dagger)
{
    transformQMeasure(cur_node.get());
}
/* Reset leaf: emit a RESET instruction. */
void QProgToQuil::execute(std::shared_ptr<AbstractQuantumReset> cur_node, std::shared_ptr<QNode> parent_node, bool &is_dagger)
{
    transformQReset(cur_node.get());
}
/* Control flow (QIf/QWhile) cannot be expressed in this Quil output: reject. */
void QProgToQuil::execute(std::shared_ptr<AbstractControlFlowNode> cur_node, std::shared_ptr<QNode> parent_node, bool &)
{
    QCERR("Don't support QWhileProg or QIfProg");
    throw invalid_argument("Don't support QWhileProg or QIfProg");
}
/* Program node: recurse into its children, keeping the current dagger flag. */
void QProgToQuil::execute(std::shared_ptr<AbstractQuantumProgram> cur_node, std::shared_ptr<QNode> parent_node, bool &is_dagger)
{
    Traversal::traversal(cur_node, *this, is_dagger);
}
/* Circuit node: XOR its own dagger flag with the inherited one, then recurse. */
void QProgToQuil::execute(std::shared_ptr<AbstractQuantumCircuit> cur_node, std::shared_ptr<QNode> parent_node, bool &is_dagger)
{
    bool bDagger = cur_node->isDagger() ^ is_dagger;
    Traversal::traversal(cur_node, true, *this, bDagger);
}
/* Classical program nodes are not expected after decomposition: always throws. */
void QProgToQuil::execute(std::shared_ptr<AbstractClassicalProg> cur_node, std::shared_ptr<QNode> parent_node, bool&)
{
    // error
    QCERR("transform error, there shouldn't be classicalProg here.");
    throw invalid_argument("transform error, there shouldn't be classicalProg here.");
}
/*
 * Decompose one gate into Quil-expressible basis gates, then emit an
 * instruction for each resulting gate node.
 */
void QProgToQuil::transformQGate(AbstractQGateNode *gate, bool is_dagger)
{
    if (nullptr == gate)
    {
        QCERR("p_gate is null");
        throw runtime_error("p_gate is null");
    }
    auto circuit = transformQPandaBaseGateToQuilBaseGate(gate);
    for (auto iter = circuit.getFirstNodeIter(); iter != circuit.getEndNodeIter(); iter++)
    {
        QNode * p_node = (*iter).get();
        dealWithQuilGate(dynamic_cast<AbstractQGateNode *>(p_node));
    }
    return;
}
/*
 * Emit "MEASURE <qubit> [<cbit>]" for one measurement node.
 */
void QProgToQuil::transformQMeasure(AbstractQuantumMeasure *measure)
{
    if (nullptr == measure)
    {
        QCERR("p_measure is null");
        throw runtime_error("p_measure is null");
    }
    Qubit *p_qubit = measure->getQuBit();
    auto p_physical_qubit = p_qubit->getPhysicalQubitPtr();
    size_t qubit_addr = p_physical_qubit->getQubitAddr();
    string qubit_addr_str = to_string(qubit_addr);
    auto p_cbit = measure->getCBit();
    string cbit_name = p_cbit->getName();
    /* assumes cbit names look like "c<index>"; substr(1) strips the leading
       letter to recover the classical register index — TODO confirm. */
    string cbit_number_str = cbit_name.substr(1);
    string instruct = "MEASURE " + qubit_addr_str + " [" + cbit_number_str + "]";
    m_instructs.emplace_back(instruct);
    return;
}
/*
 * Emit "RESET <qubit>" for one reset node.
 * @throws runtime_error if the node pointer is null.
 */
void QProgToQuil::transformQReset(AbstractQuantumReset *reset)
{
    if (nullptr == reset)
    {
        QCERR("reset node is null");
        throw runtime_error("reset node is null");
    }
    /* Resolve the qubit's physical address and append the instruction. */
    size_t qubit_addr =
        reset->getQuBit()->getPhysicalQubitPtr()->getQubitAddr();
    m_instructs.emplace_back("RESET " + to_string(qubit_addr));
}
/* Quil output has no counterpart for QIf/QWhile: always throws. */
void QProgToQuil::transformQControlFlow(AbstractControlFlowNode *controlflow)
{
    throw std::runtime_error("not support control flow");
}
/*
 * Emit one Quil instruction for a single, already basis-decomposed gate:
 * "<MNEMONIC>[(angle)] q0 [q1 ...]".
 */
void QProgToQuil::dealWithQuilGate(AbstractQGateNode *p_gate)
{
    if (nullptr == p_gate)
    {
        QCERR("pGate is null");
        throw invalid_argument("pGate is null");
    }
    auto p_quantum_gate = p_gate->getQGate();
    int gate_type = p_quantum_gate->getGateType();
    QVec qubits;
    p_gate->getQuBitVector(qubits);
    /* Look up the Quil mnemonic for this gate type. */
    auto iter = m_gate_type_map.find(gate_type);
    if (iter == m_gate_type_map.end())
    {
        QCERR("do not support this gateType");
        throw invalid_argument("do not support this gateType");
    }
    string gate_type_str = iter->second;
    /* Append the physical address of every target qubit: " q0 q1 ...". */
    string all_qubit_addr_str;
    for (auto qubit : qubits)
    {
        PhysicalQubit *p_physical_qubit = qubit->getPhysicalQubitPtr();
        size_t qubit_addr = p_physical_qubit->getQubitAddr();
        all_qubit_addr_str += " ";
        all_qubit_addr_str += to_string(qubit_addr);
    }
    string instruct;
    /* Declared before the switch: C++ forbids jumping over initialized locals. */
    AbstractSingleAngleParameter * p_angle;
    string angle_str;
    switch (gate_type)
    {
    /* Parameter-free gates: "<GATE> q...". */
    case GateType::PAULI_X_GATE:
    case GateType::PAULI_Y_GATE:
    case GateType::PAULI_Z_GATE:
    case GateType::HADAMARD_GATE:
    case GateType::T_GATE:
    case GateType::S_GATE:
    case GateType::CNOT_GATE:
    case GateType::CZ_GATE:
    case GateType::ISWAP_GATE:
    /* NOTE(review): SQISWAP has no entry in m_gate_type_map, so the lookup
       above throws before this label can ever be reached. */
    case GateType::SQISWAP_GATE:
        instruct = gate_type_str + all_qubit_addr_str;
        m_instructs.emplace_back(instruct);
        break;
    /* Single-angle gates: "<GATE>(theta) q...". */
    case GateType::RX_GATE:
    case GateType::RY_GATE:
    case GateType::RZ_GATE:
    case GateType::U1_GATE:
    case GateType::CPHASE_GATE:
        p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
        if (nullptr == p_angle)
        {
            QCERR("dynamic_cast error");
            throw invalid_argument("dynamic_cast error");
        }
        angle_str = to_string(p_angle->getParameter());
        instruct = gate_type_str + "(" + angle_str + ")" + all_qubit_addr_str;
        m_instructs.emplace_back(instruct);
        break;
    default:
        QCERR("do not support this type gate");
        throw invalid_argument("do not support this type gate");
        break;
    }
    return ;
}
/*
 * Rewrite one QPanda gate as an equivalent circuit over the Quil-expressible
 * basis. Dagger forms are handled either by negating rotation angles (via
 * `label`) or by an explicit reversed decomposition. The caller flattens the
 * returned circuit into individual instructions.
 */
QCircuit QProgToQuil::transformQPandaBaseGateToQuilBaseGate(AbstractQGateNode *p_gate)
{
    QVec target_qubits;
    if (p_gate->getQuBitVector(target_qubits) <= 0)
    {
        QCERR("gate is null");
        throw invalid_argument("gate is null");
    }
    QuantumGate* p_quantum_gate = p_gate->getQGate();
    QStat matrix;
    p_quantum_gate->getMatrix(matrix);
    /* NOTE(review): `matrix` and the outer `angle` below are never read in
       this function (the U4/CU cases declare their own `angle`). */
    double theta = 0;
    AbstractAngleParameter * angle = nullptr;
    /* Folds a dagger into the sign of rotation angles below. */
    int label = p_gate->isDagger() ? -1 : 1; /* iLabel is 1 or -1 */
    auto qCircuit = CreateEmptyCircuit();
    int gate_type = p_quantum_gate->getGateType();
    switch (gate_type)
    {
    case PAULI_X_GATE:
        qCircuit << X(target_qubits[0]);
        break;
    case PAULI_Y_GATE:
        qCircuit << Y(target_qubits[0]);
        break;
    case PAULI_Z_GATE:
        qCircuit << Z(target_qubits[0]);
        break;
    /* Half-pi axis gates become signed RX/RY/RZ rotations. */
    case X_HALF_PI:
        qCircuit << RX(target_qubits[0], label*PI/2);
        break;
    case Y_HALF_PI:
        qCircuit << RY(target_qubits[0], label*PI / 2);
        break;
    case Z_HALF_PI:
        qCircuit << RZ(target_qubits[0], label*PI / 2);
        break;
    case HADAMARD_GATE:
        qCircuit << H(target_qubits[0]);
        break;
    /* T-dagger / S-dagger are emitted as U1 with a negated angle. */
    case T_GATE:
        {
            auto gate = p_gate->isDagger() ? U1(target_qubits[0], label*PI / 4) : T(target_qubits[0]);
            qCircuit << gate;
        }
        break;
    case S_GATE:
        {
            auto gate = p_gate->isDagger() ? U1(target_qubits[0], label*PI / 2) : S(target_qubits[0]);
            qCircuit << gate;
        }
        break;
    case RX_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << RX(target_qubits[0], label*theta);
            break;
        }
    case RY_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << RY(target_qubits[0], label*theta);
            break;
        }
    case RZ_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << RZ(target_qubits[0], label*theta);
            break;
        }
    case U1_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << U1(target_qubits[0], label*theta);
            break;
        }
    case U4_GATE:
        {
            auto angle = dynamic_cast<AbstractAngleParameter *>(p_quantum_gate);
            if (nullptr == angle)
            {
                QCERR("static cast fail");
                throw invalid_argument("static cast fail");
            }
            /* U4 is emitted as Rz·Ry·Rz; the dagger reverses the order and
               negates each angle. */
            if (p_gate->isDagger())
            {
                qCircuit << RZ(target_qubits[0],-angle->getBeta())
                    <<RY(target_qubits[0],-angle->getGamma())
                    <<RZ(target_qubits[0],-angle->getDelta());
            }
            else
            {
                qCircuit << RZ(target_qubits[0], angle->getDelta())
                    <<RY(target_qubits[0], angle->getGamma())
                    <<RZ(target_qubits[0], angle->getBeta());
            }
        }
        break;
    case CU_GATE:
        {
            auto angle = dynamic_cast<AbstractAngleParameter *>(p_quantum_gate);
            if (nullptr == angle)
            {
                QCERR("static cast fail");
                throw invalid_argument("static cast fail");
            }
            double alpha = angle->getAlpha();
            double beta = angle->getBeta();
            double delta = angle->getDelta();
            double gamma = angle->getGamma();
            /* Controlled-U decomposition into rotations and CNOTs; the
               dagger branch applies the inverse sequence. */
            if (p_gate->isDagger())
            {
                qCircuit << U1(target_qubits[0], -alpha) << RZ(target_qubits[0], -beta)
                    << RY(target_qubits[0], -gamma / 2) << CNOT(target_qubits[0], target_qubits[1])
                    << RY(target_qubits[0], gamma / 2) << RZ(target_qubits[0], (delta + beta) / 2)
                    << CNOT(target_qubits[0], target_qubits[1]) << RZ(target_qubits[0], -(delta - beta) / 2);
            }
            else
            {
                qCircuit << RZ(target_qubits[0], (delta - beta) / 2) << CNOT(target_qubits[0], target_qubits[1])
                    << RZ(target_qubits[0], -(delta + beta) / 2) << RY(target_qubits[0], -gamma / 2)
                    << CNOT(target_qubits[0], target_qubits[1]) << RY(target_qubits[0], gamma / 2)
                    << RZ(target_qubits[0], beta) << U1(target_qubits[0], alpha);
            }
            break;
        }
    case CNOT_GATE:
        qCircuit << CNOT(target_qubits[0], target_qubits[1]);
        break;
    case CZ_GATE:
        qCircuit << CZ(target_qubits[0], target_qubits[1]);
        break;
    case CPHASE_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << CR(target_qubits[0], target_qubits[1],label*theta);
        }
        break;
    case ISWAP_GATE:
        /* The dagger branch realizes iSWAP† as iSWAP followed by Z on both
           qubits. */
        if (p_gate->isDagger())
        {
            qCircuit << iSWAP(target_qubits[0], target_qubits[1])
                << Z(target_qubits[0]) << Z(target_qubits[1]);
        }
        else
        {
            qCircuit << iSWAP(target_qubits[0], target_qubits[1]);
        }
        break;
    /* sqrt(iSWAP) is a fixed-angle (pi/4) instance of the iSWAP(theta)
       decomposition below. */
    case SQISWAP_GATE:
        {
            theta = PI/4;
            qCircuit << CNOT(target_qubits[1], target_qubits[0])
                << CZ(target_qubits[0], target_qubits[1])
                << RX(target_qubits[1], -label * theta)
                << CZ(target_qubits[0], target_qubits[1])
                << RX(target_qubits[1], label * theta)
                << CNOT(target_qubits[1], target_qubits[0]);
        }
        break;
    case ISWAP_THETA_GATE:
        {
            auto p_angle = dynamic_cast<AbstractSingleAngleParameter *>(p_gate->getQGate());
            theta = p_angle->getParameter();
            qCircuit << CNOT(target_qubits[1], target_qubits[0])
                << CZ(target_qubits[0], target_qubits[1])
                << RX(target_qubits[1], -label * theta)
                << CZ(target_qubits[0], target_qubits[1])
                << RX(target_qubits[1], label * theta)
                << CNOT(target_qubits[1], target_qubits[0]);
        }
        break;
    case TWO_QUBIT_GATE:
        /* NOTE(review): silently emits nothing for TWO_QUBIT_GATE — confirm
           this is intentional. */
        break;
    default:
        QCERR("unknow error");
        throw runtime_error("unknow error");
    }
    return qCircuit;
}
/*
 * Convenience wrapper: convert `prog` to a Quil instruction string using a
 * temporary QProgToQuil translator.
 * @throws std::invalid_argument if quantum_machine is null.
 */
string QPanda::transformQProgToQuil(QProg& prog, QuantumMachine * quantum_machine)
{
    if (nullptr == quantum_machine)
    {
        QCERR("Quantum machine is nullptr");
        throw std::invalid_argument("Quantum machine is nullptr");
    }
    QProgToQuil quil_traverse(quantum_machine);
    quil_traverse.transform(prog);
    return quil_traverse.getInsturctions();
}
/* Snake-case alias for transformQProgToQuil (newer QPanda naming style). */
string QPanda::convert_qprog_to_quil(QProg &prog, QuantumMachine *qm)
{
    return transformQProgToQuil(prog, qm);
}
| 7,715 |
// <filename>joyqueue-common/joyqueue-toolkit/src/main/java/org/joyqueue/toolkit/concurrent/CasLock.java
/**
* Copyright 2019 The JoyQueue Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joyqueue.toolkit.concurrent;
import java.util.ConcurrentModificationException;
import java.util.concurrent.atomic.AtomicLong;
/**
 * A lightweight, reentrant lock implemented with CAS (compare-and-set).
 * Only suitable for scenarios where lock contention is extremely rare.
 * @author LiYue
 * Date: 2020/4/2
 */
public class CasLock {
    /** Sentinel stored in {@link #lockThread} while no thread owns the lock. */
    private static final long FREE = -1L;
    /** Id of the owning thread, or {@link #FREE} when the lock is available. */
    private final AtomicLong lockThread = new AtomicLong(FREE);
    /** Reentrancy count: owner acquisitions not yet matched by unlock(). */
    private final AtomicLong references = new AtomicLong(0L);
    /**
     * Acquires the lock, spinning (with {@link Thread#yield()}) until it
     * becomes available.
     * Use this method with caution!
     * Under frequent lock contention this busy-wait consumes a lot of CPU.
     */
    public void waitAndLock() {
        while (!tryLock()) {
            Thread.yield();
        }
    }
    /**
     * Acquires the lock if it is free or already owned by the current thread.
     * @throws ConcurrentModificationException if another thread holds the lock.
     */
    public void checkLock() {
        long thread = Thread.currentThread().getId();
        if (thread != lockThread.get() && !lockThread.compareAndSet(FREE, thread)) {
            throw new ConcurrentModificationException();
        }
        references.getAndIncrement();
    }
    /**
     * Attempts to acquire the lock (reentrant: the owner always succeeds).
     * @return true if the lock was acquired, false if another thread holds it.
     */
    public boolean tryLock() {
        long thread = Thread.currentThread().getId();
        if (thread != lockThread.get() && !lockThread.compareAndSet(FREE, thread)) {
            return false;
        }
        references.getAndIncrement();
        return true;
    }
    /**
     * Releases the lock. Only effective for the owning thread; the lock is
     * freed once every acquisition has been matched by an unlock.
     */
    public void unlock() {
        long thread = Thread.currentThread().getId();
        if (thread == lockThread.get() && references.decrementAndGet() == 0) {
            lockThread.set(FREE);
        }
    }
}
| 1,136 |
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* envoy/service/load_stats/v2/lrs.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_
#define ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct envoy_service_load_stats_v2_LoadStatsRequest;
struct envoy_service_load_stats_v2_LoadStatsResponse;
typedef struct envoy_service_load_stats_v2_LoadStatsRequest envoy_service_load_stats_v2_LoadStatsRequest;
typedef struct envoy_service_load_stats_v2_LoadStatsResponse envoy_service_load_stats_v2_LoadStatsResponse;
extern const upb_msglayout envoy_service_load_stats_v2_LoadStatsRequest_msginit;
extern const upb_msglayout envoy_service_load_stats_v2_LoadStatsResponse_msginit;
struct envoy_api_v2_core_Node;
struct envoy_api_v2_endpoint_ClusterStats;
struct google_protobuf_Duration;
extern const upb_msglayout envoy_api_v2_core_Node_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_ClusterStats_msginit;
extern const upb_msglayout google_protobuf_Duration_msginit;
/* envoy.service.load_stats.v2.LoadStatsRequest */
/* NOTE(review): upbc-generated code ("Do not edit" per the file header).
 * The comments below are review annotations only; to change behavior,
 * regenerate from envoy/service/load_stats/v2/lrs.proto instead. */

/* Arena-allocate a new, zero-initialized LoadStatsRequest. */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsRequest *envoy_service_load_stats_v2_LoadStatsRequest_new(upb_arena *arena) {
  return (envoy_service_load_stats_v2_LoadStatsRequest *)upb_msg_new(&envoy_service_load_stats_v2_LoadStatsRequest_msginit, arena);
}
/* Decode |buf| into a freshly allocated message; returns NULL on failure. */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsRequest *envoy_service_load_stats_v2_LoadStatsRequest_parse(const char *buf, size_t size,
                        upb_arena *arena) {
  envoy_service_load_stats_v2_LoadStatsRequest *ret = envoy_service_load_stats_v2_LoadStatsRequest_new(arena);
  return (ret && upb_decode(buf, size, ret, &envoy_service_load_stats_v2_LoadStatsRequest_msginit, arena)) ? ret : NULL;
}
/* Serialize |msg| into arena-owned bytes; length is returned through |len|. */
UPB_INLINE char *envoy_service_load_stats_v2_LoadStatsRequest_serialize(const envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena, size_t *len) {
  return upb_encode(msg, &envoy_service_load_stats_v2_LoadStatsRequest_msginit, arena, len);
}
/* Field getters (UPB_SIZE picks the 32- vs 64-bit struct offset). */
UPB_INLINE const struct envoy_api_v2_core_Node* envoy_service_load_stats_v2_LoadStatsRequest_node(const envoy_service_load_stats_v2_LoadStatsRequest *msg) { return UPB_FIELD_AT(msg, const struct envoy_api_v2_core_Node*, UPB_SIZE(0, 0)); }
UPB_INLINE const struct envoy_api_v2_endpoint_ClusterStats* const* envoy_service_load_stats_v2_LoadStatsRequest_cluster_stats(const envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t *len) { return (const struct envoy_api_v2_endpoint_ClusterStats* const*)_upb_array_accessor(msg, UPB_SIZE(4, 8), len); }
/* Field setters / mutable accessors. */
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsRequest_set_node(envoy_service_load_stats_v2_LoadStatsRequest *msg, struct envoy_api_v2_core_Node* value) {
  UPB_FIELD_AT(msg, struct envoy_api_v2_core_Node*, UPB_SIZE(0, 0)) = value;
}
/* Returns the node submessage, lazily allocating it in |arena| if unset. */
UPB_INLINE struct envoy_api_v2_core_Node* envoy_service_load_stats_v2_LoadStatsRequest_mutable_node(envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena) {
  struct envoy_api_v2_core_Node* sub = (struct envoy_api_v2_core_Node*)envoy_service_load_stats_v2_LoadStatsRequest_node(msg);
  if (sub == NULL) {
    sub = (struct envoy_api_v2_core_Node*)upb_msg_new(&envoy_api_v2_core_Node_msginit, arena);
    if (!sub) return NULL;
    envoy_service_load_stats_v2_LoadStatsRequest_set_node(msg, sub);
  }
  return sub;
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats** envoy_service_load_stats_v2_LoadStatsRequest_mutable_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t *len) {
  return (struct envoy_api_v2_endpoint_ClusterStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(4, 8), len);
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats** envoy_service_load_stats_v2_LoadStatsRequest_resize_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t len, upb_arena *arena) {
  return (struct envoy_api_v2_endpoint_ClusterStats**)_upb_array_resize_accessor(msg, UPB_SIZE(4, 8), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
/* Appends a new ClusterStats element; returns NULL on allocation failure. */
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats* envoy_service_load_stats_v2_LoadStatsRequest_add_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena) {
  struct envoy_api_v2_endpoint_ClusterStats* sub = (struct envoy_api_v2_endpoint_ClusterStats*)upb_msg_new(&envoy_api_v2_endpoint_ClusterStats_msginit, arena);
  bool ok = _upb_array_append_accessor(
      msg, UPB_SIZE(4, 8), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
  if (!ok) return NULL;
  return sub;
}
/* envoy.service.load_stats.v2.LoadStatsResponse */
/* NOTE(review): upbc-generated code — review annotations only; regenerate
 * from lrs.proto rather than hand-editing (see the file header). */

/* Arena-allocate a new, zero-initialized LoadStatsResponse. */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsResponse *envoy_service_load_stats_v2_LoadStatsResponse_new(upb_arena *arena) {
  return (envoy_service_load_stats_v2_LoadStatsResponse *)upb_msg_new(&envoy_service_load_stats_v2_LoadStatsResponse_msginit, arena);
}
/* Decode |buf| into a freshly allocated message; returns NULL on failure. */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsResponse *envoy_service_load_stats_v2_LoadStatsResponse_parse(const char *buf, size_t size,
                        upb_arena *arena) {
  envoy_service_load_stats_v2_LoadStatsResponse *ret = envoy_service_load_stats_v2_LoadStatsResponse_new(arena);
  return (ret && upb_decode(buf, size, ret, &envoy_service_load_stats_v2_LoadStatsResponse_msginit, arena)) ? ret : NULL;
}
/* Serialize |msg| into arena-owned bytes; length is returned through |len|. */
UPB_INLINE char *envoy_service_load_stats_v2_LoadStatsResponse_serialize(const envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_arena *arena, size_t *len) {
  return upb_encode(msg, &envoy_service_load_stats_v2_LoadStatsResponse_msginit, arena, len);
}
/* Field getters. */
UPB_INLINE upb_strview const* envoy_service_load_stats_v2_LoadStatsResponse_clusters(const envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t *len) { return (upb_strview const*)_upb_array_accessor(msg, UPB_SIZE(8, 16), len); }
UPB_INLINE const struct google_protobuf_Duration* envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval(const envoy_service_load_stats_v2_LoadStatsResponse *msg) { return UPB_FIELD_AT(msg, const struct google_protobuf_Duration*, UPB_SIZE(4, 8)); }
UPB_INLINE bool envoy_service_load_stats_v2_LoadStatsResponse_report_endpoint_granularity(const envoy_service_load_stats_v2_LoadStatsResponse *msg) { return UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)); }
/* Field setters / mutable accessors. */
UPB_INLINE upb_strview* envoy_service_load_stats_v2_LoadStatsResponse_mutable_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t *len) {
  return (upb_strview*)_upb_array_mutable_accessor(msg, UPB_SIZE(8, 16), len);
}
UPB_INLINE upb_strview* envoy_service_load_stats_v2_LoadStatsResponse_resize_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t len, upb_arena *arena) {
  return (upb_strview*)_upb_array_resize_accessor(msg, UPB_SIZE(8, 16), len, UPB_SIZE(8, 16), UPB_TYPE_STRING, arena);
}
UPB_INLINE bool envoy_service_load_stats_v2_LoadStatsResponse_add_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_strview val, upb_arena *arena) {
  return _upb_array_append_accessor(
      msg, UPB_SIZE(8, 16), UPB_SIZE(8, 16), UPB_TYPE_STRING, &val, arena);
}
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsResponse_set_load_reporting_interval(envoy_service_load_stats_v2_LoadStatsResponse *msg, struct google_protobuf_Duration* value) {
  UPB_FIELD_AT(msg, struct google_protobuf_Duration*, UPB_SIZE(4, 8)) = value;
}
/* Returns the interval submessage, lazily allocating it in |arena| if unset. */
UPB_INLINE struct google_protobuf_Duration* envoy_service_load_stats_v2_LoadStatsResponse_mutable_load_reporting_interval(envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_arena *arena) {
  struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval(msg);
  if (sub == NULL) {
    sub = (struct google_protobuf_Duration*)upb_msg_new(&google_protobuf_Duration_msginit, arena);
    if (!sub) return NULL;
    envoy_service_load_stats_v2_LoadStatsResponse_set_load_reporting_interval(msg, sub);
  }
  return sub;
}
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsResponse_set_report_endpoint_granularity(envoy_service_load_stats_v2_LoadStatsResponse *msg, bool value) {
  UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_ */
| 3,293 |
{
"schema_version": "1.2.0",
"id": "GHSA-5jjv-x4fq-qjwp",
"modified": "2021-01-07T22:52:18Z",
"published": "2020-10-05T15:48:34Z",
"aliases": [
"CVE-2020-15237"
],
"summary": "Possible timing attack in derivation_endpoint",
"details": "### Impact\n\nWhen using the `derivation_endpoint` plugin, it's possible for the attacker to use a timing attack to guess the signature of the derivation URL.\n\n### Patches\n\nThe problem has been fixed by comparing sent and calculated signature in constant time, using `Rack::Utils.secure_compare`. Users using the `derivation_endpoint` plugin are urged to upgrade to Shrine 3.3.0 or greater.\n\n### Workarounds\n\nUsers of older Shrine versions can apply the following monkey-patch after loading the `derivation_endpoint` plugin:\n\n```rb\nclass Shrine\n class UrlSigner\n def verify_signature(string, signature)\n if signature.nil?\n fail InvalidSignature, \"missing \\\"signature\\\" param\"\n elsif !Rack::Utils.secure_compare(signature, generate_signature(string))\n fail InvalidSignature, \"provided signature does not match the calculated signature\"\n end\n end\n end\nend\n```\n\n### References\n\nYou can read more about timing attacks [here](https://en.wikipedia.org/wiki/Timing_attack).",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:N/A:N"
}
],
"affected": [
{
"package": {
"ecosystem": "RubyGems",
"name": "shrine"
},
"ranges": [
{
"type": "ECOSYSTEM",
"events": [
{
"introduced": "0"
},
{
"fixed": "3.3.0"
}
]
}
]
}
],
"references": [
{
"type": "WEB",
"url": "https://github.com/shrinerb/shrine/security/advisories/GHSA-5jjv-x4fq-qjwp"
},
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2020-15237"
},
{
"type": "WEB",
"url": "https://github.com/shrinerb/shrine/commit/1b27090ce31543bf39f186c20ea47c8250fca2f0"
}
],
"database_specific": {
"cwe_ids": [
"CWE-203",
"CWE-208"
],
"severity": "MODERATE",
"github_reviewed": true
}
} | 1,061 |
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "fuchsia/engine/browser/navigation_policy_handler.h"
#include <lib/fidl/cpp/binding.h>
#include "base/fuchsia/fuchsia_logging.h"
#include "content/public/browser/navigation_handle.h"
#include "fuchsia/engine/browser/navigation_policy_throttle.h"
// Binds the client-supplied NavigationPolicyProvider and installs an error
// handler so outstanding throttles are cancelled if the channel disconnects.
NavigationPolicyHandler::NavigationPolicyHandler(
    fuchsia::web::NavigationPolicyProviderParams params,
    fidl::InterfaceHandle<fuchsia::web::NavigationPolicyProvider> delegate)
    : params_(std::move(params)), provider_(delegate.Bind()) {
  provider_.set_error_handler(fit::bind_member(
      this, &NavigationPolicyHandler::OnNavigationPolicyProviderDisconnected));
}
NavigationPolicyHandler::~NavigationPolicyHandler() {
  // Cancel navigations still waiting on a policy decision: the handler is
  // going away, so their verdicts can never arrive.
  // NOTE(review): duplicates OnNavigationPolicyProviderDisconnected();
  // consider extracting a shared helper.
  for (auto* throttle : navigation_throttles_) {
    throttle->OnNavigationPolicyProviderDisconnected(
        content::NavigationThrottle::CANCEL);
  }
  navigation_throttles_.clear();
}
// Tracks |navigation_throttle| so it can be cancelled if the provider
// disconnects or this handler is destroyed.
void NavigationPolicyHandler::RegisterNavigationThrottle(
    NavigationPolicyThrottle* navigation_throttle) {
  navigation_throttles_.insert(navigation_throttle);
}
// Stops tracking |navigation_throttle|.
void NavigationPolicyHandler::RemoveNavigationThrottle(
    NavigationPolicyThrottle* navigation_throttle) {
  navigation_throttles_.erase(navigation_throttle);
}
// Forwards the request to the remote NavigationPolicyProvider; |callback|
// receives its verdict.
void NavigationPolicyHandler::EvaluateRequestedNavigation(
    fuchsia::web::RequestedNavigation requested_navigation,
    fuchsia::web::NavigationPolicyProvider::EvaluateRequestedNavigationCallback
        callback) {
  provider_->EvaluateRequestedNavigation(std::move(requested_navigation),
                                         std::move(callback));
}
// Returns whether |phase| of |handle|'s navigation should be sent to the
// policy provider, based on the frame-type-specific phase masks supplied at
// construction time.
bool NavigationPolicyHandler::ShouldEvaluateNavigation(
    content::NavigationHandle* handle,
    fuchsia::web::NavigationPhase phase) {
  // Pick the mask for the frame type, then require every bit of |phase| to
  // be enabled in it.
  const auto mask = handle->IsInMainFrame() ? params_.main_frame_phases()
                                            : params_.subframe_phases();
  return (phase & mask) == phase;
}
// True while the NavigationPolicyProvider channel is still bound.
bool NavigationPolicyHandler::is_provider_connected() {
  return provider_.is_bound();
}
// Error handler for the provider channel: logs the epitaph and cancels all
// pending navigations, mirroring the destructor's cleanup.
void NavigationPolicyHandler::OnNavigationPolicyProviderDisconnected(
    zx_status_t status) {
  ZX_LOG(ERROR, status) << "NavigationPolicyProvider disconnected";
  for (auto* throttle : navigation_throttles_) {
    throttle->OnNavigationPolicyProviderDisconnected(
        content::NavigationThrottle::CANCEL);
  }
  navigation_throttles_.clear();
}
| 803 |
663 | <filename>agate/csv_py2.py<gh_stars>100-1000
#!/usr/bin/env python
"""
This module contains the Python 2 replacement for :mod:`csv`.
"""
import codecs
import csv
import warnings
import six
from agate.exceptions import FieldSizeLimitError
# Single-byte (or byte-compatible) encodings for which UnicodeWriter can
# encode rows directly instead of round-tripping through a UTF-8 queue.
# See: https://github.com/wireservice/csvkit/issues/175
EIGHT_BIT_ENCODINGS = [
    'utf-8', 'u8', 'utf', 'utf8',
    'latin-1', 'iso-8859-1', 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'l1'
]
# Candidate delimiters handed to csv.Sniffer when guessing a dialect
# (see Sniffer.sniff below).
POSSIBLE_DELIMITERS = [',', '\t', ';', ' ', ':', '|']
class UTF8Recoder(six.Iterator):
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8.

    This lets Python 2's byte-oriented ``csv`` module parse input in any
    encoding: lines are decoded using ``encoding`` then re-encoded as UTF-8
    bytes.
    """
    def __init__(self, f, encoding):
        # Wrap the raw stream in a decoding reader for the source encoding.
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def __next__(self):
        # One decoded line, re-encoded as UTF-8 bytes for csv.reader.
        return next(self.reader).encode('utf-8')
class UnicodeReader(object):
    """
    A CSV reader which will read rows from a file in a given encoding.

    Rows are returned as lists of unicode strings. When ``line_numbers`` is
    True a line-number column is prepended to every row (with a header cell
    if ``header`` is True).
    """
    def __init__(self, f, encoding='utf-8', field_size_limit=None, line_numbers=False, header=True, **kwargs):
        self.line_numbers = line_numbers
        self.header = header
        # Recode the input to UTF-8 so the byte-oriented Python 2 csv module
        # can parse it regardless of the source encoding.
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, **kwargs)
        if field_size_limit:
            # NB: csv.field_size_limit is process-global, not per-reader.
            csv.field_size_limit(field_size_limit)
    def next(self):
        # Python 2 iterator protocol; returns the next row as unicode cells.
        try:
            row = next(self.reader)
        except csv.Error as e:
            # Terrible way to test for this exception, but there is no subclass
            if 'field larger than field limit' in str(e):
                raise FieldSizeLimitError(csv.field_size_limit(), self.line_num)
            else:
                raise e
        if self.line_numbers:
            if self.header and self.line_num == 1:
                # NOTE(review): the header cell here is 'line_numbers'
                # (plural) while Writer/DictWriter emit 'line_number' --
                # confirm whether this asymmetry is intentional.
                row.insert(0, 'line_numbers')
            else:
                # Subtract the header row so data rows are numbered from 1.
                row.insert(0, str(self.line_num - 1 if self.header else self.line_num))
        # Decode every cell from UTF-8 back to unicode text.
        return [six.text_type(s, 'utf-8') for s in row]
    def __iter__(self):
        return self
    @property
    def dialect(self):
        # Dialect used by the underlying csv reader.
        return self.reader.dialect
    @property
    def line_num(self):
        # Physical line number of the most recently read line.
        return self.reader.line_num
class UnicodeWriter(object):
    """
    A CSV writer which will write rows to a file in the specified encoding.

    NB: Optimized so that eight-bit encodings skip re-encoding. See:
    https://github.com/wireservice/csvkit/issues/175
    """
    def __init__(self, f, encoding='utf-8', **kwargs):
        self.encoding = encoding
        # Normalize the encoding name before checking whether it is a
        # single-byte encoding that can be written directly.
        self._eight_bit = (self.encoding.lower().replace('_', '-') in EIGHT_BIT_ENCODINGS)
        if self._eight_bit:
            self.writer = csv.writer(f, **kwargs)
        else:
            # Redirect output to a queue for reencoding
            self.queue = six.StringIO()
            self.writer = csv.writer(self.queue, **kwargs)
            self.stream = f
            # Incremental encoder preserves state across rows (e.g. BOMs
            # are only emitted once).
            self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # None values are written as empty strings; everything else is
        # coerced to text and encoded.
        if self._eight_bit:
            self.writer.writerow([six.text_type(s if s is not None else '').encode(self.encoding) for s in row])
        else:
            self.writer.writerow([six.text_type(s if s is not None else '').encode('utf-8') for s in row])
            # Fetch UTF-8 output from the queue...
            data = self.queue.getvalue()
            data = data.decode('utf-8')
            # ...and reencode it into the target encoding
            data = self.encoder.encode(data)
            # write to the file
            self.stream.write(data)
            # empty the queue
            # NOTE(review): relies on Python 2 StringIO semantics --
            # io.StringIO.truncate(0) does not rewind the position; confirm
            # before running this module under Python 3.
            self.queue.truncate(0)
    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
class UnicodeDictReader(csv.DictReader):
    """
    Defer almost all implementation to :class:`csv.DictReader`, but wraps our
    unicode reader instead of :func:`csv.reader`.
    """
    def __init__(self, f, fieldnames=None, restkey=None, restval=None, *args, **kwargs):
        # Build the unicode-aware reader first (it consumes the 'encoding'
        # kwarg), then strip 'encoding' so csv.DictReader does not choke on it.
        reader = UnicodeReader(f, *args, **kwargs)
        if 'encoding' in kwargs:
            kwargs.pop('encoding')
        csv.DictReader.__init__(self, f, fieldnames, restkey, restval, *args, **kwargs)
        # Replace the plain csv.reader installed by DictReader.__init__ with
        # the unicode-aware one.
        self.reader = reader
class UnicodeDictWriter(csv.DictWriter):
    """
    Defer almost all implementation to :class:`csv.DictWriter`, but wraps our
    unicode writer instead of :func:`csv.writer`.
    """
    def __init__(self, f, fieldnames, restval='', extrasaction='raise', *args, **kwds):
        # Reimplements csv.DictWriter.__init__ (rather than calling it) so
        # that self.writer is a UnicodeWriter instead of a csv.writer.
        self.fieldnames = fieldnames
        self.restval = restval
        if extrasaction.lower() not in ('raise', 'ignore'):
            raise ValueError('extrasaction (%s) must be "raise" or "ignore"' % extrasaction)
        self.extrasaction = extrasaction
        self.writer = UnicodeWriter(f, *args, **kwds)
class Reader(UnicodeReader):
    """
    A unicode-aware CSV reader.

    Alias of :class:`UnicodeReader`; exists so the public API mirrors the
    Python 3 implementation.
    """
    pass
class Writer(UnicodeWriter):
    """
    A unicode-aware CSV writer.

    Optionally prepends a ``line_number`` column and normalizes embedded
    carriage returns so they survive quoting.
    """
    def __init__(self, f, encoding='utf-8', line_numbers=False, **kwargs):
        self.row_count = 0
        self.line_numbers = line_numbers
        # Default to unix line endings unless the caller overrides them.
        if 'lineterminator' not in kwargs:
            kwargs['lineterminator'] = '\n'
        UnicodeWriter.__init__(self, f, encoding, **kwargs)
    def _append_line_number(self, row):
        # Despite the name, this *prepends* a cell: the 'line_number' header
        # for the first row written, then the 1-based data row index.
        if self.row_count == 0:
            row.insert(0, 'line_number')
        else:
            row.insert(0, self.row_count)
        self.row_count += 1
    def writerow(self, row):
        if self.line_numbers:
            # Copy before mutating so the caller's row is left untouched.
            row = list(row)
            self._append_line_number(row)
        # Convert embedded Mac line endings to unix style line endings so they get quoted
        row = [i.replace('\r', '\n') if isinstance(i, six.string_types) else i for i in row]
        UnicodeWriter.writerow(self, row)
    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
class DictReader(UnicodeDictReader):
    """
    A unicode-aware CSV DictReader.

    Alias of :class:`UnicodeDictReader`; exists so the public API mirrors the
    Python 3 implementation.
    """
    pass
class DictWriter(UnicodeDictWriter):
    """
    A unicode-aware CSV DictWriter.

    Optionally adds a ``line_number`` key to each row and normalizes embedded
    carriage returns so they survive quoting.
    """
    def __init__(self, f, fieldnames, encoding='utf-8', line_numbers=False, **kwargs):
        self.row_count = 0
        self.line_numbers = line_numbers
        # Default to unix line endings unless the caller overrides them.
        if 'lineterminator' not in kwargs:
            kwargs['lineterminator'] = '\n'
        UnicodeDictWriter.__init__(self, f, fieldnames, encoding=encoding, **kwargs)
    def _append_line_number(self, row):
        # Adds a 'line_number' key: 0 for the first row written, then the
        # 1-based data row index.
        if self.row_count == 0:
            row['line_number'] = 0
        else:
            row['line_number'] = self.row_count
        self.row_count += 1
    def writerow(self, row):
        if self.line_numbers:
            # Copy the mapping before mutating it. The previous code used
            # list(row), which collapses a dict to a list of its keys, so
            # _append_line_number (row['line_number'] = ...) raised
            # "TypeError: list indices must be integers" whenever
            # line_numbers was enabled.
            row = dict(row)
            self._append_line_number(row)
        # Convert embedded Mac line endings to unix style line endings so they get quoted
        row = dict([
            (k, v.replace('\r', '\n')) if isinstance(v, basestring) else (k, v) for k, v in row.items()  # noqa: F821
        ])
        UnicodeDictWriter.writerow(self, row)
    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
class Sniffer(object):
    """
    A functional wrapper of ``csv.Sniffer()``.
    """
    def sniff(self, sample):
        """
        A functional version of ``csv.Sniffer().sniff``, that extends the
        list of possible delimiters to include some seen in the wild.

        Returns ``None`` (after emitting a :class:`RuntimeWarning`) when the
        dialect cannot be determined.
        """
        dialect = None
        try:
            dialect = csv.Sniffer().sniff(sample, POSSIBLE_DELIMITERS)
        except csv.Error as e:
            warnings.warn('Error sniffing CSV dialect: %s' % e, RuntimeWarning, stacklevel=2)
        return dialect
def reader(*args, **kwargs):
    """
    A replacement for Python's :func:`csv.reader` that uses
    :class:`.csv_py2.Reader`.

    All positional and keyword arguments are forwarded unchanged.
    """
    return Reader(*args, **kwargs)
def writer(*args, **kwargs):
    """
    A replacement for Python's :func:`csv.writer` that uses
    :class:`.csv_py2.Writer`.

    All positional and keyword arguments are forwarded unchanged.
    """
    return Writer(*args, **kwargs)
5,411 | <gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mojo/public/cpp/bindings/associated_binding.h"
#include "base/sequenced_task_runner.h"
#include "mojo/public/cpp/bindings/lib/task_runner_helper.h"
namespace mojo {
// Construction does nothing; the endpoint is attached later via BindImpl().
AssociatedBindingBase::AssociatedBindingBase() {}
AssociatedBindingBase::~AssociatedBindingBase() {}
// Installs a MessageFilter on the bound endpoint. The binding must be bound.
void AssociatedBindingBase::SetFilter(std::unique_ptr<MessageFilter> filter) {
  DCHECK(endpoint_client_);
  endpoint_client_->SetFilter(std::move(filter));
}
// Severs the binding by destroying the endpoint client; safe when unbound.
void AssociatedBindingBase::Close() {
  endpoint_client_.reset();
}
// Sends a disconnect reason to the peer (if still bound) and then closes.
// Close() is called unconditionally, so this is also safe when unbound.
void AssociatedBindingBase::CloseWithReason(uint32_t custom_reason,
                                            const std::string& description) {
  if (endpoint_client_)
    endpoint_client_->CloseWithReason(custom_reason, description);
  Close();
}
// Registers a callback to run when the peer endpoint is closed.
// Must only be called while bound.
void AssociatedBindingBase::set_connection_error_handler(
    base::OnceClosure error_handler) {
  DCHECK(is_bound());
  endpoint_client_->set_connection_error_handler(std::move(error_handler));
}
// Like set_connection_error_handler(), but the callback also receives the
// disconnect reason/description sent by the peer. Must only be called while
// bound.
void AssociatedBindingBase::set_connection_error_with_reason_handler(
    ConnectionErrorWithReasonCallback error_handler) {
  DCHECK(is_bound());
  endpoint_client_->set_connection_error_with_reason_handler(
      std::move(error_handler));
}
// Synchronously flushes pending messages, for use in tests only.
// NOTE(review): unlike the setters above there is no DCHECK(is_bound()) --
// calling this while unbound dereferences a null endpoint_client_; confirm
// callers guarantee the binding is bound.
void AssociatedBindingBase::FlushForTesting() {
  endpoint_client_->FlushForTesting();
}
// Attaches |handle| and (re)creates the InterfaceEndpointClient that routes
// messages between the endpoint and |receiver|. Binding an invalid handle is
// equivalent to Close().
void AssociatedBindingBase::BindImpl(
    ScopedInterfaceEndpointHandle handle,
    MessageReceiverWithResponderStatus* receiver,
    std::unique_ptr<MessageReceiver> payload_validator,
    bool expect_sync_requests,
    scoped_refptr<base::SequencedTaskRunner> runner,
    uint32_t interface_version,
    const char* interface_name) {
  if (!handle.is_valid()) {
    endpoint_client_.reset();
    return;
  }
  // The task runner supplied by the caller is normalized before use (e.g. to
  // the current default runner when none was provided).
  endpoint_client_.reset(new InterfaceEndpointClient(
      std::move(handle), receiver, std::move(payload_validator),
      expect_sync_requests,
      internal::GetTaskRunnerToUseFromUserProvidedTaskRunner(std::move(runner)),
      interface_version, interface_name));
}
} // namespace mojo
| 736 |
18,012 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.tri;
import org.apache.dubbo.common.constants.CommonConstants;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * Header names used by the Triple (gRPC-compatible) protocol, plus the set of
 * attachment keys that are handled internally and must never be transmitted
 * as user attachments.
 */
public enum TripleHeaderEnum {

    AUTHORITY_KEY(":authority"),
    PATH_KEY(":path"),
    HTTP_STATUS_KEY("http-status"),
    STATUS_KEY("grpc-status"),
    MESSAGE_KEY("grpc-message"),
    STATUS_DETAIL_KEY("grpc-status-details-bin"),
    TIMEOUT("grpc-timeout"),
    CONTENT_TYPE_KEY("content-type"),
    CONTENT_PROTO("application/grpc+proto"),
    APPLICATION_GRPC("application/grpc"),
    GRPC_ENCODING("grpc-encoding"),
    GRPC_ACCEPT_ENCODING("grpc-accept-encoding"),
    CONSUMER_APP_NAME_KEY("tri-consumer-appname"),
    SERVICE_VERSION("tri-service-version"),
    SERVICE_GROUP("tri-service-group");

    // Reverse lookup from wire-level header name to enum constant.
    static final Map<String, TripleHeaderEnum> enumMap = new HashMap<>();
    // Attachment keys that are consumed internally and excluded from headers.
    static final Set<String> excludeAttachmentsSet = new HashSet<>();

    static {
        for (TripleHeaderEnum headerEnum : TripleHeaderEnum.values()) {
            enumMap.put(headerEnum.getHeader(), headerEnum);
        }
        String[] excludedKeys = {
            CommonConstants.GROUP_KEY,
            CommonConstants.INTERFACE_KEY,
            CommonConstants.PATH_KEY,
            CommonConstants.REMOTE_APPLICATION_KEY,
            CommonConstants.APPLICATION_KEY,
            TripleConstant.SERIALIZATION_KEY,
            TripleConstant.TE_KEY,
        };
        for (String key : excludedKeys) {
            excludeAttachmentsSet.add(key);
        }
    }

    private final String header;

    TripleHeaderEnum(String header) {
        this.header = header;
    }

    /** Returns the wire-level header name represented by this constant. */
    public String getHeader() {
        return header;
    }

    /**
     * Returns {@code true} if {@code key} must not be propagated as a user
     * attachment, either because it is consumed internally or because it is a
     * reserved Triple header name.
     */
    public static boolean containsExcludeAttachments(String key) {
        return excludeAttachmentsSet.contains(key) || enumMap.containsKey(key);
    }
}
| 930 |
4,351 | <reponame>hzy001/SPTAG<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "inc/Core/Common.h"
#include "inc/Helper/StringConvert.h"
#include "inc/Helper/CommonHelper.h"
#include <string>
namespace SPTAG {
namespace SSDServing {
        // Aggregates the configurable options shared by the SSDServing index
        // build and search tools. The member list below must stay in sync with
        // ParameterDefinitionList.h: defaults (constructor) and string names
        // (SetParameter) are generated from that header via the
        // DefineBasicParameter X-macro.
        class BaseOptions
        {
        public:
            // Vector/index characteristics.
            SPTAG::VectorValueType m_valueType;
            SPTAG::DistCalcMethod m_distCalcMethod;
            SPTAG::IndexAlgoType m_indexAlgoType;
            SPTAG::DimensionType m_dim;
            // Base vector file.
            std::string m_vectorPath;
            SPTAG::VectorFileType m_vectorType;
            SPTAG::SizeType m_vectorSize; //Optional on condition
            std::string m_vectorDelimiter; //Optional on condition
            // Query vector file.
            std::string m_queryPath;
            SPTAG::VectorFileType m_queryType;
            SPTAG::SizeType m_querySize; //Optional on condition
            std::string m_queryDelimiter; //Optional on condition
            // Warmup vector file.
            std::string m_warmupPath;
            SPTAG::VectorFileType m_warmupType;
            SPTAG::SizeType m_warmupSize; //Optional on condition
            std::string m_warmupDelimiter; //Optional on condition
            // Ground-truth file used for recall evaluation.
            std::string m_truthPath;
            SPTAG::TruthFileType m_truthType;
            bool m_generateTruth;
            // Output index layout.
            std::string m_indexDirectory;
            std::string m_headIDFile;
            std::string m_headVectorFile;
            std::string m_headIndexFolder;
            std::string m_ssdIndex;
            bool m_deleteHeadVectors;
            int m_ssdIndexFileNum;
            // Assigns every member its default from ParameterDefinitionList.h.
            BaseOptions() {
#define DefineBasicParameter(VarName, VarType, DefaultValue, RepresentStr) \
                VarName = DefaultValue; \

#include "inc/SSDServing/IndexBuildManager/ParameterDefinitionList.h"
#undef DefineBasicParameter
            }
            ~BaseOptions() {}
            // Sets the member whose registered string name matches p_param by
            // converting p_value to the member's type. Unknown names are
            // silently ignored; returns Fail only on null arguments.
            ErrorCode SetParameter(const char* p_param, const char* p_value)
            {
                if (nullptr == p_param || nullptr == p_value) return ErrorCode::Fail;

#define DefineBasicParameter(VarName, VarType, DefaultValue, RepresentStr) \
    else if (SPTAG::Helper::StrUtils::StrEqualIgnoreCase(p_param, RepresentStr)) \
    { \
        LOG(Helper::LogLevel::LL_Info, "Setting %s with value %s\n", RepresentStr, p_value); \
        VarType tmp; \
        if (SPTAG::Helper::Convert::ConvertStringTo<VarType>(p_value, tmp)) \
        { \
            VarName = tmp; \
        } \
    } \

#include "inc/SSDServing/IndexBuildManager/ParameterDefinitionList.h"
#undef DefineBasicParameter

                return ErrorCode::Success;
            }
        };
}
} | 1,350 |
5,535 | #!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
import io
import logging
from mock import ANY, call, patch, Mock
from gppylib import gplog
from gppylib.commands import base
from gppylib.gparray import Segment, GpArray
from gppylib.operations.buildMirrorSegments import GpMirrorToBuild, GpMirrorListToBuild, GpStopSegmentDirectoryDirective
from gppylib.operations.startSegments import StartSegmentsResult
from gppylib.system import configurationInterface
from test.unit.gp_unittest import GpTestCase
class BuildMirrorsTestCase(GpTestCase):
    """
    This class only tests for the buildMirrors function and also test_clean_up_failed_segments
    """
    def setUp(self):
        # Build a minimal cluster (coordinator + one primary/mirror pair) and
        # patch GpArray.getSegmentsByHostName so no real cluster is needed.
        self.maxDiff = None
        self.coordinator = Segment(content=-1, preferred_role='p', dbid=1, role='p', mode='s',
                              status='u', hostname='coordinatorhost', address='coordinatorhost-1',
                              port=1111, datadir='/coordinatordir')
        self.primary = Segment(content=0, preferred_role='p', dbid=20, role='p', mode='s',
                               status='u', hostname='primaryhost', address='primaryhost-1',
                               port=3333, datadir='/primary')
        self.mirror = Segment(content=0, preferred_role='m', dbid=30, role='m', mode='s',
                              status='d', hostname='primaryhost', address='primaryhost-1',
                              port=3333, datadir='/primary')
        gplog.get_unittest_logger()
        self.apply_patches([
            patch('gppylib.operations.buildMirrorSegments.GpArray.getSegmentsByHostName')
        ])
        self.mock_get_segments_by_hostname = self.get_mock_from_apply_patch('getSegmentsByHostName')
        self.action = 'recover'
        self.gpEnv = Mock()
        self.gpArray = GpArray([self.coordinator, self.primary, self.mirror])
        self.mock_logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal'])
    def tearDown(self):
        super(BuildMirrorsTestCase, self).tearDown()
    def _setup_mocks(self, buildMirrorSegs_obj):
        # Stub out every collaborator of GpMirrorListToBuild.buildMirrors so
        # the tests only exercise its orchestration logic.
        buildMirrorSegs_obj._GpMirrorListToBuild__startAll = Mock(return_value=True)
        markdown_mock = Mock()
        buildMirrorSegs_obj._wait_fts_to_mark_down_segments = markdown_mock
        buildMirrorSegs_obj._run_recovery = Mock()
        buildMirrorSegs_obj._clean_up_failed_segments = Mock()
        buildMirrorSegs_obj._GpMirrorListToBuild__runWaitAndCheckWorkerPoolForErrorsAndClear = Mock()
        buildMirrorSegs_obj._get_running_postgres_segments = Mock()
        configurationInterface.getConfigurationProvider = Mock()
    def _common_asserts_with_stop_and_logger(self, buildMirrorSegs_obj, expected_logger_msg, expected_segs_to_stop,
                                             expected_segs_to_start, expected_segs_to_markdown, expected_segs_to_update,
                                             cleanup_count):
        # Asserts for scenarios that include stopping failed segments; chains
        # into _common_asserts for the shared expectations.
        self.mock_logger.info.assert_any_call(expected_logger_msg)
        #TODO assert all logger info msgs
        self.assertEqual(4, self.mock_logger.info.call_count)
        self.assertEqual([call(expected_segs_to_stop)],
                         buildMirrorSegs_obj._get_running_postgres_segments.call_args_list)
        self._common_asserts(buildMirrorSegs_obj, expected_segs_to_start, expected_segs_to_markdown,
                             expected_segs_to_update, cleanup_count)
    def _common_asserts(self, buildMirrorSegs_obj, expected_segs_to_start, expected_segs_to_markdown,
                        expected_segs_to_update, cleanup_count):
        # Shared expectations: FTS markdown wait, cleanup count, recovery run,
        # system config update, and segment startup.
        self.assertEqual([call(self.gpEnv, expected_segs_to_markdown)],
                         buildMirrorSegs_obj._wait_fts_to_mark_down_segments.call_args_list)
        self.assertEqual(cleanup_count, buildMirrorSegs_obj._clean_up_failed_segments.call_count)
        self.assertEqual(1, buildMirrorSegs_obj._run_recovery.call_count)
        self.assertEqual([call(self.gpArray, ANY, dbIdToForceMirrorRemoveAdd=expected_segs_to_update,
                               useUtilityMode=False, allowPrimary=False)],
                         configurationInterface.getConfigurationProvider.return_value.updateSystemConfig.call_args_list)
        self.assertEqual([call(self.gpEnv, self.gpArray, expected_segs_to_start)],
                         buildMirrorSegs_obj._GpMirrorListToBuild__startAll.call_args_list)
    def _run_no_failed_tests(self, tests):
        # Driver for scenarios with no failed segment (failover only).
        mirrors_to_build = []
        expected_segs_to_start = []
        for test in tests:
            with self.subTest(msg=test["name"]):
                mirrors_to_build.append(GpMirrorToBuild(None, test["live"], test["failover"],
                                                        test["forceFull"]))
                expected_segs_to_start.append(test["failover"])
        buildMirrorSegs_obj = self._run_buildMirrors(mirrors_to_build)
        self.assertEqual(3, self.mock_logger.info.call_count)
        self.assertEqual(0, buildMirrorSegs_obj._get_running_postgres_segments.call_count)
        self._common_asserts(buildMirrorSegs_obj, expected_segs_to_start, [], {2: True, 4: True}, 1)
        for test in tests:
            self.assertEqual('n', test['live'].getSegmentMode())
            self.assertEqual('d', test['failover'].getSegmentStatus())
            self.assertEqual('n', test['failover'].getSegmentMode())
    def _run_no_failover_tests(self, tests):
        # Driver for in-place recovery scenarios (failed segment, no failover
        # target). Segments that are still up are expected to be marked down.
        mirrors_to_build = []
        expected_segs_to_start = []
        expected_segs_to_stop = []
        expected_segs_to_markdown = []
        for test in tests:
            with self.subTest(msg=test["name"]):
                mirrors_to_build.append(GpMirrorToBuild(test["failed"], test["live"], None,
                                                        test["forceFull"]))
                expected_segs_to_stop.append(test["failed"])
                expected_segs_to_start.append(test["failed"])
                if 'is_failed_segment_up' in test and test["is_failed_segment_up"]:
                    expected_segs_to_markdown.append(test['failed'])
        buildMirrorSegs_obj = self._run_buildMirrors(mirrors_to_build)
        self._common_asserts_with_stop_and_logger(buildMirrorSegs_obj, "Ensuring 4 failed segment(s) are stopped",
                                                  expected_segs_to_stop, expected_segs_to_start,
                                                  expected_segs_to_markdown, {4: True, 30: True}, 1)
        for test in tests:
            self.assertEqual('n', test['live'].getSegmentMode())
            self.assertEqual('d', test['failed'].getSegmentStatus())
            self.assertEqual('n', test['failed'].getSegmentMode())
    def _run_both_failed_failover_tests(self, tests):
        # Driver for scenarios with both a failed segment and a failover
        # target: the failed segment is stopped, the failover one started.
        mirrors_to_build = []
        expected_segs_to_start = []
        expected_segs_to_stop = []
        expected_segs_to_markdown = []
        for test in tests:
            with self.subTest(msg=test["name"]):
                mirrors_to_build.append(GpMirrorToBuild(test["failed"], test["live"], test["failover"],
                                                        test["forceFull"]))
                expected_segs_to_stop.append(test["failed"])
                expected_segs_to_start.append(test["failover"])
                if 'is_failed_segment_up' in test and test["is_failed_segment_up"]:
                    expected_segs_to_markdown.append(test['failed'])
        buildMirrorSegs_obj = self._run_buildMirrors(mirrors_to_build)
        self._common_asserts_with_stop_and_logger(buildMirrorSegs_obj, "Ensuring 3 failed segment(s) are stopped",
                                                  expected_segs_to_stop, expected_segs_to_start,
                                                  expected_segs_to_markdown, {1: True, 5: True, 9: True}, 1)
        for test in tests:
            self.assertEqual('n', test['live'].getSegmentMode())
            self.assertEqual('d', test['failover'].getSegmentStatus())
            self.assertEqual('n', test['failover'].getSegmentMode())
    def _run_buildMirrors(self, mirrors_to_build):
        # Builds a fully mocked GpMirrorListToBuild and runs buildMirrors,
        # asserting it reports success.
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=mirrors_to_build,
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger
        )
        self._setup_mocks(buildMirrorSegs_obj)
        self.assertTrue(buildMirrorSegs_obj.buildMirrors(self.action, self.gpEnv, self.gpArray))
        return buildMirrorSegs_obj
    def create_primary(self, dbid='1', contentid='0', state='n', status='u', host='sdw1'):
        # Convenience factory using the pipe-separated Segment string format.
        return Segment.initFromString('{}|{}|p|p|{}|{}|{}|{}|21000|/primary/gpseg0'
                                      .format(dbid, contentid, state, status, host, host))
    def create_mirror(self, dbid='2', contentid='0', state='n', status='u', host='sdw2'):
        # NOTE(review): role fields here are 'p|p' like create_primary -- the
        # returned segment is distinguished only by port/datadir; confirm
        # whether 'm|m' was intended.
        return Segment.initFromString('{}|{}|p|p|{}|{}|{}|{}|22000|/mirror/gpseg0'
                                      .format(dbid, contentid, state, status, host, host))
    def test_buildMirrors_failed_null_pass(self):
        failed_null_tests = [
            {
                "name": "no_failed_full",
                "live": self.create_primary(),
                "failover": self.create_mirror(),
                "forceFull": False,
            },
            {
                "name": "no_failed_full2",
                "live": self.create_primary(dbid='3'),
                "failover": self.create_mirror(dbid='4'),
                "forceFull": True,
            }
        ]
        self._run_no_failed_tests(failed_null_tests)
    def test_buildMirrors_no_failover_pass(self):
        tests = [
            {
                "name": "no_failover",
                "failed": self.create_mirror(status='d'),
                "live": self.create_primary(),
                "forceFull": False,
            },
            {
                "name": "no_failover_full",
                "failed": self.create_mirror(status='d', dbid='4'),
                "live": self.create_primary(),
                "forceFull": True,
            },
            {
                "name": "no_failover_failed_seg_exists_in_gparray",
                "failed": self.mirror,
                "live": self.create_primary(),
                "forceFull": True,
                "forceoverwrite": True
            },
            {
                "name": "no_failover_failed_segment_is_up",
                "failed": self.create_mirror(dbid='5'),
                "live": self.create_primary(dbid='6'),
                "is_failed_segment_up": True,
                "forceFull": False,
            }
        ]
        self._run_no_failover_tests(tests)
    def test_buildMirrors_both_failed_failover_pass(self):
        tests = [
            {
                "name": "both_failed_failover_full",
                "failed": self.create_primary(status='d'),
                "live": self.create_mirror(),
                "failover": self.create_primary(status='d', host='sdw3'),
                "forceFull": True
            },
            {
                "name": "both_failed_failover_failed_segment_is_up",
                "failed": self.create_primary(dbid='5'),
                "live": self.create_mirror(dbid='6'),
                "failover": self.create_primary(dbid='5', host='sdw3'),
                "is_failed_segment_up": True,
                "forceFull": True
            },
            {
                "name": "both_failed_failover_failover_is_down_live_is_marked_as_sync",
                "failed": self.create_primary(dbid='9', status='d'),
                "live": self.create_mirror(dbid='10', state='s'),
                "failover": self.create_primary(dbid='9', status='d'),
                "forceFull": False,
            },
        ]
        self._run_both_failed_failover_tests(tests)
    def test_buildMirrors_forceoverwrite_true(self):
        # With forceoverwrite=True, _clean_up_failed_segments must NOT run
        # (cleanup_count 0 in the asserts below).
        failed = self.create_primary(status='d')
        live = self.create_mirror()
        failover = self.create_primary(host='sdw3')
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=[GpMirrorToBuild(failed, live, failover, False)],
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger,
            forceoverwrite=True
        )
        self._setup_mocks(buildMirrorSegs_obj)
        self.assertTrue(buildMirrorSegs_obj.buildMirrors(self.action, self.gpEnv, self.gpArray))
        self._common_asserts_with_stop_and_logger(buildMirrorSegs_obj, "Ensuring 1 failed segment(s) are stopped",
                                                  [failed], [failover], [], {1: True}, 0)
        self.assertEqual('n', live.getSegmentMode())
        self.assertEqual('d', failover.getSegmentStatus())
        self.assertEqual('n', failover.getSegmentMode())
    def test_buildMirrors_failed_seg_in_gparray_fail(self):
        # buildMirrors must reject configurations where the failed segment is
        # still present in the array while a failover target is given.
        tests = [
            {
                "name": "failed_seg_exists_in_gparray1",
                "failed": self.create_primary(status='d'),
                "failover": self.create_primary(status='d'),
                "live": self.create_mirror(),
                "forceFull": True,
                "forceoverwrite": False
            },
            {
                "name": "failed_seg_exists_in_gparray2",
                "failed": self.create_primary(dbid='3', status='d'),
                "failover": self.create_primary(dbid='3', status='d'),
                "live": self.create_mirror(dbid='4'),
                "forceFull": False,
                "forceoverwrite": False
            },
            {
                "name": "failed_seg_exists_in_gparray2",
                "failed": self.create_primary(dbid='3', status='d'),
                "failover": self.create_primary(dbid='3', status='d'),
                "live": self.create_mirror(dbid='4'),
                "forceFull": False,
                "forceoverwrite": True
            }
        ]
        for test in tests:
            mirror_to_build = GpMirrorToBuild(test["failed"], test["live"], test["failover"], test["forceFull"])
            buildMirrorSegs_obj = GpMirrorListToBuild(
                toBuild=[mirror_to_build,],
                pool=None,
                quiet=True,
                parallelDegree=0,
                logger=self.mock_logger,
                forceoverwrite=test['forceoverwrite']
            )
            self._setup_mocks(buildMirrorSegs_obj)
            local_gp_array = GpArray([self.coordinator, test["failed"]])
            expected_error = "failed segment should not be in the new configuration if failing over to"
            with self.subTest(msg=test["name"]):
                with self.assertRaisesRegex(Exception, expected_error):
                    buildMirrorSegs_obj.buildMirrors(self.action, self.gpEnv, local_gp_array)
    def test_clean_up_failed_segments(self):
        # Only in-place full recoveries should have their data dirs cleaned:
        # failed1/failed3 qualify, the failover and non-full cases do not.
        failed1 = self.create_primary(status='d')
        live1 = self.create_mirror()
        failed2 = self.create_primary(dbid='3', status='d')
        failover2 = self.create_primary(dbid='3', status='d')
        live2 = self.create_mirror(dbid='4')
        failed3 = self.create_primary(dbid='5')
        live3 = self.create_mirror(dbid='6')
        failed4 = self.create_primary(dbid='5')
        live4 = self.create_mirror(dbid='7')
        inplace_full1 = GpMirrorToBuild(failed1, live1, None, True)
        not_inplace_full = GpMirrorToBuild(failed2, live2, failover2, True)
        inplace_full2 = GpMirrorToBuild(failed3, live3, None, True)
        inplace_not_full = GpMirrorToBuild(failed4, live4, None, False)
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=[inplace_full1, not_inplace_full, inplace_full2, inplace_not_full],
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger,
            forceoverwrite=True
        )
        buildMirrorSegs_obj._GpMirrorListToBuild__runWaitAndCheckWorkerPoolForErrorsAndClear = Mock()
        buildMirrorSegs_obj._clean_up_failed_segments()
        self.mock_get_segments_by_hostname.assert_called_once_with([failed1, failed3])
        self.mock_logger.info.called_once_with('"Cleaning files from 2 segment(s)')
    def test_clean_up_failed_segments_no_segs_to_cleanup(self):
        failed2 = self.create_primary(dbid='3', status='d')
        failover2 = self.create_primary(dbid='3', status='d')
        live2 = self.create_mirror(dbid='4')
        failed4 = self.create_primary(dbid='5')
        live4 = self.create_mirror(dbid='7')
        not_inplace_full = GpMirrorToBuild(failed2, live2, failover2, True)
        inplace_not_full = GpMirrorToBuild(failed4, live4, None, False)
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=[not_inplace_full, inplace_not_full],
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger,
            forceoverwrite=True
        )
        buildMirrorSegs_obj._GpMirrorListToBuild__runWaitAndCheckWorkerPoolForErrorsAndClear = Mock()
        buildMirrorSegs_obj._clean_up_failed_segments()
        self.assertEqual(0, self.mock_get_segments_by_hostname.call_count)
        self.assertEqual(0, self.mock_logger.info.call_count)
    def test_buildMirrors_noMirrors(self):
        # An empty build list succeeds immediately and just logs.
        buildMirrorSegs_obj = GpMirrorListToBuild(
            toBuild=[],
            pool=None,
            quiet=True,
            parallelDegree=0,
            logger=self.mock_logger
        )
        self.assertTrue(buildMirrorSegs_obj.buildMirrors(None, None, None))
        self.assertTrue(buildMirrorSegs_obj.buildMirrors(self.action, None, None))
        self.assertEqual([call('No segments to None'), call('No segments to recover')],
                         self.mock_logger.info.call_args_list)
class BuildMirrorSegmentsTestCase(GpTestCase):
    def setUp(self):
        # Builds a minimal coordinator/primary pair and an empty
        # GpMirrorListToBuild whose helpers the tests below exercise directly.
        self.coordinator = Segment(content=-1, preferred_role='p', dbid=1, role='p', mode='s',
                              status='u', hostname='coordinatorhost', address='coordinatorhost-1',
                              port=1111, datadir='/coordinatordir')
        self.primary = Segment(content=0, preferred_role='p', dbid=2, role='p', mode='s',
                               status='u', hostname='primaryhost', address='primaryhost-1',
                               port=3333, datadir='/primary')
        self.mock_logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal'])
        gplog.get_unittest_logger()
        self.apply_patches([
        ])
        self.buildMirrorSegs = GpMirrorListToBuild(
            toBuild = [],
            pool = None,
            quiet = True,
            parallelDegree = 0,
            logger=self.mock_logger
        )
    # The tests below drive _get_running_postgres_segments with patched pid
    # helpers: a segment is kept only when its pid is running on the remote
    # host AND belongs to a postmaster. The side_effect lists encode the
    # per-segment answers in order.
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster')
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost')
    def test_get_running_postgres_segments_empty_segs(self, mock1, mock2, mock3):
        toBuild = []
        expected_output = []
        segs = self.buildMirrorSegs._get_running_postgres_segments(toBuild)
        self.assertEqual(segs, expected_output)
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster', return_value=True)
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost', return_value=True)
    def test_get_running_postgres_segments_all_pid_postmaster(self, mock1, mock2, mock3):
        mock_segs = [Mock(), Mock()]
        segs = self.buildMirrorSegs._get_running_postgres_segments(mock_segs)
        self.assertEqual(segs, mock_segs)
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster', side_effect=[True, False])
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost', return_value=True)
    def test_get_running_postgres_segments_some_pid_postmaster(self, mock1, mock2, mock3):
        mock_segs = [Mock(), Mock()]
        expected_output = []
        expected_output.append(mock_segs[0])
        segs = self.buildMirrorSegs._get_running_postgres_segments(mock_segs)
        self.assertEqual(segs, expected_output)
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster', side_effect=[True, False])
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost', side_effect=[True, False])
    def test_get_running_postgres_segments_one_pid_postmaster(self, mock1, mock2, mock3):
        mock_segs = [Mock(), Mock()]
        expected_output = []
        expected_output.append(mock_segs[0])
        segs = self.buildMirrorSegs._get_running_postgres_segments(mock_segs)
        self.assertEqual(segs, expected_output)
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster', side_effect=[False, False])
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost', side_effect=[True, False])
    def test_get_running_postgres_segments_no_pid_postmaster(self, mock1, mock2, mock3):
        mock_segs = [Mock(), Mock()]
        expected_output = []
        segs = self.buildMirrorSegs._get_running_postgres_segments(mock_segs)
        self.assertEqual(segs, expected_output)
    @patch('gppylib.operations.buildMirrorSegments.get_pid_from_remotehost')
    @patch('gppylib.operations.buildMirrorSegments.is_pid_postmaster', side_effect=[False, False])
    @patch('gppylib.operations.buildMirrorSegments.check_pid_on_remotehost', side_effect=[False, False])
    def test_get_running_postgres_segments_no_pid_running(self, mock1, mock2, mock3):
        mock_segs = [Mock(), Mock()]
        expected_output = []
        segs = self.buildMirrorSegs._get_running_postgres_segments(mock_segs)
        self.assertEqual(segs, expected_output)
    # dereference_remote_symlink resolves a remote datadir symlink via a
    # shell command; rc=0 yields the resolved path, any failure falls back to
    # the original path with a warning.
    @patch('gppylib.commands.base.Command.run')
    @patch('gppylib.commands.base.Command.get_results', return_value=base.CommandResult(rc=0, stdout=b'/tmp/seg0', stderr=b'', completed=True, halt=False))
    def test_dereference_remote_symlink_valid_symlink(self, mock1, mock2):
        datadir = '/tmp/link/seg0'
        host = 'h1'
        self.assertEqual(self.buildMirrorSegs.dereference_remote_symlink(datadir, host), '/tmp/seg0')
    @patch('gppylib.commands.base.Command.run')
    @patch('gppylib.commands.base.Command.get_results', return_value=base.CommandResult(rc=1, stdout=b'', stderr=b'', completed=True, halt=False))
    def test_dereference_remote_symlink_unable_to_determine_symlink(self, mock1, mock2):
        datadir = '/tmp/seg0'
        host = 'h1'
        self.assertEqual(self.buildMirrorSegs.dereference_remote_symlink(datadir, host), '/tmp/seg0')
        self.mock_logger.warning.assert_any_call('Unable to determine if /tmp/seg0 is symlink. Assuming it is not symlink')
@patch('gppylib.operations.buildMirrorSegments.read_era')
@patch('gppylib.operations.startSegments.StartSegmentsOperation')
def test_startAll_succeeds(self, mock1, mock2):
result = StartSegmentsResult()
result.getFailedSegmentObjs()
mock1.return_value.startSegments.return_value = result
result = self.buildMirrorSegs._GpMirrorListToBuild__startAll(Mock(), [Mock(), Mock()], [])
self.assertTrue(result)
    @patch('gppylib.operations.buildMirrorSegments.read_era')
    @patch('gppylib.operations.startSegments.StartSegmentsOperation')
    def test_startAll_fails(self, mock1, mock2):
        # A result containing one failed segment must make __startAll return
        # False and log a warning naming the failed segment and the reason.
        result = StartSegmentsResult()
        failed_segment = Segment.initFromString(
            "2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary0")
        result.addFailure(failed_segment, 'reason', 'reasoncode')
        mock1.return_value.startSegments.return_value = result
        result = self.buildMirrorSegs._GpMirrorListToBuild__startAll(Mock(), [Mock(), Mock()], [])
        self.assertFalse(result)
        self.mock_logger.warn.assert_any_call('Failed to start segment. The fault prober will shortly mark it as down. '
                        'Segment: sdw1:/data/primary0:content=0:dbid=2:role=p:preferred_role=p:mode=s:status=u: REASON: reason')
    def _createGpArrayWith2Primary2Mirrors(self):
        # Helper: builds a 5-segment GpArray (coordinator + 2 primaries +
        # 2 mirrors).  Coordinator and primaries are kept on self so tests
        # can tweak them individually; the mirrors only live in the array.
        self.coordinator = Segment.initFromString(
            "1|-1|p|p|s|u|cdw|cdw|5432|/data/coordinator")
        self.primary0 = Segment.initFromString(
            "2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary0")
        self.primary1 = Segment.initFromString(
            "3|1|p|p|s|u|sdw2|sdw2|40001|/data/primary1")
        mirror0 = Segment.initFromString(
            "4|0|m|m|s|u|sdw2|sdw2|50000|/data/mirror0")
        mirror1 = Segment.initFromString(
            "5|1|m|m|s|u|sdw1|sdw1|50001|/data/mirror1")
        return GpArray([self.coordinator, self.primary0, self.primary1, mirror0, mirror1])
    def test_checkForPortAndDirectoryConflicts__given_the_same_host_checks_ports_differ(self):
        # Two segments on the same host with the same port must be rejected.
        self.coordinator.hostname = "samehost"
        self.primary.hostname = "samehost"
        self.coordinator.port = 1111
        self.primary.port = 1111
        gpArray = GpArray([self.coordinator, self.primary])
        with self.assertRaisesRegex(Exception, r"Segment dbid's 2 and 1 on host samehost cannot have the same port 1111"):
            self.buildMirrorSegs.checkForPortAndDirectoryConflicts(gpArray)
    def test_checkForPortAndDirectoryConflicts__given_the_same_host_checks_data_directories_differ(self):
        # Two segments on the same host with the same datadir must be rejected.
        self.coordinator.hostname = "samehost"
        self.primary.hostname = "samehost"
        self.coordinator.datadir = "/data"
        self.primary.datadir = "/data"
        gpArray = GpArray([self.coordinator, self.primary])
        with self.assertRaisesRegex(Exception, r"Segment dbid's 2 and 1 on host samehost cannot have the same data directory '/data'"):
            self.buildMirrorSegs.checkForPortAndDirectoryConflicts(gpArray)
class SegmentProgressTestCase(GpTestCase):
    """
    Test case for GpMirrorListToBuild._join_and_show_segment_progress().

    The worker pool is mocked so each test scripts the join() outcomes
    (False = still running, True = done) and the per-command stdout/stderr,
    then asserts the exact text written to the output stream.
    """
    def setUp(self):
        self.pool = Mock(spec=base.WorkerPool)
        self.buildMirrorSegs = GpMirrorListToBuild(
            toBuild=[],
            pool=self.pool,
            quiet=True,
            parallelDegree=0,
            logger=Mock(spec=logging.Logger)
        )
    def test_command_output_is_displayed_once_after_worker_pool_completes(self):
        # join() returns True immediately: each command's stdout is printed
        # exactly once, prefixed with "host (dbid N): ".
        cmd = Mock(spec=base.Command)
        cmd.remoteHost = 'localhost'
        cmd.dbid = 2
        cmd.get_results.return_value.stdout = "string 1\n"
        cmd2 = Mock(spec=base.Command)
        cmd2.remoteHost = 'host2'
        cmd2.dbid = 4
        cmd2.get_results.return_value.stdout = "string 2\n"
        outfile = io.StringIO()
        self.pool.join.return_value = True
        self.buildMirrorSegs._join_and_show_segment_progress([cmd, cmd2], outfile=outfile)
        results = outfile.getvalue()
        self.assertEqual(results, (
            'localhost (dbid 2): string 1\n'
            'host2 (dbid 4): string 2\n'
        ))
    def test_command_output_is_displayed_once_for_every_blocked_join(self):
        # join() blocks once (False) before completing (True): progress is
        # re-polled and printed once per join attempt.
        cmd = Mock(spec=base.Command)
        cmd.remoteHost = 'localhost'
        cmd.dbid = 2
        cmd.get_results.side_effect = [Mock(stdout="string 1"), Mock(stdout="string 2")]
        outfile = io.StringIO()
        self.pool.join.side_effect = [False, True]
        self.buildMirrorSegs._join_and_show_segment_progress([cmd], outfile=outfile)
        results = outfile.getvalue()
        self.assertEqual(results, (
            'localhost (dbid 2): string 1\n'
            'localhost (dbid 2): string 2\n'
        ))
    def test_inplace_display_uses_ansi_escapes_to_overwrite_previous_output(self):
        # inplace=True: each line is terminated with ESC[K (clear to EOL) and
        # the cursor is moved back up (ESC[2A) so the next poll overwrites
        # the previous block of output in the terminal.
        cmd = Mock(spec=base.Command)
        cmd.remoteHost = 'localhost'
        cmd.dbid = 2
        cmd.get_results.side_effect = [Mock(stdout="string 1"), Mock(stdout="string 2")]
        cmd2 = Mock(spec=base.Command)
        cmd2.remoteHost = 'host2'
        cmd2.dbid = 4
        cmd2.get_results.side_effect = [Mock(stdout="string 3"), Mock(stdout="string 4")]
        outfile = io.StringIO()
        self.pool.join.side_effect = [False, True]
        self.buildMirrorSegs._join_and_show_segment_progress([cmd, cmd2], inplace=True, outfile=outfile)
        results = outfile.getvalue()
        self.assertEqual(results, (
            'localhost (dbid 2): string 1\x1b[K\n'
            'host2 (dbid 4): string 3\x1b[K\n'
            '\x1b[2A'
            'localhost (dbid 2): string 2\x1b[K\n'
            'host2 (dbid 4): string 4\x1b[K\n'
        ))
    def test_errors_during_command_execution_are_displayed(self):
        # When a command raises ExecutionError, its stderr (possibly empty)
        # is shown in place of stdout rather than aborting the display loop.
        cmd = Mock(spec=base.Command)
        cmd.remoteHost = 'localhost'
        cmd.dbid = 2
        cmd.get_results.return_value.stderr = "some error\n"
        cmd.run.side_effect = base.ExecutionError("Some exception", cmd)
        cmd2 = Mock(spec=base.Command)
        cmd2.remoteHost = 'host2'
        cmd2.dbid = 4
        cmd2.get_results.return_value.stderr = ''
        cmd2.run.side_effect = base.ExecutionError("Some exception", cmd2)
        outfile = io.StringIO()
        self.pool.join.return_value = True
        self.buildMirrorSegs._join_and_show_segment_progress([cmd, cmd2], outfile=outfile)
        results = outfile.getvalue()
        self.assertEqual(results, (
            'localhost (dbid 2): some error\n'
            'host2 (dbid 4): \n'
        ))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    run_tests()
| 14,350 |
2,338 | <reponame>jhh67/chapel
//===-- CSKYMCAsmInfo.h - CSKY Asm Info ------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the CSKYMCAsmInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_CSKY_MCTARGETDESC_CSKYMCASMINFO_H
#define LLVM_LIB_TARGET_CSKY_MCTARGETDESC_CSKYMCASMINFO_H
#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class Triple;
/// Assembly-syntax/ELF configuration for the C-SKY target, layered on the
/// common ELF asm-info defaults.
class CSKYMCAsmInfo : public MCAsmInfoELF {
  // Out-of-line virtual method; the usual LLVM idiom to anchor the vtable
  // to a single translation unit.
  void anchor() override;

public:
  explicit CSKYMCAsmInfo(const Triple &TargetTriple);
};
} // namespace llvm
#endif // LLVM_LIB_TARGET_CSKY_MCTARGETDESC_CSKYMCASMINFO_H
| 315 |
1,062 | /**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.sources;
import com.google.mr4c.config.diff.DiffConfig;
import com.google.mr4c.config.execution.DatasetConfig;
import com.google.mr4c.content.ContentFactories;
import com.google.mr4c.serialize.ConfigSerializer;
import com.google.mr4c.serialize.SerializerFactories;
import com.google.mr4c.serialize.param.ParameterizedConfigSerializer;
import com.google.mr4c.sources.DiffSource.DiffOutput;
import com.google.mr4c.util.MR4CLogging;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.text.StrSubstitutor;
import org.slf4j.Logger;
/**
 * DiffSource backed by a {@link DiffConfig}, either passed in directly or
 * lazily loaded (JSON) from a config file URI.  Dataset sources for the
 * expected/actual inputs and each diff output are created on demand and
 * cached.  Lazy accessors are synchronized so the cached sources are
 * created at most once.
 */
public class ConfiguredDiffSource implements DiffSource {

	private URI m_confFile;
	private DiffConfig m_diffConfig;
	private DatasetSource m_expected;
	private DatasetSource m_actual;
	// Cache of per-output diff dataset sources, keyed by DiffOutput
	private Map<DiffOutput,DatasetSource> m_outputSrcs = new HashMap<DiffOutput,DatasetSource>();

	protected static final Logger s_log = MR4CLogging.getLogger(ConfiguredDiffSource.class);

	/** Config will be read from the file; call {@link #loadConfig()} before use. */
	public ConfiguredDiffSource(URI confFile) {
		m_confFile = confFile;
	}

	/** Config supplied directly; no file read is needed. */
	public ConfiguredDiffSource(DiffConfig diffConfig) {
		m_diffConfig = diffConfig;
	}

	/**
	 * Reads and deserializes the diff config from the URI given at
	 * construction.  Only needed for the URI-based constructor.
	 */
	public void loadConfig() throws IOException {
		s_log.info("Reading diff config from [{}]", m_confFile);
		ConfigSerializer ser = SerializerFactories.getSerializerFactory("application/json").createConfigSerializer(); // assume json config for now
		ser = new ParameterizedConfigSerializer(ser);
		Reader reader = ContentFactories.readContentAsReader(m_confFile);
		try {
			m_diffConfig = ser.deserializeDiffConfig(reader);
		} finally {
			reader.close();
		}
	}

	/** Lazily creates and caches the source for the "expected" dataset. */
	public synchronized DatasetSource getExpectedDatasetSource() throws IOException {
		if ( m_expected==null ) {
			DatasetConfig datasetConfig = m_diffConfig.getExpectedDataset();
			if ( datasetConfig==null ) {
				throw new IllegalArgumentException("No source config for expected dataset");
			}
			m_expected = DatasetSources.getDatasetSource(datasetConfig);
		}
		return m_expected;
	}

	/** Lazily creates and caches the source for the "actual" dataset. */
	public synchronized DatasetSource getActualDatasetSource() throws IOException {
		if ( m_actual==null ) {
			DatasetConfig datasetConfig = m_diffConfig.getActualDataset();
			if ( datasetConfig==null ) {
				throw new IllegalArgumentException("No source config for actual dataset");
			}
			m_actual = DatasetSources.getDatasetSource(datasetConfig);
		}
		return m_actual;
	}

	/**
	 * Lazily creates and caches a dataset source for one diff output; the
	 * shared diff dataset config is specialized per output via parameter
	 * substitution (see {@link #createOutputSource}).
	 */
	public synchronized DatasetSource getOutputDatasetSource(DiffOutput output) throws IOException {
		DatasetSource src = m_outputSrcs.get(output);
		if ( src==null ) {
			DatasetConfig datasetConfig = m_diffConfig.getDiffDataset();
			if ( datasetConfig==null ) {
				throw new IllegalArgumentException("No source config for diff datasets");
			}
			src = createOutputSource(datasetConfig, output);
			m_outputSrcs.put(output, src);
		}
		return src;
	}

	// Round-trips the dataset config through JSON, substituting the diff
	// parameter placeholder "!(param)" with the output name, then builds a
	// source from the specialized config.
	private DatasetSource createOutputSource(DatasetConfig config, DiffOutput output) throws IOException {
		Map<String,String> props = new HashMap<String,String>();
		props.put(m_diffConfig.getDiffParam(), output.toString());
		ConfigSerializer ser = SerializerFactories.getSerializerFactory("application/json").createConfigSerializer(); // assume json config for now
		ser = new ParameterizedConfigSerializer(ser);
		StringWriter sw = new StringWriter();
		ser.serializeDatasetConfig(config, sw);
		String json = StrSubstitutor.replace(sw.toString(), props, "!(", ")");
		Reader reader = new StringReader(json);
		config = ser.deserializeDatasetConfig(reader);
		return DatasetSources.getDatasetSource(config);
	}
}
| 1,385 |
6,304 | /*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "experimental/graphite/include/Recorder.h"
#include "experimental/graphite/include/Context.h"
#include "experimental/graphite/include/Recording.h"
#include "experimental/graphite/src/Caps.h"
#include "experimental/graphite/src/CommandBuffer.h"
#include "experimental/graphite/src/ContextPriv.h"
#include "experimental/graphite/src/DrawBufferManager.h"
#include "experimental/graphite/src/Gpu.h"
#include "experimental/graphite/src/ResourceProvider.h"
#include "experimental/graphite/src/UniformCache.h"
namespace skgpu {
// Takes shared ownership of the context; the uniform cache and draw-buffer
// manager are owned by this recorder.  The buffer manager is configured from
// the context's GPU caps (uniform-buffer alignment) and resource provider.
Recorder::Recorder(sk_sp<Context> context)
        : fContext(std::move(context))
        , fUniformCache(new UniformCache)
        , fDrawBufferManager(new DrawBufferManager(
                fContext->priv().gpu()->resourceProvider(),
                fContext->priv().gpu()->caps()->requiredUniformBufferAlignment())) {
}

Recorder::~Recorder() {}
// Borrowed pointer to the context this recorder was created with.
Context* Recorder::context() const {
    Context* rawContext = fContext.get();
    return rawContext;
}

// Borrowed pointer to the recorder-owned uniform cache.
UniformCache* Recorder::uniformCache() {
    UniformCache* cache = fUniformCache.get();
    return cache;
}

// Borrowed pointer to the recorder-owned draw-buffer manager.
DrawBufferManager* Recorder::drawBufferManager() {
    DrawBufferManager* manager = fDrawBufferManager.get();
    return manager;
}
// Appends a task to the recorder's task graph, taking shared ownership.
void Recorder::add(sk_sp<Task> task) {
    fGraph.add(std::move(task));
}
// Flushes the accumulated task graph into a fresh command buffer, transfers
// any pending draw-buffer data, resets the graph for reuse, and returns the
// result wrapped in a Recording.
std::unique_ptr<Recording> Recorder::snap() {
    auto gpu = fContext->priv().gpu();
    auto commandBuffer = gpu->resourceProvider()->createCommandBuffer();

    // Order matters: commands are recorded before buffered draw data is
    // transferred onto the same command buffer.
    fGraph.addCommands(gpu->resourceProvider(), commandBuffer.get());
    fDrawBufferManager->transferToCommandBuffer(commandBuffer.get());

    fGraph.reset();
    return std::unique_ptr<Recording>(new Recording(std::move(commandBuffer)));
}
} // namespace skgpu
| 613 |
1,448 | /*===================================================================*/
/* */
/* Mapper 11 (Color Dreams) */
/* */
/*===================================================================*/
/*-------------------------------------------------------------------*/
/* Initialize Mapper 11 */
/*-------------------------------------------------------------------*/
void Map11_Init()
{
  int nPage;

  /* Wire up every mapper callback; mapper 11 only needs a custom CPU
     write handler, everything else falls back to the mapper-0 stubs. */

  /* Initialize Mapper */
  MapperInit = Map11_Init;

  /* Write to Mapper */
  MapperWrite = Map11_Write;

  /* Write to SRAM */
  MapperSram = Map0_Sram;

  /* Write to APU */
  MapperApu = Map0_Apu;

  /* Read from APU */
  MapperReadApu = Map0_ReadApu;

  /* Callback at VSync */
  MapperVSync = Map0_VSync;

  /* Callback at HSync */
  MapperHSync = Map0_HSync;

  /* Callback at PPU */
  MapperPPU = Map0_PPU;

  /* Callback at Rendering Screen ( 1:BG, 0:Sprite ) */
  MapperRenderScreen = Map0_RenderScreen;

  /* Set SRAM Banks */
  SRAMBANK = SRAM;

  /* Set ROM Banks: power-on state maps the first four 8KB PRG pages */
  ROMBANK0 = ROMPAGE( 0 );
  ROMBANK1 = ROMPAGE( 1 );
  ROMBANK2 = ROMPAGE( 2 );
  ROMBANK3 = ROMPAGE( 3 );

  /* Set PPU Banks: first eight 1KB CHR pages, if the cart has CHR-ROM */
  if ( NesHeader.byVRomSize > 0 )
  {
    for ( nPage = 0; nPage < 8; ++nPage )
      PPUBANK[ nPage ] = VROMPAGE( nPage );
    InfoNES_SetupChr();
  }

  /* Name Table Mirroring */
  InfoNES_Mirroring( 1 );

  /* Set up wiring of the interrupt pin */
  K6502_Set_Int_Wiring( 1, 1 );
}
/*-------------------------------------------------------------------*/
/* Mapper 11 Write Function */
/*-------------------------------------------------------------------*/
/*
 * Color Dreams bank-select register: bit 0 selects a 32KB PRG bank
 * (4 x 8KB pages), bits 4-6 select an 8KB CHR bank (8 x 1KB pages).
 */
void Map11_Write( WORD wAddr, BYTE byData )
{
  int nPage;
  BYTE byPrgBank = ( byData & 0x01 ) << 2;
  BYTE byChrBank = ( ( byData & 0x70 ) >> 4 ) << 3;
  int nRomPages  = NesHeader.byRomSize << 1;
  int nVromPages = NesHeader.byVRomSize << 3;

  /* Map the four consecutive 8KB PRG pages, wrapping at the cart size */
  ROMBANK0 = ROMPAGE( ( byPrgBank + 0 ) % nRomPages );
  ROMBANK1 = ROMPAGE( ( byPrgBank + 1 ) % nRomPages );
  ROMBANK2 = ROMPAGE( ( byPrgBank + 2 ) % nRomPages );
  ROMBANK3 = ROMPAGE( ( byPrgBank + 3 ) % nRomPages );

  /* Map the eight consecutive 1KB CHR pages, wrapping at the cart size */
  for ( nPage = 0; nPage < 8; ++nPage )
  {
    PPUBANK[ nPage ] = VROMPAGE( ( byChrBank + nPage ) % nVromPages );
  }
  InfoNES_SetupChr();
}
| 1,264 |
12,252 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.client.admin.cli.config;
import org.keycloak.util.JsonSerialization;
import java.io.IOException;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
*/
public class RealmConfigData {

    private String serverUrl;
    private String realm;
    private String clientId;
    private String token;
    private String refreshToken;
    private String signingToken;
    private String secret;
    private String grantTypeForAuthentication;

    // Expiry instants (epoch millis presumably — confirm against callers)
    private Long expiresAt;
    private Long refreshExpiresAt;
    private Long sigExpiresAt;

    public String serverUrl() {
        return serverUrl;
    }

    public void serverUrl(String serverUrl) {
        this.serverUrl = serverUrl;
    }

    public String realm() {
        return realm;
    }

    public void realm(String realm) {
        this.realm = realm;
    }

    public String getClientId() {
        return clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public String getToken() {
        return token;
    }

    public void setToken(String token) {
        this.token = token;
    }

    public String getRefreshToken() {
        return refreshToken;
    }

    public void setRefreshToken(String refreshToken) {
        this.refreshToken = refreshToken;
    }

    public String getSigningToken() {
        return signingToken;
    }

    public void setSigningToken(String signingToken) {
        this.signingToken = signingToken;
    }

    public String getSecret() {
        return secret;
    }

    public void setSecret(String secret) {
        this.secret = secret;
    }

    public String getGrantTypeForAuthentication() {
        return grantTypeForAuthentication;
    }

    public void setGrantTypeForAuthentication(String grantTypeForAuthentication) {
        this.grantTypeForAuthentication = grantTypeForAuthentication;
    }

    public Long getExpiresAt() {
        return expiresAt;
    }

    public void setExpiresAt(Long expiresAt) {
        this.expiresAt = expiresAt;
    }

    public Long getRefreshExpiresAt() {
        return refreshExpiresAt;
    }

    public void setRefreshExpiresAt(Long refreshExpiresAt) {
        this.refreshExpiresAt = refreshExpiresAt;
    }

    public Long getSigExpiresAt() {
        return sigExpiresAt;
    }

    public void setSigExpiresAt(Long sigExpiresAt) {
        this.sigExpiresAt = sigExpiresAt;
    }

    /**
     * Copies every field of {@code source} into this instance.
     * Keep this list in sync when adding fields — {@link #deepcopy()}
     * delegates here.
     */
    public void merge(RealmConfigData source) {
        serverUrl = source.serverUrl;
        realm = source.realm;
        clientId = source.clientId;
        token = source.token;
        refreshToken = source.refreshToken;
        signingToken = source.signingToken;
        secret = source.secret;
        grantTypeForAuthentication = source.grantTypeForAuthentication;
        expiresAt = source.expiresAt;
        refreshExpiresAt = source.refreshExpiresAt;
        sigExpiresAt = source.sigExpiresAt;
    }

    /** Copies only the token fields and their expiry times (token refresh). */
    public void mergeRefreshTokens(RealmConfigData source) {
        token = source.token;
        refreshToken = source.refreshToken;
        expiresAt = source.expiresAt;
        refreshExpiresAt = source.refreshExpiresAt;
    }

    /** Pretty-printed JSON view; falls back to Object.toString on failure. */
    @Override
    public String toString() {
        try {
            return JsonSerialization.writeValueAsPrettyString(this);
        } catch (IOException e) {
            return super.toString() + " - Error: " + e.toString();
        }
    }

    /**
     * Returns a field-by-field copy of this instance.
     * Fix: the original duplicated the merge() field list verbatim; the two
     * copy paths could silently drift apart when fields are added.  Delegate
     * to merge() so there is a single copy implementation.
     */
    public RealmConfigData deepcopy() {
        RealmConfigData data = new RealmConfigData();
        data.merge(this);
        return data;
    }
}
| 1,742 |
872 | <reponame>krishna13052001/LeetCode
#!/usr/bin/python3
"""
Return the root node of a binary search tree that matches the given preorder
traversal.
(Recall that a binary search tree is a binary tree where for every node, any
descendant of node.left has a value < node.val, and any descendant of node.right
has a value > node.val. Also recall that a preorder traversal displays the
value of the node first, then traverses node.left, then traverses node.right.)
Example 1:
Input: [8,5,1,7,10,12]
Output: [8,5,10,1,7,null,12]
Note:
1 <= preorder.length <= 100
The values of preorder are distinct.
"""
from typing import List
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (values < val)
        self.right = None  # right child (values > val)


class Solution:
    def bstFromPreorder2(self, preorder: List[int]) -> TreeNode:
        """
        Build the BST in O(n) with a monotonically decreasing stack.

        The stack holds the rightmost path of the tree built so far (top =
        deepest/smallest).  A value smaller than the top becomes the top's
        left child; otherwise we pop until the stack's second element would
        exceed the value and attach it as the right child of the last node
        that is still smaller.
        """
        root = TreeNode(preorder[0])
        stk = [root]
        for a in preorder[1:]:
            node = TreeNode(a)
            if a < stk[-1].val:  # len(stk) always >= 1
                stk[-1].left = node
            else:
                while len(stk) >= 2 and stk[-2].val < a:
                    stk.pop()
                stk[-1].right = node
                stk.pop()
            stk.append(node)
        return root

    def bstFromPreorder(self, preorder: List[int]) -> TreeNode:
        """
        Same stack algorithm, attaching each new node as the right child of
        the largest popped value smaller than it.

        Bug fix vs. the original version: (1) left children were never pushed
        onto the stack, so consecutive decreasing values kept overwriting the
        same parent's left child; (2) the pop loop assigned .right on *every*
        popped node instead of only the last one, linking the new node into
        the tree multiple times.
        """
        root = TreeNode(preorder[0])
        stk = [root]
        for a in preorder[1:]:
            node = TreeNode(a)
            if a < stk[-1].val:
                stk[-1].left = node
            else:
                # The top is < a (values are distinct), so at least one pop
                # happens; `parent` ends as the largest popped value < a.
                parent = stk.pop()
                while stk and stk[-1].val < a:
                    parent = stk.pop()
                parent.right = node
            stk.append(node)
        return root
| 1,081 |
4,126 |
#include "Globals.h"
#include "Region.h"
// Default-constructs the region; chunks start out invalid until loaded.
Region::Region()
{
}
/** Returns the chunk at region-relative chunk coords (a region is a
32 x 32 chunk grid, row-major by Z). */
Chunk & Region::getRelChunk(int a_RelChunkX, int a_RelChunkZ)
{
	ASSERT(a_RelChunkX >= 0);
	ASSERT(a_RelChunkZ >= 0);
	ASSERT(a_RelChunkX < 32);
	ASSERT(a_RelChunkZ < 32);

	int idx = a_RelChunkZ * 32 + a_RelChunkX;
	return m_Chunks[idx];
}
int Region::getRelBiome(int a_RelBlockX, int a_RelBlockZ)
{
ASSERT(a_RelBlockX >= 0);
ASSERT(a_RelBlockZ >= 0);
ASSERT(a_RelBlockX < 512);
ASSERT(a_RelBlockZ < 512);
int chunkX = a_RelBlockX / 16;
int chunkZ = a_RelBlockZ / 16;
Chunk & chunk = m_Chunks[chunkX + 32 * chunkZ];
if (chunk.isValid())
{
return chunk.getBiome(a_RelBlockX - 16 * chunkX, a_RelBlockZ - 16 * chunkZ);
}
else
{
return biInvalidBiome;
}
}
/** Converts absolute block coords to the coords of the region containing
them (floor division by 512, correct for negative coords). */
void Region::blockToRegion(int a_BlockX, int a_BlockZ, int & a_RegionX, int & a_RegionZ)
{
	// Fix: use double instead of float — float has only 24 bits of mantissa,
	// so block coords beyond ~16.7M rounded before the division, yielding the
	// wrong region.  double represents every int exactly.
	a_RegionX = static_cast<int>(std::floor(static_cast<double>(a_BlockX) / 512));
	a_RegionZ = static_cast<int>(std::floor(static_cast<double>(a_BlockZ) / 512));
}
/** Converts absolute chunk coords to the coords of the region containing
them (floor division by 32, correct for negative coords). */
void Region::chunkToRegion(int a_ChunkX, int a_ChunkZ, int & a_RegionX, int & a_RegionZ)
{
	// Fix: double instead of float, for the same precision reason as
	// blockToRegion() — float cannot represent all int chunk coords exactly.
	a_RegionX = static_cast<int>(std::floor(static_cast<double>(a_ChunkX) / 32));
	a_RegionZ = static_cast<int>(std::floor(static_cast<double>(a_ChunkZ) / 32));
}
| 559 |
764 | <filename>src/stable.h
#include <QApplication>
#include <QByteArray>
#include <QDebug>
#include <QEvent>
#include <QFile>
#include <QIODevice>
#include <QList>
#include <QNetworkCookie>
#include <QNetworkProxy>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QObject>
#include <QResource>
#include <QSet>
#include <QString>
#include <QStringList>
#include <QTcpServer>
#include <QTcpSocket>
#include <QTimer>
#include <QUuid>
#include <QVariant>
#include <QVariantList>
#include <QWebElement>
#include <QWebSettings>
#include <QtNetwork/QNetworkAccessManager>
#include <QtNetwork/QNetworkCookie>
#include <QtNetwork/QNetworkCookieJar>
#include <QtNetwork/QNetworkReply>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork>
#if QT_VERSION >= QT_VERSION_CHECK(5, 0, 0)
#include <QtWebKitWidgets>
#else
#include <QtWebKit>
#endif
#include <cmath>
#include <fstream>
#include <iostream>
#include <sstream>
| 345 |
435 | <reponame>amaajemyfren/data
{
"copyright_text": "Standard YouTube License",
"description": "Project Jupyter provides building blocks for interactive and exploratory computing. These building blocks make science and data science reproducible across over 40 programming language (Python, Julia, R, etc.). Central to the project is the Jupyter Notebook, a web-based interactive computing platform that allows users to author data- and code-driven narratives - computational narratives - that combine live code, equations, narrative text, visualizations, interactive dashboards and other media.\n\nWhile the Jupyter Notebook has proved to be an incredibly productive way of working with code and data interactively, it is helpful to decompose notebooks into more primitive building blocks: kernels for code execution, input areas for typing code, markdown cells for composing narrative content, output areas for showing results, terminals, etc. The fundamental idea of JupyterLab is to offer a user interface that allows users to assemble these building blocks in different ways to support interactive workflows that include, but go far beyond, Jupyter Notebooks.\n\nJupyterLab accomplishes this by providing a modular and extensible user interface that exposes these building blocks in the context of a powerful work space. Users can arrange multiple notebooks, text editors, terminals, output areas, etc. on a single page with multiple panels, tabs, splitters, and collapsible sidebars with a file browser, command palette and integrated help system. The codebase and UI of JupyterLab is based on a flexible plugin system that makes it easy to extend with new components.\n\nIn this talk, we will demonstrate the JupyterLab interface, its codebase, and describe how it fits within the overall roadmap of the project.",
"duration": 1770,
"id": 5341,
"language": "eng",
"recorded": "2016-07-15",
"related_urls": [],
"slug": "jupyterlab-building-blocks-for-interactive-computing-scipy-2016-brian-granger",
"speakers": [
"<NAME>",
"<NAME>"
],
"tags": [
"SciPy 2016",
"jupyter",
"jupyterlab",
"jupyter notebook"
],
"thumbnail_url": "https://i.ytimg.com/vi/Ejh0ftSjk6g/maxresdefault.jpg",
"title": "JupyterLab: Building Blocks for Interactive Computing",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=Ejh0ftSjk6g"
}
]
}
| 671 |
371 | package com.risk.riskmanage.datamanage.model;
import java.io.Serializable;
import java.util.Date;
import com.risk.riskmanage.common.model.BasePage;
public class FieldTypeUser extends BasePage implements Serializable {

    private static final long serialVersionUID = 1L;

    /**
     * Primary key.
     * */
    private Integer id;

    /**
     * Field-type id (primary key of the field-type table).
     * */
    private Integer fieldTypeId;

    /**
     * Id of the organization this field type belongs to.
     * */
    private Long organId;

    /**
     * Id of the engine this field type belongs to (engine table primary key).
     * */
    private Integer engineId;

    /**
     * Id of the user who created or modified this field.
     * */
    private Long userId;

    /**
     * Creation timestamp.
     * */
    private Date created;

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public Integer getFieldTypeId() {
        return fieldTypeId;
    }

    public void setFieldTypeId(Integer fieldTypeId) {
        this.fieldTypeId = fieldTypeId;
    }

    public Long getOrganId() {
        return organId;
    }

    public void setOrganId(Long organId) {
        this.organId = organId;
    }

    public Integer getEngineId() {
        return engineId;
    }

    public void setEngineId(Integer engineId) {
        this.engineId = engineId;
    }

    public Long getUserId() {
        return userId;
    }

    public void setUserId(Long userId) {
        this.userId = userId;
    }

    public Date getCreated() {
        return created;
    }

    public void setCreated(Date created) {
        this.created = created;
    }
}
369 | <filename>inc/osvr/Common/RawSenderType.h
/** @file
@brief Header
@date 2015
@author
Sensics, Inc.
<http://sensics.com/osvr>
*/
// Copyright 2015 Sensics, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef INCLUDED_RawSenderType_h_GUID_DF724F49_FE2F_4EB4_8CFC_8C16424400EC
#define INCLUDED_RawSenderType_h_GUID_DF724F49_FE2F_4EB4_8CFC_8C16424400EC
// Internal Includes
#include <osvr/Common/Export.h>
#include <osvr/Util/StdInt.h>
// Library/third-party includes
#include <boost/optional.hpp>
// Standard includes
// - none
namespace osvr {
namespace common {
/// @brief Type-safe wrapper with built-in default for a VRPN "sender type"
/// integer.
///
/// @todo add test code to ensure that the default value matches
/// vrpn_ANY_SENDER as found in vrpn_Connection.h
    class RawSenderType {
      public:
        typedef int32_t UnderlyingSenderType;

        /// @brief Default constructor - "any sender"
        RawSenderType();

        /// @brief Constructor from a registered sender
        OSVR_COMMON_EXPORT explicit RawSenderType(UnderlyingSenderType sender);

        /// @brief Gets the registered sender value or default
        UnderlyingSenderType get() const;

        /// @brief Gets the registered sender value, if specified, otherwise
        /// returns the provided value.
        UnderlyingSenderType getOr(UnderlyingSenderType valueIfNotSet) const;

      private:
        /// Empty when default-constructed, i.e. when "any sender" applies.
        boost::optional<UnderlyingSenderType> m_sender;
    };
} // namespace common
} // namespace osvr
#endif // INCLUDED_RawSenderType_h_GUID_DF724F49_FE2F_4EB4_8CFC_8C16424400EC
| 767 |
988 | <filename>src/isql/OptionsBase.h
/*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by <NAME> on 7-Oct-2007
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2007 <NAME>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef FB_OPTIONSBASE_H
#define FB_OPTIONSBASE_H
#include <stdio.h>
// Small lookup helper mapping command keywords (with abbreviations) to
// integer command ids; used by isql's command parsers.
class OptionsBase
{
public:
	struct optionsMap
	{
		int kw;             // numeric command id returned by getCommand()
		const char* text;   // full keyword text
		size_t abbrlen;     // NOTE(review): presumably the minimum prefix
		                    // length accepted as an abbreviation (0 = exact
		                    // match only) — confirm against getCommand()
	};
	OptionsBase(const optionsMap* inmap, size_t insize, int wrongval);
	// Returns the id for cmd, or the "wrong" sentinel if unrecognized.
	int getCommand(const char* cmd) const;
	// Prints the known commands to the given stream.
	void showCommands(FILE* out) const;
private:
	const optionsMap* const m_options;  // table, not owned
	const size_t m_size;                // number of entries in m_options
	int m_wrong;                        // sentinel returned for unknown input
};
// Stores the (externally owned) option table, its size, and the sentinel
// value to report for unrecognized commands.
inline OptionsBase::OptionsBase(const optionsMap* inmap, size_t insize, int wrongval)
	: m_options(inmap), m_size(insize), m_wrong(wrongval)
{
}
#endif // FB_OPTIONSBASE_H
| 470 |
14,668 | <reponame>zealoussnow/chromium<filename>google_apis/gcm/engine/connection_handler.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "google_apis/gcm/engine/connection_handler.h"
namespace gcm {
// Trivial constructor/destructor: the base class holds no state of its own.
ConnectionHandler::ConnectionHandler() {
}

ConnectionHandler::~ConnectionHandler() {
}
} // namespace gcm
| 131 |
303 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import math
from typing import Optional, List
import torch
import torch.nn as nn
from fastfold.model.nn.primitives import Linear, LayerNorm, Attention
from fastfold.model.nn.dropout import (
DropoutRowwise,
DropoutColumnwise,
)
from fastfold.model.nn.pair_transition import PairTransition
from fastfold.model.nn.triangular_attention import (
TriangleAttentionStartingNode,
TriangleAttentionEndingNode,
)
from fastfold.model.nn.triangular_multiplicative_update import (
TriangleMultiplicationOutgoing,
TriangleMultiplicationIncoming,
)
from fastfold.utils.checkpointing import checkpoint_blocks
from fastfold.utils.tensor_utils import (
chunk_layer,
permute_final_dims,
flatten_final_dims,
)
class TemplatePointwiseAttention(nn.Module):
    """
    Implements Algorithm 17.

    Each pair-embedding position attends (as the query) over the template
    dimension (keys/values), yielding a pair-embedding update.
    """
    def __init__(self, c_t, c_z, c_hidden, no_heads, inf, **kwargs):
        """
        Args:
            c_t:
                Template embedding channel dimension
            c_z:
                Pair embedding channel dimension
            c_hidden:
                Hidden channel dimension
            no_heads:
                Number of attention heads
            inf:
                Large constant used to mask out absent templates
        """
        super(TemplatePointwiseAttention, self).__init__()
        self.c_t = c_t
        self.c_z = c_z
        self.c_hidden = c_hidden
        self.no_heads = no_heads
        self.inf = inf
        # gating=False: plain (ungated) attention over the template dim
        self.mha = Attention(
            self.c_z,
            self.c_t,
            self.c_t,
            self.c_hidden,
            self.no_heads,
            gating=False,
        )
    def _chunk(self,
        z: torch.Tensor,
        t: torch.Tensor,
        biases: List[torch.Tensor],
        chunk_size: int,
    ) -> torch.Tensor:
        # Runs the attention in chunks over the leading (batch-like) dims to
        # bound peak memory; chunk_layer handles splitting and restitching.
        mha_inputs = {
            "q_x": z,
            "kv_x": t,
            "biases": biases,
        }
        return chunk_layer(
            self.mha,
            mha_inputs,
            chunk_size=chunk_size,
            no_batch_dims=len(z.shape[:-2]),
        )
    def forward(self,
        t: torch.Tensor,
        z: torch.Tensor,
        template_mask: Optional[torch.Tensor] = None,
        chunk_size: Optional[int] = None
    ) -> torch.Tensor:
        """
        Args:
            t:
                [*, N_templ, N_res, N_res, C_t] template embedding
            z:
                [*, N_res, N_res, C_t] pair embedding
            template_mask:
                [*, N_templ] template mask
            chunk_size:
                Optional chunk size for memory-bounded attention
        Returns:
            [*, N_res, N_res, C_z] pair embedding update
        """
        if template_mask is None:
            template_mask = t.new_ones(t.shape[:-3])
        # Masked-out templates (mask==0) get a -inf additive bias so they
        # receive zero attention weight.
        bias = self.inf * (template_mask[..., None, None, None, None, :] - 1)
        # [*, N_res, N_res, 1, C_z]
        z = z.unsqueeze(-2)
        # [*, N_res, N_res, N_temp, C_t]
        t = permute_final_dims(t, (1, 2, 0, 3))
        # [*, N_res, N_res, 1, C_z]
        biases = [bias]
        if chunk_size is not None:
            z = self._chunk(z, t, biases, chunk_size)
        else:
            z = self.mha(q_x=z, kv_x=t, biases=biases)
        # [*, N_res, N_res, C_z]
        z = z.squeeze(-2)
        return z
class TemplatePairStackBlock(nn.Module):
    """
    One block of the template pair stack: triangular attention, triangular
    multiplicative updates and a pair transition, applied to each template
    independently (the templates are unbound, updated, then re-concatenated).
    """
    def __init__(
        self,
        c_t: int,
        c_hidden_tri_att: int,
        c_hidden_tri_mul: int,
        no_heads: int,
        pair_transition_n: int,
        dropout_rate: float,
        inf: float,
        **kwargs,
    ):
        """
        Args:
            c_t: Template embedding channel dimension
            c_hidden_tri_att: Per-head hidden dimension for triangular attention
            c_hidden_tri_mul: Hidden dimension for triangular multiplication
            no_heads: Number of triangular-attention heads
            pair_transition_n: Expansion factor of the pair transition
            dropout_rate: Rate for the row-/column-wise dropout layers
            inf: Large value used for attention masking
        """
        super(TemplatePairStackBlock, self).__init__()
        self.c_t = c_t
        self.c_hidden_tri_att = c_hidden_tri_att
        self.c_hidden_tri_mul = c_hidden_tri_mul
        self.no_heads = no_heads
        self.pair_transition_n = pair_transition_n
        self.dropout_rate = dropout_rate
        self.inf = inf
        self.dropout_row = DropoutRowwise(self.dropout_rate)
        self.dropout_col = DropoutColumnwise(self.dropout_rate)
        self.tri_att_start = TriangleAttentionStartingNode(
            self.c_t,
            self.c_hidden_tri_att,
            self.no_heads,
            inf=inf,
        )
        self.tri_att_end = TriangleAttentionEndingNode(
            self.c_t,
            self.c_hidden_tri_att,
            self.no_heads,
            inf=inf,
        )
        self.tri_mul_out = TriangleMultiplicationOutgoing(
            self.c_t,
            self.c_hidden_tri_mul,
        )
        self.tri_mul_in = TriangleMultiplicationIncoming(
            self.c_t,
            self.c_hidden_tri_mul,
        )
        self.pair_transition = PairTransition(
            self.c_t,
            self.pair_transition_n,
        )
    def forward(self,
        z: torch.Tensor,
        mask: torch.Tensor,
        chunk_size: Optional[int] = None,
        _mask_trans: bool = True
    ):
        """
        Args:
            z: [*, N_templ, N_res, N_res, C_t] template embedding
            mask: [*, N_templ, N_res, N_res] mask
            chunk_size: optional sub-batch size for memory-efficient modules
            _mask_trans: whether to pass the mask to the pair transition
        """
        # Process each template separately (keeps the per-module batch small).
        single_templates = [
            t.unsqueeze(-4) for t in torch.unbind(z, dim=-4)
        ]
        single_templates_masks = [
            m.unsqueeze(-3) for m in torch.unbind(mask, dim=-3)
        ]
        for i in range(len(single_templates)):
            single = single_templates[i]
            single_mask = single_templates_masks[i]
            # Residual updates in the fixed Algorithm-16 order:
            # attention (start/end nodes), multiplication (out/in), transition.
            single = single + self.dropout_row(
                self.tri_att_start(
                    single,
                    chunk_size=chunk_size,
                    mask=single_mask
                )
            )
            single = single + self.dropout_col(
                self.tri_att_end(
                    single,
                    chunk_size=chunk_size,
                    mask=single_mask
                )
            )
            single = single + self.dropout_row(
                self.tri_mul_out(
                    single,
                    mask=single_mask
                )
            )
            single = single + self.dropout_row(
                self.tri_mul_in(
                    single,
                    mask=single_mask
                )
            )
            single = single + self.pair_transition(
                single,
                mask=single_mask if _mask_trans else None,
                chunk_size=chunk_size,
            )
            single_templates[i] = single
        # Reassemble the per-template results along the template dimension.
        z = torch.cat(single_templates, dim=-4)
        return z
class TemplatePairStack(nn.Module):
    """
    Implements Algorithm 16.
    """
    def __init__(
        self,
        c_t,
        c_hidden_tri_att,
        c_hidden_tri_mul,
        no_blocks,
        no_heads,
        pair_transition_n,
        dropout_rate,
        blocks_per_ckpt,
        inf=1e9,
        **kwargs,
    ):
        """
        Args:
            c_t:
                Template embedding channel dimension
            c_hidden_tri_att:
                Per-head hidden dimension for triangular attention
            c_hidden_tri_mul:
                Hidden dimension for triangular multiplication
            no_blocks:
                Number of blocks in the stack
            no_heads:
                Number of triangular-attention heads
            pair_transition_n:
                Scale of pair transition (Alg. 15) hidden dimension
            dropout_rate:
                Dropout rate used throughout the stack
            blocks_per_ckpt:
                Number of blocks per activation checkpoint. None disables
                activation checkpointing
            inf:
                Large value used for attention masking
        """
        super(TemplatePairStack, self).__init__()
        self.blocks_per_ckpt = blocks_per_ckpt
        self.blocks = nn.ModuleList()
        for _ in range(no_blocks):
            block = TemplatePairStackBlock(
                c_t=c_t,
                c_hidden_tri_att=c_hidden_tri_att,
                c_hidden_tri_mul=c_hidden_tri_mul,
                no_heads=no_heads,
                pair_transition_n=pair_transition_n,
                dropout_rate=dropout_rate,
                inf=inf,
            )
            self.blocks.append(block)
        self.layer_norm = LayerNorm(c_t)
    def forward(
        self,
        t: torch.Tensor,
        mask: torch.Tensor,
        chunk_size: Optional[int],
        _mask_trans: bool = True,
    ):
        """
        Args:
            t:
                [*, N_templ, N_res, N_res, C_t] template embedding
            mask:
                [*, N_templ, N_res, N_res] mask
        Returns:
            [*, N_templ, N_res, N_res, C_t] template embedding update
        """
        # A singleton template dimension in the mask is broadcast to match t.
        if(mask.shape[-3] == 1):
            expand_idx = list(mask.shape)
            expand_idx[-3] = t.shape[-4]
            mask = mask.expand(*expand_idx)
        # Run the blocks with (optional) activation checkpointing; disabled
        # during evaluation since no gradients are needed.
        t, = checkpoint_blocks(
            blocks=[
                partial(
                    b,
                    mask=mask,
                    chunk_size=chunk_size,
                    _mask_trans=_mask_trans,
                )
                for b in self.blocks
            ],
            args=(t,),
            blocks_per_ckpt=self.blocks_per_ckpt if self.training else None,
        )
        t = self.layer_norm(t)
        return t
| 5,097 |
1,040 | // Copyright (c) <NAME>
// Licensed under the MIT License
// ==============================================================
// ORBITER MODULE: ScriptVessel
// Part of the ORBITER SDK
//
// ScriptVessel.cpp
// Control module for ScriptVessel vessel class
//
// Notes:
// Implementation of a generic vessel class that acts as an
// interface to script-driven vessel definitions.
// This class creates an interpreter instance, loads a vessel class-
// specific script and implements the VESSEL2 callback functions
// by calling corresponding script functions.
// ==============================================================
#define STRICT
#define ORBITER_MODULE
extern "C" {
#include "Lua\lua.h"
}
#include "orbitersdk.h"
const int NCLBK = 4;
const int SETCLASSCAPS = 0;
const int POSTCREATION = 1;
const int PRESTEP = 2;
const int POSTSTEP = 3;
const char *CLBKNAME[NCLBK] = {
"setclasscaps", "postcreation", "prestep", "poststep"
};
// Calculate lift coefficient [Cl] as a function of aoa (angle of attack) over -Pi ... Pi
// Implemented here as a piecewise linear function
// Piecewise-linear lift coefficient Cl over the angle of attack (radians,
// -Pi..Pi). AOA/CL are the polyline nodes; SCL caches each segment's slope.
double LiftCoeff (double aoa)
{
	const int nlift = 9;
	static const double AOA[nlift] = {-180*RAD,-60*RAD,-30*RAD,-1*RAD,15*RAD,20*RAD,25*RAD,60*RAD,180*RAD};
	static const double CL[nlift]  = {       0,      0,   -0.1,     0,   0.2,  0.25,   0.2,     0,      0};
	static const double SCL[nlift] = {(CL[1]-CL[0])/(AOA[1]-AOA[0]), (CL[2]-CL[1])/(AOA[2]-AOA[1]),
		                              (CL[3]-CL[2])/(AOA[3]-AOA[2]), (CL[4]-CL[3])/(AOA[4]-AOA[3]),
		                              (CL[5]-CL[4])/(AOA[5]-AOA[4]), (CL[6]-CL[5])/(AOA[6]-AOA[5]),
		                              (CL[7]-CL[6])/(AOA[7]-AOA[6]), (CL[8]-CL[7])/(AOA[8]-AOA[7])};
	// Find the segment containing aoa, then interpolate linearly within it.
	int seg = 0;
	while (seg < nlift-1 && AOA[seg+1] < aoa) ++seg;
	return CL[seg] + (aoa - AOA[seg]) * SCL[seg];
}
// ==============================================================
// ScriptVessel class interface
// ==============================================================
// Generic vessel whose behaviour is delegated to a Lua vessel script: each
// overridden VESSEL2 callback forwards to a same-named clbk_* Lua function,
// provided the script defines it.
class ScriptVessel: public VESSEL2 {
public:
	ScriptVessel (OBJHANDLE hVessel, int flightmodel);
	~ScriptVessel ();
	void clbkSetClassCaps (FILEHANDLE cfg);
	void clbkPostCreation ();
	void clbkPreStep (double simt, double simdt, double mjd);
	void clbkPostStep (double simt, double simdt, double mjd);
protected:
	INTERPRETERHANDLE hInterp;  // interpreter instance running the vessel script
	lua_State *L;               // Lua state of that interpreter
	bool bclbk[NCLBK];          // which clbk_* functions the script defines
	char func[256];             // scratch buffer: "clbk_" + callback name
};
// ==============================================================
// Constructor/destructor
// ==============================================================
ScriptVessel::ScriptVessel (OBJHANDLE hVessel, int flightmodel): VESSEL2 (hVessel, flightmodel)
{
	// create the interpreter instance to run the vessel script
	hInterp = oapiCreateInterpreter();
	L = oapiGetLua (hInterp);
	// Pre-fill the callback-name prefix; the clbk* overrides append the
	// specific callback name at func+5.
	strcpy (func, "clbk_");
}
ScriptVessel::~ScriptVessel ()
{
	// delete the interpreter instance
	oapiDelInterpreter (hInterp);
}
// ==============================================================
// Overloaded callback functions
// ==============================================================
// --------------------------------------------------------------
// Set the capabilities of the vessel class
// --------------------------------------------------------------
// Load the class script named in the config file, bind the vessel instance
// into the interpreter, detect which clbk_* functions the script provides,
// and run the script's setclasscaps callback if present.
void ScriptVessel::clbkSetClassCaps (FILEHANDLE cfg)
{
	char script[256], cmd[256];
	int i;

	// Load the vessel script
	oapiReadItem_string (cfg, "Script", script);
	// snprintf instead of sprintf: 'script' comes from the config file and
	// could otherwise overflow the fixed-size command buffer.
	snprintf (cmd, sizeof(cmd), "run_global('Config/Vessels/%s')", script);
	oapiExecScriptCmd (hInterp, cmd);

	// Define the vessel instance
	lua_pushlightuserdata (L, GetHandle());  // push vessel handle
	lua_setfield (L, LUA_GLOBALSINDEX, "hVessel");
	strcpy (cmd, "vi = vessel.get_interface(hVessel)");
	oapiExecScriptCmd (hInterp, cmd);

	// check for defined callback functions in script
	for (i = 0; i < NCLBK; i++) {
		strcpy (func+5, CLBKNAME[i]);
		lua_getfield (L, LUA_GLOBALSINDEX, func);
		bclbk[i] = (lua_isfunction (L,-1) != 0);
		lua_pop(L,1);
	}

	// Run the SetClassCaps function
	if (bclbk[SETCLASSCAPS]) {
		strcpy (func+5, "setclasscaps");
		lua_getfield (L, LUA_GLOBALSINDEX, func);
		lua_pushlightuserdata (L, cfg);
		lua_call (L, 1, 0);
	}
}
void ScriptVessel::clbkPostCreation ()
{
	// Forward to the script's clbk_postcreation(), if defined.
	if (bclbk[POSTCREATION]) {
		strcpy (func+5, "postcreation");
		lua_getfield (L, LUA_GLOBALSINDEX, func);
		lua_call (L, 0, 0);
	}
}
void ScriptVessel::clbkPreStep (double simt, double simdt, double mjd)
{
	// Forward to the script's clbk_prestep(simt, simdt, mjd), if defined.
	if (bclbk[PRESTEP]) {
		strcpy (func+5, "prestep");
		lua_getfield (L, LUA_GLOBALSINDEX, func);
		lua_pushnumber(L,simt);
		lua_pushnumber(L,simdt);
		lua_pushnumber(L,mjd);
		lua_call (L, 3, 0);
	}
}
void ScriptVessel::clbkPostStep (double simt, double simdt, double mjd)
{
	// Forward to the script's clbk_poststep(simt, simdt, mjd), if defined.
	if (bclbk[POSTSTEP]) {
		strcpy (func+5, "poststep");
		lua_getfield (L, LUA_GLOBALSINDEX, func);
		lua_pushnumber(L,simt);
		lua_pushnumber(L,simdt);
		lua_pushnumber(L,mjd);
		lua_call (L, 3, 0);
	}
}
// ==============================================================
// API callback interface
// ==============================================================
// --------------------------------------------------------------
// Vessel initialisation
// --------------------------------------------------------------
DLLCLBK VESSEL *ovcInit (OBJHANDLE hvessel, int flightmodel)
{
	// Orbiter module entry point: create the script-driven vessel instance.
	return new ScriptVessel (hvessel, flightmodel);
}
// --------------------------------------------------------------
// Vessel cleanup
// --------------------------------------------------------------
DLLCLBK void ovcExit (VESSEL *vessel)
{
	// Cast back to the concrete type so the correct destructor runs.
	if (vessel) delete (ScriptVessel*)vessel;
}
| 2,030 |
4,197 | {
"name": "cvat-canvas3d",
"version": "0.0.1",
"description": "Part of Computer Vision Annotation Tool which presents its canvas3D library",
"main": "src/canvas3d.ts",
"scripts": {
"build": "tsc && webpack --config ./webpack.config.js",
"server": "nodemon --watch config --exec 'webpack-dev-server --config ./webpack.config.js --mode=development --open'"
},
"author": "Intel",
"license": "MIT",
"browserslist": [
"Chrome >= 63",
"Firefox > 58",
"not IE 11",
"> 2%"
],
"devDependencies": {},
"dependencies": {
"@types/three": "^0.125.3",
"camera-controls": "^1.25.3",
"three": "^0.126.1"
}
}
| 272 |
5,076 | <reponame>zhiqiang-hu/duilib
#ifndef CHATDIALOG_HPP
#define CHATDIALOG_HPP
#include "skin_change_event.hpp"
#include "UIFriends.hpp"
// DuiLib chat window between `myselft_` (the local user) and `friend_`.
// Supports font styling (bold/italic/underline, color, size) and reacts to
// global skin changes via SkinChangedReceiver.
class ChatDialog : public WindowImplBase, public SkinChangedReceiver
{
public:
	// bgimage/bkcolor: initial skin background; the item infos identify the
	// local user and the chat peer.
	ChatDialog(const CDuiString& bgimage, DWORD bkcolor, const FriendListItemInfo& myselft_info, const FriendListItemInfo& friend_info);
	~ChatDialog();
public:
	// WindowImplBase overrides (window identity, skin location, lifecycle).
	LPCTSTR GetWindowClassName() const;
	virtual void OnFinalMessage(HWND hWnd);
	virtual void InitWindow();
	virtual LRESULT ResponseDefaultKeyEvent(WPARAM wParam);
	virtual CDuiString GetSkinFile();
	virtual CDuiString GetSkinFolder();
	virtual CControlUI* CreateControl(LPCTSTR pstrClass);
	virtual LRESULT HandleMessage(UINT uMsg, WPARAM wParam, LPARAM lParam);
	virtual LRESULT OnSysCommand(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
	virtual LRESULT OnClose(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
	virtual LRESULT OnSetFocus(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
	// SkinChangedReceiver override: apply a new background image/color.
	virtual BOOL Receive(SkinChangedParam param);
	virtual LRESULT HandleCustomMessage(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
	void SetTextColor(DWORD dwColor);
	void SendMsg();
protected:
	// DuiLib notification pump and individual notification handlers.
	void Notify(TNotifyUI& msg);
	void OnPrepare(TNotifyUI& msg);
	void OnExit(TNotifyUI& msg);
	void OnTimer(TNotifyUI& msg);
private:
	void FontStyleChanged();
private:
	bool emotion_timer_start_;   // whether the emotion-panel timer is running
	bool bold_;
	bool italic_;
	bool underline_;
	DWORD text_color_;
	DWORD font_size_;
	CDuiString font_face_name_;
	CDuiString bgimage_;         // current skin background image
	DWORD bkcolor_;              // current skin background color
	FriendListItemInfo myselft_; // local user
	FriendListItemInfo friend_;  // chat peer
};
#endif // CHATDIALOG_HPP
372 | <filename>lsass/interop/nsswitch/freebsd/nss-main.c
/* Editor Settings: expandtabs and use 4 spaces for indentation
* ex: set softtabstop=4 tabstop=8 expandtab shiftwidth=4: *
* -*- mode: c, c-basic-offset: 4 -*- */
/*
* Copyright © BeyondTrust Software 2004 - 2019
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS
* WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH
* BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT
* SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE,
* NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST
* A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT
* BEYONDTRUST AT beyondtrust.com/contact
*/
/*
* Copyright (C) BeyondTrust Software. All rights reserved.
*
* Module Name:
*
* nss-main.c
*
* Abstract:
*
* Name Server Switch (BeyondTrust LSASS)
*
* Main Entry Points
*
* Authors: <NAME> (<EMAIL>)
* <NAME> (<EMAIL>)
*
*/
#include "lsanss.h"
#include "nss-user.h"
#include "nss-group.h"
#include "externs.h"
#if defined(__LWI_FREEBSD__)
static
int
LsaNSSFindUserById(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSFindUserByName(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSBeginEnumUsers(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSEnumUsers(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSEndEnumUsers(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSFindGroupById(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSFindGroupByName(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSBeginEnumGroups(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSEnumGroups(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSEndEnumGroups(
PVOID pResult,
PVOID pData,
va_list ap
);
static
int
LsaNSSGetGroupMembership(
PVOID pResult,
PVOID pData,
va_list ap
);
/*
 * FreeBSD NSS module registration entry point. Returns the dispatch table
 * mapping (database, method) pairs to the thunks below; no unregister hook
 * is needed. The passwd method names mirror the intact group entries and the
 * handler each row points to (they were corrupted to "<PASSWORD>"
 * placeholders in this copy of the file).
 */
ns_mtab *
nss_module_register(
    const char* pszModName,
    unsigned int* pLen,
    nss_module_unregister_fn* pFnUnregister
    )
{
    static ns_mtab fnTable[] =
    {
        { "passwd", "getpwuid_r", &LsaNSSFindUserById, 0},
        { "passwd", "getpwnam_r", &LsaNSSFindUserByName, 0},
        { "passwd", "setpwent", &LsaNSSBeginEnumUsers, 0},
        { "passwd", "getpwent_r", &LsaNSSEnumUsers, 0},
        { "passwd", "endpwent", &LsaNSSEndEnumUsers, 0},
        { "group", "getgrnam_r", &LsaNSSFindGroupByName, 0},
        { "group", "getgrgid_r", &LsaNSSFindGroupById, 0},
        { "group", "setgrent", &LsaNSSBeginEnumGroups, 0},
        { "group", "getgrent_r", &LsaNSSEnumGroups, 0},
        { "group", "endgrent", &LsaNSSEndEnumGroups, 0},
        { "group", "getgroupmembership", &LsaNSSGetGroupMembership, 0}
    };
    *pLen = sizeof(fnTable)/sizeof(fnTable[0]);
    *pFnUnregister = NULL;
    return fnTable;
}
/*
 * nsdispatch thunk for getpwuid_r. va_list layout: (uid_t uid,
 * struct passwd* result, char* buf, size_t buflen, int* errnop).
 * Forwards to _nss_lsass_getpwuid_r; on success *pResult points at the
 * filled passwd struct, otherwise it is set to NULL.
 */
static
int
LsaNSSFindUserById(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    uid_t uid = 0;
    struct passwd* pResultUser = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = 0;
    uid = va_arg(ap, uid_t);
    pResultUser = (struct passwd*)va_arg(ap, struct passwd *);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int *);
    ret = _nss_lsass_getpwuid_r(
        uid,
        pResultUser,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct passwd**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultUser;
    }
    return ret;
}
/*
 * nsdispatch thunk for getpwnam_r. va_list layout: (const char* name,
 * struct passwd* result, char* buf, size_t buflen, int* errnop).
 * Forwards to _nss_lsass_getpwnam_r; *pResult mirrors the success/failure
 * convention of LsaNSSFindUserById.
 */
static
int
LsaNSSFindUserByName(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    PCSTR pszLoginId = NULL;
    struct passwd* pResultUser = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = NULL;
    pszLoginId = va_arg(ap, PCSTR);
    pResultUser = (struct passwd*)va_arg(ap, struct passwd *);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int *);
    ret = _nss_lsass_getpwnam_r(
        pszLoginId,
        pResultUser,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct passwd**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultUser;
    }
    return ret;
}
/*
 * nsdispatch thunk for setpwent: (re)start user enumeration. The generic
 * dispatch arguments carry nothing this entry point needs.
 */
static
int
LsaNSSBeginEnumUsers(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    return _nss_lsass_setpwent();
}
/*
 * nsdispatch thunk for getpwent_r: fetch the next user of the current
 * enumeration. va_list layout: (struct passwd* result, char* buf,
 * size_t buflen, int* errnop). *pResult is NULL unless the call succeeded.
 */
static
int
LsaNSSEnumUsers(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    struct passwd* pResultUser = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = NULL;
    pResultUser = (struct passwd*)va_arg(ap, struct passwd*);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int*);
    ret = _nss_lsass_getpwent_r(
        pResultUser,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct passwd**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultUser;
    }
    return ret;
}
/*
 * nsdispatch thunk for endpwent: close the user enumeration. Dispatch
 * arguments are unused.
 */
static
int
LsaNSSEndEnumUsers(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    return _nss_lsass_endpwent();
}
/*
 * nsdispatch thunk for getgrgid_r. va_list layout: (gid_t gid,
 * struct group* result, char* buf, size_t buflen, int* errnop).
 * Forwards to _nss_lsass_getgrgid_r; *pResult is NULL unless successful.
 */
static
int
LsaNSSFindGroupById(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    gid_t gid = 0;
    struct group* pResultGroup = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = 0;
    gid = va_arg(ap, gid_t);
    pResultGroup = (struct group*)va_arg(ap, struct group *);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int *);
    ret = _nss_lsass_getgrgid_r(
        gid,
        pResultGroup,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct group**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultGroup;
    }
    return ret;
}
/*
 * nsdispatch thunk for getgrnam_r. va_list layout: (const char* name,
 * struct group* result, char* buf, size_t buflen, int* errnop).
 * Forwards to _nss_lsass_getgrnam_r; *pResult is NULL unless successful.
 */
static
int
LsaNSSFindGroupByName(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    PCSTR pszGroupName = NULL;
    struct group* pResultGroup = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = NULL;
    pszGroupName = va_arg(ap, PCSTR);
    pResultGroup = (struct group*)va_arg(ap, struct group *);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int *);
    ret = _nss_lsass_getgrnam_r(
        pszGroupName,
        pResultGroup,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct group**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultGroup;
    }
    return ret;
}
/*
 * nsdispatch thunk for setgrent: (re)start group enumeration. Dispatch
 * arguments are unused.
 */
static
int
LsaNSSBeginEnumGroups(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    return _nss_lsass_setgrent();
}
/*
 * nsdispatch thunk for getgrent_r: fetch the next group of the current
 * enumeration. va_list layout: (struct group* result, char* buf,
 * size_t buflen, int* errnop). *pResult is NULL unless the call succeeded.
 */
static
int
LsaNSSEnumGroups(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0;
    struct group* pResultGroup = NULL;
    PSTR pszBuf = NULL;
    size_t stBufLen = 0;
    PINT pErrorNumber = NULL;
    pResultGroup = (struct group*)va_arg(ap, struct group*);
    pszBuf = (PSTR)va_arg(ap, char*);
    stBufLen = (size_t)va_arg(ap, size_t);
    pErrorNumber = (PINT)va_arg(ap, int*);
    ret = _nss_lsass_getgrent_r(
        pResultGroup,
        pszBuf,
        stBufLen,
        pErrorNumber);
    if(pResult)
    {
        *((struct group**)pResult) = ret != NSS_STATUS_SUCCESS ? NULL : pResultGroup;
    }
    return ret;
}
/*
 * nsdispatch thunk for endgrent: close the group enumeration. Dispatch
 * arguments are unused.
 */
static
int
LsaNSSEndEnumGroups(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    return _nss_lsass_endgrent();
}
/*
 * nsdispatch thunk for getgroupmembership: fill pResultGids with the gids
 * of the groups pszUserName belongs to. va_list layout: (const char* user,
 * gid_t basegid, gid_t* groups, int maxgrp, int* ngroups). The base gid
 * argument is deliberately ignored (marked unused) -- NOTE(review):
 * presumably the common layer accounts for it; verify against callers.
 * On return *pNumResultGids is clamped to maxResultGids and errno carries
 * the mapped error code.
 */
static
int
LsaNSSGetGroupMembership(
    PVOID pResult,
    PVOID pData,
    va_list ap
    )
{
    int ret = 0, err = 0;
    PCSTR pszUserName = va_arg(ap, PCSTR);
    __attribute__((unused)) gid_t groupGid = va_arg(ap, gid_t);
    gid_t* pResultGids = va_arg(ap, gid_t*);
    size_t maxResultGids = (size_t) va_arg(ap, int);
    PINT pNumResultGids = va_arg(ap, PINT);
    size_t myResultsSize = *pNumResultGids;
    ret = LsaNssCommonGroupGetGroupsByUserName(
        &lsaConnection,
        pszUserName,
        *pNumResultGids,
        maxResultGids,
        &myResultsSize,
        pResultGids,
        &err);
    /* Clamp: the common layer may report more groups than the caller's
     * buffer can hold. */
    if (myResultsSize > maxResultGids)
        myResultsSize = maxResultGids;
    *pNumResultGids = (int) myResultsSize;
    errno = err;
    return ret;
}
#endif /* __LWI_FREEBSD__ */
| 4,542 |
def main(request, response):
    # wptserve handler: reply 200 text/plain whose body is the string
    # representation of the request's raw (unparsed) header list, then mark
    # the connection to be closed after the response is sent.
    response.writer.write_status(200)
    response.writer.write_header(b"Content-Type", b"text/plain")
    response.writer.end_headers()
    response.writer.write(str(request.raw_headers))
    response.close_connection = True
| 87 |
# Print the base classes and MRO of `object` and of a small
# single-inheritance hierarchy. The original mixed Python-2 print
# statements with print() calls; single-argument print() produces
# identical output under both Python 2 and Python 3, so all calls are
# unified on that form.
print(object.__bases__)
print(object.__mro__)


class X(object):
    pass


class Y(X):
    pass


print(X.__bases__)
print(X.__mro__)
print(Y.__bases__)
print(Y.__mro__)
| 70 |
6,224 | /*
* Copyright (c) 2021 <NAME> <<EMAIL>>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_IPC_SERVICE_IPC_STATIC_VRINGS_H_
#define ZEPHYR_INCLUDE_IPC_SERVICE_IPC_STATIC_VRINGS_H_
#include <ipc/ipc_service.h>
#include <openamp/open_amp.h>
#include <metal/device.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief IPC service static VRINGs API
* @defgroup ipc_service_static_vrings_api IPC service static VRINGs API
* @{
*/
/** Number of used VRING buffers. */
#define VRING_COUNT (2)
/** VRING alignment. */
#define VRING_ALIGNMENT CONFIG_IPC_SERVICE_STATIC_VRINGS_ALIGNMENT
/**
* @typedef ipc_notify_cb
* @brief Define the notify callback.
*
* This callback is defined at instance level and it is called on virtqueue notify.
*
* @param vq Virtqueue.
* @param priv Priv data.
*/
typedef void (*ipc_notify_cb)(struct virtqueue *vq, void *priv);
/** @brief Static VRINGs structure.
*
* Struct used to represent and carry information about static allocation of VRINGs.
*/
struct ipc_static_vrings {
	/** virtIO device. */
	struct virtio_device vdev;
	/** SHM physmap. */
	metal_phys_addr_t shm_physmap[1];
	/** SHM device. */
	struct metal_device shm_device;
	/** Status register address. */
	uintptr_t status_reg_addr;
	/** TX VRING address. */
	uintptr_t tx_addr;
	/** RX VRING address. */
	uintptr_t rx_addr;
	/** VRING size. */
	size_t vring_size;
	/** Shared memory region address. */
	uintptr_t shm_addr;
	/** Shared memory region size. */
	size_t shm_size;
	/** SHM IO region. */
	struct metal_io_region *shm_io;
	/** VRINGs */
	struct virtio_vring_info rvrings[VRING_COUNT];
	/** Virtqueues. */
	struct virtqueue *vq[VRING_COUNT];
	/** Private data to be passed to the notify callback. */
	void *priv;
	/** Notify callback. */
	ipc_notify_cb notify_cb;
};
/** @brief Init the static VRINGs.
*
* Init VRINGs and Virtqueues of an OpenAMP / RPMsg instance.
*
* @param vr Pointer to the VRINGs instance struct.
* @param role Master / Remote role.
*
* @retval -EINVAL When some parameter is missing.
* @retval -ENOMEM When memory is not enough for VQs allocation.
* @retval 0 If successful.
* @retval Other errno codes depending on the OpenAMP implementation.
*/
int ipc_static_vrings_init(struct ipc_static_vrings *vr, unsigned int role);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_IPC_SERVICE_IPC_STATIC_VRINGS_H_ */
| 909 |
852 | <filename>Validation/RecoTau/plugins/ZllArbitrator.cc
#include "FWCore/Framework/interface/global/EDProducer.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/MakerMacros.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/Utilities/interface/InputTag.h"
#include "DataFormats/Common/interface/View.h"
#include "DataFormats/Math/interface/deltaR.h"
#include "DataFormats/MuonReco/interface/Muon.h"
#include "DataFormats/EgammaCandidates/interface/Electron.h"
#include "DataFormats/EgammaCandidates/interface/GsfElectron.h"
#include "DataFormats/EgammaCandidates/interface/Photon.h"
#include "DataFormats/JetReco/interface/Jet.h"
#include "DataFormats/TauReco/interface/PFTau.h"
#include "DataFormats/TauReco/interface/PFTauDiscriminator.h"
#include "DataFormats/TrackReco/interface/Track.h"
#include "CommonTools/Utils/interface/StringCutObjectSelector.h"
#include <algorithm>
#include <memory>
#include <vector>
////////////////////////////////////////////////////////////////////////////////
// class definition
////////////////////////////////////////////////////////////////////////////////
// EDProducer that reduces a collection of Z candidates to the single
// candidate whose invariant mass is closest to the nominal Z mass.
class ZllArbitrator : public edm::global::EDProducer<> {
public:
  explicit ZllArbitrator(edm::ParameterSet const&);
  void produce(edm::StreamID, edm::Event&, edm::EventSetup const&) const override;

private:
  // Token for the input candidate collection ("ZCandidateCollection").
  edm::EDGetTokenT<std::vector<reco::CompositeCandidate>> srcZCand_;
};
////////////////////////////////////////////////////////////////////////////////
// construction/destruction
////////////////////////////////////////////////////////////////////////////////
// Registers consumption of the configured input collection and declares the
// (at most one-element) output collection.
ZllArbitrator::ZllArbitrator(edm::ParameterSet const& iConfig)
    : srcZCand_{consumes<std::vector<reco::CompositeCandidate>>(
          iConfig.getParameter<edm::InputTag>("ZCandidateCollection"))} {
  produces<std::vector<reco::CompositeCandidate>>();
}
////////////////////////////////////////////////////////////////////////////////
// implementation of member functions
////////////////////////////////////////////////////////////////////////////////
// Select, from the input collection, the single candidate whose mass is
// closest to the PDG Z mass and put it (alone) into the event; an empty
// vector is put when there are no input candidates.
void ZllArbitrator::produce(edm::StreamID, edm::Event& iEvent, edm::EventSetup const&) const {
  edm::Handle<std::vector<reco::CompositeCandidate>> zCandidates;
  iEvent.getByToken(srcZCand_, zCandidates);
  auto bestZ = std::make_unique<std::vector<reco::CompositeCandidate>>();
  if (!zCandidates->empty()) {
    // If you're going to hard-code numbers, at least make them constexpr.
    double constexpr ZmassPDG{91.18};  // GeV
    // Candidate minimizing |mass - ZmassPDG|.
    auto bestZCand = std::min_element(
        std::cbegin(*zCandidates), std::cend(*zCandidates), [ZmassPDG](auto const& firstCand, auto const& secondCand) {
          return std::abs(firstCand.mass() - ZmassPDG) < std::abs(secondCand.mass() - ZmassPDG);
        });
    bestZ->push_back(*bestZCand);
  }
  iEvent.put(std::move(bestZ));
}
using BestMassZArbitrationProducer = ZllArbitrator;
DEFINE_FWK_MODULE(BestMassZArbitrationProducer);
| 935 |
541 | <reponame>michelcareau/DSpace
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.xmlworkflow.state;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.dspace.core.Context;
import org.dspace.xmlworkflow.Role;
import org.dspace.xmlworkflow.WorkflowConfigurationException;
import org.dspace.xmlworkflow.state.actions.ActionResult;
import org.dspace.xmlworkflow.storedcomponents.XmlWorkflowItem;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Contains all the steps involved in a certain configured workflow.
*
* @author <NAME> (bram.deschouwer at dot com)
* @author <NAME> (kevin at atmire dot com)
* @author <NAME> (ben at atmire dot com)
* @author <NAME> (markd at atmire dot com)
*/
public class Workflow implements BeanNameAware {
    /** Spring bean name; doubles as the workflow identifier. */
    private String id;
    /** Entry step of the workflow. */
    private Step firstStep;
    /** All steps of this workflow, in configuration order. */
    private List<Step> steps;
    /**
     * Get the entry step of this workflow.
     *
     * @return the first step.
     */
    public Step getFirstStep() {
        return firstStep;
    }
    /**
     * Get the name of this Workflow.
     * @return the name.
     * @see setBeanName
     */
    public String getID() {
        return id;
    }
    /**
     * Return a step with a given name.
     * @param stepID name of the Step to find.
     * @return the identified Step.
     * @throws WorkflowConfigurationException if the named step is not found.
     */
    public Step getStep(String stepID) throws WorkflowConfigurationException {
        for (Step step : steps) {
            if (step.getId().equals(stepID)) {
                return step;
            }
        }
        throw new WorkflowConfigurationException("Step definition not found for: " + stepID);
    }
    /**
     * Find the Step that follows a given Step given an outcome.
     * Steps that are not valid for the item are skipped recursively.
     * @param context the current DSpace session context.
     * @param wfi the item whose steps are consulted.
     * @param currentStep the step being consulted.
     * @param outcome the outcome of {@link currentStep}.
     * @return the next step, or null if there is none (the item is archived).
     * @throws WorkflowConfigurationException if the next step is invalid.
     * @throws SQLException passed through.
     */
    public Step getNextStep(Context context, XmlWorkflowItem wfi, Step currentStep, int outcome)
        throws WorkflowConfigurationException, SQLException {
        Step nextStep = currentStep.getNextStep(outcome);
        if (nextStep != null) {
            if (nextStep.isValidStep(context, wfi)) {
                return nextStep;
            } else {
                return getNextStep(context, wfi, nextStep, ActionResult.OUTCOME_COMPLETE);
            }
        } else {
            //No next step, archive it
            return null;
        }
    }
    /**
     * Set the entry step of this workflow and bind it back to this workflow.
     * @param firstStep the step the workflow starts with.
     */
    @Autowired(required = true)
    public void setFirstStep(Step firstStep) {
        firstStep.setWorkflow(this);
        this.firstStep = firstStep;
    }
    /**
     * Get the steps that need to be executed in this workflow before the item is archived
     * @return the workflow steps
     */
    public List<Step> getSteps() {
        return steps;
    }
    /**
     * Set the steps that need to be executed in this workflow before the item is archived
     * @param steps the workflow steps
     */
    @Autowired(required = true)
    public void setSteps(List<Step> steps) {
        for (Step step : steps) {
            step.setWorkflow(this);
        }
        this.steps = steps;
    }
    /**
     * Get the roles that are used in this workflow
     * @return a map containing the roles, the role name will the key, the role itself the value
     */
    public Map<String, Role> getRoles() {
        Map<String, Role> roles = new HashMap<>();
        for (Step step : steps) {
            if (step.getRole() != null) {
                roles.put(step.getRole().getId(), step.getRole());
            }
        }
        return roles;
    }
    /**
     * Spring callback: the bean name becomes this workflow's identifier.
     * @param s the configured bean name.
     */
    @Override
    public void setBeanName(String s) {
        id = s;
    }
}
| 1,586 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package com.sun.star.comp.urlresolver;
import com.sun.star.uno.XComponentContext;
import com.sun.star.comp.helper.Bootstrap;
import com.sun.star.lang.XMultiComponentFactory;
import com.sun.star.bridge.UnoUrlResolver;
import com.sun.star.bridge.XUnoUrlResolver;
import com.sun.star.beans.XPropertySet;
import com.sun.star.uno.UnoRuntime;
//import com.sun.star.connection.NoConnectionException;
/** start the office with these options <br>
soffice -accept=socket,host=localhost,port=8100;urp;
*/
public class UrlResolver_Test
{
    /**
     * Manual connectivity test: resolves the ServiceManager of a running
     * office instance over a UNO URL and creates a Desktop service through
     * it. Each failure mode is reported separately on stdout.
     */
    public static void main(String[] args) {
        try {
            // Bootstrap a local UNO component context.
            XComponentContext xcomponentcontext = Bootstrap.createInitialComponentContext( null );
            // initial serviceManager
            XMultiComponentFactory xLocalServiceManager = xcomponentcontext.getServiceManager();
            // create a connector, so that it can contact the office
            XUnoUrlResolver urlResolver
                = UnoUrlResolver.create( xcomponentcontext );
            Object initialObject = urlResolver.resolve(
                "uno:socket,host=localhost,port=8100;urp;StarOffice.ServiceManager" );
            XMultiComponentFactory xOfficeFactory= UnoRuntime.queryInterface(
                XMultiComponentFactory.class, initialObject );
            // retrieve the component context (it's not yet exported from the office)
            // Query for the XPropertySet interface.
            XPropertySet xProperySet = UnoRuntime.queryInterface(
                XPropertySet.class, xOfficeFactory);
            // Get the default context from the office server.
            Object oDefaultContext = xProperySet.getPropertyValue( "DefaultContext" );
            // Query for the interface XComponentContext.
            XComponentContext xOfficeComponentContext = UnoRuntime.queryInterface(
                XComponentContext.class, oDefaultContext );
            // now create the desktop service
            // NOTE: use the office component context here !
            // NOTE(review): the created instance is intentionally unused --
            // this test only verifies that the service can be instantiated.
            Object oDesktop = xOfficeFactory.createInstanceWithContext("com.sun.star.frame.Desktop",
                xOfficeComponentContext );
        } catch(com.sun.star.connection.NoConnectException e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        } catch(com.sun.star.connection.ConnectionSetupException ce) {
            System.out.println(ce.getMessage());
            ce.printStackTrace();
        } catch(com.sun.star.lang.IllegalArgumentException ie) {
            System.out.println(ie.getMessage());
            ie.printStackTrace();
        } catch(com.sun.star.beans.UnknownPropertyException ue) {
            System.out.println(ue.getMessage());
            ue.printStackTrace();
        } catch(java.lang.Exception ee) {
            System.out.println(ee.getMessage());
            ee.printStackTrace();
        }
    }
}
| 1,423 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_HISTORY_CLUSTERS_HISTORY_CLUSTERS_METRICS_LOGGER_H_
#define CHROME_BROWSER_HISTORY_CLUSTERS_HISTORY_CLUSTERS_METRICS_LOGGER_H_
#include "content/public/browser/page.h"
#include "content/public/browser/page_user_data.h"
#include "content/public/browser/web_contents_observer.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace history_clusters {
// The initial state that describes how an interaction with the HistoryClusters
// UI was started.
//
// These values are recorded in histograms (see the enums.xml note below), so
// existing numeric values must not be changed or reused.
//
// Keep in sync with HistoryClustersInitialState in
// tools/metrics/histograms/enums.xml.
enum class HistoryClustersInitialState {
  kUnknown = 0,
  // The HistoryClusters UI was opened via direct URL, i.e., not opened via any
  // other surface/path such as an omnibox action or other UI surface.
  kDirectNavigation = 1,
  // The HistoryClusters UI was opened indirectly; e.g., using the link the
  // chrome://history sidebar.
  kIndirectNavigation = 2,
  // Add new values above this line.
  kMaxValue = kIndirectNavigation,
};
// The final state, or outcome, of an interaction on the HistoryClusters UI.
//
// Recorded in histograms; existing numeric values must not be renumbered.
//
// Keep in sync with HistoryClustersFinalState in enums.xml.
enum class HistoryClustersFinalState {
  kUnknown = 0,
  // The interaction with the HistoryClusters UI ended with a click on a link.
  kLinkClick = 1,
  // The UI interaction ended without opening anything on the page.
  // TODO(manukh): Currently, clicking on the side bar links (e.g. the link to
  // tabs from other devices) will record the final state as `kCloseTab`. We
  // should differentiate this case.
  kCloseTab = 2,
  // Add new values above this line.
  kMaxValue = kCloseTab,
};
// HistoryClustersMetricsLogger contains all the metrics/events associated with
// interactions and internals of HistoryClusters in Chrome. It has the same
// lifetime as the page's main document and metrics are flushed when `this` is
// destructed.
class HistoryClustersMetricsLogger
    : public content::PageUserData<HistoryClustersMetricsLogger> {
 public:
  explicit HistoryClustersMetricsLogger(content::Page& page);
  ~HistoryClustersMetricsLogger() override;
  PAGE_USER_DATA_KEY_DECL();
  // Records how this interaction with the HistoryClusters UI began.
  void set_initial_state(HistoryClustersInitialState init_state) {
    init_state_ = init_state;
  }
  // Records the outcome of this interaction with the HistoryClusters UI.
  void set_final_state(HistoryClustersFinalState final_state) {
    final_state_ = final_state;
  }
  // Counts one query; callers should only count non-empty queries (see
  // `num_queries_`).
  void increment_query_count() { num_queries_++; }
  // Counts one toggle from the HistoryClusters UI to the basic History UI.
  void increment_toggles_to_basic_history() { num_toggles_to_basic_history_++; }
  // Associates this logger's metrics with a navigation (used for UKM).
  void set_navigation_id(int64_t navigation_id) {
    navigation_id_ = navigation_id;
  }
  // Counts one link opened from the UI (see `links_opened_count_`).
  void IncrementLinksOpenedCount() { links_opened_count_++; }
 private:
  // The navigation ID of the navigation handle that this data is associated
  // with, used for recording the metrics to UKM.
  absl::optional<int64_t> navigation_id_;
  // The initial state of how this interaction with the HistoryClusters UI was
  // started.
  absl::optional<HistoryClustersInitialState> init_state_;
  // The final state of how this interaction with the HistoryClusters UI ended.
  absl::optional<HistoryClustersFinalState> final_state_;
  // The number of queries made on the tracker history clusters event. Only
  // queries containing a string should be counted.
  int num_queries_ = 0;
  // The number of times in this interaction with HistoryClusters included the
  // user toggled to the basic History UI from the HistoryClusters UI.
  int num_toggles_to_basic_history_ = 0;
  // The number of links opened from the HistoryClusters UI. Includes both
  // same-tab and new-tab/window navigations. Includes both visit and related
  // search links. Does not include sidebar navigations (e.g. 'Clear browsing
  // data').
  int links_opened_count_ = 0;
};
} // namespace history_clusters
#endif // CHROME_BROWSER_HISTORY_CLUSTERS_HISTORY_CLUSTERS_METRICS_LOGGER_H_
| 1,173 |
14,564 | package com.alibaba.datax.plugin.writer.sqlserverwriter;
import com.alibaba.datax.common.spi.ErrorCode;
/**
 * Error codes for the SQL Server writer plugin. The enum currently declares
 * no constants; it only provides the {@link ErrorCode} plumbing.
 */
public enum SqlServerWriterErrorCode implements ErrorCode {
    ;

    /** Machine-readable error code. */
    private final String code;

    /** Human-readable description of the error. */
    private final String description;

    SqlServerWriterErrorCode(String code, String description) {
        this.code = code;
        this.description = description;
    }

    @Override
    public String getCode() {
        return this.code;
    }

    @Override
    public String getDescription() {
        return this.description;
    }

    @Override
    public String toString() {
        return String.format("Code:[%s], Describe:[%s]. ", this.code, this.description);
    }
}
| 269 |
493 | <filename>HtmlNativeAndroid/htmlnative-lib/src/main/java/com/mozz/htmlnative/script/FuncParams.java
package com.mozz.htmlnative.script;
/**
 * Marker interface — presumably implemented by types that carry parameters
 * for HtmlNative script function calls (confirm against implementers; the
 * interface itself declares no members).
 *
 * @author <NAME>, 17/3/24.
 */
public interface FuncParams {
}
| 77 |
321 | /**
* Most of the code in the Qalingo project is copyrighted Hoteia and licensed
* under the Apache License Version 2.0 (release version 0.8.0)
* http://www.apache.org/licenses/LICENSE-2.0
*
* Copyright (c) Hoteia, 2012-2014
* http://www.hoteia.com - http://twitter.com/hoteia - <EMAIL>
*
*/
package org.hoteia.qalingo.core.web.mvc.controller.oauth;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import org.hoteia.qalingo.core.Constants;
import org.hoteia.qalingo.core.service.AttributeService;
import org.hoteia.qalingo.core.service.CustomerService;
import org.hoteia.qalingo.core.service.WebManagementService;
import org.hoteia.qalingo.core.service.openid.OpenIdException;
import org.hoteia.qalingo.core.web.mvc.controller.AbstractFrontofficeQalingoController;
import org.scribe.model.Token;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
/**
*
* <p>
* <a href="AbstractOpenIdFrontofficeController.java.html"><i>View Source</i></a>
* </p>
*
* @author <NAME> <a href="http://www.hoteia.com"><i>Hoteia.com</i></a>
*
*/
public abstract class AbstractOAuthFrontofficeController extends AbstractFrontofficeQalingoController {
protected final Logger logger = LoggerFactory.getLogger(getClass());
@Autowired
protected CustomerService customerService;
@Autowired
protected WebManagementService webManagementService;
@Autowired
protected AttributeService attributeService;
protected static final Token EMPTY_TOKEN = null;
// TODO : denis : 20130822 : move this in properties or database config
protected static final String LIVE_ME_URL = "https://apis.live.net/v5.0/me";
protected static final String FACEBOOK_ME_URL = "https://graph.facebook.com/me";
protected static final String TWITTER_URL = "http://api.twitter.com/1.1/account/verify_credentials.json";
protected static final String GOOGLE_ME_URL = "https://www.googleapis.com/plus/v1/people/me";
protected static final String TWITTER_OAUTH_REQUEST_TOKEN = "TWITTER_OAUTH_REQUEST_TOKEN";
protected static final String YAHOO_OAUTH_REQUEST_TOKEN = "<PASSWORD>";
protected static final String REQUEST_PARAM_OAUTH_VERIFIER = "oauth_verifier";
void checkNonce(String nonce) {
// check response_nonce to prevent replay-attack:
if (nonce==null || nonce.length()<20){
throw new OpenIdException("Verify failed.");
}
// make sure the time of server is correct:
long nonceTime = getNonceTime(nonce);
long diff = Math.abs(System.currentTimeMillis() - nonceTime);
if (diff > Constants.ONE_HOUR){
throw new OpenIdException("Bad nonce time.");
}
}
long getNonceTime(String nonce) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ")
.parse(nonce.substring(0, 19) + "+0000")
.getTime();
}
catch(ParseException e) {
throw new OpenIdException("Bad nonce time.");
}
}
} | 1,269 |
5,169 | {
"name": "PrivateTool",
"version": "0.0.1",
"summary": "我的demo",
"description": "用来测试学习创建pod",
"homepage": "https://git.oschina.net/MasonTy",
"license": "MIT",
"authors": {
"kongzhaozhuang": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://git.oschina.net/MasonTy/PrivateTestDemo.git",
"tag": "0.0.1"
},
"source_files": [
"PrivateTestDemo",
"*.{h,m}"
],
"requires_arc": true
}
| 231 |
348 | {"nom":"Decazeville","circ":"2ème circonscription","dpt":"Aveyron","inscrits":4075,"abs":3051,"votants":1024,"blancs":313,"nuls":73,"exp":638,"res":[{"nuance":"REM","nom":"<NAME>","voix":638}]} | 78 |
2,577 | <reponame>mdsarfarazalam840/camunda-bpm-platform
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.container.impl.jboss.util;
import org.jboss.as.naming.ManagedReferenceFactory;
import org.jboss.as.naming.ServiceBasedNamingStore;
import org.jboss.as.naming.deployment.ContextNames;
import org.jboss.as.naming.service.BinderService;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceController;
import org.jboss.msc.service.ServiceName;
import org.jboss.msc.service.ServiceTarget;
/**
 * <p>Utility class for installing JNDI binder services into the JBoss MSC
 * service container.
 *
 * @author <NAME>
 *
 */
public class BindingUtil {

    /**
     * Creates a {@link BinderService} named {@code binderServiceName}, wires it
     * to the global naming store ({@code ContextNames.GLOBAL_CONTEXT_SERVICE_NAME}),
     * injects the given factory as the bound object, and installs the service.
     *
     * @param target                  service target the binder is installed into
     * @param serviceName             MSC service name for the new binder service
     * @param binderServiceName       name the {@link BinderService} binds under
     * @param managedReferenceFactory factory producing the bound reference
     * @return the controller of the installed binder service
     */
    public static ServiceController<ManagedReferenceFactory> createJndiBindings(ServiceTarget target, ServiceName serviceName, String binderServiceName, ManagedReferenceFactory managedReferenceFactory) {
        BinderService binderService = new BinderService(binderServiceName);
        ServiceBuilder<ManagedReferenceFactory> serviceBuilder = target
            .addService(serviceName, binderService)
            .addDependency(ContextNames.GLOBAL_CONTEXT_SERVICE_NAME, ServiceBasedNamingStore.class, binderService.getNamingStoreInjector());
        binderService.getManagedObjectInjector().inject(managedReferenceFactory);
        return serviceBuilder.install();
    }
}
| 611 |
14,668 | <filename>components/security_interstitials/core/mitm_software_ui.cc
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/security_interstitials/core/mitm_software_ui.h"
#include "base/i18n/time_formatting.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
#include "components/security_interstitials/core/common_string_util.h"
#include "components/security_interstitials/core/metrics_helper.h"
#include "components/ssl_errors/error_info.h"
#include "components/strings/grit/components_strings.h"
#include "net/base/escape.h"
#include "ui/base/l10n/l10n_util.h"
namespace security_interstitials {
// Captures the request/error details for the interstitial and records a
// TOTAL_VISITS interaction so each display of the MITM-software page is
// counted.
MITMSoftwareUI::MITMSoftwareUI(const GURL& request_url,
                               int cert_error,
                               const net::SSLInfo& ssl_info,
                               const std::string& mitm_software_name,
                               bool is_enterprise_managed,
                               ControllerClient* controller)
    : request_url_(request_url),
      cert_error_(cert_error),
      ssl_info_(ssl_info),
      mitm_software_name_(mitm_software_name),
      is_enterprise_managed_(is_enterprise_managed),
      controller_(controller) {
  controller_->metrics_helper()->RecordUserInteraction(
      security_interstitials::MetricsHelper::TOTAL_VISITS);
}

// Flushes shutdown metrics when the interstitial goes away.
MITMSoftwareUI::~MITMSoftwareUI() {
  controller_->metrics_helper()->RecordShutdownMetrics();
}
// Fills `load_time_data` with every string/flag the interstitial HTML needs,
// then delegates the variant-specific paragraphs to the enterprise or
// at-home helper.
void MITMSoftwareUI::PopulateStringsForHTML(base::Value* load_time_data) {
  CHECK(load_time_data);

  // Strings shared with the other SSL interstitials.
  common_string_util::PopulateSSLLayoutStrings(cert_error_, load_time_data);
  common_string_util::PopulateSSLDebuggingStrings(
      ssl_info_, base::Time::NowFromSystemTime(), load_time_data);

  // Display flags: this interstitial is never bypassable and shows no
  // primary button.
  load_time_data->SetBoolKey("bad_clock", false);
  load_time_data->SetBoolKey("hide_primary_button", true);
  load_time_data->SetBoolKey("overridable", false);

  // Strings shared between the enterprise and non-enterprise variants.
  load_time_data->SetStringKey("tabTitle",
                               l10n_util::GetStringUTF16(IDS_SSL_V2_TITLE));
  load_time_data->SetStringKey(
      "heading", l10n_util::GetStringUTF16(IDS_MITM_SOFTWARE_HEADING));
  load_time_data->SetStringKey("primaryButtonText", std::string());
  load_time_data->SetStringKey("finalParagraph", std::string());

  // Variant-specific paragraphs.
  if (!is_enterprise_managed_) {
    PopulateAtHomeUserStringsForHTML(load_time_data);
    return;
  }
  PopulateEnterpriseUserStringsForHTML(load_time_data);
}
// Dispatches a command sent from the interstitial page's JavaScript. Only
// reporting/metrics commands are meaningful for this UI; navigation-style
// commands are not supported here and hit NOTREACHED().
void MITMSoftwareUI::HandleCommand(SecurityInterstitialCommand command) {
  switch (command) {
    case CMD_DO_REPORT:
      controller_->SetReportingPreference(true);
      break;
    case CMD_DONT_REPORT:
      controller_->SetReportingPreference(false);
      break;
    case CMD_SHOW_MORE_SECTION:
      controller_->metrics_helper()->RecordUserInteraction(
          security_interstitials::MetricsHelper::SHOW_ADVANCED);
      break;
    case CMD_OPEN_REPORTING_PRIVACY:
      controller_->OpenExtendedReportingPrivacyPolicy(true);
      break;
    case CMD_OPEN_WHITEPAPER:
      controller_->OpenExtendedReportingWhitepaper(true);
      break;
    case CMD_OPEN_ENHANCED_PROTECTION_SETTINGS:
      controller_->metrics_helper()->RecordUserInteraction(
          security_interstitials::MetricsHelper::OPEN_ENHANCED_PROTECTION);
      controller_->OpenEnhancedProtectionSettings();
      break;
    case CMD_DONT_PROCEED:
    case CMD_OPEN_HELP_CENTER:
    case CMD_RELOAD:
    case CMD_PROCEED:
    case CMD_OPEN_DATE_SETTINGS:
    case CMD_OPEN_DIAGNOSTIC:
    case CMD_OPEN_LOGIN:
    case CMD_REPORT_PHISHING_ERROR:
      // Not supported by the SSL error page.
      NOTREACHED() << "Unsupported command: " << command;
      break;
    case CMD_ERROR:
    case CMD_TEXT_FOUND:
    case CMD_TEXT_NOT_FOUND:
      // Commands are for testing.
      break;
  }
}
// Paragraphs shown when the device is enterprise-managed. The detected
// software name is HTML-escaped once and reused.
void MITMSoftwareUI::PopulateEnterpriseUserStringsForHTML(
    base::Value* load_time_data) {
  const auto escaped_name =
      net::EscapeForHTML(base::UTF8ToUTF16(mitm_software_name_));
  load_time_data->SetStringKey(
      "primaryParagraph",
      l10n_util::GetStringFUTF16(
          IDS_MITM_SOFTWARE_PRIMARY_PARAGRAPH_ENTERPRISE, escaped_name));
  load_time_data->SetStringKey(
      "explanationParagraph",
      l10n_util::GetStringFUTF16(
          IDS_MITM_SOFTWARE_EXPLANATION_ENTERPRISE, escaped_name,
          l10n_util::GetStringUTF16(IDS_MITM_SOFTWARE_EXPLANATION)));
}
// Paragraphs shown on non-managed (home) devices. The detected software
// name is HTML-escaped once and reused.
void MITMSoftwareUI::PopulateAtHomeUserStringsForHTML(
    base::Value* load_time_data) {
  const auto escaped_name =
      net::EscapeForHTML(base::UTF8ToUTF16(mitm_software_name_));
  load_time_data->SetStringKey(
      "primaryParagraph",
      l10n_util::GetStringFUTF16(
          IDS_MITM_SOFTWARE_PRIMARY_PARAGRAPH_NONENTERPRISE, escaped_name));
  load_time_data->SetStringKey(
      "explanationParagraph",
      l10n_util::GetStringFUTF16(
          IDS_MITM_SOFTWARE_EXPLANATION_NONENTERPRISE, escaped_name,
          l10n_util::GetStringUTF16(IDS_MITM_SOFTWARE_EXPLANATION)));
}
} // namespace security_interstitials
| 2,237 |
4,339 | <filename>modules/hibernate-5.3/src/test/java/org/apache/ignite/testsuites/IgniteHibernate53TestSuite.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.testsuites;
import org.apache.ignite.cache.hibernate.HibernateL2CacheConfigurationSelfTest;
import org.apache.ignite.cache.hibernate.HibernateL2CacheMultiJvmTest;
import org.apache.ignite.cache.hibernate.HibernateL2CacheSelfTest;
import org.apache.ignite.cache.hibernate.HibernateL2CacheStrategySelfTest;
import org.apache.ignite.cache.hibernate.HibernateL2CacheTransactionalSelfTest;
import org.apache.ignite.cache.hibernate.HibernateL2CacheTransactionalUseSyncSelfTest;
import org.apache.ignite.cache.store.hibernate.CacheHibernateBlobStoreNodeRestartTest;
import org.apache.ignite.cache.store.hibernate.CacheHibernateBlobStoreSelfTest;
import org.apache.ignite.cache.store.hibernate.CacheHibernateStoreFactorySelfTest;
import org.apache.ignite.cache.store.hibernate.CacheHibernateStoreSessionListenerSelfTest;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
/**
 * Hibernate integration tests.
 *
 * JUnit 4 suite bundling the Hibernate 5.3 L2-cache tests and the
 * Hibernate-backed cache-store tests listed below.
 */
@RunWith(Suite.class)
@Suite.SuiteClasses({
    HibernateL2CacheSelfTest.class,
    HibernateL2CacheTransactionalSelfTest.class,
    HibernateL2CacheTransactionalUseSyncSelfTest.class,
    HibernateL2CacheConfigurationSelfTest.class,
    HibernateL2CacheStrategySelfTest.class,
    HibernateL2CacheMultiJvmTest.class,
    CacheHibernateBlobStoreSelfTest.class,
    CacheHibernateBlobStoreNodeRestartTest.class,
    CacheHibernateStoreSessionListenerSelfTest.class,
    CacheHibernateStoreFactorySelfTest.class
})
public class IgniteHibernate53TestSuite {
}
| 791 |
435 | {
"copyright_text": null,
"description": "",
"duration": 2286,
"language": null,
"recorded": "2018-11-14",
"related_urls": [
{
"label": "Conference schedule",
"url": "http://pyparis.org/static/pdf/Schedule-PyParis-2018.pdf"
},
{
"label": "Conference web",
"url": "http://pyparis.org/"
},
{
"label": "slides",
"url": "http://pyparis.org/static/slides/Christopher%20Lennan+Tanuj%20Jain-475f1087.pdf"
}
],
"speakers": [
"<NAME>",
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/WEWl4cD-y00/maxresdefault.jpg",
"title": "Using Deep Learning to rank and tag millions of hotel images",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=WEWl4cD-y00"
}
]
}
| 374 |
1,003 | #include "math.hpp"
#include "muglm/muglm_impl.hpp"
#include "logging.hpp"
#include "bitops.hpp"
using namespace Granite;
// Ambient light stored as one RGB value per cube axis direction
// (+X, -X, +Y, -Y, +Z, -Z).
struct VolumeCube
{
	vec3 directions[6];
};

// Evaluates incoming radiance along direction n.
// The disabled (#if 0) path evaluates the ambient-cube basis from `cube`;
// the active path ignores `cube` and returns a hard-coded, sharply peaked
// directional light — presumably a debug stand-in for exercising the
// resampling below (confirm before re-enabling the cube path).
static vec3 sample_light(const VolumeCube &cube, vec3 n)
{
#if 0
	vec3 n2 = n * n;
	ivec3 index_offset = ivec3(lessThan(n, vec3(0.0f)));
	vec3 result = cube.directions[index_offset.x + 0] * n2.x +
	              cube.directions[index_offset.y + 2] * n2.y +
	              cube.directions[index_offset.z + 4] * n2.z;
	result *= 1.0f / pi<float>();
	return result;
#else
	(void)cube;
	const vec3 dir = normalize(vec3(0.0f, 1.0f, 1.0f));
	return vec3(100.0f, 50.0f, 25.0f) * pow(clamp(dot(n, dir), 0.0f, 1.0f), 100.0f);
#endif
}
// Numerically integrates incoming light over one patch of a cube face.
// The patch (origin pos_begin, spanned by pos_dx/pos_dy) is sampled on a
// Res x Res grid; each sample is weighted by its solid angle as seen from
// the cube center (dA / |n|^3 — see the inv_l * inv_l * inv_l factor) and
// accumulated into one column per major axis, weighted by |n| per axis.
mat3 integrate_patch(const VolumeCube &cube, vec3 pos_begin, vec3 pos_dx, vec3 pos_dy)
{
	constexpr unsigned Res = 64;
	mat3 contribution_per_major_axis = mat3(0.0f);
	for (unsigned y = 0; y < Res; y++)
	{
		for (unsigned x = 0; x < Res; x++)
		{
			// Sample at the texel center, then normalize onto the unit sphere.
			vec2 uv = vec2(x + 0.5f, y + 0.5f) / vec2(Res);
			vec3 n = pos_begin + uv.x * pos_dx + uv.y * pos_dy;
			float l2 = dot(n, n);
			float inv_l = inversesqrt(l2);
			// Solid-angle weight for this texel: dA * r^-3.
			float area = (1.0f / float(Res * Res)) * inv_l * inv_l * inv_l;
			n *= inv_l;
			vec3 col = sample_light(cube, n);
			// Split the contribution between the three major axes by |n|.
			vec3 hemisphere_area = abs(n) * area;
			contribution_per_major_axis[0] += col * hemisphere_area.x;
			contribution_per_major_axis[1] += col * hemisphere_area.y;
			contribution_per_major_axis[2] += col * hemisphere_area.z;
		}
	}
	return contribution_per_major_axis;
}
// Resamples the light field into a new 6-direction ambient cube: each cube
// face is split into 2x2 patches (24 patches total), every patch is
// integrated once, and each face then sums the patches that lie in its
// hemisphere, selected by a per-face bitmask.
static VolumeCube resample_cube(const VolumeCube &cube)
{
	// Outward normal of each cube face...
	static const vec3 base_dirs[6] = {
		vec3(1.0f, 0.0f, 0.0f),
		vec3(-1.0f, 0.0f, 0.0f),
		vec3(0.0f, 1.0f, 0.0f),
		vec3(0.0f, -1.0f, 0.0f),
		vec3(0.0f, 0.0f, 1.0f),
		vec3(0.0f, 0.0f, -1.0f),
	};
	// ...and the in-plane basis (right/down) used to walk over that face.
	static const vec3 right[6] = {
		vec3(0.0f, 0.0f, -1.0f),
		vec3(0.0f, 0.0f, +1.0f),
		vec3(1.0f, 0.0f, 0.0f),
		vec3(1.0f, 0.0f, 0.0f),
		vec3(1.0f, 0.0f, 0.0f),
		vec3(-1.0f, 0.0f, 0.0f),
	};
	static const vec3 downs[6] = {
		vec3(0.0f, -1.0f, 0.0f),
		vec3(0.0f, -1.0f, 0.0f),
		vec3(0.0f, 0.0f, +1.0f),
		vec3(0.0f, 0.0f, -1.0f),
		vec3(0.0f, -1.0f, 0.0f),
		vec3(0.0f, -1.0f, 0.0f),
	};
	// One integrated mat3 per patch: patch index = face * 4 + patch_y * 2 + patch_x.
	mat3 contributions[6 * 2 * 2];
	const auto M = [](unsigned p) { return 1u << p; };
	// For each destination face, the set of patches inside its hemisphere:
	// its own 4 patches plus 8 adjacent patches from neighboring faces.
	// NOTE(review): mask derivation assumed from the patch indexing above —
	// verify if the patch layout ever changes.
	const uint32_t patch_mask_per_face[6] = {
		(0xf << 0) | M(9) | M(11) | M(13) | M(15) | M(17) | M(19) | M(20) | M(22),
		(0xf << 4) | M(8) | M(10) | M(12) | M(14) | M(16) | M(18) | M(21) | M(23),
		(0xf << 8) | M(0) | M(1) | M(4) | M(5) | M(20) | M(21) | M(16) | M(17),
		(0xf << 12) | M(2) | M(3) | M(6) | M(7) | M(18) | M(19) | M(22) | M(23),
		(0xf << 16) | M(0) | M(2) | M(5) | M(7) | M(10) | M(11) | M(12) | M(13),
		(0xf << 20) | M(1) | M(3) | M(4) | M(6) | M(8) | M(9) | M(14) | M(15),
	};
	VolumeCube result = {};
	// Integrate all 24 patches once.
	for (unsigned face = 0; face < 6; face++)
	{
		for (int patch_y = 0; patch_y < 2; patch_y++)
		{
			for (int patch_x = 0; patch_x < 2; patch_x++)
			{
				vec3 pos = base_dirs[face] +
				           float(patch_x - 1) * right[face] +
				           float(patch_y - 1) * downs[face];
				contributions[face * 4 + patch_y * 2 + patch_x] = integrate_patch(cube, pos, right[face], downs[face]);
			}
		}
	}
	// Gather: sum each face's visible patches, picking the mat3 column for
	// this face's major axis (face >> 1), then normalize by pi.
	for (unsigned face = 0; face < 6; face++)
	{
		Util::for_each_bit(patch_mask_per_face[face], [&](unsigned bit) {
			result.directions[face] += contributions[bit][face >> 1u];
		});
		result.directions[face] *= 1.0f / pi<float>();
	}
	return result;
}
// Driver: starts from a mildly anisotropic ambient cube, resamples it three
// times, and prints the per-face values after each pass so convergence can
// be inspected by eye.
int main()
{
	VolumeCube cube = {};
	cube.directions[0] = vec3(1.0f, 0.75f, 0.75f);
	cube.directions[1] = vec3(0.5f, 0.75f, 0.75f);
	cube.directions[2] = vec3(0.75f, 1.0f, 0.75f);
	cube.directions[3] = vec3(0.75f, 0.5f, 0.75f);
	cube.directions[4] = vec3(0.75f, 0.75f, 1.0f);
	cube.directions[5] = vec3(0.75f, 0.75f, 0.5f);
	auto resampled_cube = resample_cube(cube);
	// Prints the current contents of resampled_cube (captured by reference).
	const auto log_cube = [&]() {
		printf("=====\n");
		for (unsigned face = 0; face < 6; face++)
		{
			printf("Face %u: (%.3f, %.3f, %.3f).\n", face,
			       resampled_cube.directions[face].x, resampled_cube.directions[face].y,
			       resampled_cube.directions[face].z);
		}
		printf("=====\n");
	};
	log_cube();
	resampled_cube = resample_cube(resampled_cube);
	log_cube();
	resampled_cube = resample_cube(resampled_cube);
	log_cube();
}
523 | #include "Hash.h"
using namespace SparCraft;
namespace SparCraft
{
namespace Hash
{
HashType unitIndexHash[Constants::Num_Players][Constants::Max_Units];
HashValues values[Constants::Num_Hashes];
}
}
// Hashes a (player, x, y) position by folding x then y into the per-player
// position seed with two rounds of hash32shift.
const HashType Hash::HashValues::positionHash(const size_t & player, const PositionType & x, const PositionType & y) const
{
    // return hash32shift(unitPositionHash[player] ^ ((x << 16) + y))
    return hash32shift(hash32shift(unitPositionHash[player] ^ x) ^ y);
}

// Fills every per-player seed table with pseudo-random values.
// NOTE(review): when Constants::Seed_Hash_Time is set, 0 is passed instead
// of `seed` — presumably RandomInt then seeds from the clock; confirm.
Hash::HashValues::HashValues(int seed)
{
    RandomInt rand(std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), Constants::Seed_Hash_Time ? 0 : seed);
    for (size_t p(0); p<Constants::Num_Players; ++p)
    {
        for (size_t u(0); u<Constants::Max_Units; ++u)
        {
            unitIndexHash[p][u] = rand.nextInt();
        }
        unitPositionHash[p] = rand.nextInt();
        timeCanAttackHash[p] = rand.nextInt();
        timeCanMoveHash[p] = rand.nextInt();
        unitTypeHash[p] = rand.nextInt();
        currentHPHash[p] = rand.nextInt();
    }
}

// Each getter mixes `value` into the corresponding per-player seed.
const HashType Hash::HashValues::getAttackHash (const size_t & player, const size_t & value) const
{
    return hash32shift(timeCanAttackHash[player] ^ value);
}
const HashType Hash::HashValues::getMoveHash (const size_t & player, const size_t & value) const { return hash32shift(timeCanMoveHash[player] ^ value); }
const HashType Hash::HashValues::getUnitTypeHash (const size_t & player, const size_t & value) const { return hash32shift(unitTypeHash[player] ^ value); }
const HashType Hash::HashValues::getCurrentHPHash (const size_t & player, const size_t & value) const { return hash32shift(currentHPHash[player] ^ value); }
// Bob Jenkins' "6-shift" 32-bit integer hash, applied to a size_t.
// NOTE(review): on 64-bit builds size_t is wider than 32 bits, so the
// intermediate values (and thus the results) differ from the 32-bit
// version of this function — confirm that is acceptable for callers.
const size_t Hash::jenkinsHash( size_t a)
{
    a = (a+0x7ed55d16) + (a<<12);
    a = (a^0xc761c23c) ^ (a>>19);
    a = (a+0x165667b1) + (a<<5);
    a = (a+0xd3a2646c) ^ (a<<9);
    a = (a+0xfd7046c5) + (a<<3);
    a = (a^0xb55a4f09) ^ (a>>16);
    return a;
}

// Initializes the global hash-value tables with two fixed seeds.
// NOTE(review): assumes Constants::Num_Hashes >= 2 — confirm.
void Hash::initHash()
{
    values[0] = HashValues(0);
    values[1] = HashValues(1);
}
// Thomas Wang's hash32shift integer mix.
//
// The mixing is done in unsigned arithmetic: the previous signed version
// invoked undefined behaviour (signed overflow, left-shifting negative
// values) and implementation-defined behaviour (right-shifting negative
// values), whereas the published algorithm relies on 32-bit wraparound and
// logical shifts. Assumes `unsigned int` is 32 bits, as the original
// already did ("32 bit integer hash").
int Hash::hash32shift(int key)
{
    unsigned int k = static_cast<unsigned int>(key);
    k = ~k + (k << 15); // k = (k << 15) - k - 1;
    k = k ^ (k >> 12);
    k = k + (k << 2);
    k = k ^ (k >> 4);
    k = k * 2057; // k = (k + (k << 3)) + (k << 11);
    k = k ^ (k >> 16);
    // Conversion back to int is two's-complement on all supported targets.
    return static_cast<int>(k);
}
// Folds an extra value into an existing hash with one hash32shift round.
const int Hash::jenkinsHashCombine(const HashType & hash, const int val)
{
    return hash32shift(hash ^ (HashType)val);
}

// Mixes a unit's per-(player, index) seed into `hash` (Zobrist-style update).
const size_t Hash::magicHash(const HashType & hash, const size_t & player, const size_t & index)
{
    return hash32shift(hash ^ unitIndexHash[player][index]);
}
| 1,007 |
346 | <reponame>johntconklin/monitor-core
#ifndef GANGLIA_GEXEC_H
#define GANGLIA_GEXEC_H 1
extern int gexec_errno;
#define GEXEC_TIMEOUT 60
#define GEXEC_HOST_STRING_LEN 256
/* Per-host state for one cluster node. */
struct gexec_host_t {
   char ip[64];                        /* IP address as text */
   char name[GEXEC_HOST_STRING_LEN];   /* host name */
   char domain[GEXEC_HOST_STRING_LEN]; /* domain part of the name */
   double load_one;                    /* load averages: 1 / 5 / 15 min */
   double load_five;
   double load_fifteen;
   double cpu_user;                    /* CPU time breakdown — presumably
                                          percentages; confirm against the
                                          reporting daemon */
   double cpu_nice;
   double cpu_system;
   double cpu_idle;
   double cpu_wio;
   unsigned int proc_run;              /* running / total process counts */
   unsigned int proc_total;
   unsigned int cpu_num;               /* number of CPUs */
   time_t last_reported;               /* when the host last reported in */
   int gexec_on;                       /* nonzero if gexec is enabled on the host */
   int name_resolved;                  /* nonzero once `name` has been resolved */
};
typedef struct gexec_host_t gexec_host_t;
/* Snapshot of one cluster's host lists. The three `void *` members are
 * opaque list handles — their element type is not visible in this header. */
typedef struct
{
   char name[256];               /* cluster name */
   time_t localtime;             /* cluster-local timestamp */
   unsigned int num_hosts;       /* entries in `hosts` */
   void *hosts;
   unsigned int num_gexec_hosts; /* entries in `gexec_hosts` */
   void *gexec_hosts;
   unsigned int num_dead_hosts;  /* entries in `dead_hosts` */
   void *dead_hosts;
   /* Used internally */
   int malloc_error;             /* set when an allocation failed during parsing */
   gexec_host_t *host;           /* host currently being filled in */
   int host_up;
   int start;
}
gexec_cluster_t;
int gexec_cluster_free ( gexec_cluster_t *cluster );
int gexec_cluster (gexec_cluster_t *cluster, char *ip, unsigned short port);
#endif
| 503 |
617 | # Copyright (c) Open Enclave SDK contributors.
# Licensed under the MIT License.
import lldb
import sys
def lldb_eval(expr):
    """Evaluate `expr` in the selected target; returns an SBValue."""
    return lldb.debugger.GetSelectedTarget().EvaluateExpression(expr)
def lldb_expr(expr):
    """Run `expr` through the debugger's `expr` command (side effects only)."""
    lldb.debugger.HandleCommand("expr " + expr)
def lldb_read_string(address):
    """Read a C string from the debuggee's memory at `address`.

    NOTE(review): the read is capped at 32 bytes, so longer strings (e.g.
    long enclave paths) are truncated — confirm this is intended.
    """
    process = lldb.debugger.GetSelectedTarget().GetProcess()
    return process.ReadCStringFromMemory(address, 32, lldb.SBError())
def lldb_quit():
    """Kill the debuggee process (used to abort the test on failure)."""
    process = lldb.debugger.GetSelectedTarget().GetProcess()
    process.Destroy()
def bp_main(frame, bp_loc, dict):
    """Breakpoint in main(): print the enclave path from argv[1].

    All callbacks return False so lldb continues without stopping.
    """
    print("** Hit breakpoint in main")
    argv_1 = lldb_eval("argv[1]")
    # argv_1.value is the pointer as a hex string; read the pointee.
    enclave = lldb_read_string(int(str(argv_1.value), 16))
    print("** enclave = " + enclave)
    return False
# Breakpoint in enc.c
def bp_enc_c_37(frame, bp_loc, dict):
    """Set `debugger_test` inside the enclave and verify the write took."""
    print("** Hit breakpoint in enclave")
    # Set debugger_test
    lldb_expr("debugger_test=1")
    debugger_test = lldb_eval("debugger_test")
    print("** debugger_test = %s" % debugger_test.value)
    if int(debugger_test.value) != 1:
        print("** Error: failed to set debugger_test")
        lldb_quit()
    return False
# Breakpoint in module_contructor
def bp_init_module(frame, bp_loc, dict):
    """Mark the module constructor as reached by setting `is_module_init`."""
    lldb_expr("is_module_init=1")
    return False
# Breakpoint by line number in module
def bp_module_c_19(frame, bp_loc, dict):
    """Verify that bp_init_module ran before this point in module.c."""
    # Check that value has been set
    is_module_init = lldb_eval("is_module_init")
    if int(is_module_init.value) != 1:
        print("** Error: is_module_init != 1")
        lldb_quit()
    print("** is_module_init = %s" % is_module_init.value)
    return False
# Breakpoint in module destructor
def bp_fini_module(frame, bp_loc, dict):
    """Mark the module destructor as reached via `module_fini`."""
    # Calls don't work with hardware mode.
    # lldb_expr("notify_module_done_wrapper()")
    lldb_expr("module_fini=1")
    return False
# Breakpoint in square function
def bp_module_c_32(frame, bp_loc, dict):
    """Compute the square's result from the debugger (r = a * a)."""
    lldb_expr("r = a * a")
    return False
# Another breakpoint to test variable lookup
def bp_module_c_44(frame, bp_loc, dict):
    """Exercise local-variable lookup by computing and printing `t`."""
    lldb_expr(" t = a + b + k")
    t = lldb_eval("t")
    print("t = %s" % t.value)
    return False
def run_test():
    """Install all breakpoints with their Python callbacks and run the
    debuggee twice, reporting pass/fail from its exit status."""
    lldb.debugger.SetAsync(False)
    target = lldb.debugger.GetSelectedTarget()
    bp = target.BreakpointCreateByName("main")
    bp.SetScriptCallbackFunction('commands.bp_main')
    bp = target.BreakpointCreateByLocation("enc.c", 37)
    bp.SetScriptCallbackFunction('commands.bp_enc_c_37')
    bp = target.BreakpointCreateByName("init_module")
    bp.SetScriptCallbackFunction('commands.bp_init_module')
    bp = target.BreakpointCreateByLocation("module.c", 19)
    bp.SetScriptCallbackFunction('commands.bp_module_c_19')
    bp = target.BreakpointCreateByName("fini_module")
    bp.SetScriptCallbackFunction('commands.bp_fini_module')
    bp = target.BreakpointCreateByLocation("module.c", 32)
    bp.SetScriptCallbackFunction('commands.bp_module_c_32')
    bp = target.BreakpointCreateByLocation("module.c", 44)
    bp.SetScriptCallbackFunction('commands.bp_module_c_44')
    # The `personality` syscall is used by lldb to turn off ASLR.
    # This syscall may not be permitted within containers.
    # Therefore, turn off disable-aslr.
    lldb.debugger.HandleCommand("settings set target.disable-aslr false")
    lldb.debugger.HandleCommand("run")
    # Run again to ensure that module is correctly unloaded/reloaded by debugger.
    lldb.debugger.HandleCommand("run")
    retval = lldb.debugger.GetSelectedTarget().GetProcess().exit_state
    if int(retval) == 0:
        print("oelldb-multi-module-test passed")
    else:
        print("oelldb-multi-module-test failed")
def __lldb_init_module(debugger, dict):
    # Entry point lldb calls when this script is imported as a module.
    run_test()
| 1,418 |
1,442 | #ifndef POINCARE_BINOMIAL_DISTRIBUTION_H
#define POINCARE_BINOMIAL_DISTRIBUTION_H
#include <poincare/expression.h>
#include <poincare/preferences.h>
namespace Poincare {
// Static helpers for the binomial distribution with parameters n and p,
// templated on the scalar type.
class BinomialDistribution final {
public:
  // Distribution value at abscissa x.
  template<typename T> static T EvaluateAtAbscissa(T x, T n, T p);
  // Cumulative distribution P(X <= x).
  template<typename T> static T CumulativeDistributiveFunctionAtAbscissa(T x, T n, T p);
  // Inverse CDF — presumably the smallest x with P(X <= x) >= probability;
  // confirm against the implementation.
  template<typename T> static T CumulativeDistributiveInverseForProbability(T probability, T n, T p);
  // Whether (n, p) are valid parameters for this distribution.
  template<typename T> static bool ParametersAreOK(T n, T p);
  /* ExpressionParametersAreOK returns true if the expression could be verified.
   * The result of the verification is *result. */
  static bool ExpressionParametersAreOK(bool * result, const Expression & n, const Expression & p, Context * context);
};
}
#endif
| 262 |
1,069 | <gh_stars>1000+
# coding: utf-8
import arrow
import datetime
import time
from logging import getLogger
# django classes
from django.conf import settings
from django.core.cache import caches
# django_th classes
from django_th.services.services import ServicesMgr
# th_rss classes
from th_rss.lib.feedsservice import Feeds
logger = getLogger('django_th.trigger_happy')
cache = caches['django_th']
class ServiceRss(ServicesMgr):
    """Trigger-happy service wrapper around RSS feeds."""

    def __init__(self, token=None, **kwargs):
        super(ServiceRss, self).__init__(token, **kwargs)

    def _get_published(self, entry):
        """Return the entry's publication date as a naive UTC datetime.

        The first of 'published_parsed', 'created_parsed' and
        'updated_parsed' that exists on the entry decides the result; None
        is returned when that attribute holds None (the feed's date was
        unparsable) or when none of the attributes exist.
        """
        for attr in ('published_parsed', 'created_parsed', 'updated_parsed'):
            if hasattr(entry, attr):
                parsed = getattr(entry, attr)
                if parsed is not None:
                    return datetime.datetime.utcfromtimestamp(time.mktime(parsed))
                # Attribute present but None: keep the historical behaviour
                # and do not fall through to the next attribute.
                break
        return None

    def read_data(self, **kwargs):
        """Fetch the trigger's RSS feed and collect the entries published
        between the last trigger time and now.

        :param kwargs: keyword args; 'trigger_id' and 'date_triggered' are
                       read here, the rest is forwarded to the parent lookup
        :type kwargs: dict
        :rtype: list
        """
        date_triggered = kwargs.get('date_triggered')
        trigger_id = kwargs.get('trigger_id')
        kwargs['model_name'] = 'Rss'
        kwargs['app_label'] = 'django_th'
        # Resolve the Rss model instance (it carries the feed URL).
        rss = super(ServiceRss, self).read_data(**kwargs)
        logger.debug("RSS Feeds from %s : url %s", rss.name, rss.url)
        now = arrow.utcnow().to(settings.TIME_ZONE)
        collected = []
        # Download and parse the feed.
        parsed_feed = Feeds(**{'url_to_parse': rss.url}).datas()
        for entry in parsed_feed.entries:
            # The *_parsed attributes may be None when the feed's date is
            # invalid, in which case the entry is skipped.
            published = self._get_published(entry)
            if published:
                published = arrow.get(str(published)).to(settings.TIME_ZONE)
                date_triggered = arrow.get(str(date_triggered)).to(settings.TIME_ZONE)
                if date_triggered is not None and published is not None and now >= published >= date_triggered:
                    collected.append(entry)
                    # digester
                    self.send_digest_event(trigger_id, entry.title, entry.link)
        cache.set('th_rss_' + str(trigger_id), collected)
        cache.set('th_rss_uuid_{}'.format(rss.uuid), collected)
        # return the data
        return collected
| 1,316 |
3,459 | <reponame>ianclawson/Provenance
/* Common definitions for the PC Engine (fast) emulation core. */
#ifndef _PCE_H
/* Define the guard immediately after testing it. The original file defined
   _PCE_H at the very bottom, which allows infinite recursive inclusion if a
   header included below (e.g. huc6280.h) ever includes this file again. */
#define _PCE_H

#include <mednafen/mednafen.h>
#include <mednafen/state.h>
#include <mednafen/general.h>
#include <mednafen/memory.h>

using namespace Mednafen;

#define PCE_MASTER_CLOCK        21477272.727273

/* Helpers for declaring memory-map read/write handlers. */
#define DECLFR(x) uint8 MDFN_FASTCALL x (uint32 A)
#define DECLFW(x) void MDFN_FASTCALL x (uint32 A, uint8 V)

namespace MDFN_IEN_PCE_FAST
{
MDFN_HIDE extern uint8 ROMSpace[0x88 * 8192 + 8192];

typedef void (MDFN_FASTCALL *writefunc)(uint32 A, uint8 V);
typedef uint8 (MDFN_FASTCALL *readfunc)(uint32 A);

MDFN_HIDE extern uint8 PCEIODataBuffer;

void PCE_InitCD(void) MDFN_COLD;

};

#include "huc6280.h"

namespace MDFN_IEN_PCE_FAST
{
MDFN_HIDE extern bool PCE_ACEnabled; // Arcade Card emulation enabled?
void PCE_Power(void) MDFN_COLD;

MDFN_HIDE extern int pce_overclocked;

MDFN_HIDE extern uint8 BaseRAM[32768 + 8192];

};

using namespace MDFN_IEN_PCE_FAST;

#endif
| 438 |
879 | <filename>header/src/main/java/org/zstack/header/vm/BeforeStartNewCreatedVmExtensionPoint.java
package org.zstack.header.vm;

/**
 * Extension point invoked before a newly created VM instance is started.
 *
 * Created by xing5 on 2016/5/20.
 */
public interface BeforeStartNewCreatedVmExtensionPoint {
    /**
     * Called before the new VM is started.
     *
     * @param spec the specification of the VM instance about to start
     */
    void beforeStartNewCreatedVm(VmInstanceSpec spec);
}
| 98 |
12,020 | <filename>docs/project.json
{
"root": "docs",
"sourceRoot": "docs",
"projectType": "library",
"tags": ["scope:nx-dev"]
}
| 52 |
575 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This tool is used to benchmark the render model used by the compositor
// Most of this file is derived from the source of the tile_render_bench tool,
// and has been changed to support running a sequence of independent
// simulations for our different render models and test cases.
#include <stdio.h>
#include <sys/dir.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string>
#include <utility>
#include <vector>
#include "base/at_exit.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/containers/queue.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/location.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/task/single_thread_task_executor.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "gpu/tools/compositor_model_bench/render_model_utils.h"
#include "gpu/tools/compositor_model_bench/render_models.h"
#include "gpu/tools/compositor_model_bench/render_tree.h"
#include "ui/base/x/x11_util.h"
#include "ui/gfx/x/connection.h"
#include "ui/gfx/x/glx.h"
#include "ui/gfx/x/xproto.h"
#include "ui/gfx/x/xproto_util.h"
#include "ui/gl/glx_util.h"
#include "ui/gl/init/gl_factory.h"
using base::DirectoryExists;
using base::PathExists;
using base::TimeTicks;
using std::string;
// One queued simulation run plus its accumulated results.
struct SimulationSpecification {
  string simulation_name;            // Derived from the config file name (ASCII only).
  base::FilePath input_path;         // JSON render-tree configuration file.
  RenderModel model_under_test;      // Which render model this run exercises.
  TimeTicks simulation_start_time;   // Null until the run actually starts.
  int frames_rendered;               // Result: frames drawn during the run.
};
// Forward declarations
class Simulator;
// NOTE(review): the two free functions below are declared but never defined
// or referenced anywhere in this file — likely leftovers from an earlier
// revision; candidates for removal.
void _process_events(Simulator* sim);
void _update_loop(Simulator* sim);
// Drives the benchmark: owns the X11 window and GLX context, steps each
// queued SimulationSpecification for |seconds_per_test_| seconds on the
// task loop, and writes per-simulation frame counts as JSON to
// |output_path_|.
class Simulator {
 public:
  Simulator(int seconds_per_test, const base::FilePath& output_path)
      : output_path_(output_path),
        seconds_per_test_(seconds_per_test),
        gl_context_(nullptr),
        window_width_(WINDOW_WIDTH),
        window_height_(WINDOW_HEIGHT) {}

  ~Simulator() {
    // Cleanup GL.
    auto display = connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing);
    glXMakeCurrent(display, 0, nullptr);
    glXDestroyContext(display, gl_context_);

    // The window and X11 connection will be cleaned up when connection_ is
    // destroyed.
  }

  // Adds a simulation for |path| (a JSON render-tree configuration file)
  // to the run queue. Parsing is deferred until the run starts.
  void QueueTest(const base::FilePath& path) {
    SimulationSpecification spec;

    // To get a std::string, we'll try to get an ASCII simulation name.
    // If the name of the file wasn't ASCII, this will give an empty simulation
    // name, but that's not really harmful (we'll still warn about it though.)
    spec.simulation_name = path.BaseName().RemoveExtension().MaybeAsASCII();
    if (spec.simulation_name.empty()) {
      LOG(WARNING) << "Simulation for path " << path.LossyDisplayName()
                   << " will have a blank simulation name, since the file name "
                      "isn't ASCII";
    }
    spec.input_path = path;
    spec.model_under_test = ForwardRenderModel;
    spec.frames_rendered = 0;

    sims_remaining_.push(spec);

    // The following lines are commented out pending the addition
    // of the new render model once this version gets fully checked in.
    //
    // spec.model_under_test = KDTreeRenderModel;
    // sims_remaining_.push(spec);
  }

  // Sets up the GUI and GL, then pumps the task loop until every queued
  // simulation has completed. No-op (with a warning) on an empty queue.
  void Run() {
    if (sims_remaining_.empty()) {
      LOG(WARNING) << "No configuration files loaded.";
      return;
    }

    base::AtExitManager at_exit;
    if (!InitX11() || !InitGLContext()) {
      LOG(FATAL) << "Failed to set up GUI.";
    }

    // NOTE(review): InitBuffers() is declared outside this file —
    // presumably GL buffer setup from the render-tree utilities; confirm.
    InitBuffers();

    LOG(INFO) << "Running " << sims_remaining_.size() << " simulations.";

    single_thread_task_executor_.task_runner()->PostTask(
        FROM_HERE,
        base::BindOnce(&Simulator::ProcessEvents, weak_factory_.GetWeakPtr()));
    run_loop_.Run();
  }

  // Drains pending X11 events; Expose drives a frame, ConfigureNotify
  // resizes the simulation viewport.
  void ProcessEvents() {
    // Consume all the X events.
    connection_->Flush();
    connection_->ReadResponses();
    auto& events = connection_->events();
    while (!events.empty()) {
      auto event = std::move(events.front());
      events.pop_front();
      if (event.As<x11::ExposeEvent>())
        UpdateLoop();
      else if (auto* configure = event.As<x11::ConfigureNotifyEvent>())
        Resize(configure->width, configure->height);
    }
  }

  // One iteration: advance test bookkeeping, then render a frame if a
  // simulation is still active.
  void UpdateLoop() {
    if (UpdateTestStatus())
      UpdateCurrentTest();
  }

 private:
  // Initialize X11. Returns true if successful. This method creates the
  // X11 window. Further initialization is done in X11VideoRenderer.
  bool InitX11() {
    connection_ = std::make_unique<x11::Connection>();
    if (!connection_->Ready()) {
      LOG(FATAL) << "Cannot open X11 connection";
      return false;
    }

    // Creates the window.
    auto black_pixel = connection_->default_screen().black_pixel;
    window_ = connection_->GenerateId<x11::Window>();
    connection_->CreateWindow({
        .wid = window_,
        .parent = connection_->default_root(),
        .x = 1,
        .y = 1,
        .width = window_width_,
        .height = window_height_,
        .background_pixel = black_pixel,
        .border_pixel = black_pixel,
        .event_mask = x11::EventMask::Exposure | x11::EventMask::KeyPress |
                      x11::EventMask::StructureNotify,
    });
    x11::SetStringProperty(window_, x11::Atom::WM_NAME, x11::Atom::STRING,
                           "Compositor Model Bench");

    connection_->MapWindow({window_});

    connection_->ConfigureWindow({
        .window = window_,
        .width = WINDOW_WIDTH,
        .height = WINDOW_HEIGHT,
    });

    return true;
  }

  // Initialize the OpenGL context.
  bool InitGLContext() {
    if (!gl::init::InitializeGLOneOff()) {
      LOG(FATAL) << "gl::init::InitializeGLOneOff failed";
      return false;
    }

    auto* glx_config = gl::GetFbConfigForWindow(connection_.get(), window_);
    if (!glx_config)
      return false;
    auto* visual =
        glXGetVisualFromFBConfig(connection_->GetXlibDisplay(), glx_config);
    DCHECK(visual);

    gl_context_ = glXCreateContext(
        connection_->GetXlibDisplay(x11::XlibDisplayType::kSyncing), visual,
        nullptr, true /* Direct rendering */);
    if (!gl_context_)
      return false;

    auto display = connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing);
    if (!glXMakeCurrent(display, static_cast<uint32_t>(window_), gl_context_)) {
      glXDestroyContext(display, gl_context_);
      gl_context_ = nullptr;
      return false;
    }

    return true;
  }

  // Parses the next queued test's configuration and builds its simulation
  // model. Returns false when the configuration file cannot be parsed.
  bool InitializeNextTest() {
    SimulationSpecification& spec = sims_remaining_.front();
    LOG(INFO) << "Initializing test for " << spec.simulation_name << "("
              << ModelToString(spec.model_under_test) << ")";

    const base::FilePath& path = spec.input_path;
    std::unique_ptr<RenderNode> root = BuildRenderTreeFromFile(path);
    if (!root) {
      LOG(ERROR) << "Couldn't parse test configuration file "
                 << path.LossyDisplayName();
      return false;
    }
    current_sim_ = ConstructSimulationModel(
        spec.model_under_test, std::move(root), window_width_, window_height_);

    return !!current_sim_;
  }

  // Releases the current simulation model after its time slice ends.
  void CleanupCurrentTest() {
    LOG(INFO) << "Finished test " << sims_remaining_.front().simulation_name;

    current_sim_.reset();
  }

  // Renders one frame, swaps buffers, and re-posts an Expose event plus the
  // next UpdateLoop task so rendering continues.
  void UpdateCurrentTest() {
    ++sims_remaining_.front().frames_rendered;

    if (current_sim_)
      current_sim_->Update();

    glXSwapBuffers(connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing),
                   static_cast<uint32_t>(window_));

    auto window = static_cast<x11::Window>(window_);
    x11::ExposeEvent ev{
        .window = window,
        .width = WINDOW_WIDTH,
        .height = WINDOW_HEIGHT,
    };
    x11::SendEvent(ev, window, x11::EventMask::Exposure);

    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        base::BindOnce(&Simulator::UpdateLoop, weak_factory_.GetWeakPtr()));
  }

  // Writes the completed-simulation results to |output_path_| as JSON and
  // drains |sims_completed_|. Exits the process on file-open failure.
  void DumpOutput() {
    LOG(INFO) << "Successfully ran " << sims_completed_.size() << " tests";

    FILE* f = base::OpenFile(output_path_, "w");

    if (!f) {
      LOG(ERROR) << "Failed to open output file "
                 << output_path_.LossyDisplayName();
      exit(-1);
    }

    LOG(INFO) << "Writing results to " << output_path_.LossyDisplayName();

    fputs("{\n\t\"results\": [\n", f);

    while (sims_completed_.size()) {
      SimulationSpecification i = sims_completed_.front();
      fprintf(f,
              "\t\t{\"simulation_name\":\"%s\",\n"
              "\t\t\t\"render_model\":\"%s\",\n"
              "\t\t\t\"frames_drawn\":%d\n"
              "\t\t},\n",
              i.simulation_name.c_str(), ModelToString(i.model_under_test),
              i.frames_rendered);
      sims_completed_.pop();
    }

    fputs("\t]\n}", f);
    base::CloseFile(f);
  }

  // Advances the queue: retires a test whose time slice has expired, starts
  // the next one, and quits the run loop when nothing remains. Returns true
  // while there is still an active simulation to render.
  bool UpdateTestStatus() {
    TimeTicks& current_start = sims_remaining_.front().simulation_start_time;
    base::TimeDelta d = TimeTicks::Now() - current_start;
    if (!current_start.is_null() && d.InSeconds() > seconds_per_test_) {
      CleanupCurrentTest();
      sims_completed_.push(sims_remaining_.front());
      sims_remaining_.pop();
    }

    if (sims_remaining_.size() &&
        sims_remaining_.front().simulation_start_time.is_null()) {
      // Skip over tests whose configuration fails to initialize.
      while (sims_remaining_.size() && !InitializeNextTest()) {
        sims_remaining_.pop();
      }
      if (sims_remaining_.size()) {
        sims_remaining_.front().simulation_start_time = TimeTicks::Now();
      }
    }

    if (sims_remaining_.empty()) {
      DumpOutput();
      run_loop_.QuitWhenIdle();
      return false;
    }

    return true;
  }

  // Tracks the new window size and forwards it to the active simulation.
  void Resize(int width, int height) {
    window_width_ = width;
    window_height_ = height;
    if (current_sim_)
      current_sim_->Resize(window_width_, window_height_);
  }

  base::SingleThreadTaskExecutor single_thread_task_executor_;
  base::RunLoop run_loop_;

  // Simulation task list for this execution
  std::unique_ptr<RenderModelSimulator> current_sim_;
  base::queue<SimulationSpecification> sims_remaining_;
  base::queue<SimulationSpecification> sims_completed_;
  base::FilePath output_path_;
  // Amount of time to run each simulation
  int seconds_per_test_;
  // GUI data
  std::unique_ptr<x11::Connection> connection_;
  x11::Window window_ = x11::Window::None;
  GLXContext gl_context_;
  int window_width_;
  int window_height_;
  base::WeakPtrFactory<Simulator> weak_factory_{this};
};
// Entry point: parses --in/--out (and optional --duration), queues one
// simulation per configuration file found at the input path, and runs them.
int main(int argc, char* argv[]) {
  base::CommandLine::Init(argc, argv);
  const base::CommandLine* cl = base::CommandLine::ForCurrentProcess();

  // Exactly --in and --out are required; --duration is optional.
  if (argc != 3 && argc != 4) {
    // Fixed: a space was missing between the program name and "--in", and
    // the optional switch is actually spelled --duration (see HasSwitch
    // below), not bare "duration".
    LOG(INFO)
        << "Usage: \n"
        << cl->GetProgram().BaseName().LossyDisplayName()
        << " --in=[input path] --out=[output path] (--duration=[seconds])\n"
           "The input path specifies either a JSON configuration file or\n"
           "a directory containing only these files\n"
           "(if a directory is specified, simulations will be run for\n"
           "all files in that directory and subdirectories)\n"
           "The optional duration parameter specifies the (integer)\n"
           "number of seconds to be spent on each simulation.\n"
           "Performance measurements for the specified simulation(s) are\n"
           "written to the output path.";
    return -1;
  }

  // Default to one second per simulation when --duration is absent.
  int seconds_per_test = 1;
  if (cl->HasSwitch("duration")) {
    seconds_per_test = atoi(cl->GetSwitchValueASCII("duration").c_str());
  }

  Simulator sim(seconds_per_test, cl->GetSwitchValuePath("out"));
  base::FilePath inPath = cl->GetSwitchValuePath("in");

  if (!PathExists(inPath)) {
    LOG(FATAL) << "Path does not exist: " << inPath.LossyDisplayName();
    return -1;  // Unreachable (LOG(FATAL) aborts); keeps compilers happy.
  }

  if (DirectoryExists(inPath)) {
    LOG(INFO) << "(input path is a directory)";
    // Queue every file found under the directory, recursively.
    base::FileEnumerator dirItr(inPath, true, base::FileEnumerator::FILES);
    for (base::FilePath f = dirItr.Next(); !f.empty(); f = dirItr.Next()) {
      sim.QueueTest(f);
    }
  } else {
    LOG(INFO) << "(input path is a file)";
    sim.QueueTest(inPath);
  }

  sim.Run();
  return 0;
}
| 4,848 |
594 | <reponame>enfoTek/tomato.linksys.e2000.nvram-mod
/* PR optimization/6177
   This testcase ICEd because expr.c did not expect to see a CONCAT
   as array rtl.
   NOTE: this is a GCC regression test — the exact shape of the code
   (a complex array element written via __real__/__imag__ and passed
   through __builtin_conjf) is what exercises the bug; do not restyle. */

extern void abort (void);
extern void exit (int);

/* Builds 1+1i in a one-element complex array and returns its conjugate. */
__complex__ float foo (void)
{
  __complex__ float f[1];
  __real__ f[0] = 1.0;
  __imag__ f[0] = 1.0;
  f[0] = __builtin_conjf (f[0]);
  return f[0];
}

/* Verifies that foo() returned 1-1i; aborts on miscompilation. */
int main (void)
{
  __complex__ double d[1];
  d[0] = foo ();
  if (__real__ d[0] != 1.0
      || __imag__ d[0] != -1.0)
    abort ();
  exit (0);
}
| 233 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.