max_stars_count (int64, 301 - 224k) | text (string, lengths 6 - 1.05M) | token_count (int64, 3 - 727k) |
---|---|---|
339 | <reponame>glipR/pygame_gui<gh_stars>100-1000
import pygame
from tests.shared_fixtures import _init_pygame, default_ui_manager, default_display_surface, \
_display_surface_return_none
from pygame_gui.core.drawable_shapes.rect_drawable_shape import RectDrawableShape
from pygame_gui.ui_manager import UIManager
class TestRectDrawableShape:
def test_creation(self, _init_pygame, default_ui_manager: UIManager):
RectDrawableShape(containing_rect=pygame.Rect(0, 0, 100, 100),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 0,
'border_width': 0,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
def test_full_rebuild_on_size_change_negative_values(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 100, 100),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': -10,
'border_width': -10,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
shape.full_rebuild_on_size_change()
def test_full_rebuild_on_size_change_large(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 25, 25),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 20,
'border_width': 20,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
shape.full_rebuild_on_size_change()
def test_full_rebuild_on_size_change_large_shadow(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 2, 2),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 1,
'border_width': 0,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
shape.full_rebuild_on_size_change()
def test_collide_point(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 100, 100),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 0,
'border_width': 0,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
assert shape.collide_point((50, 50)) is True
def test_set_position(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 100, 100),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 0,
'border_width': 0,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
shape.set_position((50, 50))
assert shape.containing_rect.topleft == (50, 50)
def test_set_dimensions(self, _init_pygame, default_ui_manager: UIManager):
shape = RectDrawableShape(containing_rect=pygame.Rect(0, 0, 100, 100),
theming_parameters={'text': 'test',
'font': default_ui_manager.get_theme().get_font([]),
'shadow_width': 0,
'border_width': 0,
'normal_border': pygame.Color('#FFFFFF'),
'normal_bg': pygame.Color('#000000'),
'text_horiz_alignment': 'center',
'text_vert_alignment': 'center'},
states=['normal'], manager=default_ui_manager)
shape.set_dimensions((50, 50))
assert shape.containing_rect.width == 50
| 4,568 |
698 | """Graph execution time for serial, threaded and processes forms of Pi estimation with lists"""
import numpy as np
import matplotlib.pyplot as plt
# timings generated using
# pi_lists_parallel
# ragged rows: newer NumPy releases require an explicit object dtype here
speeds = np.array([[110.0],
[118.0, 144.0, 149.0, 150.0],
[110.0, 55.0, 28.0, 27.0]], dtype=object)
nbr_cores = np.array([[1],
[1, 2, 4, 8],
[1, 2, 4, 8]], dtype=object)
labels = np.array(["Series", "Threads", "Processes"])
plt.figure(1)
plt.clf()
markers = ['-.o', '--x', '-x']
for nc, sp, label, mk in zip(nbr_cores, speeds, labels, markers):
plt.plot(nc, sp, mk, label=label, linewidth=2)
plt.annotate("Series and 1 Process have the same execution time",
(nbr_cores[0][0] + 0.1, speeds[0][0]))
plt.legend(loc="lower left", framealpha=0.8)
plt.ylim(20, 155)
plt.xlim(0.5, 8.5)
plt.ylabel("Execution time (seconds) - smaller is better")
plt.xlabel("Number of workers")
plt.title(
"Time to estimate Pi using objects with 100,000,000\ndart throws in series, threaded and with processes")
# plt.grid()
# plt.show()
plt.tight_layout()
plt.savefig("08_pi_lists_graph_speed_tests_threaded_processes.png")
| 530 |
363 | <reponame>ajisaka/ai-research-keyphrase-extraction
# Copyright (c) 2017-present, Swisscom (Schweiz) AG.
# All rights reserved.
#
#Authors: <NAME>, <NAME>
"""Module containing helper function to process results of a solr query"""
def process_tagged_text(s):
"""
Return a tagged_text as a list of sentences, where each sentence is a list of (word, tag) tuples
:param s: string tagged_text coming from solr word1|tag1 word2|tag2[ENDSENT]word3|tag3 ...
:return: (list of list of tuple) list of sentences, where each sentence is a list of (word, tag) tuples
"""
def str2tuple(tagged_token_text, sep='|'):
loc = tagged_token_text.rfind(sep)
if loc >= 0:
return tagged_token_text[:loc], tagged_token_text[loc + len(sep):]
else:
raise RuntimeError('Problem when parsing tagged token '+tagged_token_text)
result = []
for sent in s.split('[ENDSENT]'):
sent = [str2tuple(tagged_token) for tagged_token in sent.split(' ')]
result.append(sent)
return result
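# Illustrative usage only (this __main__ block is not part of the original module);
# the sample string below is hypothetical and simply follows the Solr tag format
# described in the docstring above.
if __name__ == '__main__':
    sample = 'The|DT cat|NN sat|VBD[ENDSENT]It|PRP purred|VBD'
    # Expected: [[('The', 'DT'), ('cat', 'NN'), ('sat', 'VBD')], [('It', 'PRP'), ('purred', 'VBD')]]
    print(process_tagged_text(sample))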
| 397 |
794 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LeNet-5 on (Fashion) MNIST."""
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import uncertainty_baselines as ub
import utils # local file import
flags.DEFINE_enum('dataset', 'mnist',
enum_values=['mnist', 'fashion_mnist'],
help='Name of the image dataset.')
flags.DEFINE_integer('ensemble_size', 1, 'Number of ensemble members.')
flags.DEFINE_boolean('bootstrap', False,
'Sample the training set for bootstrapping.')
flags.DEFINE_integer('training_steps', 5000, 'Training steps.')
flags.DEFINE_integer('batch_size', 256, 'Batch size.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_integer('validation_freq', 5, 'Validation frequency in steps.')
flags.DEFINE_string('output_dir', '/tmp/det_training',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
FLAGS = flags.FLAGS
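# Example invocation, for orientation only. The script file name below is an
# assumption (this dump does not give the file's path); the flag values are
# arbitrary but use only the flags defined above:
#   python lenet5.py --dataset=fashion_mnist --ensemble_size=4 --bootstrap=True \
#       --training_steps=5000 --output_dir=/tmp/det_training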
def lenet5(input_shape, num_classes):
"""Builds LeNet5."""
inputs = tf.keras.layers.Input(shape=input_shape)
conv1 = tf.keras.layers.Conv2D(6,
kernel_size=5,
padding='SAME',
activation='relu')(inputs)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
strides=[2, 2],
padding='SAME')(conv1)
conv2 = tf.keras.layers.Conv2D(16,
kernel_size=5,
padding='SAME',
activation='relu')(pool1)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
strides=[2, 2],
padding='SAME')(conv2)
conv3 = tf.keras.layers.Conv2D(120,
kernel_size=5,
padding='SAME',
activation=tf.nn.relu)(pool2)
flatten = tf.keras.layers.Flatten()(conv3)
dense1 = tf.keras.layers.Dense(84, activation=tf.nn.relu)(flatten)
logits = tf.keras.layers.Dense(num_classes)(dense1)
outputs = tf.keras.layers.Lambda(lambda x: ed.Categorical(logits=x))(logits)
return tf.keras.Model(inputs=inputs, outputs=outputs)
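# A hypothetical instantiation, for orientation only: MNIST images are 28x28
# grayscale with 10 classes, so the builder above would typically be called as
#   model = lenet5(input_shape=(28, 28, 1), num_classes=10)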
def main(argv):
del argv # unused arg
if not FLAGS.use_gpu:
raise ValueError('Only GPU is currently supported.')
if FLAGS.num_cores > 1:
raise ValueError('Only a single accelerator is currently supported.')
np.random.seed(FLAGS.seed)
tf.random.set_seed(FLAGS.seed)
tf.io.gfile.makedirs(FLAGS.output_dir)
if FLAGS.dataset == 'mnist':
dataset_builder_class = ub.datasets.MnistDataset
else:
dataset_builder_class = ub.datasets.FashionMnistDataset
n_train = 50000
train_dataset = next(dataset_builder_class(
'train').load(batch_size=n_train).as_numpy_iterator())
x_train = train_dataset['features']
y_train = train_dataset['labels']
test_dataset = next(dataset_builder_class(
'test').load(batch_size=10000).as_numpy_iterator())
x_test = test_dataset['features']
y_test = test_dataset['labels']
num_classes = int(np.amax(y_train)) + 1
# Note that we need to disable v2 behavior after we load the data.
tf1.disable_v2_behavior()
ensemble_filenames = []
for i in range(FLAGS.ensemble_size):
# TODO(trandustin): We re-build the graph for each ensemble member. This
# is due to an unknown bug where the variables are otherwise not
# re-initialized to be random. While this is inefficient in graph mode, I'm
# keeping this for now as we'd like to move to eager mode anyways.
model = lenet5(x_train.shape[1:], num_classes)
def negative_log_likelihood(y, rv_y):
del rv_y # unused arg
return -model.output.distribution.log_prob(tf.squeeze(y)) # pylint: disable=cell-var-from-loop
def accuracy(y_true, y_sample):
del y_sample # unused arg
return tf.equal(
tf.argmax(input=model.output.distribution.logits, axis=1), # pylint: disable=cell-var-from-loop
tf.cast(tf.squeeze(y_true), tf.int64))
def log_likelihood(y_true, y_sample):
del y_sample # unused arg
return model.output.distribution.log_prob(tf.squeeze(y_true)) # pylint: disable=cell-var-from-loop
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=FLAGS.learning_rate),
loss=negative_log_likelihood,
metrics=[log_likelihood, accuracy])
member_dir = os.path.join(FLAGS.output_dir, 'member_' + str(i))
tensorboard = tf1.keras.callbacks.TensorBoard(
log_dir=member_dir,
update_freq=FLAGS.batch_size * FLAGS.validation_freq)
if FLAGS.bootstrap:
inds = np.random.choice(n_train, n_train, replace=True)
x_sampled = x_train[inds]
y_sampled = y_train[inds]
model.fit(
x=x_train if not FLAGS.bootstrap else x_sampled,
y=y_train if not FLAGS.bootstrap else y_sampled,
batch_size=FLAGS.batch_size,
epochs=(FLAGS.batch_size * FLAGS.training_steps) // n_train,
validation_data=(x_test, y_test),
validation_freq=max(
(FLAGS.validation_freq * FLAGS.batch_size) // n_train, 1),
verbose=1,
callbacks=[tensorboard])
member_filename = os.path.join(member_dir, 'model.weights')
ensemble_filenames.append(member_filename)
model.save_weights(member_filename)
labels = tf.keras.layers.Input(shape=y_train.shape[1:])
ll = tf.keras.backend.function([model.input, labels], [
model.output.distribution.log_prob(tf.squeeze(labels)),
model.output.distribution.logits,
])
ensemble_metrics_vals = {
'train': utils.ensemble_metrics(
x_train, y_train, model, ll, weight_files=ensemble_filenames),
'test': utils.ensemble_metrics(
x_test, y_test, model, ll, weight_files=ensemble_filenames),
}
for split, metrics in ensemble_metrics_vals.items():
logging.info(split)
for metric_name in metrics:
logging.info('%s: %s', metric_name, metrics[metric_name])
if __name__ == '__main__':
app.run(main)
| 3,060 |
831 | <gh_stars>100-1000
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.ui.resourcechooser;
import com.intellij.ui.ColorUtil;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.event.MouseEvent;
import java.awt.event.MouseListener;
import junit.framework.TestCase;
public class ColorPickerTest extends TestCase {
public void testColorWheelResizeAndSelect() {
ColorPicker.SaturationBrightnessComponent saturationBrightnessComponent = new ColorPicker.SaturationBrightnessComponent();
saturationBrightnessComponent.setSize(new Dimension(1010, 710));
saturationBrightnessComponent.setHue(0.75f);
saturationBrightnessComponent.setOpacity(100);
MouseEvent event = new MouseEvent(saturationBrightnessComponent, MouseEvent.MOUSE_CLICKED, 1, MouseEvent.BUTTON1, 805, 505, 1, false);
for (MouseListener mouseListener : saturationBrightnessComponent.getMouseListeners()) {
mouseListener.mousePressed(event);
}
Color expectedColor = ColorUtil.toAlpha(Color.getHSBColor(0.75f, 0.8f, 1.0f - 5.0f / 7.0f), 100);
assertEquals(expectedColor, saturationBrightnessComponent.getColor());
saturationBrightnessComponent.setSize(new Dimension(1510, 1010));
saturationBrightnessComponent.setHue(0.0f);
saturationBrightnessComponent.setOpacity(100);
event = new MouseEvent(saturationBrightnessComponent, MouseEvent.MOUSE_CLICKED, 1, MouseEvent.BUTTON1, 1505, 1005, 1, false);
for (MouseListener mouseListener : saturationBrightnessComponent.getMouseListeners()) {
mouseListener.mousePressed(event);
}
expectedColor = ColorUtil.toAlpha(Color.BLACK, 100);
assertEquals(expectedColor, saturationBrightnessComponent.getColor());
saturationBrightnessComponent.setSize(new Dimension(1510, 1010));
saturationBrightnessComponent.setHue(0.0f);
saturationBrightnessComponent.setOpacity(100);
event = new MouseEvent(saturationBrightnessComponent, MouseEvent.MOUSE_CLICKED, 1, MouseEvent.BUTTON1, 1505, 5, 1, false);
for (MouseListener mouseListener : saturationBrightnessComponent.getMouseListeners()) {
mouseListener.mousePressed(event);
}
expectedColor = ColorUtil.toAlpha(Color.RED, 100);
assertEquals(expectedColor, saturationBrightnessComponent.getColor());
}
}
| 885 |
645 | <reponame>AlexShypula/stoke
// Copyright 2013-2016 Stanford University
//
// Licensed under the Apache License, Version 2.0 (the License);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an AS IS BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
namespace stoke {
class ValidatorDecTest : public StraightLineValidatorTest {};
TEST_F(ValidatorDecTest, Issue280) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2d01;
check_circuit(cs);
}
TEST_F(ValidatorDecTest, Issue280_2) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2d00;
check_circuit(cs);
}
TEST_F(ValidatorDecTest, Issue280_3) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2dff;
check_circuit(cs);
}
TEST_F(ValidatorDecTest, Issue280_4) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2d81;
check_circuit(cs);
}
TEST_F(ValidatorDecTest, Issue280_5) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2d80;
check_circuit(cs);
}
TEST_F(ValidatorDecTest, Issue280_6) {
target_ << ".foo:" << std::endl;
target_ << "decb %al" << std::endl;
target_ << "retq" << std::endl;
CpuState cs;
cs.gp[x64asm::ax].get_fixed_quad(0) = 0x2d7f;
check_circuit(cs);
}
} //namespace stoke
| 843 |
841 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.process.workitem.jms;
import java.io.ByteArrayInputStream;
import java.io.ObjectInputStream;
import javax.jms.BytesMessage;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.Message;
import javax.jms.MessageProducer;
import javax.jms.Session;
import org.drools.core.process.instance.impl.WorkItemImpl;
import org.jbpm.process.workitem.core.TestWorkItemManager;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class JMSSendTaskWorkItemHandlerTest {
@Mock
ConnectionFactory connectionFactory;
@Mock
Destination destination;
@Mock
Connection connection;
@Mock
Session session;
@Mock
MessageProducer producer;
@Mock
BytesMessage message;
@Test
public void testSendMessage() throws Exception {
ArgumentCaptor<byte[]> bytesCaptor = ArgumentCaptor.forClass(byte[].class);
when(connectionFactory.createConnection()).thenReturn(connection);
when(connection.createSession(anyBoolean(),
anyInt())).thenReturn(session);
when(session.createProducer(any(Destination.class))).thenReturn(producer);
when(session.createBytesMessage()).thenReturn(message);
doNothing().when(producer).close();
doNothing().when(session).close();
doNothing().when(connection).close();
doNothing().when(producer).send(any(Message.class));
TestWorkItemManager manager = new TestWorkItemManager();
WorkItemImpl workItem = new WorkItemImpl();
workItem.setParameter("Signal",
"mysignal");
workItem.setParameter("SignalProcessInstanceId",
"abcde");
workItem.setParameter("SignalWorkItemId",
"12345");
workItem.setParameter("SignalDeploymentId",
"deployment-123");
workItem.setProcessInstanceId(123L);
workItem.setDeploymentId("deploy-123");
workItem.setParameter("Data",
"hello world");
JMSSendTaskWorkItemHandler handler = new JMSSendTaskWorkItemHandler(connectionFactory,
destination,
false,
false);
handler.executeWorkItem(workItem,
manager);
assertNotNull(manager.getResults());
assertEquals(1,
manager.getResults().size());
assertTrue(manager.getResults().containsKey(workItem.getId()));
verify(message).writeBytes(bytesCaptor.capture());
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytesCaptor.getValue());
ObjectInputStream objectInputStream = new ObjectInputStream(byteArrayInputStream);
String messageVal = (String) objectInputStream.readObject();
assertTrue("hello world".equals(messageVal));
}
}
| 1,666 |
2,360 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHaphpipe(PythonPackage):
"""HAplotype and PHylodynamics pipeline for viral assembly,
population genetics, and phylodynamics."""
homepage = "https://github.com/gwcbi/haphpipe"
url = "https://github.com/gwcbi/haphpipe/archive/v1.0.3.tar.gz"
maintainers = ['dorton21']
version('1.0.3', sha256='9a9e8632a217ff4207c1dea66887a471e0ea04bbb7c0f0d72631acaba214bd37')
# Deps. taken from
# https://github.com/bioconda/bioconda-recipes/blob/master/recipes/haphpipe/meta.yaml
# https://bioconda.github.io/recipes/haphpipe/README.html
# https://github.com/gwcbi/haphpipe/blob/master/environment.yml
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-future', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-gsutil', type=('build', 'run'))
depends_on('py-sierrapy', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('bowtie2', type=('build', 'run'))
depends_on('blast-plus', type=('build', 'run'))
depends_on('freebayes', type=('build', 'run'))
depends_on('modeltest-ng', type=('build', 'run'))
depends_on('libdeflate', type=('build', 'run'))
depends_on('sratoolkit', type=('build', 'run'))
depends_on('spades', type=('build', 'run'))
depends_on('seqtk', type=('build', 'run'))
depends_on('raxml-ng~mpi', type=('build', 'run'))
depends_on('[email protected]', type=('build', 'run'))
depends_on('trinity', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('bwa', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('mafft', type=('build', 'run'))
depends_on('picard', type=('build', 'run'))
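# Illustrative usage (not part of the recipe): once this file sits in a Spack
# package repository, the pipeline would typically be installed with
#   spack install py-haphpipe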
| 901 |
1,097 | <reponame>EnisBerk/jupyterlab-git
import pytest
from unittest.mock import patch
from jupyterlab_git.git import execute, execution_lock
@pytest.mark.asyncio
async def test_execute_waits_on_index_lock(tmp_path):
lock_file = tmp_path / ".git/index.lock"
lock_file.parent.mkdir(parents=True, exist_ok=True)
lock_file.write_text("")
async def remove_lock_file(*args):
assert "unlocked" not in repr(execution_lock) # Check that the lock is working
lock_file.unlink() # Raise an error for missing file
with patch("tornado.gen.sleep") as sleep:
sleep.side_effect = remove_lock_file # Remove the lock file instead of sleeping
assert "unlock" in repr(execution_lock)
cmd = ["git", "dummy"]
kwargs = {"cwd": "{!s}".format(tmp_path)}
await execute(cmd, **kwargs)
assert "unlock" in repr(execution_lock)
assert not lock_file.exists()
assert sleep.call_count == 1
| 385 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-p98c-wc65-vxfg",
"modified": "2022-05-13T01:18:36Z",
"published": "2022-05-13T01:18:36Z",
"aliases": [
"CVE-2018-0919"
],
"details": "Microsoft Office 2010 SP2, 2013 SP1, and 2016, Microsoft Office 2016 Click-to-Run Microsoft Office 2016 for Mac, Microsoft Office Web Apps 2010 SP2, Microsoft Office Web Apps 2013 SP1, Microsoft SharePoint Enterprise Server 2013 SP1, Microsoft SharePoint Enterprise Server 2016, Microsoft SharePoint Server 2010 SP2, Microsoft Word 2010 SP2, Word 2013 SP1 and Microsoft Word 2016 allow an information disclosure vulnerability due to how variables are initialized, aka \"Microsoft Office Information Disclosure Vulnerability\".",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:L/AC:L/PR:N/UI:R/S:U/C:L/I:N/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2018-0919"
},
{
"type": "WEB",
"url": "https://portal.msrc.microsoft.com/en-US/security-guidance/advisory/CVE-2018-0919"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/103311"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id/1040526"
}
],
"database_specific": {
"cwe_ids": [
"CWE-125"
],
"severity": "MODERATE",
"github_reviewed": false
}
}
| 595 |
590 | <reponame>lmj0591/mygui<filename>Platforms/Ogre/OgrePlatform/src/MyGUI_OgreVertexBuffer.cpp<gh_stars>100-1000
/*!
@file
@author <NAME>
@date 04/2009
*/
#include "MyGUI_OgreVertexBuffer.h"
#include <OgreHardwareBufferManager.h>
namespace MyGUI
{
const size_t VERTEX_BUFFER_REALLOCK_STEP = 5 * VertexQuad::VertexCount;
OgreVertexBuffer::OgreVertexBuffer() :
mVertexCount(0),
mNeedVertexCount(0)
{
}
OgreVertexBuffer::~OgreVertexBuffer()
{
destroy();
}
void OgreVertexBuffer::create()
{
mRenderOperation.vertexData = new Ogre::VertexData();
mRenderOperation.vertexData->vertexStart = 0;
Ogre::VertexDeclaration* vd = mRenderOperation.vertexData->vertexDeclaration;
vd->addElement(0, 0, Ogre::VET_FLOAT3, Ogre::VES_POSITION);
vd->addElement(0, Ogre::VertexElement::getTypeSize(Ogre::VET_FLOAT3), Ogre::VET_COLOUR, Ogre::VES_DIFFUSE);
vd->addElement(
0,
Ogre::VertexElement::getTypeSize(Ogre::VET_FLOAT3) + Ogre::VertexElement::getTypeSize(Ogre::VET_COLOUR),
Ogre::VET_FLOAT2,
Ogre::VES_TEXTURE_COORDINATES);
// Create the Vertex Buffer, using the Vertex Structure we previously declared in _declareVertexStructure.
mVertexBuffer = Ogre::HardwareBufferManager::getSingleton().createVertexBuffer(
mRenderOperation.vertexData->vertexDeclaration->getVertexSize(0), // declared Vertex used
mVertexCount,
Ogre::HardwareBuffer::HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE,
false);
// Bind the created buffer to the renderOperation object. Now we can manipulate the buffer, and the RenderOp keeps the changes.
mRenderOperation.vertexData->vertexBufferBinding->setBinding(0, mVertexBuffer);
mRenderOperation.operationType = Ogre::RenderOperation::OT_TRIANGLE_LIST;
mRenderOperation.useIndexes = false;
}
void OgreVertexBuffer::destroy()
{
delete mRenderOperation.vertexData;
mRenderOperation.vertexData = nullptr;
mVertexBuffer.reset();
}
void OgreVertexBuffer::resize()
{
mVertexCount = mNeedVertexCount + VERTEX_BUFFER_REALLOCK_STEP;
destroy();
create();
}
void OgreVertexBuffer::setVertexCount(size_t _count)
{
mNeedVertexCount = _count;
}
size_t OgreVertexBuffer::getVertexCount() const
{
return mNeedVertexCount;
}
Vertex* OgreVertexBuffer::lock()
{
if (mNeedVertexCount > mVertexCount || mVertexCount == 0)
resize();
return reinterpret_cast<Vertex*>(mVertexBuffer->lock(Ogre::HardwareVertexBuffer::HBL_DISCARD));
}
void OgreVertexBuffer::unlock()
{
mVertexBuffer->unlock();
}
} // namespace MyGUI
| 968 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.network.models;
import com.azure.core.annotation.Fluent;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
/** Specifies the peering configuration. */
@Fluent
public final class ExpressRouteCircuitPeeringConfig {
@JsonIgnore private final ClientLogger logger = new ClientLogger(ExpressRouteCircuitPeeringConfig.class);
/*
* The reference to AdvertisedPublicPrefixes.
*/
@JsonProperty(value = "advertisedPublicPrefixes")
private List<String> advertisedPublicPrefixes;
/*
* The communities of bgp peering. Specified for microsoft peering.
*/
@JsonProperty(value = "advertisedCommunities")
private List<String> advertisedCommunities;
/*
* The advertised public prefix state of the Peering resource.
*/
@JsonProperty(value = "advertisedPublicPrefixesState", access = JsonProperty.Access.WRITE_ONLY)
private ExpressRouteCircuitPeeringAdvertisedPublicPrefixState advertisedPublicPrefixesState;
/*
* The legacy mode of the peering.
*/
@JsonProperty(value = "legacyMode")
private Integer legacyMode;
/*
* The CustomerASN of the peering.
*/
@JsonProperty(value = "customerASN")
private Integer customerAsn;
/*
* The RoutingRegistryName of the configuration.
*/
@JsonProperty(value = "routingRegistryName")
private String routingRegistryName;
/**
* Get the advertisedPublicPrefixes property: The reference to AdvertisedPublicPrefixes.
*
* @return the advertisedPublicPrefixes value.
*/
public List<String> advertisedPublicPrefixes() {
return this.advertisedPublicPrefixes;
}
/**
* Set the advertisedPublicPrefixes property: The reference to AdvertisedPublicPrefixes.
*
* @param advertisedPublicPrefixes the advertisedPublicPrefixes value to set.
* @return the ExpressRouteCircuitPeeringConfig object itself.
*/
public ExpressRouteCircuitPeeringConfig withAdvertisedPublicPrefixes(List<String> advertisedPublicPrefixes) {
this.advertisedPublicPrefixes = advertisedPublicPrefixes;
return this;
}
/**
* Get the advertisedCommunities property: The communities of bgp peering. Specified for microsoft peering.
*
* @return the advertisedCommunities value.
*/
public List<String> advertisedCommunities() {
return this.advertisedCommunities;
}
/**
* Set the advertisedCommunities property: The communities of bgp peering. Specified for microsoft peering.
*
* @param advertisedCommunities the advertisedCommunities value to set.
* @return the ExpressRouteCircuitPeeringConfig object itself.
*/
public ExpressRouteCircuitPeeringConfig withAdvertisedCommunities(List<String> advertisedCommunities) {
this.advertisedCommunities = advertisedCommunities;
return this;
}
/**
* Get the advertisedPublicPrefixesState property: The advertised public prefix state of the Peering resource.
*
* @return the advertisedPublicPrefixesState value.
*/
public ExpressRouteCircuitPeeringAdvertisedPublicPrefixState advertisedPublicPrefixesState() {
return this.advertisedPublicPrefixesState;
}
/**
* Get the legacyMode property: The legacy mode of the peering.
*
* @return the legacyMode value.
*/
public Integer legacyMode() {
return this.legacyMode;
}
/**
* Set the legacyMode property: The legacy mode of the peering.
*
* @param legacyMode the legacyMode value to set.
* @return the ExpressRouteCircuitPeeringConfig object itself.
*/
public ExpressRouteCircuitPeeringConfig withLegacyMode(Integer legacyMode) {
this.legacyMode = legacyMode;
return this;
}
/**
* Get the customerAsn property: The CustomerASN of the peering.
*
* @return the customerAsn value.
*/
public Integer customerAsn() {
return this.customerAsn;
}
/**
* Set the customerAsn property: The CustomerASN of the peering.
*
* @param customerAsn the customerAsn value to set.
* @return the ExpressRouteCircuitPeeringConfig object itself.
*/
public ExpressRouteCircuitPeeringConfig withCustomerAsn(Integer customerAsn) {
this.customerAsn = customerAsn;
return this;
}
/**
* Get the routingRegistryName property: The RoutingRegistryName of the configuration.
*
* @return the routingRegistryName value.
*/
public String routingRegistryName() {
return this.routingRegistryName;
}
/**
* Set the routingRegistryName property: The RoutingRegistryName of the configuration.
*
* @param routingRegistryName the routingRegistryName value to set.
* @return the ExpressRouteCircuitPeeringConfig object itself.
*/
public ExpressRouteCircuitPeeringConfig withRoutingRegistryName(String routingRegistryName) {
this.routingRegistryName = routingRegistryName;
return this;
}
/**
* Validates the instance.
*
* @throws IllegalArgumentException thrown if the instance is not valid.
*/
public void validate() {
}
}
| 1,892 |
369 | <reponame>bitigchi/MuditaOS
// Copyright (c) 2017-2021, Mudita <NAME>.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include <hal/cellular/SIM.hpp>
#include <vector>
namespace cellular
{
namespace service
{
constexpr const char *name = "ServiceCellular";
}
namespace api
{
enum class CallMode
{
Regular,
Emergency
};
enum class SimLockState
{
Enabled,
Disabled
};
using SimSlot = hal::cellular::SimSlot;
enum class ModemState
{
Startup,
Ready,
Fail,
Fatal
};
using SimCode = std::vector<unsigned int>;
} // namespace api
} // namespace cellular
| 400 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.core.startup;
import org.netbeans.SetupHid;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.netbeans.MockEvents;
import org.netbeans.MockModuleInstaller;
import org.netbeans.Module;
import org.netbeans.ModuleManager;
import org.netbeans.Stamps;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.filesystems.LocalFileSystem;
import org.openide.modules.InstalledFileLocator;
import org.openide.modules.api.PlacesTestUtils;
import org.openide.util.test.MockLookup;
/** Do we recognize startlevel?
* @author <NAME>
*/
public class ModuleListStartLevelTest extends SetupHid {
private static final String PREFIX = "wherever/";
private LocalFileSystem fs;
private MockEvents ev;
private File ud;
private void initModule() throws IOException {
FileObject fo = modulesfolder.createData("com-jcraft-jsch.xml");
File mod = new File(new File(ud, "modules"), "com-jcraft-jsch.jar");
final HashMap<String, String> man = new HashMap<String, String>();
man.put("Bundle-SymbolicName", "com.jcraft.jsch");
createJar(mod, new HashMap<String, String>(), man);
InputStream is = ModuleListStartLevelTest.class.getResourceAsStream("ModuleList-com-jcraft-jsch.xml");
assertNotNull("Module definition found", is);
final OutputStream os = fo.getOutputStream();
FileUtil.copy(is, os);
os.close();
is.close();
}
private final class IFL extends InstalledFileLocator {
public IFL() {}
public File locate(String relativePath, String codeNameBase, boolean localized) {
if (relativePath.startsWith(PREFIX)) {
File f = new File(jars, relativePath.substring(PREFIX.length()).replace('/', File.separatorChar));
if (f.exists()) {
return f;
}
}
return null;
}
}
public ModuleListStartLevelTest(String name) {
super(name);
}
private ModuleManager mgr;
private FileObject modulesfolder;
@Override
protected void setUp() throws Exception {
super.setUp();
MockLookup.setInstances(new IFL());
ud = new File(getWorkDir(), "ud");
PlacesTestUtils.setUserDirectory(ud);
File dir = new File(ud, "config");
File modulesdir = new File(dir, "Modules");
if (! modulesdir.mkdirs()) throw new IOException("Making " + modulesdir);
fs = new LocalFileSystem();
fs.setRootDirectory(dir);
modulesfolder = fs.findResource("Modules");
assertNotNull(modulesfolder);
initModule();
MockModuleInstaller installer = new MockModuleInstaller();
ev = new MockEvents();
mgr = new ModuleManager(installer, ev);
}
public void testParsesStartLevel() throws Exception {
ModuleList list = new ModuleList(mgr, modulesfolder, ev);
Set<Module> set = list.readInitial();
assertEquals("One module: " + set, 1, set.size());
Module m = set.iterator().next();
assertEquals("Start level has been specified to four", 4, m.getStartLevel());
Stamps.getModulesJARs().flush(0);
Stamps.getModulesJARs().shutdown();
Map<String, Map<String, Object>> cache = list.readCache();
assertNotNull("Cache read", cache);
Map<String, Object> module = cache.get("com.jcraft.jsch");
assertNotNull("Module info found", module);
Object level = module.get("startlevel");
assertEquals("Start level is remembered", Integer.valueOf(4), level);
}
}
| 1,748 |
1,338 |
#ifndef DRAWING_ENGINE_H
#define DRAWING_ENGINE_H
#include <GraphicsDefs.h>
#include <Region.h>
class AccelerantHWInterface;
class DirectWindowBuffer;
class DrawingEngine {
public:
DrawingEngine(AccelerantHWInterface* interface,
DirectWindowBuffer* buffer);
virtual ~DrawingEngine();
bool Lock();
void Unlock();
void ConstrainClipping(BRegion* region);
bool StraightLine(BPoint a, BPoint b, const rgb_color& c);
void StrokeLine(BPoint a, BPoint b, const rgb_color& color);
void StrokeRect(BRect r, const rgb_color& color);
void FillRegion(BRegion *region, const rgb_color& color);
void DrawString(const char* string, BPoint baseLine,
const rgb_color& color);
void CopyRegion(BRegion *region, int32 xOffset, int32 yOffset);
private:
AccelerantHWInterface* fHWInterface;
DirectWindowBuffer* fBuffer;
BRegion fCurrentClipping;
};
#endif // DRAWING_ENGINE_H
| 402 |
1,338 | <gh_stars>1000+
/*
* Copyright 2009, <NAME>.
* Copyright 2008, <NAME> <<EMAIL>>.
* Copyright 2005-2014, <NAME> <<EMAIL>>.
* Copyright 2015, <NAME> <<EMAIL>>
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef DRAW_BITMAP_NEAREST_NEIGHBOR_H
#define DRAW_BITMAP_NEAREST_NEIGHBOR_H
#include "Painter.h"
struct DrawBitmapNearestNeighborCopy {
static void
Draw(const Painter* painter, PainterAggInterface& aggInterface,
agg::rendering_buffer& bitmap, BPoint offset,
double scaleX, double scaleY, BRect destinationRect)
{
//bigtime_t now = system_time();
uint32 dstWidth = destinationRect.IntegerWidth() + 1;
uint32 dstHeight = destinationRect.IntegerHeight() + 1;
uint32 srcWidth = bitmap.width();
uint32 srcHeight = bitmap.height();
// Do not calculate more filter weights than necessary and also
// keep the stack based allocations reasonably sized
const BRegion& clippingRegion = *painter->ClippingRegion();
if (clippingRegion.Frame().IntegerWidth() + 1 < (int32)dstWidth)
dstWidth = clippingRegion.Frame().IntegerWidth() + 1;
if (clippingRegion.Frame().IntegerHeight() + 1 < (int32)dstHeight)
dstHeight = clippingRegion.Frame().IntegerHeight() + 1;
// When calculating less filter weights than specified by
// destinationRect, we need to compensate the offset.
uint32 filterWeightXIndexOffset = 0;
uint32 filterWeightYIndexOffset = 0;
if (clippingRegion.Frame().left > destinationRect.left) {
filterWeightXIndexOffset = (int32)(clippingRegion.Frame().left
- destinationRect.left);
}
if (clippingRegion.Frame().top > destinationRect.top) {
filterWeightYIndexOffset = (int32)(clippingRegion.Frame().top
- destinationRect.top);
}
// should not pose a problem with stack overflows
// (needs around 6Kb for 1920x1200)
uint16 xIndices[dstWidth];
uint16 yIndices[dstHeight];
// Extract the cropping information for the source bitmap,
// If only a part of the source bitmap is to be drawn with scale,
// the offset will be different from the destinationRect left top
// corner.
const int32 xBitmapShift = (int32)(destinationRect.left - offset.x);
const int32 yBitmapShift = (int32)(destinationRect.top - offset.y);
for (uint32 i = 0; i < dstWidth; i++) {
// index into source
uint16 index = (uint16)((i + filterWeightXIndexOffset) * srcWidth
/ (srcWidth * scaleX));
// round down to get the left pixel
xIndices[i] = index;
// handle cropped source bitmap
xIndices[i] += xBitmapShift;
// precompute index for 32 bit pixels
xIndices[i] *= 4;
}
for (uint32 i = 0; i < dstHeight; i++) {
// index into source
uint16 index = (uint16)((i + filterWeightYIndexOffset) * srcHeight
/ (srcHeight * scaleY));
// round down to get the top pixel
yIndices[i] = index;
// handle cropped source bitmap
yIndices[i] += yBitmapShift;
}
//printf("X: %d ... %d, %d (%ld or %f)\n",
// xIndices[0], xIndices[dstWidth - 2], xIndices[dstWidth - 1],
// dstWidth, srcWidth * scaleX);
//printf("Y: %d ... %d, %d (%ld or %f)\n",
// yIndices[0], yIndices[dstHeight - 2], yIndices[dstHeight - 1],
// dstHeight, srcHeight * scaleY);
const int32 left = (int32)destinationRect.left;
const int32 top = (int32)destinationRect.top;
const int32 right = (int32)destinationRect.right;
const int32 bottom = (int32)destinationRect.bottom;
const uint32 dstBPR = aggInterface.fBuffer.stride();
renderer_base& baseRenderer = aggInterface.fBaseRenderer;
// iterate over clipping boxes
baseRenderer.first_clip_box();
do {
const int32 x1 = max_c(baseRenderer.xmin(), left);
const int32 x2 = min_c(baseRenderer.xmax(), right);
if (x1 > x2)
continue;
int32 y1 = max_c(baseRenderer.ymin(), top);
int32 y2 = min_c(baseRenderer.ymax(), bottom);
if (y1 > y2)
continue;
// buffer offset into destination
uint8* dst = aggInterface.fBuffer.row_ptr(y1) + x1 * 4;
// x and y are needed as indices into the weight arrays, so the
// offset into the target buffer needs to be compensated
const int32 xIndexL = x1 - left - filterWeightXIndexOffset;
const int32 xIndexR = x2 - left - filterWeightXIndexOffset;
y1 -= top + filterWeightYIndexOffset;
y2 -= top + filterWeightYIndexOffset;
//printf("x: %ld - %ld\n", xIndexL, xIndexR);
//printf("y: %ld - %ld\n", y1, y2);
for (; y1 <= y2; y1++) {
// buffer offset into source (top row)
register const uint8* src = bitmap.row_ptr(yIndices[y1]);
// buffer handle for destination to be incremented per pixel
register uint32* d = (uint32*)dst;
for (int32 x = xIndexL; x <= xIndexR; x++) {
*d = *(uint32*)(src + xIndices[x]);
d++;
}
dst += dstBPR;
}
} while (baseRenderer.next_clip_box());
//printf("draw bitmap %.5fx%.5f: %lld\n", xScale, yScale,
// system_time() - now);
}
};
#endif // DRAW_BITMAP_NEAREST_NEIGHBOR_H
| 1,845 |
4,569 | package com.brianway.learning.java.multithread.supplement.example2;
/**
* Created by Brian on 2016/4/17.
*/
/**
* Uncommenting the System.out.println calls reveals more detail
*/
public class MyThread extends Thread {
private Object lock;
private String showChar;
private int showNumPosition;
private int printCount = 0; // counts how many letters have been printed
volatile private static int addNumber = 1;
public MyThread(Object lock, String showChar, int showNumPosition) {
this.lock = lock;
this.showChar = showChar;
this.showNumPosition = showNumPosition;
}
@Override
public void run() {
try {
synchronized (lock) {
//System.out.println("ThreadName="+ Thread.currentThread().getName()+" get the lock");
while (true) {
if (addNumber % 3 == showNumPosition) {
System.out.println("ThreadName=" + Thread.currentThread().getName()
+ " runCount = " + addNumber + " " + showChar);
lock.notifyAll();
addNumber++;
printCount++;
if (printCount == 3) {
break;
}
} else {
//System.out.println("ThreadName="+ Thread.currentThread().getName()+" will await");
lock.wait();
//System.out.println("ThreadName="+ Thread.currentThread().getName()+" after await");
}
}
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
| 881 |
347 | <reponame>jihwahn1018/ovirt-engine<filename>frontend/webadmin/modules/webadmin/src/main/java/org/ovirt/engine/ui/webadmin/section/main/view/popup/vm/HostDeviceColumnHelper.java
package org.ovirt.engine.ui.webadmin.section.main.view.popup.vm;
import java.util.List;
import org.ovirt.engine.core.compat.StringHelper;
import org.ovirt.engine.ui.webadmin.ApplicationConstants;
import org.ovirt.engine.ui.webadmin.ApplicationMessages;
import org.ovirt.engine.ui.webadmin.gin.AssetProvider;
public final class HostDeviceColumnHelper {
private static final ApplicationConstants constants = AssetProvider.getConstants();
private static final ApplicationMessages messages = AssetProvider.getMessages();
public static String renderNameId(String name, String id) {
if (StringHelper.isNullOrEmpty(name)) {
return id;
}
// we assume that VDSM will never report name != null && id == null
return messages.nameId(name, id);
}
public static String renderVmNamesList(List<String> names) {
if (names != null) {
return String.join(", ", names); //$NON-NLS-1$
}
return "";
}
public static String renderIommuGroup(Integer group) {
return group == null ? constants.notAvailableLabel() : group.toString();
}
}
| 472 |
488 | <reponame>ouankou/rose
#ifndef OCTAVE_ANALYSIS_BOTTOMUPTYPEANALYSIS_H
#define OCTAVE_ANALYSIS_BOTTOMUPTYPEANALYSIS_H
#include <string>
#include <map>
#include <utility>
#include "rose.h"
#include "FastNumericsRoseSupport.h"
namespace MatlabAnalysis
{
typedef FastNumericsRoseSupport::MatlabFunctionRec MatlabFunctionRec;
typedef FastNumericsRoseSupport::MatlabOverloadSet MatlabOverloadSet;
typedef FastNumericsRoseSupport::NameToDeclarationMap NameToDeclarationMap;
struct Ctx
{
/// A mapping from function name to function declaration, with a bool flag
/// to indicate if a copy of the function has been made
static NameToDeclarationMap nameToFunctionDeclaration;
/// A name-decl pair of matlab builtin functions
static NameToDeclarationMap matlabBuiltins;
};
class FunctionAnalyzer;
void runBottomUpInference(SgExpression*, SgProject*, FunctionAnalyzer*);
static inline
bool hasBeenAnalyzed(const MatlabFunctionRec& rec)
{
return rec.second;
}
static inline
void setAnalyzed(MatlabFunctionRec& rec)
{
rec.second = true;
}
}
#endif /* OCTAVE_ANALYSIS_BOTTOMUPTYPEANALYSIS_H */
| 403 |
7,158 | <reponame>ptelang/opencv_contrib
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "opencv2/datasets/sr_general100.hpp"
#include <opencv2/core.hpp>
#include <cstdio>
#include <string>
#include <vector>
using namespace std;
using namespace cv;
using namespace cv::datasets;
int main(int argc, char *argv[])
{
const char *keys =
"{ help h usage ? | | show this message }"
"{ path p |true| path to dataset (General-100 dataset folder) }";
CommandLineParser parser(argc, argv, keys);
string path(parser.get<string>("path"));
if (parser.has("help") || path=="true")
{
parser.printMessage();
return -1;
}
Ptr<SR_general100> dataset = SR_general100::create();
dataset->load(path);
// ***************
// Dataset contains all images.
// For example, let's output dataset size; first image name; and second image full path.
printf("dataset size: %u\n", (unsigned int)dataset->getTrain().size());
SR_general100Obj *example = static_cast<SR_general100Obj *>(dataset->getTrain()[0].get());
printf("first image name: %s\n", example->imageName.c_str());
SR_general100Obj *example2 = static_cast<SR_general100Obj *>(dataset->getTrain()[1].get());
string fullPath = path + "/" + example2->imageName.c_str();
printf("second image full path: %s\n", fullPath.c_str());
return 0;
}
| 575 |
578 | /*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.syndesis.connector.email.customizer;
import java.io.IOException;
import java.util.Map;
import javax.mail.BodyPart;
import javax.mail.MessagingException;
import javax.mail.Multipart;
import javax.mail.internet.MimeMultipart;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.apache.camel.util.ObjectHelper;
import org.jsoup.Jsoup;
import io.syndesis.connector.email.EMailConstants;
import io.syndesis.connector.email.model.EMailMessageModel;
import io.syndesis.integration.component.proxy.ComponentProxyComponent;
import io.syndesis.integration.component.proxy.ComponentProxyCustomizer;
public class EMailReceiveCustomizer implements ComponentProxyCustomizer, EMailConstants {
@Override
public void customize(ComponentProxyComponent component, Map<String, Object> options) {
component.setBeforeConsumer(EMailReceiveCustomizer::beforeConsumer);
}
private static void beforeConsumer(Exchange exchange) throws MessagingException, IOException {
final Message in = exchange.getIn();
final EMailMessageModel mail = new EMailMessageModel();
if (ObjectHelper.isNotEmpty(in.getBody())) {
textFromMessage(in, mail);
}
if (ObjectHelper.isNotEmpty(in.getHeader(MAIL_SUBJECT))) {
mail.setSubject(in.getHeader(MAIL_SUBJECT, String.class));
}
if (ObjectHelper.isNotEmpty(in.getHeader(MAIL_FROM))) {
mail.setFrom(in.getHeader(MAIL_FROM, String.class));
}
if (ObjectHelper.isNotEmpty(in.getHeader(MAIL_TO))) {
mail.setTo(in.getHeader(MAIL_TO, String.class));
}
if (ObjectHelper.isNotEmpty(in.getHeader(MAIL_CC))) {
mail.setCc(in.getHeader(MAIL_CC, String.class));
}
if (ObjectHelper.isNotEmpty(in.getHeader(MAIL_BCC))) {
mail.setBcc(in.getHeader(MAIL_BCC, String.class));
}
exchange.getIn().setBody(mail);
}
private static String getPlainTextFromMultipart(Multipart multipart) throws MessagingException, IOException {
StringBuilder result = new StringBuilder();
int count = multipart.getCount();
for (int i = 0; i < count; i++) {
BodyPart bodyPart = multipart.getBodyPart(i);
if (bodyPart.isMimeType(TEXT_PLAIN)) {
result.append(NEW_LINE)
.append(bodyPart.getContent());
break; // without break same text can appear twice
} else if (bodyPart.isMimeType(TEXT_HTML)) {
result.append(NEW_LINE)
.append(Jsoup.parse((String) bodyPart.getContent()).text());
break; // without break same text can appear twice
} else if (bodyPart.isMimeType("application/pgp-encrypted")) {
//
// Body is encrypted so flag as such to enable easy understanding for users
//
result.append(NEW_LINE)
.append("<pgp encrypted text>")
.append(NEW_LINE)
.append(bodyPart.getContent().toString());
} else if (bodyPart.getContent() instanceof MimeMultipart){
result.append(NEW_LINE)
.append(getPlainTextFromMultipart((MimeMultipart) bodyPart.getContent()));
}
}
return result.toString();
}
private static void textFromMessage(Message camelMessage, EMailMessageModel model) throws MessagingException, IOException {
Object content = camelMessage.getBody();
if (content instanceof String) {
content = content.toString().trim();
} else if (content instanceof Multipart) {
content = getPlainTextFromMultipart((Multipart) content);
}
model.setContent(content);
}
}
| 1,829 |
1,056 | #include "../../includes.hpp"
#include "hooks.hpp"
Hooks::Events::EventListener::EventListener() {
Interfaces::eventManager->AddListener(this, "player_hurt", false);
Interfaces::eventManager->AddListener(this, "player_death", false);
Interfaces::eventManager->AddListener(this, "bullet_impact", false);
}
Hooks::Events::EventListener::~EventListener() {
Interfaces::eventManager->RemoveListener(this);
}
void Hooks::Events::EventListener::FireGameEvent(IGameEvent *event) {
if (strstr(event->GetName(), "player_hurt")) {
Player* attacker = (Player*)Interfaces::entityList->GetClientEntity(Interfaces::engine->GetPlayerForUserID(event->GetInt("attacker")));
Player* victim = (Player*)Interfaces::entityList->GetClientEntity(Interfaces::engine->GetPlayerForUserID(event->GetInt("userid")));
if (attacker && victim && attacker == Globals::localPlayer) {
player_info_t info;
Interfaces::engine->GetPlayerInfo(victim->index(), &info);
if (CONFIGBOOL("Misc>Misc>Hitmarkers>Hitlogs")) {
if (CONFIGBOOL("Legit>Backtrack>Backtrack") && Features::Backtrack::lastBacktrack > 4) {
Features::Notifications::addNotification(ImColor(220, 220, 40), "[gs] backtracked %s %i ticks for %i health", info.name, Features::Backtrack::lastBacktrack, event->GetInt("dmg_health"));
}
else {
Features::Notifications::addNotification(ImColor(220, 220, 40), "[gs] hit %s for %i health", info.name, event->GetInt("dmg_health"));
}
}
if (CONFIGBOOL("Misc>Misc>Hitmarkers>Hitmarkers")) {
Features::Hitmarkers::drawHitmarkerTill = Interfaces::globals->curtime + 0.7f;
}
if (CONFIGBOOL("Misc>Misc>Hitmarkers>Hitsound")) {
Interfaces::engine->ExecuteClientCmd("play buttons/arena_switch_press_02"); // TODO: play sound via a better method
}
if (CONFIGBOOL("Misc>Misc>Hitmarkers>Damage Markers")) {
Features::Hitmarkers::DamageMarker damageMarker;
damageMarker.drawHitmarkerTill = Interfaces::globals->curtime + 4.f;
damageMarker.headshot = event->GetInt("hitgroup") == HITGROUP_HEAD;
damageMarker.damage = event->GetInt("dmg_health");
switch (event->GetInt("hitgroup")) {
case HITGROUP_HEAD: damageMarker.position = victim->getBonePos(8); break;
case HITGROUP_CHEST: damageMarker.position = victim->getBonePos(6); break;
case HITGROUP_STOMACH: damageMarker.position = victim->getBonePos(4); break;
default:
damageMarker.position = victim->getBonePos(3); break;
}
Features::Hitmarkers::damageMarkers.push_back(damageMarker);
}
}
}
else if (strstr(event->GetName(), "player_death")) {
Entity* attacker = (Entity*)Interfaces::entityList->GetClientEntity(Interfaces::engine->GetPlayerForUserID(event->GetInt("attacker")));
Entity* victim = (Entity*)Interfaces::entityList->GetClientEntity(Interfaces::engine->GetPlayerForUserID(event->GetInt("userid")));
if (attacker && victim && attacker == Globals::localPlayer) {
player_info_t info;
Interfaces::engine->GetPlayerInfo(victim->index(), &info);
if (CONFIGBOOL("Misc>Misc>Hitmarkers>Hitlogs")) {
Features::Notifications::addNotification(ImColor(220, 40, 40), "[gs] killed %s", info.name);
}
}
}
Features::BulletTracers::event(event);
}
int Hooks::Events::EventListener::GetEventDebugID() {
return EVENT_DEBUG_ID_INIT;
}
| 1,621 |
2,338 | #include <chrono>
#include <thread>
void f3() {
int m;
m = 2; // thread 3
}
void f2() {
int n;
n = 1; // thread 2
std::thread t3(f3);
t3.join();
}
int main() { // main
std::thread t2(f2);
t2.join();
return 0;
}
| 110 |
5,169 | {
"name": "HashBuilder",
"version": "1.0",
"summary": "A utility to generate a hash for NSObject subclasses.",
"description": "\t\tUsed to build a hash result from contributed objects or hashes (presumably\n\t\tproperties on your object which should be considered in the isEqual: override).\n\t\tThe intention is for the hash result to be returned from an override to the\n\t\t`NSObject` `- (NSUInteger)hash` method.\n",
"homepage": "https://github.com/levigroker/HashBuilder",
"license": "Creative Commons Attribution 3.0 Unported License",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/levigroker/HashBuilder.git",
"tag": "1.0"
},
"platforms": {
"ios": "5.0"
},
"source_files": "HashBuilder/**/*.{h,m}",
"frameworks": "Foundation",
"requires_arc": true
}
| 294 |
3,372 | <gh_stars>1000+
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.ec2.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.ec2.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.StringUtils;
/**
* DescribeScheduledInstanceAvailabilityRequest Marshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeScheduledInstanceAvailabilityRequestMarshaller implements
Marshaller<Request<DescribeScheduledInstanceAvailabilityRequest>, DescribeScheduledInstanceAvailabilityRequest> {
public Request<DescribeScheduledInstanceAvailabilityRequest> marshall(
DescribeScheduledInstanceAvailabilityRequest describeScheduledInstanceAvailabilityRequest) {
if (describeScheduledInstanceAvailabilityRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
Request<DescribeScheduledInstanceAvailabilityRequest> request = new DefaultRequest<DescribeScheduledInstanceAvailabilityRequest>(
describeScheduledInstanceAvailabilityRequest, "AmazonEC2");
request.addParameter("Action", "DescribeScheduledInstanceAvailability");
request.addParameter("Version", "2016-11-15");
request.setHttpMethod(HttpMethodName.POST);
com.amazonaws.internal.SdkInternalList<Filter> describeScheduledInstanceAvailabilityRequestFiltersList = (com.amazonaws.internal.SdkInternalList<Filter>) describeScheduledInstanceAvailabilityRequest
.getFilters();
if (!describeScheduledInstanceAvailabilityRequestFiltersList.isEmpty() || !describeScheduledInstanceAvailabilityRequestFiltersList.isAutoConstruct()) {
int filtersListIndex = 1;
for (Filter describeScheduledInstanceAvailabilityRequestFiltersListValue : describeScheduledInstanceAvailabilityRequestFiltersList) {
if (describeScheduledInstanceAvailabilityRequestFiltersListValue.getName() != null) {
request.addParameter("Filter." + filtersListIndex + ".Name",
StringUtils.fromString(describeScheduledInstanceAvailabilityRequestFiltersListValue.getName()));
}
com.amazonaws.internal.SdkInternalList<String> filterValuesList = (com.amazonaws.internal.SdkInternalList<String>) describeScheduledInstanceAvailabilityRequestFiltersListValue
.getValues();
if (!filterValuesList.isEmpty() || !filterValuesList.isAutoConstruct()) {
int valuesListIndex = 1;
for (String filterValuesListValue : filterValuesList) {
if (filterValuesListValue != null) {
request.addParameter("Filter." + filtersListIndex + ".Value." + valuesListIndex, StringUtils.fromString(filterValuesListValue));
}
valuesListIndex++;
}
}
filtersListIndex++;
}
}
SlotDateTimeRangeRequest firstSlotStartTimeRange = describeScheduledInstanceAvailabilityRequest.getFirstSlotStartTimeRange();
if (firstSlotStartTimeRange != null) {
if (firstSlotStartTimeRange.getEarliestTime() != null) {
request.addParameter("FirstSlotStartTimeRange.EarliestTime", StringUtils.fromDate(firstSlotStartTimeRange.getEarliestTime()));
}
if (firstSlotStartTimeRange.getLatestTime() != null) {
request.addParameter("FirstSlotStartTimeRange.LatestTime", StringUtils.fromDate(firstSlotStartTimeRange.getLatestTime()));
}
}
if (describeScheduledInstanceAvailabilityRequest.getMaxResults() != null) {
request.addParameter("MaxResults", StringUtils.fromInteger(describeScheduledInstanceAvailabilityRequest.getMaxResults()));
}
if (describeScheduledInstanceAvailabilityRequest.getMaxSlotDurationInHours() != null) {
request.addParameter("MaxSlotDurationInHours", StringUtils.fromInteger(describeScheduledInstanceAvailabilityRequest.getMaxSlotDurationInHours()));
}
if (describeScheduledInstanceAvailabilityRequest.getMinSlotDurationInHours() != null) {
request.addParameter("MinSlotDurationInHours", StringUtils.fromInteger(describeScheduledInstanceAvailabilityRequest.getMinSlotDurationInHours()));
}
if (describeScheduledInstanceAvailabilityRequest.getNextToken() != null) {
request.addParameter("NextToken", StringUtils.fromString(describeScheduledInstanceAvailabilityRequest.getNextToken()));
}
ScheduledInstanceRecurrenceRequest recurrence = describeScheduledInstanceAvailabilityRequest.getRecurrence();
if (recurrence != null) {
if (recurrence.getFrequency() != null) {
request.addParameter("Recurrence.Frequency", StringUtils.fromString(recurrence.getFrequency()));
}
if (recurrence.getInterval() != null) {
request.addParameter("Recurrence.Interval", StringUtils.fromInteger(recurrence.getInterval()));
}
com.amazonaws.internal.SdkInternalList<Integer> scheduledInstanceRecurrenceRequestOccurrenceDaysList = (com.amazonaws.internal.SdkInternalList<Integer>) recurrence
.getOccurrenceDays();
if (!scheduledInstanceRecurrenceRequestOccurrenceDaysList.isEmpty() || !scheduledInstanceRecurrenceRequestOccurrenceDaysList.isAutoConstruct()) {
int occurrenceDaysListIndex = 1;
for (Integer scheduledInstanceRecurrenceRequestOccurrenceDaysListValue : scheduledInstanceRecurrenceRequestOccurrenceDaysList) {
if (scheduledInstanceRecurrenceRequestOccurrenceDaysListValue != null) {
request.addParameter("Recurrence.OccurrenceDay." + occurrenceDaysListIndex,
StringUtils.fromInteger(scheduledInstanceRecurrenceRequestOccurrenceDaysListValue));
}
occurrenceDaysListIndex++;
}
}
if (recurrence.getOccurrenceRelativeToEnd() != null) {
request.addParameter("Recurrence.OccurrenceRelativeToEnd", StringUtils.fromBoolean(recurrence.getOccurrenceRelativeToEnd()));
}
if (recurrence.getOccurrenceUnit() != null) {
request.addParameter("Recurrence.OccurrenceUnit", StringUtils.fromString(recurrence.getOccurrenceUnit()));
}
}
return request;
}
}
| 2,705 |
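The marshaller above flattens nested request objects into indexed EC2 query parameters such as Filter.1.Name and Filter.1.Value.2. A rough Python sketch of that flattening scheme, shown for the Filter list only and not taken from the AWS SDK itself:

def marshall_filters(filters):
    # filters: list of {"Name": str, "Values": [str, ...]}
    params = {"Action": "DescribeScheduledInstanceAvailability", "Version": "2016-11-15"}
    for i, f in enumerate(filters, start=1):
        if f.get("Name") is not None:
            params[f"Filter.{i}.Name"] = f["Name"]
        for j, value in enumerate(f.get("Values", []), start=1):
            if value is not None:
                params[f"Filter.{i}.Value.{j}"] = value
    return params

# One filter with two values yields Filter.1.Name, Filter.1.Value.1 and Filter.1.Value.2.
print(marshall_filters([{"Name": "platform", "Values": ["Linux/UNIX", "Windows"]}]))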
369 | <reponame>bitigchi/MuditaOS
// Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include "Alignment.hpp" // for Alignment
#include "Axes.hpp" // for Axis
#include <module-gui/gui/Common.hpp>
#include "Layout.hpp" // for LayoutHorizontalPolicy, LayoutVertic...
#include "Margins.hpp" // for Padding, Margins
#include "core/BoundingBox.hpp" // for BoundingBox, BoundingBox::(anonymous)
#include <cstdint> // for uint32_t, int32_t, uint16_t
#include <functional> // for function
#include <list> // for list
#include <memory> // for unique_ptr
#include <utility> // for move
#include <core/DrawCommandForward.hpp>
#include <module-gui/gui/widgets/visitor/GuiVisitor.hpp>
#include <Timers/Timer.hpp>
namespace gui
{
class InputEvent;
}
namespace gui
{
class Navigation;
} // namespace gui
namespace gui
{
    /// enumeration that contains gui item types
enum class ItemType
{
ITEM = 0,
RECT = 1,
LABEL,
LIST,
LIST_ITEM,
SPAN_ITEM,
TEXT,
IMAGE,
LAYOUT,
VBOX,
HBOX
};
/// Base class for all GUI items.
/// @details It is a basic element for creating widgets and applications
    /// that make up the GUI (Graphical User Interface) in MuditaOS
class Item
{
public:
/// flag that informs whether item has a focus
bool focus;
/// pointer to the child item that has focus
Item *focusItem = nullptr;
        /// item type of the widget - can be used to check what type of item we are working on
        /// currently unused except for Text, where it calls a callback for gui::BoxLayout which does nothing
ItemType type = ItemType::ITEM;
/// pointer to the parent Item
Item *parent = nullptr;
/// list of item's children. @note Items can have only one parent.
std::list<Item *> children;
        /// enumeration with options for item bounding box (size & position) area
enum class Area
{
Min,
Normal,
Draw,
Max,
};
/// actual bounding box (size & position) of the item. This is in coordinates of the parent widget.
BoundingBox widgetArea;
/// bounding box (size & position) of the item minimum size,
BoundingBox widgetMinimumArea;
/// bounding box (size & position) of the item maximal size,
BoundingBox widgetMaximumArea;
/// bounding box (size & position) used for drawing. This is in coordinates of window
BoundingBox drawArea; // drawableArea would be more accurate
// maximal bounding box size
/// gets bounding box for selected area
auto area(Area which = Area::Normal) -> BoundingBox &
{
switch (which) {
case Area::Min:
return widgetMinimumArea;
case Area::Normal:
return widgetArea;
case Area::Draw:
return drawArea;
case Area::Max:
return widgetMaximumArea;
default:
return widgetArea;
}
}
Padding padding;
Margins margins;
Alignment alignment;
/// radius of corner, default 0
short radius = 0;
/// flag that defines if item is active.
/// @details When flag is set to true item is focusable (focus on item can be set)
/// and clickable (item action can be activated). Otherwise item is non focusable and non clickable.
        /// @note if false -> then it shouldn't be used with onInput, navigation etc.
bool activeItem = true;
/// flag that defines whether widget is visible (this is - should be rendered)
bool visible;
/// policy for changing vertical size if Item is placed inside layout.
LayoutVerticalPolicy verticalPolicy;
/// policy for changing horizontal size if Item is placed inside layout.
LayoutHorizontalPolicy horizontalPolicy;
/// Maximum height to which Layout base widget can scale current widget.
Length maxHeight;
/// Maximum width to which Layout base widget can scale current widget.
Length maxWidth;
/// @defgroup callbacks Item callback functions
        /// callback functors are meant to emulate signal <-> slot actions where you bind an element instance to a
        /// lambda function defined in code; all Items have functions corresponding to each callback
/// 1. if you wish to create new item which does something new in action function - override it
/// 2. if you wish to call function on item - set callback for it
        /// @attention all callbacks return true if handled; returning true means end of event processing, i.e. if you
/// handle enter in inputCallback, then activatedCallback shouldn't be called.
/// @{
        /// called when item loses/gains focus
/// @param `this` : item
std::function<bool(Item &)> focusChangedCallback;
/// called when item has dimensions changed
/// @param `this` : item
/// @param data : new bounding box area
/// @note should be part of widgetArea
std::function<bool(Item &, BoundingBox data)> dimensionChangedCallback;
        /// called when item is activated, that is, when enter is pressed
/// @param `this` : item
std::function<bool(Item &)> activatedCallback;
/// callback when any key is pressed
/// @param `this` : item
/// @param inputEvent : input event e.g. key pressed
std::function<bool(Item &, const InputEvent &inputEvent)> inputCallback;
/// callback when timer is called on Item and onTimer is executed
/// @param `this` : item
/// @param `timer` : which triggered this callback
std::function<bool(Item &, sys::Timer &)> timerCallback = nullptr;
/// callback on navigation, called when item passes navigation to handle by its children
/// @param `InputEvent` : input event e.g. key pressed
/// @attention when child handles navigation it should return true, so that parent won't perform action for that
std::function<bool(const InputEvent &)> itemNavigation = nullptr;
/// @}
/// @defgroup focus functions handling focus
/// @{
/// navigation handler for input events e.g. keyboard key pressed
/// @param inputEvent : input event e.g. key pressed
bool handleNavigation(const InputEvent inputEvent);
/// sets/resets focus on this Item and runs focusChangedCallback for it
/// @param state : true to set focus on item, false to clear focus from item
bool setFocus(bool state);
/// sets/resets focus on provided item child if one exists.
/// @param item : this
/// @note runs focusChangedCallback on item which changes
/// @attention focusItem is just a pointer, might crash if item with focus was removed
void setFocusItem(Item *item);
/// getter for focus item
/// @attention focusItem is just a pointer, might crash if item with focus was removed
Item *getFocusItem() const;
/// @}
/// @defgroup callbackCallers functions which should call functors from callbacks group
/// @{
        /// called from setFocus, does nothing (which means it doesn't call focusChangedCallback)
/// @param state : new focus state
virtual bool onFocus(bool state);
        /// called when this Item is pressed with enter (middle key on phone action keys),
        /// used for callback input handling; calls activatedCallback
/// @param[in] data : unused
virtual bool onActivated(void *data);
/// called when any key is pressed, before onActivated , after focus
/// calls: inputCallback
/// @param InputEvent : input event e.g. key pressed
virtual bool onInput(const InputEvent &inputEvent);
        /// (should be) called each time the dimensions of the element change
/// @param oldDim : old bounding box dimensions (item size & position)
/// @param newDim : new bounding box dimensions (item size & position)
/// @note TODO should be fixed so that api would be consistent
virtual bool onDimensionChanged(const BoundingBox &oldDim, const BoundingBox &newDim);
        /// called on Timer event in application, triggers timerCallback
        /// @param timer : timer element which triggered this action
virtual bool onTimer(sys::Timer &timer);
/// @}
/// function called to add child to item
/// @param item : pointer to item (widget) that will be added to this item children's
/// similar approach to QT, adding widget will always succeed.
virtual void addWidget(Item *item);
/// function to remove child item from element
/// @param item : pointer to item (widget) that will be removed from this item children's
/// it's recursive for all elements underneath
        /// @attention It doesn't call delete; please delete the item yourself after calling this function
virtual bool removeWidget(Item *item);
/// call removeWidget on item and delete on item
/// @param item : pointer to item (widget) that will be deleted and removed from this item children's
virtual bool erase(Item *item);
/// remove all children and destroy them
virtual void erase();
/// sets `visible` flag
virtual void setVisible(bool value);
/// sets bounding box area (normal) for item
/// @param area : new bounding box dimensions (area) for item (item size & position)
virtual void setArea(BoundingBox area);
/// function for setting bounding box area (normal) on selected axis
/// @param axis : indicates axis X or Y for operation
/// @param posOnAxis : new position value for selected axis
/// @param posOnOrthogonalAxis : new position for axis orthogonal to selected
/// @param sizeOnAxis : new size for selected axis
/// @param sizeOnOrthogonalAxis : new size for orthogonal axis to selected
void setAreaInAxis(Axis axis,
Position posOnAxis,
Position posOnOrthogonalAxis,
Length sizeOnAxis,
Length sizeOnOrthogonalAxis);
/// sets position of element - this is sets area().x and area().y of item
/// @note calls onDimensionChanged callback & updateDrawArea for item
/// @attention should be bind to area
virtual void setPosition(const Position &x, const Position &y);
virtual void setPosition(const Position &val, Axis axis);
[[nodiscard]] Length getSize(Axis axis) const;
[[nodiscard]] Position getPosition(Axis axis) const;
virtual void setMargins(const Margins &value);
[[nodiscard]] Margins getMargins();
virtual void setPadding(const Padding &value);
[[nodiscard]] Padding getPadding() const;
virtual void setAlignment(const Alignment &value);
[[nodiscard]] Alignment &getAlignment();
[[nodiscard]] Alignment getAlignment(Axis axis);
/// function to calculate absolute position in selected axis in case of alignment
/// @param axis : selected axis (X or Y)
/// @param itemSize : size of item on selected axis for this calculation
/// @return alignment for selected axis.
[[nodiscard]] virtual Length getAxisAlignmentValue(Axis axis, Length itemSize);
/// @defgroup size_range_setters Named the same way that are in QT minimum/maximum sizes setters
///
/// All setters:
/// 1. only sets range in which normal area can be calculated in layouts
/// 2. doesn't trigger any callbacks
/// @note we can consider calling callback when setMinimum/Maximum exceeds normal size
/// @{
void setMaximumSize(Length val, Axis axis);
void setMaximumWidth(Length w);
void setMaximumHeight(Length h);
void setMaximumSize(Length w, Length h);
void setMinimumSize(Length val, Axis axis);
void setMinimumSize(Length w, Length h);
void setMinimumWidth(Length w);
void setMinimumHeight(Length h);
/// @}
/// requests bigger size from parent if parent available
/// if no parent available - sets size
/// @return true if handled positively
virtual auto requestSize(Length request_w, Length request_h) -> Size final;
/// handle for layouts to implement to resize on demand ( i.e. when it needs to expand after
/// addition/removal of chars )
///
        /// @details by default items do not resize their children, so it's safe for them to pass handleRequestResize
        /// straight to setSize. Layout manages the size of its items in range { Area::Min <= size <= Area::Max }, so in
        /// that case the layout should e.g. store the requested size and then handle resizes appropriately,
        /// i.e. Text => text->requestSize => layout->storeRequest => layout uses it in resizes
/// with this both:
/// 1. user setSize
/// 2. resize requests from UI element
/// should be handled without infinite loop on resize ( item->setSize -> notify Layout -> layout: item->setSize
/// )
/// @return bool requested size granted {w,h}
virtual auto handleRequestResize(const Item *, Length request_w, Length request_h) -> Size;
/// flag informing that content has changed
bool contentChanged = false;
/// inform parent that child content has changed.
void informContentChanged();
/// handle child content change request.
virtual void handleContentChanged();
virtual void setSize(Length w, Length h);
void setSize(Length val, Axis axis);
virtual void setBoundingBox(const BoundingBox &new_box);
/// entry function to create commands to execute in renderer to draw on screen
/// @note we should consider lazy evaluation prior to drawing on screen, rather than on each resize of elements
/// @return list of commands for renderer to draw elements on screen
virtual std::list<Command> buildDrawList() final;
/// Implementation of DrawList per Item to be drawn on screen
/// This is called from buildDrawList before children elements are added
/// should be = 0;
/// @param : commands list of commands for renderer to draw elements on screen
virtual void buildDrawListImplementation(std::list<Command> &commands)
{}
/// pre hook function, if set it is executed before building draw command
/// at Item::buildDrawListImplementation()
/// @param `commandlist` : commands list of commands for renderer to draw elements on screen
std::function<void(std::list<Command> &)> preBuildDrawListHook = nullptr;
/// post hook function, if set it is executed after building draw command
/// at Item::buildDrawListImplementation()
/// @param `commandlist` : commands list of commands for renderer to draw elements on screen
std::function<void(std::list<Command> &)> postBuildDrawListHook = nullptr;
/// sets radius for item edges
/// @note this should be moved to Rect
virtual void setRadius(int value);
/// gets next navigation item in NavigationDirection
/// @param direction : navigation direction (LEFT,RIGHT,UP,DOWN)
/// @return pointer to next navigation item in selected direction
virtual Item *getNavigationItem(NavigationDirection direction);
/// sets navigation item in NavigationDirection
/// @param direction : navigation direction (LEFT,RIGHT,UP,DOWN)
/// @param item : pointer to next item for selected direction
virtual void setNavigationItem(NavigationDirection direction, Item *item);
/// clears next navigation item in NavigationDirection
/// @param direction : navigation direction (LEFT,RIGHT,UP,DOWN)
virtual void clearNavigationItem(gui::NavigationDirection direction);
/// item constructor.
Item();
Item(Item &) = delete;
/// item virtual destructor.
virtual ~Item();
/// @defgroup inconsistent inconsistent size/offset accessors and setters
/// all these elements should be checked for naming/use consistency
        /// possibly all of that should be handled via area() (and area should have a callback pinned from Item on resize)
/// @{
void setX(const Position x);
void setY(const Position y);
[[nodiscard]] Position getX() const
{
return (widgetArea.x);
}
[[nodiscard]] Position getY() const
{
return (widgetArea.y);
}
[[nodiscard]] gui::Length getWidth() const
{
return (widgetArea.w);
}
[[nodiscard]] gui::Length getHeight() const
{
return (widgetArea.h);
}
/// helper function to show where widget ends in x axis
/// @return item ends position in X axis
[[nodiscard]] Position offset_w() const
{
return getWidth() + widgetArea.x;
}
/// helper function to show where widget ends in y axis
/// @return item ends position in Y axis
[[nodiscard]] Position offset_h() const
{
return getHeight() + widgetArea.y;
}
/// helper function to show where widget ends in selected axis
/// @return item ends position in selected axis
[[nodiscard]] Position getOffset(Axis axis) const
{
return this->widgetArea.size(axis) + this->widgetArea.pos(axis);
};
/// @}
/// adds timer to GUI item.
/// @note this is needed so that timer for element would live as long as element lives.
        /// @details Timers can be attached to an Item
        /// in order to pass ownership of the timer to the application/widget which uses its functionality.
void attachTimer(sys::Timer *timer)
{
timers.push_back(timer);
}
        /// remove timer from item and as a result - destroy it.
void detachTimer(sys::Timer &timer);
/// simple check function to determine if item is active && visible.
/// @return true if item is active and visible. Otherwise false.
inline bool isActive()
{
return (activeItem && visible);
}
/// @brief
virtual void accept(GuiVisitor &visitor);
protected:
/// On change of position or size this method will recalculate visible part of the widget
/// considering widgets hierarchy and calculate absolute position of drawing primitives.
virtual void updateDrawArea();
/// builds draw commands for all of item's children
/// @param `commandlist` : commands list of commands for renderer to draw elements on screen
virtual void buildChildrenDrawList(std::list<Command> &commands) final;
/// Pointer to navigation object. It is added when object is set for one of the directions
gui::Navigation *navigationDirections = nullptr;
private:
/// list of attached timers to item.
std::list<sys::Timer *> timers;
};
/// gets navigation direction (LEFT,RIGHT,UP,DOWN) based on incoming input event
/// @param[in] evt : input event e.g. key pressed
NavigationDirection inputToNavigation(const InputEvent &evt);
/// checks whether input event is related to GUI navigation directions (LEFT,RIGHT,UP,DOWN)
/// @param evt : input event e.g. key pressed
bool isInputNavigation(const InputEvent &evt);
} /* namespace gui */
| 7,244 |
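The requestSize / handleRequestResize contract documented above boils down to a layout granting a child's requested size only within the child's declared minimum and maximum areas. The toy Python model below illustrates that clamping; it is a sketch of the idea, not the MuditaOS C++ API.

def handle_request_resize(requested_w, requested_h, min_size, max_size):
    # A layout grants the request only within the child's [min, max] range.
    def clamp(value, low, high):
        return max(low, min(value, high))
    granted_w = clamp(requested_w, min_size[0], max_size[0])
    granted_h = clamp(requested_h, min_size[1], max_size[1])
    return granted_w, granted_h

# A child asking for 500x80 with minimum (50, 20) and maximum (240, 120) is granted 240x80.
print(handle_request_resize(500, 80, (50, 20), (240, 120)))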
1,144 | // SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2015 <NAME> <<EMAIL>>
*
* Copyright (c) 2011 The Chromium OS Authors.
* (C) Copyright 2002-2006
* <NAME>, DENX Software Engineering, <EMAIL>.
*
* (C) Copyright 2002
* Sysgo Real-Time Solutions, GmbH <www.elinos.com>
* <NAME> <<EMAIL>>
*/
#include <common.h>
DECLARE_GLOBAL_DATA_PTR;
int arch_reserve_stacks(void)
{
ulong *s;
/* setup stack pointer for exceptions */
gd->irq_sp = gd->start_addr_sp;
/* Clear initial stack frame */
s = (ulong *)gd->start_addr_sp;
*s = 0; /* Terminate back chain */
*++s = 0; /* NULL return address */
return 0;
}
| 240 |
348 | <gh_stars>100-1000
{"nom":"Saint-Célerin","circ":"2ème circonscription","dpt":"Sarthe","inscrits":543,"abs":340,"votants":203,"blancs":13,"nuls":5,"exp":185,"res":[{"nuance":"SOC","nom":"<NAME>","voix":98},{"nuance":"REM","nom":"<NAME>","voix":87}]} | 103 |
5,169 | <reponame>Gantios/Specs
{
"name": "ARPopMenu",
"platforms": {
"ios": "9.0"
},
"summary": "ARPopMenu lets a user to create Menu popup.",
"version": "1.1",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"Anshu": "<EMAIL>"
},
"homepage": "https://github.com/RoyAnshu/ARPopMenu",
"source": {
"git": "https://github.com/RoyAnshu/ARPopMenu.git",
"tag": "1.1"
},
"frameworks": "UIKit",
"source_files": "ARPopMenu/*.{h,m}"
}
| 224 |
575 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import js_checker
import json
import os
import sys
import unittest
import tempfile
_HERE_PATH = os.path.dirname(__file__)
sys.path.append(os.path.join(_HERE_PATH, '..', '..'))
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi, MockFile
class JsCheckerEsLintTest(unittest.TestCase):
def tearDown(self):
os.remove(self._tmp_file)
def _runChecks(self, file_contents):
tmp_args = {'suffix': '.js', 'dir': _HERE_PATH, 'delete': False}
with tempfile.NamedTemporaryFile(**tmp_args) as f:
self._tmp_file = f.name
f.write(file_contents)
input_api = MockInputApi()
input_api.files = [MockFile(os.path.abspath(self._tmp_file), '')]
input_api.presubmit_local_path = _HERE_PATH
checker = js_checker.JSChecker(input_api, MockOutputApi())
try:
return checker.RunEsLintChecks(input_api.AffectedFiles(), format='json')
except RuntimeError as err:
# Extract ESLint's JSON error output from the error message.
json_error = err.message[err.message.index('['):]
return json.loads(json_error)[0].get('messages')
def _assertError(self, results, rule_id, line):
self.assertEqual(1, len(results))
message = results[0]
self.assertEqual(rule_id, message.get('ruleId'))
self.assertEqual(line, message.get('line'))
def testGetElementByIdCheck(self):
results = self._runChecks("const a = document.getElementById('foo');")
self._assertError(results, 'no-restricted-properties', 1)
def testPrimitiveWrappersCheck(self):
results = self._runChecks('const a = new Number(1);')
self._assertError(results, 'no-new-wrappers', 1)
if __name__ == '__main__':
unittest.main()
| 704 |
3,013 | # Graphics / Clock
# Use the #graphics API to make a clock.
# Source: https://codepen.io/dudleystorey/pen/HLBki
# ---
import time
import datetime
from h2o_wave import site, ui, graphics as g
page = site['/demo']
page['example'] = ui.graphics_card(
box='1 1 2 3', view_box='0 0 100 100', width='100%', height='100%',
stage=g.stage(
face=g.circle(cx='50', cy='50', r='45', fill='#111', stroke_width='2px', stroke='#f55'),
),
scene=g.scene(
hour=g.rect(x='47.5', y='12.5', width='5', height='40', rx='2.5', fill='#333', stroke='#555'),
min=g.rect(x='48.5', y='12.5', width='3', height='40', rx='2', fill='#333', stroke='#555'),
sec=g.line(x1='50', y1='50', x2='50', y2='16', stroke='#f55', stroke_width='1px'),
),
)
page.save()
def rotate(deg):
return f'rotate({deg} 50 50)'
scene = page['example'].scene
while True:
time.sleep(1)
now = datetime.datetime.now()
g.draw(scene.hour, transform=rotate(30 * (now.hour % 12) + now.minute / 2))
g.draw(scene.min, transform=rotate(6 * now.minute))
g.draw(scene.sec, transform=rotate(6 * now.second))
page.save()
| 498 |
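The hand rotations in the clock example follow standard clock arithmetic: the hour hand turns 30 degrees per hour plus 0.5 degrees per minute, while the minute and second hands turn 6 degrees per unit. At 3:30:45, for instance, the transforms evaluate to 30*3 + 30/2 = 105 degrees (hour), 6*30 = 180 degrees (minute) and 6*45 = 270 degrees (second).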
506 | <gh_stars>100-1000
// https://open.kattis.com/problems/periodicstrings
#include <bits/stdc++.h>
using namespace std;
int main() {
cin.tie(0), ios::sync_with_stdio(0);
string s;
cin >> s;
for (int i = 1; i <= s.size(); i++)
if (s.size() % i == 0) {
bool ok = 1;
string t = s.substr(0, i);
for (int j = 0; j < s.size(); j += i) {
for (int k = 0; k < i; k++)
if (s[j + k] != t[k]) {
ok = 0;
break;
}
if (ok) t = t.back() + t.substr(0, i - 1);
else
break;
}
if (ok) {
cout << i << '\n';
return 0;
}
}
}
| 366 |
4,893 | <reponame>duchce/TensorFlow
import tensorflow as tf
import numpy as np
class SOM:
def __init__(self, width, height, dim):
self.num_iters = 100
self.width = width
self.height = height
self.dim = dim
self.node_locs = self.get_locs()
# Each node is a vector of dimension `dim`
# For a 2D grid, there are `width * height` nodes
nodes = tf.Variable(tf.random_normal([width*height, dim]))
self.nodes = nodes
# These two ops are inputs at each iteration
x = tf.placeholder(tf.float32, [dim])
iter = tf.placeholder(tf.float32)
self.x = x
self.iter = iter
# Find the node that matches closest to the input
bmu_loc = self.get_bmu_loc(x)
self.propagate_nodes = self.get_propagation(bmu_loc, x, iter)
def get_propagation(self, bmu_loc, x, iter):
num_nodes = self.width * self.height
rate = 1.0 - tf.div(iter, self.num_iters)
alpha = rate * 0.5
sigma = rate * tf.to_float(tf.maximum(self.width, self.height)) / 2.
expanded_bmu_loc = tf.expand_dims(tf.to_float(bmu_loc), 0)
sqr_dists_from_bmu = tf.reduce_sum(tf.square(tf.sub(expanded_bmu_loc, self.node_locs)), 1)
neigh_factor = tf.exp(-tf.div(sqr_dists_from_bmu, 2 * tf.square(sigma)))
rate = tf.mul(alpha, neigh_factor)
rate_factor = tf.pack([tf.tile(tf.slice(rate, [i], [1]), [self.dim]) for i in range(num_nodes)])
nodes_diff = tf.mul(rate_factor, tf.sub(tf.pack([x for i in range(num_nodes)]), self.nodes))
update_nodes = tf.add(self.nodes, nodes_diff)
return tf.assign(self.nodes, update_nodes)
def get_bmu_loc(self, x):
expanded_x = tf.expand_dims(x, 0)
sqr_diff = tf.square(tf.sub(expanded_x, self.nodes))
dists = tf.reduce_sum(sqr_diff, 1)
bmu_idx = tf.argmin(dists, 0)
bmu_loc = tf.pack([tf.mod(bmu_idx, self.width), tf.div(bmu_idx, self.width)])
return bmu_loc
def get_locs(self):
locs = [[x, y]
for y in range(self.height)
for x in range(self.width)]
return tf.to_float(locs)
def train(self, data):
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(self.num_iters):
for data_x in data:
sess.run(self.propagate_nodes, feed_dict={self.x: data_x, self.iter: i})
centroid_grid = [[] for i in range(self.width)]
self.nodes_val = list(sess.run(self.nodes))
self.locs_val = list(sess.run(self.node_locs))
for i, l in enumerate(self.locs_val):
centroid_grid[int(l[0])].append(self.nodes_val[i])
self.centroid_grid = centroid_grid | 1,387 |
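The SOM class above targets a pre-1.0 TensorFlow API (tf.sub, tf.mul and tf.pack were renamed or removed in TensorFlow 1.0). Assuming such an environment, a minimal usage sketch might look like this:

import numpy as np

colors = np.random.rand(100, 3)          # 100 random RGB vectors as training data
som = SOM(width=4, height=4, dim=3)      # 4x4 grid of 3-dimensional nodes
som.train(colors)                        # runs the propagation op for num_iters epochs
print(np.array(som.centroid_grid).shape) # (4, 4, 3): one learned centroid per grid cell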
335 | {
"word": "Mother",
"definitions": [
"A woman in relation to her child or children.",
"A female animal in relation to its offspring.",
"(especially as a form of address) an elderly woman.",
"Denoting an institution or organization from which others of the same type derive.",
"(especially as a title or form of address) the head of a female religious community."
],
"parts-of-speech": "Noun"
} | 149 |
631 | #!/bin/python
import sys
print("Fixing wasm client, file: ", sys.argv[1], " output: ")
t_poll = "function ___syscall_poll"
t_proxy = "if (ENVIRONMENT_IS_PTHREAD)"
t_ret = "return"
t_end = ");"
# read all input
with open(sys.argv[1], 'r') as original:
text = original.read()
# find function sys_poll
ipoll = text.index(t_poll)
#add with AB declaration
text = text[:ipoll] + "const AB = new Int32Array(new SharedArrayBuffer(4));\n" + text[ipoll:]
# find start of proxy call
ipoll = text.index(t_poll)
iret = text.index(t_proxy, ipoll)
iret = text.index(t_ret, iret)
# replace with our changes
text = text[:iret] + '{\n var ret =' + text[iret + len(t_ret):]
# find end of proxy call
iend = text.index(t_end, iret)
text = text[:iend + len(t_end)] + '\n if (ret == 0) Atomics.wait(AB, 0, 0, 50);\n return ret;\n }' + text[iend + len(t_end):]
# replace buffer instanceof SharedArrayBuffer
# to fix https://bugs.chromium.org/p/chromium/issues/detail?id=1269096
text = text.replace("buffer instanceof SharedArrayBuffer", "buffer[Symbol.toStringTag] == 'SharedArrayBuffer'")
#save fixed result
with open(sys.argv[1], 'w') as result:
result.write(text)
| 463 |
344 | /*
* Copyright 2014 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define GLM_FORCE_RADIANS
#include <jni.h>
#include <hello_area_description/hello_area_description_app.h>
static hello_area_description::AreaLearningApp app;
#ifdef __cplusplus
extern "C" {
#endif
jint JNI_OnLoad(JavaVM* vm, void*) {
// We need to store a reference to the Java VM so that we can call into the
// Java layer to show progress while saving ADFs
app.SetJavaVM(vm);
return JNI_VERSION_1_6;
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_onCreate(
JNIEnv* env, jobject /*obj*/, jobject caller_activity) {
app.OnCreate(env, caller_activity);
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_onTangoServiceConnected(
JNIEnv* env, jobject, jobject binder, jboolean is_area_learning_enabled,
jboolean is_loading_area_description) {
app.OnTangoServiceConnected(env, binder, is_area_learning_enabled,
is_loading_area_description);
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_onPause(
JNIEnv*, jobject) {
app.OnPause();
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_onDestroy(
JNIEnv*, jobject) {
app.OnDestroy();
}
JNIEXPORT jboolean JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_isRelocalized(
JNIEnv*, jobject) {
return app.IsRelocalized();
}
JNIEXPORT jstring JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_getLoadedAdfUuidString(
JNIEnv* env, jobject) {
return (env)->NewStringUTF(app.GetLoadedAdfString().c_str());
}
JNIEXPORT jstring JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_saveAdf(
JNIEnv* env, jobject) {
return (env)->NewStringUTF(app.SaveAdf().c_str());
}
JNIEXPORT jstring JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_getAdfMetadataValue(
JNIEnv* env, jobject, jstring uuid, jstring key) {
std::string uuid_str(env->GetStringUTFChars(uuid, nullptr));
std::string key_str(env->GetStringUTFChars(key, nullptr));
return env->NewStringUTF(app.GetAdfMetadataValue(uuid_str, key_str).c_str());
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_setAdfMetadataValue(
JNIEnv* env, jobject, jstring uuid, jstring key, jstring value) {
std::string uuid_str(env->GetStringUTFChars(uuid, nullptr));
std::string key_str(env->GetStringUTFChars(key, nullptr));
std::string value_str(env->GetStringUTFChars(value, nullptr));
app.SetAdfMetadataValue(uuid_str, key_str, value_str);
}
JNIEXPORT jstring JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_getAllAdfUuids(
JNIEnv* env, jobject) {
return env->NewStringUTF(app.GetAllAdfUuids().c_str());
}
JNIEXPORT void JNICALL
Java_com_projecttango_examples_cpp_helloareadescription_TangoJniNative_deleteAdf(
JNIEnv* env, jobject, jstring uuid) {
std::string uuid_str(env->GetStringUTFChars(uuid, nullptr));
return app.DeleteAdf(uuid_str);
}
#ifdef __cplusplus
}
#endif
| 1,406 |
330 | <gh_stars>100-1000
/*
atom-ui.c
UI implementation for atom.c
*/
#include "chips/m6502.h"
#include "chips/mc6847.h"
#include "chips/i8255.h"
#include "chips/m6522.h"
#include "chips/beeper.h"
#include "chips/clk.h"
#include "chips/kbd.h"
#include "chips/mem.h"
#include "systems/atom.h"
#define UI_DASM_USE_M6502
#define UI_DBG_USE_M6502
#define CHIPS_UTIL_IMPL
#include "util/m6502dasm.h"
#define CHIPS_UI_IMPL
#include "imgui.h"
#include "ui/ui_util.h"
#include "ui/ui_chip.h"
#include "ui/ui_memedit.h"
#include "ui/ui_memmap.h"
#include "ui/ui_dasm.h"
#include "ui/ui_dbg.h"
#include "ui/ui_m6502.h"
#include "ui/ui_m6522.h"
#include "ui/ui_mc6847.h"
#include "ui/ui_i8255.h"
#include "ui/ui_audio.h"
#include "ui/ui_kbd.h"
#include "ui/ui_atom.h"
| 383 |
746 | package org.protege.editor.owl.model.util;
import org.semanticweb.owlapi.model.*;
import javax.annotation.Nonnull;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* <NAME>
* Stanford Center for Biomedical Informatics Research
* 23 Aug 2017
*/
public class ClassDefinitionExtractor {
@Nonnull
private final OWLClass cls;
@Nonnull
private final OWLDataFactory dataFactory;
@Nonnull
private final OWLOntology ontology;
public ClassDefinitionExtractor(@Nonnull OWLClass cls,
@Nonnull OWLOntology ontology,
@Nonnull OWLDataFactory dataFactory) {
this.cls = checkNotNull(cls);
this.dataFactory = checkNotNull(dataFactory);
this.ontology = checkNotNull(ontology);
}
/**
* Gets a list of changes that are necessary to remove the axioms that constitute the logical definition
* of the target class.
* @return A list of changes.
*/
@Nonnull
public List<OWLOntologyChange> getChangesToRemoveDefinition() {
List<OWLOntologyChange> changes = new ArrayList<>();
generateChangesToRemoveDefinitionFromOntology(ontology, changes);
return changes;
}
@Nonnull
public Set<OWLAxiom> getDefiningAxioms() {
Set<OWLAxiom> result = new HashSet<>();
result.addAll(ontology.getSubClassAxiomsForSubClass(cls));
result.addAll(ontology.getEquivalentClassesAxioms(cls));
result.addAll(ontology.getDisjointClassesAxioms(cls));
result.addAll(ontology.getHasKeyAxioms(cls));
result.addAll(ontology.getDisjointUnionAxioms(cls));
return result;
}
private void generateChangesToRemoveDefinitionFromOntology(@Nonnull OWLOntology o,
@Nonnull List<OWLOntologyChange> changes) {
o.getSubClassAxiomsForSubClass(cls).forEach(ax -> changes.add(new RemoveAxiom(o, ax)));
o.getEquivalentClassesAxioms(cls).forEach(ax -> changes.add(new RemoveAxiom(o, ax)));
o.getDisjointClassesAxioms(cls).forEach(ax -> {
changes.add(new RemoveAxiom(o, ax));
Set<OWLClassExpression> remainingDisjointClasses = ax.getClassExpressionsMinus(cls);
if(remainingDisjointClasses.size() > 1) {
OWLAxiom freshAx = dataFactory.getOWLDisjointClassesAxiom(remainingDisjointClasses,
ax.getAnnotations());
changes.add(new AddAxiom(o, freshAx));
}
});
        o.getHasKeyAxioms(cls).forEach(ax -> changes.add(new RemoveAxiom(o, ax)));
        o.getDisjointUnionAxioms(cls).forEach(ax -> changes.add(new RemoveAxiom(o, ax)));
}
}
| 1,282 |
701 | <gh_stars>100-1000
func = None
def check(_object, name, types):
for _type in types:
if _type is None:
_type = type(None)
if _type is func and callable(_object):
return
try:
if _type.__name__ == "View" and _type.__module__ == "pyto_ui":
if "__py_view__" in dir(_object):
return
except AttributeError:
pass
if isinstance(_object, _type):
return
msg = f"Invalid value type. The '{name}' parameter must be an instance of one of the following types:"
for _type in types:
msg += f"\n{_type}"
raise TypeError(msg)
func = type(check)
| 328 |
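A short usage sketch for the check helper above; the parameter names here are hypothetical call sites, not taken from Pyto itself.

check(5, "timeout", [int, None])             # passes: 5 is an int
check(lambda x: x, "callback", [func, None]) # passes: the func sentinel accepts any callable

try:
    check("5", "timeout", [int, None])       # fails: a str is neither int nor None
except TypeError as error:
    print(error)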
2,151 | <reponame>zipated/src<gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_WEBUI_NETWORK_ERROR_URL_LOADER_H_
#define CONTENT_BROWSER_WEBUI_NETWORK_ERROR_URL_LOADER_H_
#include "services/network/public/mojom/url_loader.mojom.h"
namespace content {
// Creates the data for chrome://network-error.
void StartNetworkErrorsURLLoader(const network::ResourceRequest& request,
network::mojom::URLLoaderClientPtr client);
} // namespace content
#endif // CONTENT_BROWSER_WEBUI_NETWORK_ERROR_URL_LOADER_H_
| 260 |
2,725 | // Copyright 2000-2017 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package org.jetbrains.java.decompiler.struct.consts;
import org.jetbrains.java.decompiler.code.CodeConstants;
public class PooledConstant implements CodeConstants {
public final int type;
public PooledConstant(int type) {
this.type = type;
}
public void resolveConstant(ConstantPool pool) { }
} | 140 |
348 | /*
* Copyright (C) 2015 <NAME>, <EMAIL>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sebastian_daschner.jaxrs_analyzer.analysis.project.classes.testclasses;
import com.sebastian_daschner.jaxrs_analyzer.builder.ClassResultBuilder;
import com.sebastian_daschner.jaxrs_analyzer.builder.HttpResponseBuilder;
import com.sebastian_daschner.jaxrs_analyzer.builder.MethodResultBuilder;
import com.sebastian_daschner.jaxrs_analyzer.model.rest.HttpMethod;
import com.sebastian_daschner.jaxrs_analyzer.model.results.ClassResult;
import com.sebastian_daschner.jaxrs_analyzer.model.results.MethodResult;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import java.util.Map;
// not meant to be correct JAX-RS code, just for test purposes
@Path("test")
public class TestClass9 {
@QueryParam("definitions")
private Map<String, Integer> definitions;
@GET
@Path("{info}")
public Response getInfo(final Map<String, String> info) {
return Response.ok().build();
}
public static ClassResult getResult() {
final MethodResult method = MethodResultBuilder.withResponses(HttpResponseBuilder.withStatues(200).build())
.andPath("{info}").andMethod(HttpMethod.GET).andRequestBodyType("Ljava/util/Map<Ljava/lang/String;Ljava/lang/String;>;")
.build();
return ClassResultBuilder.withResourcePath("test").andMethods(method).andQueryParam("definitions", "Ljava/util/Map<Ljava/lang/String;Ljava/lang/Integer;>;").build();
}
}
| 720 |
3,553 | <reponame>areese/fluent-bit
#include <stdlib.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_parser.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_error.h>
#include <fluent-bit/flb_socket.h>
#include <fluent-bit/flb_http_client.h>
#include "flb_fuzz_header.h"
extern int fuzz_process_data(struct flb_http_client *c);
extern int fuzz_check_connection(struct flb_http_client *c);
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
struct flb_upstream *u;
struct flb_upstream_conn *u_conn = NULL;
struct flb_http_client *c;
struct flb_config *config;
char *uri = NULL;
if (size < 160) {
return 0;
}
config = flb_config_init();
if (config == NULL) {
return 0;
}
u = flb_upstream_create(config, "127.0.0.1", 8001, 0, NULL);
u_conn = flb_malloc(sizeof(struct flb_upstream_conn));
if (u_conn == NULL)
return 0;
u_conn->u = u;
char *proxy = NULL;
if (GET_MOD_EQ(2,1)) {
proxy = get_null_terminated(50, &data, &size);
}
uri = get_null_terminated(20, &data, &size);
int method = (int)data[0];
c = flb_http_client(u_conn, method, uri, NULL, 0,
"127.0.0.1", 8001, proxy, 0);
if (c != NULL) {
char *null_terminated = get_null_terminated(30, &data, &size);
/* Perform a set of operations on the http_client */
flb_http_basic_auth(c, null_terminated, null_terminated);
flb_http_set_content_encoding_gzip(c);
flb_http_set_keepalive(c);
flb_http_strip_port_from_host(c);
flb_http_allow_duplicated_headers(c, 0);
flb_http_buffer_size(c, (*(size_t *)data) & 0xfff);
MOVE_INPUT(4)
flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
flb_http_add_header(c, (char*)data, size, "Fluent-Bit", 10);
flb_http_buffer_size(c, (int)data[0]);
MOVE_INPUT(1)
flb_http_buffer_available(c);
size_t b_sent;
flb_http_do(c, &b_sent);
size_t out_size = 0;
flb_http_buffer_increase(c, (*(size_t *)data) & 0xfff, &out_size);
MOVE_INPUT(4)
/* Now we need to simulate the reading of data */
c->resp.status = 200;
if (c->resp.data != NULL) {
flb_free(c->resp.data);
}
char *new_nulltm = get_null_terminated(30, &data, &size);
c->resp.data_len = 30;
c->resp.data = new_nulltm;
fuzz_process_data(c);
fuzz_check_connection(c);
flb_http_client_destroy(c);
flb_free(null_terminated);
}
/* Now try the http_client_proxy_connect function. */
flb_http_client_proxy_connect(u_conn);
flb_free(u_conn);
flb_upstream_destroy(u);
flb_config_exit(config);
if (uri != NULL) {
flb_free(uri);
}
if (proxy != NULL) {
flb_free(proxy);
}
return 0;
}
| 1,446 |
12,278 | <filename>ReactNativeFrontend/ios/Pods/boost/boost/flyweight/tracking_tag.hpp
/* Copyright 2006-2008 <NAME>.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See http://www.boost.org/libs/flyweight for library home page.
*/
#ifndef BOOST_FLYWEIGHT_TRACKING_TAG_HPP
#define BOOST_FLYWEIGHT_TRACKING_TAG_HPP
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/config.hpp> /* keep it first to prevent nasty warns in MSVC */
#include <boost/parameter/parameters.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
namespace boost{
namespace flyweights{
/* Three ways to indicate that a given class T is a tracking policy:
* 1. Make it derived from tracking_marker.
* 2. Specialize is_tracking to evaluate to boost::mpl::true_.
* 3. Pass it as tracking<T> when defining a flyweight type.
*/
struct tracking_marker{};
template<typename T>
struct is_tracking:is_base_and_derived<tracking_marker,T>
{};
template<typename T=parameter::void_>
struct tracking:parameter::template_keyword<tracking<>,T>
{};
} /* namespace flyweights */
} /* namespace boost */
#endif
| 421 |
6,036 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cpu/math/hardmax.h"
#include "core/providers/common.h"
#include "core/util/math_cpuonly.h"
#include "core/util/math.h"
#include "core/providers/cpu/tensor/transpose.h"
namespace onnxruntime {
template <>
Status Hardmax<float>::Compute(OpKernelContext* ctx) const {
const auto* X = ctx->Input<Tensor>(0);
const TensorShape& X_shape = X->Shape();
size_t rank = X_shape.NumDimensions();
Tensor* Y = ctx->Output(0, X_shape);
// special case when there is a dim value of 0 in the shape.
if (X_shape.Size() == 0)
return Status::OK();
// handle negative and enforce axis is valid
const size_t axis = static_cast<size_t>(HandleNegativeAxis(axis_, rank));
bool is_transpose_required = false;
Tensor transposed_input;
std::vector<int64_t> transposed_input_dims;
Tensor intermediate_output; // output that the hardmax implementation will write into while using transposed input
std::vector<size_t> permutation(rank);
// The "semantic" meaning of axis has changed in opset-13.
// Please compare: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Hardmax
// with https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Hardmax-11 for detailed explanations
// To account for the opset-13 behavior, our plan will be to transpose the "axis" dim to the innermost dim
// and perform softmax and then reverse the transpose. We can skip the transposing aspect if the axis is already
// the innermost dim
if (opset_ >= 13 && axis != (rank - 1)) {
is_transpose_required = true;
}
if (is_transpose_required) {
AllocatorPtr alloc;
auto status = ctx->GetTempSpaceAllocator(&alloc);
if (!status.IsOK())
return status;
std::iota(std::begin(permutation), std::end(permutation), 0);
// swap the innermost dim with the dim corresponding to axis
permutation[axis] = rank - 1;
permutation[rank - 1] = axis;
transposed_input_dims.reserve(rank);
for (auto e : permutation) {
transposed_input_dims.push_back(X_shape[e]);
}
// Allocate a temporary tensor to hold transposed input
Tensor temp_input(X->DataType(), TensorShape(transposed_input_dims), alloc);
// Perform the transpose
ORT_RETURN_IF_ERROR(TransposeBase::DoTranspose(permutation, *X, temp_input));
transposed_input = std::move(temp_input);
// Allocate memory for the intermediate output
Tensor temp_output(Y->DataType(), TensorShape(transposed_input_dims), alloc);
intermediate_output = std::move(temp_output);
}
size_t tmp_N = is_transpose_required ? TensorShape(transposed_input_dims).SizeToDimension(rank - 1) : X_shape.SizeToDimension(axis);
size_t tmp_D = is_transpose_required ? TensorShape(transposed_input_dims).SizeFromDimension(rank - 1) : X_shape.SizeFromDimension(axis);
// Math::RowwiseMax expects int N and D.
if (tmp_N * tmp_D > INT32_MAX || tmp_N > INT32_MAX || tmp_D > INT32_MAX) {
std::ostringstream ss;
ss << "Hardmax inputs N, D and N * D must be < " << INT32_MAX << ". N=" << tmp_N << ", D=" << tmp_D;
std::string msg = ss.str();
return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, msg);
}
const int N = gsl::narrow_cast<int>(tmp_N);
const int D = gsl::narrow_cast<int>(tmp_D);
std::vector<float> rowmax_(N);
float* rowmax_data = rowmax_.data();
const float* X_data = nullptr;
float* Y_data = nullptr;
if (is_transpose_required) { // use intermediate buffers to compute the hardmax values
X_data = transposed_input.template Data<float>();
Y_data = intermediate_output.template MutableData<float>();
} else { // use the node input/output directly
X_data = X->template Data<float>();
Y_data = Y->template MutableData<float>();
}
math::RowwiseMax<float, CPUMathUtil>(N, D, X_data, rowmax_data, nullptr);
// Even if we had to transpose the input, it is safe to go with X_shape.Size() which computes
// the size of the buffer from the original input's shape as even if we do transpose, the size
// of the transposed buffer will be the same as the original input's buffer
math::Set<float, CPUMathUtil>(X_shape.Size(), 0.f, Y_data, &CPUMathUtil::Instance());
for (int i = 0; i < N; ++i) {
for (int j = 0; j < D; ++j) {
if (X_data[i * D + j] == rowmax_data[i]) {
Y_data[i * D + j] = 1;
break;
}
}
}
if (is_transpose_required) {
std::vector<size_t> reverse_permutation(rank);
for (size_t i = 0, end = permutation.size(); i < end; ++i) {
reverse_permutation[permutation[i]] = i;
}
// Perform the transpose to get the axes back to the original ordering
ORT_RETURN_IF_ERROR(TransposeBase::DoTranspose(reverse_permutation, intermediate_output, *Y));
}
return Status::OK();
}
ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
Hardmax,
1,
10,
KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
Hardmax<float>);
// Opset 11 starts to support Neg Axis.
ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
Hardmax,
11,
12,
KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
Hardmax<float>);
// Opset 13 changed the semantic meaning of the axis attribute.
ONNX_CPU_OPERATOR_KERNEL(
Hardmax,
13,
KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
Hardmax<float>);
} // namespace onnxruntime
| 1,987 |
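The Hardmax kernel above sets, for each row along the chosen axis, the first occurrence of the row maximum to 1 and everything else to 0; for opset 13 it first transposes the reduction axis to the innermost dimension. The same semantics can be sketched with NumPy, as an illustration rather than the onnxruntime implementation:

import numpy as np

def hardmax(x, axis=-1):
    # Move the reduction axis to the innermost position, mirroring the transpose trick above.
    moved = np.moveaxis(x, axis, -1)
    flat = moved.reshape(-1, moved.shape[-1])
    out = np.zeros_like(flat)
    # argmax returns the first index of the maximum, matching the "break on first hit" loop.
    out[np.arange(flat.shape[0]), flat.argmax(axis=1)] = 1
    return np.moveaxis(out.reshape(moved.shape), -1, axis)

print(hardmax(np.array([[1.0, 3.0, 3.0], [2.0, 0.5, 1.0]])))
# [[0. 1. 0.]
#  [1. 0. 0.]]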
2,333 | //
// Licensed under the terms in License.txt
//
// Copyright 2010 <NAME>. All rights reserved.
//
#import "KiwiConfiguration.h"
#import "KWMatcher.h"
@interface KWBeEmptyMatcher : KWMatcher
#pragma mark - Configuring Matchers
- (void)beEmpty;
@end
| 90 |
1,118 | import logging
import os
import platform
import sys
import warnings
from time import time
import psutil
from pathlib2 import Path
from typing import Text
from .process.mp import BackgroundMonitor
from ..backend_api import Session
from ..binding.frameworks.tensorflow_bind import IsTensorboardInit
try:
from .gpu import gpustat
except ImportError:
gpustat = None
class ResourceMonitor(BackgroundMonitor):
_title_machine = ':monitor:machine'
_title_gpu = ':monitor:gpu'
def __init__(self, task, sample_frequency_per_sec=2., report_frequency_sec=30.,
first_report_sec=None, wait_for_first_iteration_to_start_sec=180.0,
max_wait_for_first_iteration_to_start_sec=1800., report_mem_used_per_process=True):
super(ResourceMonitor, self).__init__(task=task, wait_period=sample_frequency_per_sec)
self._task = task
self._sample_frequency = sample_frequency_per_sec
self._report_frequency = report_frequency_sec
self._first_report_sec = first_report_sec or report_frequency_sec
self.wait_for_first_iteration = wait_for_first_iteration_to_start_sec
self.max_check_first_iteration = max_wait_for_first_iteration_to_start_sec
self._num_readouts = 0
self._readouts = {}
self._previous_readouts = {}
self._previous_readouts_ts = time()
self._gpustat_fail = 0
self._gpustat = gpustat
self._active_gpus = None
self._process_info = psutil.Process() if report_mem_used_per_process else None
self._last_process_pool = {}
self._last_process_id_list = []
if not self._gpustat:
self._task.get_logger().report_text('ClearML Monitor: GPU monitoring is not available')
else: # if running_remotely():
# noinspection PyBroadException
try:
active_gpus = os.environ.get('NVIDIA_VISIBLE_DEVICES', '') or \
os.environ.get('CUDA_VISIBLE_DEVICES', '')
if active_gpus:
self._active_gpus = [int(g.strip()) for g in active_gpus.split(',')]
except Exception:
pass
def daemon(self):
seconds_since_started = 0
reported = 0
last_iteration = 0
fallback_to_sec_as_iterations = None
# get max GPU ID, and make sure our active list is within range
if self._active_gpus:
# noinspection PyBroadException
try:
gpu_stat = self._gpustat.new_query()
if max(self._active_gpus) > len(gpu_stat.gpus) - 1:
self._active_gpus = None
except Exception:
pass
# add Task runtime_properties with the machine spec
if Session.check_min_api_version('2.13'):
try:
machine_spec = self._get_machine_specs()
if machine_spec:
# noinspection PyProtectedMember
self._task._set_runtime_properties(runtime_properties=machine_spec)
except Exception as ex:
logging.getLogger('clearml.resource_monitor').debug(
'Failed logging machine specification: {}'.format(ex))
# last_iteration_interval = None
# last_iteration_ts = 0
# repeated_iterations = 0
while True:
last_report = time()
current_report_frequency = self._report_frequency if reported != 0 else self._first_report_sec
while (time() - last_report) < current_report_frequency:
# wait for self._sample_frequency seconds, if event set quit
if self._event.wait(1.0 / self._sample_frequency):
return
# noinspection PyBroadException
try:
self._update_readouts()
except Exception:
pass
seconds_since_started += int(round(time() - last_report))
# check if we do not report any metric (so it means the last iteration will not be changed)
if fallback_to_sec_as_iterations is None:
if IsTensorboardInit.tensorboard_used():
fallback_to_sec_as_iterations = False
elif seconds_since_started >= self.wait_for_first_iteration:
self._task.get_logger().report_text('ClearML Monitor: Could not detect iteration reporting, '
'falling back to iterations as seconds-from-start')
fallback_to_sec_as_iterations = True
elif fallback_to_sec_as_iterations is True and seconds_since_started <= self.max_check_first_iteration:
if self._check_logger_reported():
fallback_to_sec_as_iterations = False
self._task.get_logger().report_text('ClearML Monitor: Reporting detected, '
'reverting back to iteration based reporting')
clear_readouts = True
# if we do not have last_iteration, we just use seconds as iteration
if fallback_to_sec_as_iterations:
iteration = seconds_since_started
else:
iteration = self._task.get_last_iteration()
if iteration < last_iteration:
# we started a new session?!
# wait out
clear_readouts = False
iteration = last_iteration
elif iteration == last_iteration:
# repeated_iterations += 1
# if last_iteration_interval:
# # to be on the safe side, we don't want to pass the actual next iteration
# iteration += int(0.95*last_iteration_interval[0] * (seconds_since_started - last_iteration_ts)
# / last_iteration_interval[1])
# else:
# iteration += 1
clear_readouts = False
iteration = last_iteration
else:
# last_iteration_interval = (iteration - last_iteration, seconds_since_started - last_iteration_ts)
# repeated_iterations = 0
# last_iteration_ts = seconds_since_started
last_iteration = iteration
fallback_to_sec_as_iterations = False
clear_readouts = True
# start reporting only when we figured out, if this is seconds based, or iterations based
average_readouts = self._get_average_readouts()
if fallback_to_sec_as_iterations is not None:
for k, v in average_readouts.items():
# noinspection PyBroadException
try:
title = self._title_gpu if k.startswith('gpu_') else self._title_machine
# 3 points after the dot
value = round(v * 1000) / 1000.
self._task.get_logger().report_scalar(title=title, series=k, iteration=iteration, value=value)
except Exception:
pass
# clear readouts if this is update is not averaged
if clear_readouts:
self._clear_readouts()
# count reported iterations
reported += 1
def _update_readouts(self):
readouts = self._machine_stats()
elapsed = time() - self._previous_readouts_ts
self._previous_readouts_ts = time()
for k, v in readouts.items():
# cumulative measurements
if k.endswith('_mbs'):
v = (v - self._previous_readouts.get(k, v)) / elapsed
self._readouts[k] = self._readouts.get(k, 0.0) + v
self._num_readouts += 1
self._previous_readouts = readouts
def _get_num_readouts(self):
return self._num_readouts
def _get_average_readouts(self):
average_readouts = dict((k, v / float(self._num_readouts)) for k, v in self._readouts.items())
return average_readouts
def _clear_readouts(self):
self._readouts = {}
self._num_readouts = 0
def _machine_stats(self):
"""
        :return: machine stats dictionary; network and disk I/O counters are in megabytes, memory in gigabytes, usage values in percent
"""
cpu_usage = [float(v) for v in psutil.cpu_percent(percpu=True)]
stats = {
"cpu_usage": sum(cpu_usage) / float(len(cpu_usage)),
}
bytes_per_megabyte = 1024 ** 2
def bytes_to_megabytes(x):
return x / bytes_per_megabyte
virtual_memory = psutil.virtual_memory()
# stats["memory_used_gb"] = bytes_to_megabytes(virtual_memory.used) / 1024
stats["memory_used_gb"] = bytes_to_megabytes(
self._get_process_used_memory() if self._process_info else virtual_memory.used) / 1024
stats["memory_free_gb"] = bytes_to_megabytes(virtual_memory.available) / 1024
disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
stats["disk_free_percent"] = 100.0 - disk_use_percentage
with warnings.catch_warnings():
if logging.root.level > logging.DEBUG: # If the logging level is bigger than debug, ignore
# psutil.sensors_temperatures warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
sensor_stat = (psutil.sensors_temperatures() if hasattr(psutil, "sensors_temperatures") else {})
if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
stats["cpu_temperature"] = max([float(t.current) for t in sensor_stat["coretemp"]])
# update cached measurements
net_stats = psutil.net_io_counters()
stats["network_tx_mbs"] = bytes_to_megabytes(net_stats.bytes_sent)
stats["network_rx_mbs"] = bytes_to_megabytes(net_stats.bytes_recv)
io_stats = psutil.disk_io_counters()
stats["io_read_mbs"] = bytes_to_megabytes(io_stats.read_bytes)
stats["io_write_mbs"] = bytes_to_megabytes(io_stats.write_bytes)
# check if we can access the gpu statistics
if self._gpustat:
# noinspection PyBroadException
try:
stats.update(self._get_gpu_stats())
except Exception:
# something happened and we can't use gpu stats,
self._gpustat_fail += 1
if self._gpustat_fail >= 3:
self._task.get_logger().report_text('ClearML Monitor: GPU monitoring failed getting GPU reading, '
'switching off GPU monitoring')
self._gpustat = None
return stats
def _check_logger_reported(self):
titles = self.get_logger_reported_titles(self._task)
return len(titles) > 0
@classmethod
def get_logger_reported_titles(cls, task):
# noinspection PyProtectedMember
titles = list(task.get_logger()._get_used_title_series().keys())
try:
titles.remove(cls._title_machine)
except ValueError:
pass
try:
titles.remove(cls._title_gpu)
except ValueError:
pass
return titles
def _get_process_used_memory(self):
def mem_usage_children(a_mem_size, pr, parent_mem=None):
self._last_process_id_list.append(pr.pid)
# add our memory usage
our_mem = pr.memory_info()
mem_diff = our_mem.rss - parent_mem.rss if parent_mem else our_mem.rss
a_mem_size += mem_diff if mem_diff > 0 else 0
# now we are the parent
for child in pr.children():
# get the current memory
m = pr.memory_info()
mem_diff = m.rss - our_mem.rss
a_mem_size += mem_diff if mem_diff > 0 else 0
a_mem_size = mem_usage_children(a_mem_size, child, parent_mem=m)
return a_mem_size
# only run the memory usage query once per reporting period
# because this memory query is relatively slow, and changes very little.
if self._last_process_pool.get('cpu') and \
(time() - self._last_process_pool['cpu'][0]) < self._report_frequency:
return self._last_process_pool['cpu'][1]
# if we have no parent process, return 0 (it's an error)
if not self._process_info:
return 0
self._last_process_id_list = []
mem_size = mem_usage_children(0, self._process_info)
self._last_process_pool['cpu'] = time(), mem_size
return mem_size
def _get_gpu_stats(self):
if not self._gpustat:
return {}
# the per-process memory query is slow, so we only call it once per reporting period;
# on the rest of the samples we return the previous memory measurement
# update mem used by our process and sub processes
if self._process_info and (not self._last_process_pool.get('gpu') or
(time() - self._last_process_pool['gpu'][0]) >= self._report_frequency):
gpu_stat = self._gpustat.new_query(per_process_stats=True)
gpu_mem = {}
for i, g in enumerate(gpu_stat.gpus):
# only monitor the active GPUs; if none were selected, monitor everything
if self._active_gpus and i not in self._active_gpus:
continue
gpu_mem[i] = 0
for p in g.processes:
if p['pid'] in self._last_process_id_list:
gpu_mem[i] += p.get('gpu_memory_usage', 0)
self._last_process_pool['gpu'] = time(), gpu_mem
else:
# if we do not need to update the memory usage, run a global query
# if we have no parent process (backward compatibility), return global stats
gpu_stat = self._gpustat.new_query()
gpu_mem = self._last_process_pool['gpu'][1] if self._last_process_pool.get('gpu') else None
# generate the statistics dict for actual report
stats = {}
for i, g in enumerate(gpu_stat.gpus):
# only monitor the active GPUs; if none were selected, monitor everything
if self._active_gpus and i not in self._active_gpus:
continue
stats["gpu_%d_temperature" % i] = float(g["temperature.gpu"])
stats["gpu_%d_utilization" % i] = float(g["utilization.gpu"])
stats["gpu_%d_mem_usage" % i] = 100. * float(g["memory.used"]) / float(g["memory.total"])
# already in MBs
stats["gpu_%d_mem_free_gb" % i] = float(g["memory.total"] - g["memory.used"]) / 1024
# use previously sampled process gpu memory, or global if it does not exist
stats["gpu_%d_mem_used_gb" % i] = float(gpu_mem[i] if gpu_mem else g["memory.used"]) / 1024
return stats
def _get_machine_specs(self):
# type: () -> dict
specs = {}
# noinspection PyBroadException
try:
specs = {
'platform': str(sys.platform),
'python_version': str(platform.python_version()),
'python_exec': str(sys.executable),
'OS': str(platform.platform(aliased=True)),
'processor': str(platform.machine()),
'cpu_cores': int(psutil.cpu_count()),
'memory_gb': round(psutil.virtual_memory().total / 1024 ** 3, 1),
'hostname': str(platform.node()),
'gpu_count': 0,
}
if self._gpustat:
gpu_stat = self._gpustat.new_query(shutdown=True, get_driver_info=True)
if gpu_stat.gpus:
gpus = [g for i, g in enumerate(gpu_stat.gpus) if not self._active_gpus or i in self._active_gpus]
specs.update(
gpu_count=int(len(gpus)),
gpu_type=', '.join(g.name for g in gpus),
gpu_memory=', '.join('{}GB'.format(round(g.memory_total/1024.0)) for g in gpus),
gpu_driver_version=gpu_stat.driver_version or '',
gpu_driver_cuda_version=gpu_stat.driver_cuda_version or '',
)
except Exception:
pass
return specs
| 7,894 |
339 | package com.github.vbauer.jackdaw.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
*
* <p>Java annotation processors and other systems use ServiceLoader to register implementations of well-known types
* using META-INF metadata. However, it is easy for a developer to forget to update or correctly specify the service
* descriptors. Metadata will be generated for any class annotated with @JService.</p>
*
* Example:
* <pre>{@code
* public interface BaseType {}
*
* @JService(BaseType.class)
* public class TypeA implements BaseType {}
*
* @JService(BaseType.class)
* public class TypeB implements BaseType {}
* }</pre>
*
* Generated file `META-INF/services/BaseType`:
* <pre>
* TypeA
* TypeB
* </pre>
* @author <NAME>
*/
@Documented
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface JService {
/**
* Interface implemented by this service provider.
* @return interface class
*/
Class<?> value();
}
| 356 |
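The JService annotation documented above relies on the standard ServiceLoader mechanism to pick up the generated META-INF/services descriptors. As a hedged illustration that is not part of the original source file (it reuses the BaseType, TypeA and TypeB names from the Javadoc example and assumes they are on the classpath with no-arg constructors), consuming the generated metadata typically looks like this:

import java.util.ServiceLoader;

public class BaseTypeLoaderDemo {
    public static void main(String[] args) {
        // ServiceLoader reads META-INF/services/<fully qualified BaseType name>
        // and instantiates every implementation listed there (TypeA, TypeB, ...).
        ServiceLoader<BaseType> loader = ServiceLoader.load(BaseType.class);
        for (BaseType implementation : loader) {
            System.out.println("Loaded: " + implementation.getClass().getName());
        }
    }
}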
849 | <gh_stars>100-1000
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from enum import Enum
from lte.protos.pipelined_pb2 import SetupFlowsResult
from magma.pipelined.bridge_util import BridgeTools, DatapathLookupError
from magma.pipelined.metrics import OPENFLOW_ERROR_MSG
from magma.pipelined.openflow.exceptions import MagmaOFError
from ryu import utils
from ryu.base import app_manager
from ryu.controller import dpset, ofp_event
from ryu.controller.handler import (
CONFIG_DISPATCHER,
HANDSHAKE_DISPATCHER,
MAIN_DISPATCHER,
set_ev_cls,
)
from ryu.ofproto import ofproto_v1_4
global_epoch = int(time.time())
class ControllerType(Enum):
PHYSICAL = 1
LOGICAL = 2
SPECIAL = 3
class ControllerNotReadyException(Exception):
pass
class MagmaController(app_manager.RyuApp):
"""
The base class for all MagmaControllers. Does not itself manage any tables,
but instead handles shared state for subclass controllers.
Applications should subclass this and can own some number of tables to
implement their own logic.
"""
# Inherited from RyuApp base class
OFP_VERSIONS = [ofproto_v1_4.OFP_VERSION]
# App name that should be overridden by the controller implementation
APP_NAME = ""
def __init__(self, service_manager, *args, **kwargs):
""" Try to lookup the datapath_id of the bridge to run the app on """
super(MagmaController, self).__init__(*args, **kwargs)
self._app_futures = kwargs['app_futures']
try:
self._datapath_id = BridgeTools.get_datapath_id(
kwargs['config']['bridge_name'],
)
except DatapathLookupError as e:
self.logger.error(
'Exception in %s controller: %s', self.APP_NAME, e,
)
raise
if 'controller_port' in kwargs['config']:
self.CONF.ofp_tcp_listen_port = kwargs['config']['controller_port']
self._service_manager = service_manager
self._startup_flow_controller = None
self._startup_flows_fut = kwargs['app_futures']['startup_flows']
self.init_finished = False
@set_ev_cls(
ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER],
)
def record_of_errors(self, ev):
msg = ev.msg
self.logger.error(
"OF Error: type=0x%02x code=0x%02x "
"message=%s",
msg.type, msg.code, utils.hex_array(msg.data),
)
OPENFLOW_ERROR_MSG.labels(
error_type="0x%02x" % msg.type,
error_code="0x%02x" % msg.code,
).inc()
@set_ev_cls(dpset.EventDP, MAIN_DISPATCHER)
def datapath_event_handler(self, ev):
"""
This event handler is called on datapath connect and disconnect
Check datapath_id in case of multiple bridges
Args:
ev (dpset.EventDP): ryu event for connect/disconnect
"""
datapath = ev.dp
if self._datapath_id != datapath.id:
return
try:
if ev.enter:
self.initialize_on_connect(datapath)
# set a barrier to ensure things are applied
if self.APP_NAME in self._app_futures:
self._app_futures[self.APP_NAME].set_result(self)
else:
self.cleanup_on_disconnect(datapath)
except MagmaOFError as e:
act = 'initializing' if ev.enter else 'cleaning'
self.logger.error(
'Error %s %s flow rules: %s', act, self.APP_NAME, e,
)
def check_setup_request_epoch(self, epoch):
"""
Check if the controller is ready to be initialized after restart.
returns: a status code if the epoch is invalid or the controller is already initialized,
None if the controller can be initialized
"""
self.logger.info(
"Received Setup request with epoch - %d, current "
"epoch is - %d", epoch, global_epoch,
)
if epoch != global_epoch:
self.logger.warning(
"Received SetupFlowsRequest has outdated epoch - %d, current "
"epoch is - %d.", epoch, global_epoch,
)
return SetupFlowsResult.OUTDATED_EPOCH
if self._datapath is None:
self.logger.warning("Datapath not initilized, setup failed")
return SetupFlowsResult.FAILURE
if self.init_finished:
self.logger.warning('Controller already initialized, ignoring')
return SetupFlowsResult.SUCCESS
return None
def is_controller_ready(self):
"""
Check if the controller is setup & ready to process requests
"""
return self._datapath and self.init_finished
def initialize_on_connect(self, datapath):
"""
Initialize the app on the datapath connect event.
Subclasses can override this method to init default flows for
the table that they handle.
"""
pass
def cleanup_on_disconnect(self, datapath):
"""
Cleanup the app on the datapath disconnect event.
Subclasses can override this method to cleanup flows for
the table that they handle.
"""
pass
def delete_all_flows(self, datapath):
"""
Delete all flows in tables that the controller is responsible for.
"""
pass
| 2,532 |
14,668 | <filename>testing/libfuzzer/fuzzers/mach/mach_message_converter.h<gh_stars>1000+
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TESTING_LIBFUZZER_FUZZERS_MACH_MACH_MESSAGE_CONVERTER_H_
#define TESTING_LIBFUZZER_FUZZERS_MACH_MACH_MESSAGE_CONVERTER_H_
#include <mach/mach.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "base/mac/scoped_mach_port.h"
#include "testing/libfuzzer/fuzzers/mach/mach_message.pb.h"
namespace mach_fuzzer {
// Container for a Mach port right that will be sent in a message.
struct SendablePort {
mach_port_t name = MACH_PORT_NULL;
mach_msg_type_name_t disposition = 0;
MachPortType proto_type = static_cast<MachPortType>(-1);
base::mac::ScopedMachSendRight send_right;
base::mac::ScopedMachReceiveRight receive_right;
};
// Holds the buffer allocation and port references for a message to be sent.
struct SendableMessage {
// The message buffer.
std::unique_ptr<uint8_t[]> buffer;
// The |ports| are also encoded into the body of the message, but they are
// accessible here to allow for further manipulation.
std::vector<SendablePort> ports;
// Pointer to the header of the message stored in |buffer|.
mach_msg_header_t* header = nullptr;
};
// Converts the given protobuf message into a live Mach message, including port
// rights.
SendableMessage ConvertProtoToMachMessage(const MachMessage& proto);
// Takes the protobuf |proto|, converts it to a Mach message using
// ConvertProtoToMachMessage(), and then sends it via |local_port|. The port
// named by |local_port| must have a send right, which will be copied.
struct SendResult {
// The return value from mach_msg_send().
kern_return_t kr;
// The message that was sent, including its descriptors. This allows callers
// to control the lifetimes of any Mach rights after the message has been
// sent.
SendableMessage message;
};
SendResult SendMessage(mach_port_t local_port, const MachMessage& proto);
} // namespace mach_fuzzer
#endif // TESTING_LIBFUZZER_FUZZERS_MACH_MACH_MESSAGE_CONVERTER_H_
| 713 |
352 | package helloworld.behavioral.template_method;
/**
* @author <EMAIL>
*/
public class TemplateMethodHelloWorld extends AbstractHelloWorld{
@Override
public String getInterjection() {
return "Hello";
}
@Override
public String getSeparator() {
return " ";
}
@Override
public String getObject() {
return "Template Method";
}
@Override
public String getTerminator() {
return "!";
}
}
| 177 |
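TemplateMethodHelloWorld above only supplies the hook methods; the AbstractHelloWorld base class it extends is not included in this excerpt. Purely as an illustrative sketch of the template-method pattern (the actual base class in the repository may differ), such a base could be as small as:

public abstract class AbstractHelloWorld {

    // The template method: a fixed algorithm skeleton assembled from the hooks below.
    public final String helloWorld() {
        return getInterjection() + getSeparator() + getObject() + getTerminator();
    }

    public abstract String getInterjection();
    public abstract String getSeparator();
    public abstract String getObject();
    public abstract String getTerminator();
}

With the overrides shown above, helloWorld() would produce "Hello Template Method!".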
2,360 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOsloUtils(PythonPackage):
"""
The oslo.utils library provides support for common utility type functions,
such as encoding, exception handling, string manipulation, and time
handling.
"""
homepage = "https://docs.openstack.org/oslo.utils/"
pypi = "oslo.utils/oslo.utils-4.9.2.tar.gz"
maintainers = ['haampie']
version('4.9.2', sha256='20db285734ff6c3b50d5a6afcb2790ade0c7ba02fbc876feed43733f2c41a5c9')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:2.0,2.1.1:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| 563 |
404 | import os
import ycm_core
import ntpath
flags = [
'-DNANOPRINTF_IMPLEMENTATION',
'-DNANOPRINTF_USE_FIELD_WIDTH_FORMAT_SPECIFIERS=1',
'-DNANOPRINTF_USE_PRECISION_FORMAT_SPECIFIERS=1',
'-DNANOPRINTF_USE_FLOAT_FORMAT_SPECIFIERS=1',
'-DNANOPRINTF_USE_LARGE_FORMAT_SPECIFIERS=1',
'-DNANOPRINTF_USE_WRITEBACK_FORMAT_SPECIFIERS=1',
'-Wall',
'-Wextra',
'-Wall',
'-Weverything',
'-Wpedantic',
'-ansi',
'-Wno-c++98-compat-pedantic',
'-Wno-c++11-long-long',
'-Wno-reserved-id-macro',
'-Wno-old-style-cast',
'-Wno-keyword-macro',
'-Wno-disabled-macro-expansion',
'-Wno-weak-vtables',
'-Wno-global-constructors',
'-Wno-exit-time-destructors',
'-Wno-padded',
'-I', 'external/CppUTest/src/CppUTest_external/include'
]
cpp_flags = [ '-std=c++11', '-x', 'c++' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def PathLeaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def FlagsForFile( filename, **kwargs ):
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to) + cpp_flags
return {
'flags': final_flags,
'do_cache': True
}
| 913 |
72,551 | <filename>include/swift/Remote/TypeInfoProvider.h<gh_stars>1000+
//===--- TypeInfoProvider.h - Abstract access to type info ------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2020 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file declares an abstract interface for reading type layout info.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_REMOTE_TYPEINFOPROVIDER_H
#define SWIFT_REMOTE_TYPEINFOPROVIDER_H
namespace swift {
namespace reflection {
class TypeInfo;
}
namespace remote {
/// An abstract interface for providing external type layout information.
struct TypeInfoProvider {
virtual ~TypeInfoProvider() = default;
/// Attempt to read type information about (Clang)imported types that are not
/// represented in the metadata. LLDB can read this information from debug
/// info, for example.
virtual const reflection::TypeInfo *
getTypeInfo(llvm::StringRef mangledName) = 0;
};
} // namespace remote
} // namespace swift
#endif
| 353 |
1,466 | <gh_stars>1000+
/**
* Copyright 2004-present, Facebook, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.profilo.writer;
import com.facebook.jni.HybridData;
import com.facebook.profilo.mmapbuf.core.Buffer;
import com.facebook.proguard.annotations.DoNotStrip;
import com.facebook.soloader.SoLoader;
import javax.annotation.Nullable;
@DoNotStrip
public final class NativeTraceWriter {
static {
SoLoader.loadLibrary("profilo");
}
public static String getSanitizedTraceFolderName(String trace_id) {
return trace_id.replaceAll("[^a-zA-Z0-9\\-_.]", "_");
}
@DoNotStrip private HybridData mHybridData;
public NativeTraceWriter(
Buffer buffer,
String traceFolder,
String tracePrefix,
@Nullable NativeTraceWriterCallbacks callbacks) {
mHybridData = initHybrid(buffer, traceFolder, tracePrefix, callbacks);
}
private static native HybridData initHybrid(
Buffer buffer,
String traceFolder,
String tracePrefix,
@Nullable NativeTraceWriterCallbacks callbacks);
public native void loop();
public native void dump(long trace_id);
public native String getTraceFolder(long traceID);
}
| 533 |
3,834 | <filename>engine/runtime/src/main/java/org/enso/interpreter/node/expression/builtin/warning/GetReassignmentsNode.java
package org.enso.interpreter.node.expression.builtin.warning;
import com.oracle.truffle.api.TruffleLanguage;
import com.oracle.truffle.api.nodes.Node;
import org.enso.interpreter.dsl.BuiltinMethod;
import org.enso.interpreter.runtime.Context;
import org.enso.interpreter.runtime.data.Array;
import org.enso.interpreter.runtime.error.Warning;
import java.util.Arrays;
import java.util.Comparator;
@BuiltinMethod(
type = "Prim_Warning",
name = "get_reassignments",
description = "Gets the list of locations where the warnings was reassigned.")
public class GetReassignmentsNode extends Node {
Array execute(Object _this, Warning warning) {
Warning.Reassignment[] reassignments =
warning.getReassignments().toArray(Warning.Reassignment[]::new);
return new Array(Arrays.copyOf(reassignments, reassignments.length, Object[].class));
}
}
| 337 |
1,236 | // The MIT License (MIT)
// Copyright (c) 2016, Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#include <cstdint>
#include <stddef.h> // For ::size_t
#include "NativeJIT/CodeGen/JumpTable.h" // Label parameter and return value.
#include "Temporary/Assert.h"
#include "Temporary/NonCopyable.h" // Base class.
namespace Allocators
{
class IAllocator;
}
namespace NativeJIT
{
class CodeBuffer : public NonCopyable
{
public:
// Allocates a buffer from the code allocator. If the code inside
// the buffer is to be executed (and not just f. ex. transferred over the
// network), the allocator must return memory that is executable (see
// ExecutionBuffer class for an example).
CodeBuffer(Allocators::IAllocator& codeAllocator, unsigned capacity);
// Frees the buffer.
virtual ~CodeBuffer();
// Allocating and resolving jump labels.
// Use AllocateLabel() at any time to allocate a label representing a jump target.
// Use PlaceLabel() to associate the current buffer position with a label.
// Once all code generation is done, invoke Finalize() to patch all of the call
// sites with jump targets. Note that all allocated labels must be placed before
// calling Finalize().
Label AllocateLabel();
virtual void PlaceLabel(Label label);
// Writes a byte to the current position in the buffer.
void Emit8(uint8_t x);
// WARNING: Non portable. Assumes little endian machine architecture.
void Emit16(uint16_t x);
// WARNING: Non portable. Assumes little endian machine architecture.
void Emit32(uint32_t x);
// WARNING: Non portable. Assumes little endian machine architecture.
void Emit64(uint64_t x);
// Copies the provided data to the current position in the buffer.
void EmitBytes(uint8_t const *data, unsigned length);
// Writes the bits of the argument to the current position in the buffer.
// WARNING: Non portable. Assumes little endian machine architecture.
template <typename T>
void EmitBytes(T x);
// Replaces the contents of the buffer starting at startPosition and
// length bytes long with specified data. The portion of the buffer that
// will be changed with this call must already have been filled in
// (i.e. startPosition + length <= CurrentPosition()). Besides the buffer
// contents, no other CodeBuffer properties get modified.
void ReplaceBytes(unsigned startPosition, uint8_t const *data, unsigned length);
// Return the size of the buffer, in bytes.
unsigned GetCapacity() const;
// Returns the address of the start of the buffer.
// WARNING: Depending on how this class is used, this may not be the
// entry point to a function that's being built inside the buffer.
// F. ex. FunctionBuffer provides the GetEntryPoint() method to retrieve
// the function pointer.
uint8_t* BufferStart() const;
// Return the offset of the current write position in the buffer.
unsigned CurrentPosition() const;
virtual void Reset();
// Advances the current write position by byteCount and returns a pointer to the write position
// before advancing.
uint8_t* Advance(int byteCount);
template <typename T>
void AdvanceToAlignment();
void Fill(unsigned start, unsigned length, uint8_t value);
// Patches each call site with the correct offset derived from its resolved label.
void PatchCallSites();
protected:
void EmitCallSite(Label label, unsigned size);
private:
Allocators::IAllocator& m_codeAllocator;
unsigned m_capacity;
uint8_t* m_bufferStart;
uint8_t* m_bufferEnd;
uint8_t* m_current;
JumpTable m_localJumpTable; // Jumps within a single CodeBuffer.
// Verifies that the specified length can be written to the buffer.
// Throws if buffer overflow would occur.
void VerifyNoBufferOverflow(unsigned length);
};
//*************************************************************************
//
// Template and inline definitions for CodeBuffer.
//
//*************************************************************************
inline void CodeBuffer::VerifyNoBufferOverflow(unsigned length)
{
LogThrowAssert(m_current + length - 1 < m_bufferEnd,
"CodeBuffer overflow, wanted %u bytes, only %u out of %u bytes available",
length,
static_cast<unsigned>(m_bufferEnd - m_current),
m_capacity);
}
template <typename T>
void CodeBuffer::EmitBytes(T x)
{
static_assert(std::is_trivial<T>::value, "Invalid variable type.");
const size_t varSize = sizeof(T);
VerifyNoBufferOverflow(varSize);
*reinterpret_cast<T*>(m_current) = x;
m_current += varSize;
}
template <typename T>
void CodeBuffer::AdvanceToAlignment()
{
while ( (CurrentPosition() % sizeof(T)) != 0)
{
Emit8(0xaa);
}
}
}
| 2,223 |
2,577 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.model.xml.test;
import static org.camunda.bpm.model.xml.test.assertions.ModelAssertions.assertThat;
import static org.junit.Assert.fail;
import java.util.Collection;
import org.camunda.bpm.model.xml.Model;
import org.camunda.bpm.model.xml.ModelInstance;
import org.camunda.bpm.model.xml.impl.type.ModelElementTypeImpl;
import org.camunda.bpm.model.xml.impl.util.ModelTypeException;
import org.camunda.bpm.model.xml.instance.ModelElementInstance;
import org.camunda.bpm.model.xml.test.assertions.AttributeAssert;
import org.camunda.bpm.model.xml.test.assertions.ChildElementAssert;
import org.camunda.bpm.model.xml.test.assertions.ModelElementTypeAssert;
import org.camunda.bpm.model.xml.type.ModelElementType;
import org.junit.Test;
import org.w3c.dom.DOMException;
public abstract class AbstractModelElementInstanceTest {
protected class TypeAssumption {
public final String namespaceUri;
public final ModelElementType extendsType;
public final boolean isAbstract;
public TypeAssumption(boolean isAbstract) {
this(getDefaultNamespace(), isAbstract);
}
public TypeAssumption(String namespaceUri, boolean isAbstract) {
this(namespaceUri, null, isAbstract);
}
public TypeAssumption(Class<? extends ModelElementInstance> extendsType, boolean isAbstract) {
this(getDefaultNamespace(), extendsType, isAbstract);
}
public TypeAssumption(String namespaceUri, Class<? extends ModelElementInstance> extendsType, boolean isAbstract) {
this.namespaceUri = namespaceUri;
this.extendsType = model.getType(extendsType);
this.isAbstract = isAbstract;
}
}
protected class ChildElementAssumption {
public final String namespaceUri;
public final ModelElementType childElementType;
public final int minOccurs;
public final int maxOccurs;
public ChildElementAssumption(Class<? extends ModelElementInstance> childElementType) {
this(childElementType, 0, -1);
}
public ChildElementAssumption(String namespaceUri, Class<? extends ModelElementInstance> childElementType) {
this(namespaceUri, childElementType, 0, -1);
}
public ChildElementAssumption(Class<? extends ModelElementInstance> childElementType, int minOccurs) {
this(childElementType, minOccurs, -1);
}
public ChildElementAssumption(String namespaceUri, Class<? extends ModelElementInstance> childElementType, int minOccurs) {
this(namespaceUri, childElementType, minOccurs, -1);
}
public ChildElementAssumption(Class<? extends ModelElementInstance> childElementType, int minOccurs, int maxOccurs) {
this(getDefaultNamespace(), childElementType, minOccurs, maxOccurs);
}
public ChildElementAssumption(String namespaceUri, Class<? extends ModelElementInstance> childElementType, int minOccurs, int maxOccurs) {
this.namespaceUri = namespaceUri;
this.childElementType = model.getType(childElementType);
this.minOccurs = minOccurs;
this.maxOccurs = maxOccurs;
}
}
protected class AttributeAssumption {
public final String attributeName;
public final String namespace;
public final boolean isIdAttribute;
public final boolean isRequired;
public final Object defaultValue;
public AttributeAssumption(String attributeName) {
this(attributeName, false, false);
}
public AttributeAssumption(String namespace, String attributeName) {
this(namespace, attributeName, false, false);
}
public AttributeAssumption(String attributeName, boolean isIdAttribute) {
this(attributeName, isIdAttribute, false);
}
public AttributeAssumption(String namespace, String attributeName, boolean isIdAttribute) {
this(namespace, attributeName, isIdAttribute, false);
}
public AttributeAssumption(String attributeName, boolean isIdAttribute, boolean isRequired) {
this(attributeName, isIdAttribute, isRequired, null);
}
public AttributeAssumption(String namespace, String attributeName, boolean isIdAttribute, boolean isRequired) {
this(namespace, attributeName, isIdAttribute, isRequired, null);
}
public AttributeAssumption(String attributeName, boolean isIdAttribute, boolean isRequired, Object defaultValue) {
this(null, attributeName, isIdAttribute, isRequired, defaultValue);
}
public AttributeAssumption(String namespace, String attributeName, boolean isIdAttribute, boolean isRequired, Object defaultValue) {
this.attributeName = attributeName;
this.namespace = namespace;
this.isIdAttribute = isIdAttribute;
this.isRequired = isRequired;
this.defaultValue = defaultValue;
}
}
public static ModelInstance modelInstance;
public static Model model;
public static ModelElementType modelElementType;
public static void initModelElementType(GetModelElementTypeRule modelElementTypeRule) {
modelInstance = modelElementTypeRule.getModelInstance();
model = modelElementTypeRule.getModel();
modelElementType = modelElementTypeRule.getModelElementType();
assertThat(modelInstance).isNotNull();
assertThat(model).isNotNull();
assertThat(modelElementType).isNotNull();
}
public abstract String getDefaultNamespace();
public abstract TypeAssumption getTypeAssumption();
public abstract Collection<ChildElementAssumption> getChildElementAssumptions();
public abstract Collection<AttributeAssumption> getAttributesAssumptions();
public ModelElementTypeAssert assertThatType() {
return assertThat(modelElementType);
}
public AttributeAssert assertThatAttribute(String attributeName) {
return assertThat(modelElementType.getAttribute(attributeName));
}
public ChildElementAssert assertThatChildElement(ModelElementType childElementType) {
ModelElementTypeImpl modelElementTypeImpl = (ModelElementTypeImpl) modelElementType;
return assertThat(modelElementTypeImpl.getChildElementCollection(childElementType));
}
public ModelElementType getType(Class<? extends ModelElementInstance> instanceClass) {
return model.getType(instanceClass);
}
@Test
public void testType() {
assertThatType().isPartOfModel(model);
TypeAssumption assumption = getTypeAssumption();
assertThatType().hasTypeNamespace(assumption.namespaceUri);
if (assumption.isAbstract) {
assertThatType().isAbstract();
}
else {
assertThatType().isNotAbstract();
}
if (assumption.extendsType == null) {
assertThatType().extendsNoType();
}
else {
assertThatType().extendsType(assumption.extendsType);
}
if (assumption.isAbstract) {
try {
modelInstance.newInstance(modelElementType);
fail("Element type " + modelElementType.getTypeName() + " is abstract.");
}
catch (DOMException e) {
// expected exception
}
catch (ModelTypeException e) {
// expected exception
}
catch (Exception e) {
fail("Unexpected exception " + e.getMessage());
}
}
else {
ModelElementInstance modelElementInstance = modelInstance.newInstance(modelElementType);
assertThat(modelElementInstance).isNotNull();
}
}
@Test
public void testChildElements() {
Collection<ChildElementAssumption> childElementAssumptions = getChildElementAssumptions();
if (childElementAssumptions == null) {
assertThatType().hasNoChildElements();
}
else {
assertThat(modelElementType.getChildElementTypes().size()).isEqualTo(childElementAssumptions.size());
for (ChildElementAssumption assumption : childElementAssumptions) {
assertThatType().hasChildElements(assumption.childElementType);
if (assumption.namespaceUri != null) {
assertThat(assumption.childElementType).hasTypeNamespace(assumption.namespaceUri);
}
assertThatChildElement(assumption.childElementType)
.occursMinimal(assumption.minOccurs)
.occursMaximal(assumption.maxOccurs);
}
}
}
@Test
public void testAttributes() {
Collection<AttributeAssumption> attributesAssumptions = getAttributesAssumptions();
if (attributesAssumptions == null) {
assertThatType().hasNoAttributes();
}
else {
assertThat(attributesAssumptions).hasSameSizeAs(modelElementType.getAttributes());
for (AttributeAssumption assumption : attributesAssumptions) {
assertThatType().hasAttributes(assumption.attributeName);
AttributeAssert attributeAssert = assertThatAttribute(assumption.attributeName);
attributeAssert.hasOwningElementType(modelElementType);
if (assumption.namespace != null) {
attributeAssert.hasNamespaceUri(assumption.namespace);
}
else {
attributeAssert.hasNoNamespaceUri();
}
if (assumption.isIdAttribute) {
attributeAssert.isIdAttribute();
}
else {
attributeAssert.isNotIdAttribute();
}
if (assumption.isRequired) {
attributeAssert.isRequired();
}
else {
attributeAssert.isOptional();
}
if (assumption.defaultValue == null) {
attributeAssert.hasNoDefaultValue();
}
else {
attributeAssert.hasDefaultValue(assumption.defaultValue);
}
}
}
}
}
| 3,320 |
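AbstractModelElementInstanceTest above drives its three generic tests entirely from the four assumption methods a subclass implements. As a hedged sketch only (ExampleElementTest, the namespace URI and the attribute name are hypothetical, not taken from the Camunda code base, and a real subclass would also wire up the GetModelElementTypeRule that initializes the static fields), a minimal subclass describing a concrete, non-abstract element type with one required id attribute and no child elements could look like:

import java.util.Collection;
import java.util.Collections;

public class ExampleElementTest extends AbstractModelElementInstanceTest {

    public String getDefaultNamespace() {
        return "http://example.org/model";
    }

    public TypeAssumption getTypeAssumption() {
        // concrete type, no parent type, default namespace
        return new TypeAssumption(false);
    }

    public Collection<ChildElementAssumption> getChildElementAssumptions() {
        // no child elements expected
        return null;
    }

    public Collection<AttributeAssumption> getAttributesAssumptions() {
        // single attribute "id": an id attribute that is also required
        return Collections.singletonList(new AttributeAssumption("id", true, true));
    }
}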
476 | <filename>tools/jsoncons/tests/src/cbor/cbor_parser_tests.cpp
// Copyright 2016 <NAME>
// Distributed under Boost license
#ifdef __linux__
#define BOOST_TEST_DYN_LINK
#endif
#include <boost/test/unit_test.hpp>
#include <jsoncons/json.hpp>
#include <jsoncons_ext/cbor/cbor.hpp>
#include <jsoncons_ext/cbor/cbor_parser.hpp>
#include <sstream>
#include <vector>
#include <utility>
#include <ctime>
#include <limits>
using namespace jsoncons;
using namespace jsoncons::cbor;
BOOST_AUTO_TEST_SUITE(cbor_parser_tests)
void check_parsing(const std::vector<uint8_t>& v, const json& expected)
{
try
{
std::error_code ec;
jsoncons::json_decoder<json> decoder;
cbor_parser parser(decoder);
parser.update(v.data(),v.size());
parser.parse_some(ec);
json result = decoder.get_result();
BOOST_REQUIRE_MESSAGE(expected == result, expected.to_string());
}
catch (const std::exception& e)
{
std::cout << e.what() << std::endl;
std::cout << expected.to_string() << std::endl;
}
}
BOOST_AUTO_TEST_CASE(test_cbor_parsing)
{
// unsigned integer
check_parsing({0x00},json(0U));
check_parsing({0x01},json(1U));
check_parsing({0x0a},json(10U));
check_parsing({0x17},json(23U));
check_parsing({0x18,0x18},json(24U));
check_parsing({0x18,0xff},json(255U));
check_parsing({0x19,0x01,0x00},json(256U));
check_parsing({0x19,0xff,0xff},json(65535U));
check_parsing({0x1a,0,1,0x00,0x00},json(65536U));
check_parsing({0x1a,0xff,0xff,0xff,0xff},json(4294967295U));
check_parsing({0x1b,0,0,0,1,0,0,0,0},json(4294967296U));
check_parsing({0x1b,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff},json(std::numeric_limits<uint64_t>::max()));
// positive signed integer
check_parsing({0x00},json(0));
check_parsing({0x01},json(1));
check_parsing({0x0a},json(10));
check_parsing({0x17},json(23));
check_parsing({0x18,0x18},json(24));
check_parsing({0x18,0xff},json(255));
check_parsing({0x19,0x01,0x00},json(256));
check_parsing({0x19,0xff,0xff},json(65535));
check_parsing({0x1a,0,1,0x00,0x00},json(65536));
check_parsing({0x1a,0xff,0xff,0xff,0xff},json(4294967295));
check_parsing({0x1b,0,0,0,1,0,0,0,0},json(4294967296));
check_parsing({0x1b,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff},json(std::numeric_limits<int64_t>::max()));
// negative integers
check_parsing({0x20},json(-1));
check_parsing({0x21},json(-2));
check_parsing({0x37},json(-24));
check_parsing({0x38,0x18},json(-25));
check_parsing({0x38,0xff},json(-256));
check_parsing({0x39,0x01,0x00},json(-257));
check_parsing({0x39,0xff,0xff},json(-65536));
check_parsing({0x3a,0,1,0x00,0x00},json(-65537));
check_parsing({0x3a,0xff,0xff,0xff,0xff},json(-4294967296));
check_parsing({0x3b,0,0,0,1,0,0,0,0},json(-4294967297));
// null, true, false
check_parsing({0xf6},json::null());
check_parsing({0xf5},json(true));
check_parsing({0xf4},json(false));
// floating point
check_parsing({0xfb,0,0,0,0,0,0,0,0},json(0.0));
check_parsing({0xfb,0xbf,0xf0,0,0,0,0,0,0},json(-1.0));
check_parsing({0xfb,0xc1,0x6f,0xff,0xff,0xe0,0,0,0},json(-16777215.0));
// byte string
std::vector<uint8_t> v;
check_parsing({0x40},json(byte_string_view(v.data(),v.size())));
v = {' '};
check_parsing({0x41,' '},json(byte_string_view(v.data(),v.size())));
v = {0};
check_parsing({0x41,0},json(byte_string_view(v.data(),v.size())));
v = {'H','e','l','l','o'};
check_parsing({0x45,'H','e','l','l','o'},json(byte_string_view(v.data(),v.size())));
v = {'1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4'};
check_parsing({0x58,0x18,'1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4'},
json(byte_string_view(v.data(),v.size())));
// string
check_parsing({0x60},json(""));
check_parsing({0x61,' '},json(" "));
check_parsing({0x78,0x18,'1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4'},
json("123456789012345678901234"));
// byte strings with undefined length
check_parsing({0x5f,0xff}, json(byte_string()));
check_parsing({0x5f,0x40,0xff}, json(byte_string()));
check_parsing({0x5f,0x40,0x40,0xff}, json(byte_string()));
check_parsing({0x5f,0x43,'H','e','l',0x42,'l','o',0xff}, json(byte_string("Hello")));
check_parsing({0x5f,0x41,'H',0x41,'e',0x41,'l',0x41,'l',0x41,'o',0xff}, json(byte_string("Hello")));
check_parsing({0x5f,0x41,'H',0x41,'e',0x40,0x41,'l',0x41,'l',0x41,'o',0xff}, json(byte_string("Hello")));
// text strings with undefined length
check_parsing({0x7f,0xff}, json(""));
check_parsing({0x7f,0x60,0xff}, json(""));
check_parsing({0x7f,0x60,0x60,0xff}, json(""));
check_parsing({0x7f,0x63,'H','e','l',0x62,'l','o',0xff}, json("Hello"));
check_parsing({0x7f,0x61,'H',0x61,'e',0x61,'l',0x61,'l',0x61,'o',0xff}, json("Hello"));
check_parsing({0x7f,0x61,'H',0x61,'e',0x61,'l',0x60,0x61,'l',0x61,'o',0xff}, json("Hello"));
// arrays
check_parsing({0x80},json::array());
check_parsing({0x81,'\0'},json::parse("[0]"));
check_parsing({0x82,'\0','\0'},json::array({0,0}));
check_parsing({0x82,0x81,'\0','\0'}, json::parse("[[0],0]"));
check_parsing({0x81,0x65,'H','e','l','l','o'},json::parse("[\"Hello\"]"));
// indefinite length arrays
check_parsing({0x9f,0xff},json::array());
check_parsing({0x9f,0x9f,0xff,0xff},json::parse("[[]]"));
// maps
check_parsing({0xa0},json::object());
check_parsing({0xa1,0x62,'o','c',0x81,'\0'}, json::parse("{\"oc\": [0]}"));
check_parsing({0xa1,0x62,'o','c',0x84,'\0','\1','\2','\3'}, json::parse("{\"oc\": [0, 1, 2, 3]}"));
// indefinite length maps
check_parsing({0xbf,0xff},json::object());
check_parsing({0xbf,0x64,'N','a','m','e',0xbf,0xff,0xff},json::parse("{\"Name\":{}}"));
// bignum
check_parsing({0xc2,0x49,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
json(bignum(1,{0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00})));
}
BOOST_AUTO_TEST_SUITE_END()
| 3,194 |
496 | /*
* Copyright (C) <NAME>
*
* Based on nxt_diyfp.c from NGINX NJS project
*
* Copyright (C) <NAME>
* Copyright (C) NGINX, Inc.
*
* An internal diy_fp implementation.
* For details, see <NAME>. "Printing floating-point numbers quickly
* and accurately with integers." ACM Sigplan Notices 45.6 (2010): 233-243.
*/
#include "lexbor/core/diyfp.h"
typedef struct {
uint64_t significand;
int16_t bin_exp;
int16_t dec_exp;
}
lexbor_diyfp_cpe_t;
static const lexbor_diyfp_cpe_t lexbor_cached_powers[] = {
{ lexbor_uint64_hl(0xfa8fd5a0, 0x081c0288), -1220, -348 },
{ lexbor_uint64_hl(0xbaaee17f, 0xa23ebf76), -1193, -340 },
{ lexbor_uint64_hl(0x8b16fb20, 0x3055ac76), -1166, -332 },
{ lexbor_uint64_hl(0xcf42894a, 0x5dce35ea), -1140, -324 },
{ lexbor_uint64_hl(0x9a6bb0aa, 0x55653b2d), -1113, -316 },
{ lexbor_uint64_hl(0xe61acf03, 0x3d1a45df), -1087, -308 },
{ lexbor_uint64_hl(0xab70fe17, 0xc79ac6ca), -1060, -300 },
{ lexbor_uint64_hl(0xff77b1fc, 0xbebcdc4f), -1034, -292 },
{ lexbor_uint64_hl(0xbe5691ef, 0x416bd60c), -1007, -284 },
{ lexbor_uint64_hl(0x8dd01fad, 0x907ffc3c), -980, -276 },
{ lexbor_uint64_hl(0xd3515c28, 0x31559a83), -954, -268 },
{ lexbor_uint64_hl(0x9d71ac8f, 0xada6c9b5), -927, -260 },
{ lexbor_uint64_hl(0xea9c2277, 0x23ee8bcb), -901, -252 },
{ lexbor_uint64_hl(0xaecc4991, 0x4078536d), -874, -244 },
{ lexbor_uint64_hl(0x823c1279, 0x5db6ce57), -847, -236 },
{ lexbor_uint64_hl(0xc2109436, 0x4dfb5637), -821, -228 },
{ lexbor_uint64_hl(0x9096ea6f, 0x3848984f), -794, -220 },
{ lexbor_uint64_hl(0xd77485cb, 0x25823ac7), -768, -212 },
{ lexbor_uint64_hl(0xa086cfcd, 0x97bf97f4), -741, -204 },
{ lexbor_uint64_hl(0xef340a98, 0x172aace5), -715, -196 },
{ lexbor_uint64_hl(0xb23867fb, 0x2a35b28e), -688, -188 },
{ lexbor_uint64_hl(0x84c8d4df, 0xd2c63f3b), -661, -180 },
{ lexbor_uint64_hl(0xc5dd4427, 0x1ad3cdba), -635, -172 },
{ lexbor_uint64_hl(0x936b9fce, 0xbb25c996), -608, -164 },
{ lexbor_uint64_hl(0xdbac6c24, 0x7d62a584), -582, -156 },
{ lexbor_uint64_hl(0xa3ab6658, 0x0d5fdaf6), -555, -148 },
{ lexbor_uint64_hl(0xf3e2f893, 0xdec3f126), -529, -140 },
{ lexbor_uint64_hl(0xb5b5ada8, 0xaaff80b8), -502, -132 },
{ lexbor_uint64_hl(0x87625f05, 0x6c7c4a8b), -475, -124 },
{ lexbor_uint64_hl(0xc9bcff60, 0x34c13053), -449, -116 },
{ lexbor_uint64_hl(0x964e858c, 0x91ba2655), -422, -108 },
{ lexbor_uint64_hl(0xdff97724, 0x70297ebd), -396, -100 },
{ lexbor_uint64_hl(0xa6dfbd9f, 0xb8e5b88f), -369, -92 },
{ lexbor_uint64_hl(0xf8a95fcf, 0x88747d94), -343, -84 },
{ lexbor_uint64_hl(0xb9447093, 0x8fa89bcf), -316, -76 },
{ lexbor_uint64_hl(0x8a08f0f8, 0xbf0f156b), -289, -68 },
{ lexbor_uint64_hl(0xcdb02555, 0x653131b6), -263, -60 },
{ lexbor_uint64_hl(0x993fe2c6, 0xd07b7fac), -236, -52 },
{ lexbor_uint64_hl(0xe45c10c4, 0x2a2b3b06), -210, -44 },
{ lexbor_uint64_hl(0xaa242499, 0x697392d3), -183, -36 },
{ lexbor_uint64_hl(0xfd87b5f2, 0x8300ca0e), -157, -28 },
{ lexbor_uint64_hl(0xbce50864, 0x92111aeb), -130, -20 },
{ lexbor_uint64_hl(0x8cbccc09, 0x6f5088cc), -103, -12 },
{ lexbor_uint64_hl(0xd1b71758, 0xe219652c), -77, -4 },
{ lexbor_uint64_hl(0x9c400000, 0x00000000), -50, 4 },
{ lexbor_uint64_hl(0xe8d4a510, 0x00000000), -24, 12 },
{ lexbor_uint64_hl(0xad78ebc5, 0xac620000), 3, 20 },
{ lexbor_uint64_hl(0x813f3978, 0xf8940984), 30, 28 },
{ lexbor_uint64_hl(0xc097ce7b, 0xc90715b3), 56, 36 },
{ lexbor_uint64_hl(0x8f7e32ce, 0x7bea5c70), 83, 44 },
{ lexbor_uint64_hl(0xd5d238a4, 0xabe98068), 109, 52 },
{ lexbor_uint64_hl(0x9f4f2726, 0x179a2245), 136, 60 },
{ lexbor_uint64_hl(0xed63a231, 0xd4c4fb27), 162, 68 },
{ lexbor_uint64_hl(0xb0de6538, 0x8cc8ada8), 189, 76 },
{ lexbor_uint64_hl(0x83c7088e, 0x1aab65db), 216, 84 },
{ lexbor_uint64_hl(0xc45d1df9, 0x42711d9a), 242, 92 },
{ lexbor_uint64_hl(0x924d692c, 0xa61be758), 269, 100 },
{ lexbor_uint64_hl(0xda01ee64, 0x1a708dea), 295, 108 },
{ lexbor_uint64_hl(0xa26da399, 0x9aef774a), 322, 116 },
{ lexbor_uint64_hl(0xf209787b, 0xb47d6b85), 348, 124 },
{ lexbor_uint64_hl(0xb454e4a1, 0x79dd1877), 375, 132 },
{ lexbor_uint64_hl(0x865b8692, 0x5b9bc5c2), 402, 140 },
{ lexbor_uint64_hl(0xc83553c5, 0xc8965d3d), 428, 148 },
{ lexbor_uint64_hl(0x952ab45c, 0xfa97a0b3), 455, 156 },
{ lexbor_uint64_hl(0xde469fbd, 0x99a05fe3), 481, 164 },
{ lexbor_uint64_hl(0xa59bc234, 0xdb398c25), 508, 172 },
{ lexbor_uint64_hl(0xf6c69a72, 0xa3989f5c), 534, 180 },
{ lexbor_uint64_hl(0xb7dcbf53, 0x54e9bece), 561, 188 },
{ lexbor_uint64_hl(0x88fcf317, 0xf22241e2), 588, 196 },
{ lexbor_uint64_hl(0xcc20ce9b, 0xd35c78a5), 614, 204 },
{ lexbor_uint64_hl(0x98165af3, 0x7b2153df), 641, 212 },
{ lexbor_uint64_hl(0xe2a0b5dc, 0x971f303a), 667, 220 },
{ lexbor_uint64_hl(0xa8d9d153, 0x5ce3b396), 694, 228 },
{ lexbor_uint64_hl(0xfb9b7cd9, 0xa4a7443c), 720, 236 },
{ lexbor_uint64_hl(0xbb764c4c, 0xa7a44410), 747, 244 },
{ lexbor_uint64_hl(0x8bab8eef, 0xb6409c1a), 774, 252 },
{ lexbor_uint64_hl(0xd01fef10, 0xa657842c), 800, 260 },
{ lexbor_uint64_hl(0x9b10a4e5, 0xe9913129), 827, 268 },
{ lexbor_uint64_hl(0xe7109bfb, 0xa19c0c9d), 853, 276 },
{ lexbor_uint64_hl(0xac2820d9, 0x623bf429), 880, 284 },
{ lexbor_uint64_hl(0x80444b5e, 0x7aa7cf85), 907, 292 },
{ lexbor_uint64_hl(0xbf21e440, 0x03acdd2d), 933, 300 },
{ lexbor_uint64_hl(0x8e679c2f, 0x5e44ff8f), 960, 308 },
{ lexbor_uint64_hl(0xd433179d, 0x9c8cb841), 986, 316 },
{ lexbor_uint64_hl(0x9e19db92, 0xb4e31ba9), 1013, 324 },
{ lexbor_uint64_hl(0xeb96bf6e, 0xbadf77d9), 1039, 332 },
{ lexbor_uint64_hl(0xaf87023b, 0x9bf0ee6b), 1066, 340 },
};
#define LEXBOR_DIYFP_D_1_LOG2_10 0.30102999566398114 /* 1 / log2(10). */
lexbor_diyfp_t
lexbor_cached_power_dec(int exp, int *dec_exp)
{
unsigned int index;
const lexbor_diyfp_cpe_t *cp;
index = (exp + LEXBOR_DECIMAL_EXPONENT_OFF) / LEXBOR_DECIMAL_EXPONENT_DIST;
cp = &lexbor_cached_powers[index];
*dec_exp = cp->dec_exp;
return lexbor_diyfp(cp->significand, cp->bin_exp);
}
lexbor_diyfp_t
lexbor_cached_power_bin(int exp, int *dec_exp)
{
int k;
unsigned int index;
const lexbor_diyfp_cpe_t *cp;
k = (int) ceil((-61 - exp) * LEXBOR_DIYFP_D_1_LOG2_10)
+ LEXBOR_DECIMAL_EXPONENT_OFF - 1;
index = (unsigned) (k >> 3) + 1;
cp = &lexbor_cached_powers[index];
*dec_exp = -(LEXBOR_DECIMAL_EXPONENT_MIN + (int) (index << 3));
return lexbor_diyfp(cp->significand, cp->bin_exp);
}
#undef LEXBOR_DIYFP_D_1_LOG2_10
| 3,606 |
312 | #include "wfrest/BluePrint.h"
using namespace wfrest;
inline void set_admin_bp(BluePrint &bp)
{
bp.GET("/page/new/", [](const HttpReq *req, HttpResp *resp)
{
fprintf(stderr, "New page\n");
});
bp.GET("/page/edit/", [](const HttpReq *req, HttpResp *resp)
{
fprintf(stderr, "Edit page\n");
});
} | 163 |
670 | #Import required libraries :
import random
import math
import matplotlib.pyplot as plt
#Main function to estimate PI value :
def monte_carlo(runs,needles,n_length,b_width):
#Empty list to store pi values :
pi_values = []
#Horizontal line for actual value of PI :
plt.axhline(y=math.pi, color='r', linestyle='-')
#For all runs :
for i in range(runs):
#Initialize number of hits as 0.
nhits = 0
#For all needles :
for j in range(needles):
#We will find the distance from the nearest vertical line :
#Min = 0 Max = b_width/2
x = random.uniform(0,b_width/2.0)
#The theta value will be from 0 to pi/2 :
theta = random.uniform(0,math.pi/2)
#Checking if the needle crosses the line or not :
xtip = x - (n_length/2.0)*math.cos(theta)
if xtip < 0 :
nhits += 1
#Going with the formula :
numerator = 2.0 * n_length * needles
denominator = b_width * nhits
#Append the final value of pi :
pi_values.append((numerator/denominator))
#Final pi value after all iterations :
print(pi_values[-1])
#Plotting the graph :
plt.plot(pi_values)
#Total number of runs :
runs = 100
#Total number of needles :
needles = 100000
#Length of needle :
n_length = 2
#space between 2 vertical lines :
b_width =2
#Calling the main function :
monte_carlo(runs,needles,n_length,b_width)
| 745 |
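The script above is Buffon's needle: for a needle of length l dropped onto parallel lines spaced b apart (with l <= b, as in the l = b = 2 configuration used here), the probability of crossing a line is 2l / (pi * b), and solving for pi gives exactly the numerator/denominator expression in the code. In LaTeX:

P(\text{cross}) = \frac{2\,l}{\pi\,b}
\qquad\Longrightarrow\qquad
\pi \approx \frac{2\,l\,n_{\text{needles}}}{b\,n_{\text{hits}}}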
769 | /*
* Copyright (c) Baidu Inc. All rights reserved.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.baidu.bjf.remoting.protobuf;
import java.io.IOException;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Descriptors.Descriptor;
/**
* Codec interface includes encode and decode support.
*
* @author xiemalin
* @since 1.0.0
*/
public interface Codec<T> {
/**
* Do byte encode action
*
* @param t generic target object
* @return encoded byte array
* @throws IOException if target object is invalid
*/
byte[] encode(T t) throws IOException;
/**
* Do decode action from byte array
*
* @param bytes encoded byte array
* @return parse byte array to target object
* @throws IOException if byte array is invalid
*/
T decode(byte[] bytes) throws IOException;
/**
* Calculate size of target object
*
* @param t target object
* @return size of
* @throws IOException if target object is invalid
*/
int size(T t) throws IOException;
/**
* Write target object to byte array
*
* @param t target object
* @param out target {@link CodedOutputStream}
* @throws IOException if target object is invalid
*/
void writeTo(T t, CodedOutputStream out) throws IOException;
/**
* Read object from target byte array input stream
*
* @param intput target input stream object
* @return deserialized object
* @throws IOException if byte array is invalid
*/
T readFrom(CodedInputStream intput) throws IOException;
/**
* Get {@code Descriptor} to support dynamic message call for protocol buffer
* @return {@link Descriptor} instance
* @throws IOException in case of create {@link Descriptor} failed
*/
Descriptor getDescriptor() throws IOException;
}
| 977 |
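The Codec interface above is a symmetric serialize/deserialize contract. As a hedged sketch (User is a hypothetical POJO, and obtaining the Codec<User> instance, e.g. from a generated proxy, is outside this excerpt), a typical round trip looks like:

import java.io.IOException;

public class CodecRoundTripDemo {

    // 'codec' is any Codec<User> implementation for the hypothetical User class.
    static byte[] roundTrip(Codec<User> codec, User user) throws IOException {
        byte[] bytes = codec.encode(user);   // serialize the object to a protobuf byte array
        User decoded = codec.decode(bytes);  // parse the bytes back into an object
        assert decoded != null;
        return bytes;
    }
}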
2,069 | [{"Id":null,"OrganizationId":null,"ProjectId":null,"StackId":null,"IsFirstOccurrence":false,"IsFixed":false,"IsHidden":false,"CreatedUtc":"0001-01-01T00:00:00","Idx":{},"Type":"log","Source":"V2_EventUpgrade","Date":"2020-01-01T00:00:00-06:00","Tags":[],"Message":"Accusam commodo laoreet ut hendrerit veniam accusam voluptua eleifend clita adipiscing nam aliquip nulla stet ea et.","Geo":null,"Value":null,"Data":{"@level":"Error","MachineName":"RD000D3A1080EC","job":"EventPostsJob","host":"RD000D3A1080EC","process":"EventPostsJob","@version":"3.1.1940 a68c285679","@environment":{"ProcessorCount":4,"TotalPhysicalMemory":7515721728,"AvailablePhysicalMemory":5682245632,"CommandLine":"EventPostsJob.exe","ProcessName":"D:\\local\\Temp\\jobs\\continuous\\EventPost\\y5zflq0z.nn1\\EventPostsJob.exe","ProcessId":"5352","ProcessMemorySize":177127424,"ThreadName":null,"ThreadId":"5980","Architecture":"x64","OSName":null,"OSVersion":null,"IpAddress":"127.0.0.1","MachineName":"RD000D3A1080EC","InstallId":"920668f78bae4a52b8c1681b7330ba46","RuntimeVersion":"4.0.30319.42000","Data":{}}},"ReferenceId":null,"SessionId":null}] | 390 |
646 | <reponame>Amine-El-Ghaoual/griddb
/*
Copyright (c) 2017 TOSHIBA Digital Solutions Corporation
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "gs_error.h"
#include "sql_temp_store.h"
#include <iostream>
#include <iomanip>
#ifndef _WIN32
#include <signal.h>
#include <fcntl.h>
#endif
#ifndef WIN32
#include <iostream>
#include <execinfo.h>
#include <unistd.h>
#endif
UTIL_TRACER_DECLARE(IO_MONITOR);
const uint64_t LocalTempStore::SWAP_FILE_IO_MAX_RETRY_COUNT = 10;
const uint32_t LocalTempStore::DEFAULT_IO_WARNING_THRESHOLD_MILLIS = 5000;
const char8_t* const LocalTempStore::SWAP_FILE_BASE_NAME = "swap_";
const char8_t* const LocalTempStore::SWAP_FILE_EXTENSION = ".dat";
const char8_t LocalTempStore::SWAP_FILE_SEPARATOR = '_';
const uint32_t LocalTempStore::DEFAULT_BLOCK_SIZE = 512 * 1024;
const uint32_t LocalTempStore::MAX_BLOCK_EXP_SIZE = 24;
const uint32_t LocalTempStore::MIN_BLOCK_EXP_SIZE = 14;
const int32_t LocalTempStore::DEFAULT_STORE_MEMORY_LIMIT_MB = 1024;
#ifndef WIN32
const int32_t LocalTempStore::DEFAULT_STORE_SWAP_FILE_SIZE_LIMIT_MB = INT32_MAX;
#else
const int32_t LocalTempStore::DEFAULT_STORE_SWAP_FILE_SIZE_LIMIT_MB = 2048;
#endif
const std::string LocalTempStore::DEFAULT_SWAP_FILES_TOP_DIR = "swap";
const int32_t LocalTempStore::DEFAULT_STORE_SWAP_SYNC_SIZE_MB = 1024;
const int32_t LocalTempStore::DEFAULT_STORE_SWAP_SYNC_INTERVAL = 0;
const size_t LocalTempStore::BLOCK_INFO_POOL_FREE_LIMIT = 1000;
const size_t LocalTempStore::MIN_INITIAL_BUCKETS = 10;
const uint8_t LocalTempStore::Block::FLAG_FULL_BLOCK = 0;
const uint8_t LocalTempStore::Block::FLAG_PARTIAL_BLOCK_1 = 1;
const uint8_t LocalTempStore::Block::FLAG_PARTIAL_BLOCK_2 = 2;
const LocalTempStore::GroupId LocalTempStore::UNDEF_GROUPID = UINT64_MAX;
const LocalTempStore::ResourceId LocalTempStore::UNDEF_RESOURCEID = UINT64_MAX;
const LocalTempStore::BlockId LocalTempStore::UNDEF_BLOCKID = UINT64_MAX;
const uint64_t LocalTempStore::UNDEF_FILEBLOCKID = UINT64_MAX;
LocalTempStore::LocalTempStore(
const Config &config, LTSVariableSizeAllocator &varAllocator,
bool autoUseDefault)
:
varAlloc_(&varAllocator)
, bufferManager_(NULL), resourceInfoManager_(NULL)
, swapFilesTopDir_(config.swapFilesTopDir_)
, defaultBlockExpSize_(util::ilog2(util::nextPowerOf2(config.blockSize_))) {
static_cast<void>(autoUseDefault);
Config bufferManagerConfig = config;
if (config.blockSize_ == 0) {
bufferManagerConfig.blockSize_ = DEFAULT_BLOCK_SIZE;
defaultBlockExpSize_ = util::ilog2(util::nextPowerOf2(DEFAULT_BLOCK_SIZE));
}
else if (config.blockSize_ < (1 << LocalTempStore::MIN_BLOCK_EXP_SIZE)) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"BlockSize (" << config.blockSize_ << ") is too small.");
}
else if (config.blockSize_ > (1 << LocalTempStore::MAX_BLOCK_EXP_SIZE)) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"BlockSize (" << config.blockSize_ << ") is too large.");
}
if (config.blockSize_ == 0) {
bufferManagerConfig.blockSize_ = DEFAULT_BLOCK_SIZE;
defaultBlockExpSize_ = util::ilog2(util::nextPowerOf2(DEFAULT_BLOCK_SIZE));
}
if (swapFilesTopDir_.empty()) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"SwapFilesTopDir must not be empty.");
}
else {
try {
if (util::FileSystem::exists(swapFilesTopDir_.c_str())) {
if (!util::FileSystem::isDirectory(swapFilesTopDir_.c_str())) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"SwapFilesTopDir (" << swapFilesTopDir_.c_str() <<
") is not direcotry");
}
util::Directory dir(swapFilesTopDir_.c_str());
if (dir.isParentOrSelfChecked()) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"SwapFilesTopDir must not be \".\" or \"..\".");
}
}
else {
util::FileSystem::createDirectoryTree(swapFilesTopDir_.c_str());
util::Directory dir(swapFilesTopDir_.c_str());
if (dir.isParentOrSelfChecked()) {
GS_THROW_SYSTEM_ERROR(GS_ERROR_LTS_INVALID_PARAMETER,
"SwapFilesTopDir must not be \".\" or \"..\".");
}
}
}
catch (std::exception &e) {
GS_RETHROW_SYSTEM_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"SwapFilesTopDir open failed"));
}
}
resourceInfoManager_ = ALLOC_VAR_SIZE_NEW(*varAlloc_) ResourceInfoManager(*this);
bufferManager_ = ALLOC_VAR_SIZE_NEW(*varAlloc_) BufferManager(*varAlloc_, config, *this);
}
LocalTempStore::~LocalTempStore() try {
ALLOC_VAR_SIZE_DELETE(*varAlloc_, bufferManager_);
ALLOC_VAR_SIZE_DELETE(*varAlloc_, resourceInfoManager_);
}
catch (...) {
}
void LocalTempStore::useBlockSize(uint32_t blockSize) {
static_cast<void>(blockSize);
}
void LocalTempStore::setSwapFileSizeLimit(uint64_t size) {
bufferManager_->setSwapFileSizeLimit(size);
}
void LocalTempStore::setStableMemoryLimit(uint64_t size) {
bufferManager_->setStableMemoryLimit(size);
}
LocalTempStore::GroupId LocalTempStore::allocateGroup() {
return resourceInfoManager_->allocateGroup();
}
void LocalTempStore::deallocateGroup(LocalTempStore::GroupId groupId) {
resourceInfoManager_->freeGroup(groupId);
}
LocalTempStore::ResourceId LocalTempStore::allocateResource(
LocalTempStore::ResourceType type, LocalTempStore::GroupId groupId) {
return resourceInfoManager_->allocateResource(type, groupId);
}
void LocalTempStore::deallocateResource(LocalTempStore::ResourceId resourceId) {
return resourceInfoManager_->freeResource(resourceId);
}
LocalTempStore::BufferManager::BufferManager(
LocalTempStore::LTSVariableSizeAllocator &varAlloc,
const LocalTempStore::Config &config,
LocalTempStore &store):
varAlloc_(varAlloc), store_(store),
swapFileSizeLimit_(config.swapFileSizeLimit_),
file_(NULL),
blockInfoTable_(varAlloc, store, store.defaultBlockExpSize_,
config.stableMemoryLimit_, config.swapFileSizeLimit_),
maxBlockNth_(0), inuseMaxBlockNth_(0), wroteMaxBlockNth_(0),
trimSwapFileThreshold_(0),
blockExpSize_(store.defaultBlockExpSize_),
blockSize_(1 << store.defaultBlockExpSize_),
stableMemoryLimit_(config.stableMemoryLimit_),
currentTotalMemory_(0), peakTotalMemory_(0),
activeBlockCount_(0)
{
file_ = ALLOC_VAR_SIZE_NEW(varAlloc_) File(
varAlloc_, store, config, store.defaultBlockExpSize_, config.swapFilesTopDir_);
file_->open();
BlockId trimUnitBlockCount = SWAP_FILE_TRIM_UNIT_SIZE / blockSize_;
trimSwapFileThreshold_ = trimUnitBlockCount > 10 ? trimUnitBlockCount : 10;
}
LocalTempStore::BufferManager::~BufferManager() {
if (file_) {
file_->close();
ALLOC_VAR_SIZE_DELETE(varAlloc_, file_);
file_ = NULL;
}
}
void LocalTempStore::BufferManager::setSwapFileSizeLimit(uint64_t size) {
util::LockGuard<util::Mutex> guard(mutex_);
swapFileSizeLimit_ = size;
}
void LocalTempStore::BufferManager::allocateMemoryMain(
BlockId id, BlockInfo* &blockInfo) {
try {
blockInfoTable_.insert(id, blockInfo);
blockInfo->data_ = static_cast<uint8_t*>(blockInfoTable_.fixedAlloc_->allocate());
assert(blockInfo->data_);
LocalTempStore::Block::Header::resetHeader(blockInfo->data_, blockExpSize_);
}
catch (std::exception &e) {
blockInfoTable_.remove(id);
decrementActiveBlockCount();
blockInfo->clear();
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Memory allocation failed"));
}
}
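/*
* Allocates buffer memory for the block identified by id. While allocated memory
* is below the stable memory limit, or while the total-memory check still passes,
* a fresh buffer is taken from the fixed-size allocator. Otherwise an unlatched
* resident block is chosen as a swap-out victim and reused; in that case the
* return value is true and the caller must write the victim's contents to the
* swap file before reinitializing the buffer.
*/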
bool LocalTempStore::BufferManager::allocateMemory(BlockId id, BlockInfo* &blockInfo) {
blockInfo = NULL;
bool needSwap = false;
if (getAllocatedMemory() < stableMemoryLimit_) {
allocateMemoryMain(id, blockInfo);
addCurrentMemory();
}
else if (checkTotalMemory()) {
allocateMemoryMain(id, blockInfo);
addCurrentMemory();
}
else {
BlockInfo* swapOutTarget = blockInfoTable_.getNextTarget();
if (swapOutTarget) {
blockInfo = swapOutTarget;
#ifndef NDEBUG
BlockId headerBlockId = LocalTempStore::Block::Header::getBlockId(blockInfo->data_);
assert(headerBlockId == blockInfo->blockId_);
#endif
needSwap = true;
}
else {
allocateMemoryMain(id, blockInfo);
addCurrentMemory();
}
}
assert(blockInfo);
assert(blockInfo->baseBlockInfo_);
return needSwap;
}
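/*
* Allocates a new block and latches it. The table mutex is released while the
* block body is initialized under the per-block lock, then reacquired to refresh
* the base info and reset the header, so other threads can use the table while a
* (possibly swapped-out) buffer is being prepared.
*/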
void LocalTempStore::BufferManager::allocate(
BlockInfo* &blockInfo, uint64_t affinity, bool force) {
static_cast<void>(affinity);
BlockId id = UNDEF_BLOCKID;
try {
LockGuardVariant<util::Mutex> guard(mutex_);
id = LocalTempStore::makeBlockId(
blockInfoTable_.allocateBlockNth(force), blockExpSize_);
incrementActiveBlockCount();
bool needSwap = allocateMemory(id, blockInfo);
blockInfoTable_.update(blockInfo, id);
blockInfo->addReference();
if (needSwap) {
blockInfo->swapOut(store_, id);
}
guard.release();
try {
blockInfo->lock();
blockInfo->setup(store_, id, true);
blockInfo->unlock();
}
catch(std::exception &e) {
blockInfo->unlock();
GS_RETHROW_USER_ERROR(e, "");
}
guard.acquire();
#ifndef NDEBUG
BlockInfo* blockInfo2 = blockInfoTable_.lookup(id);
assert(blockInfo == blockInfo2);
#endif
blockInfo->baseBlockInfo_ = lookupBaseInfo(id);
assert(LocalTempStore::Block::Header::getBlockId(blockInfo->data_) == blockInfo->blockId_);
assert(id == blockInfo->blockId_);
LocalTempStore::Block::Header::resetHeader(blockInfo->data_, blockExpSize_);
LocalTempStore::Block::Header::setBlockId(blockInfo->data_, id);
blockInfo->baseBlockInfo_->assignmentCount_ &= (~MASK_ALREADY_SWAPPED);
}
catch(std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Failed to create block."));
}
}
void LocalTempStore::BufferManager::get(BlockId id, BlockInfo* &blockInfo) {
try {
LockGuardVariant<util::Mutex> guard(mutex_);
blockInfo = blockInfoTable_.lookup(id);
if (blockInfo) {
blockInfo->addReference();
guard.release();
try {
blockInfo->lock();
blockInfo->setup(store_, id, false);
blockInfo->unlock();
assert(blockInfo->blockId_ == id);
}
catch(std::exception &e) {
blockInfo->unlock();
GS_RETHROW_USER_ERROR(e, "");
}
guard.acquire();
blockInfo->baseBlockInfo_ = lookupBaseInfo(id);
assert(LocalTempStore::Block::Header::getBlockId(blockInfo->data_) == blockInfo->blockId_);
assert(id == blockInfo->blockId_);
return;
}
else {
LocalTempStore::BaseBlockInfo* baseInfo = blockInfoTable_.lookupBaseInfo(id);
if (baseInfo->assignmentCount_ != 0) {
allocateMemory(id, blockInfo);
blockInfoTable_.update(blockInfo, id);
blockInfo->addReference();
blockInfo->swapOut(store_, id);
guard.release();
try {
blockInfo->lock();
blockInfo->setup(store_, id, false);
blockInfo->unlock();
}
catch(std::exception &e) {
blockInfo->unlock();
GS_RETHROW_USER_ERROR(e, "");
}
guard.acquire();
#ifndef NDEBUG
BlockInfo* blockInfo2 = blockInfoTable_.lookup(id);
assert(blockInfo == blockInfo2);
#endif
blockInfo->baseBlockInfo_ = lookupBaseInfo(id);
assert(LocalTempStore::Block::Header::getBlockId(blockInfo->data_) == blockInfo->blockId_);
assert(id == blockInfo->blockId_);
return;
}
else {
blockInfo = NULL;
assert(false);
GS_THROW_USER_ERROR(GS_ERROR_CM_INTERNAL_ERROR, "");
}
}
}
catch(std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Failed to get block: (blockId=" << id << ")"));
}
}
void LocalTempStore::BufferManager::release(BlockInfo &blockInfo, uint64_t affinity) {
static_cast<void>(affinity);
blockInfoTable_.fixedAlloc_->deallocate(blockInfo.data_);
subCurrentMemory();
BlockId blockId = blockInfo.getBlockId();
if (inuseMaxBlockNth_ > blockId) {
inuseMaxBlockNth_ = blockInfoTable_.getCurrentMaxInuseBlockNth(blockId);
}
blockInfo.clear();
blockInfoTable_.remove(blockId);
decrementActiveBlockCount();
}
void LocalTempStore::BufferManager::decrementActiveBlockCount() {
assert(activeBlockCount_ > 0);
--activeBlockCount_;
if (activeBlockCount_ == 0) {
if (file_->getFileSize() > 0) {
try {
file_->trim(0);
UTIL_TRACE_INFO(SQL_TEMP_STORE,
"Truncate swap file. (maxActiveBlockCount=" <<
getMaxActiveBlockCount() << ")");
} catch(std::exception &e) {
UTIL_TRACE_WARNING(SQL_TEMP_STORE,
"Failed to truncate swap file. (reason=" <<
GS_EXCEPTION_MESSAGE(e) << ")");
}
}
}
}
void LocalTempStore::BufferManager::setStableMemoryLimit(uint64_t size) {
util::LockGuard<util::Mutex> guard(mutex_);
stableMemoryLimit_ = size;
blockInfoTable_.fixedAlloc_->setLimit(
util::AllocatorStats::STAT_STABLE_LIMIT, static_cast<size_t>(size));
}
size_t LocalTempStore::BufferManager::getAllocatedMemory() {
return blockInfoTable_.fixedAlloc_->getElementSize()
* (blockInfoTable_.fixedAlloc_->getTotalElementCount()
- blockInfoTable_.fixedAlloc_->getFreeElementCount());
}
void LocalTempStore::BufferManager::swapIn(BlockId blockId, BlockInfo &blockInfo) {
BlockId blockNth = LocalTempStore::getBlockNth(blockId);
try {
file_->readBlock(blockNth, 1, blockInfo.data());
}
catch(std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Swap file read failed: (blockId=" << blockId << ")"));
}
LocalTempStore::Block::Header::validateHeader(blockInfo.data_, blockExpSize_, false);
}
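/*
* Writes the block image to the swap file. The write is skipped when the block
* has already been swapped (MASK_ALREADY_SWAPPED set in assignmentCount_) or when
* it has no remaining assignments; on success the flag is set so the same block
* is not written again until it is reassigned.
*/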
void LocalTempStore::BufferManager::swapOut(BlockInfo &blockInfo) {
assert(blockInfo.baseBlockInfo_);
bool isSwapped =
((MASK_ALREADY_SWAPPED & blockInfo.baseBlockInfo_->assignmentCount_) != 0);
if (!isSwapped && blockInfo.baseBlockInfo_->assignmentCount_ > 0) {
BlockId blockNth = LocalTempStore::getBlockNth(blockInfo.getBlockId());
if (blockInfo.data() == NULL) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_SWAP_OUT_BLOCK_FAILED,
"blockInfo.data_ is NULL. blockId=" << blockInfo.getBlockId() <<
", blockNth=" << blockNth <<
", assignmentCount=" << blockInfo.baseBlockInfo_->assignmentCount_);
}
if (!file_) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_SWAP_OUT_BLOCK_FAILED,
"file_ is NULL.");
}
assert(blockInfo.data() != NULL);
LocalTempStore::Block::Header::validateHeader(blockInfo.data(), blockExpSize_, false);
file_->writeBlock(blockNth, 1, blockInfo.data());
blockInfo.baseBlockInfo_->assignmentCount_ |= MASK_ALREADY_SWAPPED;
}
}
void LocalTempStore::BufferManager::dumpActiveBlockId(std::ostream &ostr) {
blockInfoTable_.dumpActiveBlockId(ostr, blockExpSize_);
}
uint64_t LocalTempStore::BufferManager::getSwapFileSize() {
return file_->getFileSize();
}
uint64_t LocalTempStore::BufferManager::getWriteBlockCount() {
return file_->getWriteBlockCount();
}
uint64_t LocalTempStore::BufferManager::getWriteOperation() {
return file_->getWriteOperation();
}
uint64_t LocalTempStore::BufferManager::getWriteSize() {
return file_->getWriteSize();
}
uint64_t LocalTempStore::BufferManager::getWriteTime() {
return file_->getWriteTime();
}
uint64_t LocalTempStore::BufferManager::getReadBlockCount() {
return file_->getReadBlockCount();
}
uint64_t LocalTempStore::BufferManager::getReadOperation() {
return file_->getReadOperation();
}
uint64_t LocalTempStore::BufferManager::getReadSize() {
return file_->getReadSize();
}
uint64_t LocalTempStore::BufferManager::getReadTime() {
return file_->getReadTime();
}
LocalTempStore::BlockInfoTable::BlockInfoTable(
LTSVariableSizeAllocator &varAlloc, LocalTempStore &store,
uint32_t blockExpSize, uint64_t stableMemoryLimit
, uint64_t swapFileSizeLimit
) :
varAlloc_(varAlloc), fixedAlloc_(NULL), store_(store),
blockInfoList_(varAlloc),
#if UTIL_CXX11_SUPPORTED
blockInfoMap_(LocalTempStore::MIN_INITIAL_BUCKETS, BlockInfoMap::hasher(), BlockInfoMap::key_equal(), varAlloc),
#else
blockInfoMap_(BlockInfoMap::key_compare(), varAlloc),
#endif
freeBlockIdSet_(BlockIdSet::key_compare(), varAlloc),
baseBlockInfoArray_(varAlloc),
blockExpSize_(blockExpSize), stableMemoryLimit_(stableMemoryLimit)
, blockIdMaxLimit_(0)
{
blockInfoPool_ = ALLOC_VAR_SIZE_NEW(varAlloc_) util::ObjectPool<LocalTempStore::BlockInfo>(
util::AllocatorInfo(ALLOCATOR_GROUP_SQL_LTS, "blockInfoPool"));
blockInfoPool_->setFreeElementLimit(LocalTempStore::BLOCK_INFO_POOL_FREE_LIMIT);
fixedAlloc_ = ALLOC_VAR_SIZE_NEW(varAlloc_) LocalTempStore::LTSFixedSizeAllocator(util::AllocatorInfo(ALLOCATOR_GROUP_SQL_LTS, "ltsBufMgrFixed"), 1 << blockExpSize_);
uint64_t maxCount = stableMemoryLimit_ / static_cast<uint32_t>(1UL << blockExpSize_) + 1;
fixedAlloc_->setFreeElementLimit(static_cast<size_t>(maxCount));
blockIdMaxLimit_ = swapFileSizeLimit / static_cast<uint64_t>(1UL << blockExpSize_) + 1;
}
LocalTempStore::BlockInfoTable::~BlockInfoTable() {
BlockInfoMap::iterator itr = blockInfoMap_.begin();
for (; itr != blockInfoMap_.end(); ++itr) {
LocalTempStore::BlockInfo* blockInfo = *(itr->second);
if (blockInfo->data_) {
fixedAlloc_->deallocate(blockInfo->data_);
store_.getBufferManager().subCurrentMemory();
blockInfo->data_ = NULL;
}
UTIL_OBJECT_POOL_DELETE(*blockInfoPool_, blockInfo);
}
ALLOC_VAR_SIZE_DELETE(varAlloc_, fixedAlloc_);
fixedAlloc_ = NULL;
ALLOC_VAR_SIZE_DELETE(varAlloc_, blockInfoPool_);
blockInfoPool_ = NULL;
}
LocalTempStore::BufferManager::File::File(
LTSVariableSizeAllocator &varAlloc, LocalTempStore &store,
const LocalTempStore::Config &config,
uint32_t blockExpSize, const std::string &topDir)
: varAlloc_(varAlloc), store_(store), swapFilesTopDir_(topDir)
, file_(NULL), blockExpSize_(blockExpSize)
, blockSize_(1 << blockExpSize)
, blockNum_(0), readBlockCount_(0), readOperation_(0), readSize_(0), readTime_(0)
, writeBlockCount_(0), writeOperation_(0), writeSize_(0), writeTime_(0)
, swapSyncInterval_(config.swapSyncInterval_), swapSyncCount_(0)
, swapSyncSize_(config.swapSyncSize_)
, swapReleaseInterval_(config.swapReleaseInterval_)
, ioWarningThresholdMillis_(
LocalTempStore::DEFAULT_IO_WARNING_THRESHOLD_MILLIS) {
if (config.swapSyncSize_ > 0) {
uint32_t interval = static_cast<uint32_t>((config.swapSyncSize_ - 1) / blockSize_ + 1);
if (config.swapSyncInterval_ > 0) {
swapSyncInterval_ = static_cast<uint32_t>(std::min(config.swapSyncInterval_, interval));
}
else {
swapSyncInterval_ = interval;
}
}
UTIL_TRACE_INFO(SQL_TEMP_STORE,
"File init: fileName," << fileName_ <<
", swapSyncInterval_=" << swapSyncInterval_ <<
", swapSyncSize_=" << swapSyncSize_);
}
LocalTempStore::BufferManager::File::~File() try {
if (file_) {
UTIL_TRACE_DEBUG(SQL_TEMP_STORE, "~File(): fileName=" << fileName_.c_str()
<< ", readCount=" << readBlockCount_
<< ", writeCount=" << writeBlockCount_);
}
ALLOC_VAR_SIZE_DELETE(varAlloc_, file_);
file_ = NULL;
}
catch (...) {
}
uint64_t LocalTempStore::BufferManager::File::getWriteBlockCount() {
return writeBlockCount_;
}
uint64_t LocalTempStore::BufferManager::File::getWriteOperation() {
return writeOperation_;
}
uint64_t LocalTempStore::BufferManager::File::getWriteSize() {
return writeSize_;
}
uint64_t LocalTempStore::BufferManager::File::getWriteTime() {
return writeTime_;
}
uint64_t LocalTempStore::BufferManager::File::getReadBlockCount() {
return readBlockCount_;
}
uint64_t LocalTempStore::BufferManager::File::getReadOperation() {
return readOperation_;
}
uint64_t LocalTempStore::BufferManager::File::getReadSize() {
return readSize_;
}
uint64_t LocalTempStore::BufferManager::File::getReadTime() {
return readTime_;
}
void LocalTempStore::BufferManager::File::resetWriteBlockCount() {
writeBlockCount_ = 0;
}
void LocalTempStore::BufferManager::File::resetReadBlockCount() {
readBlockCount_ = 0;
}
void LocalTempStore::BufferManager::File::countFileBlock(
uint64_t &totalCount, uint64_t &fileSize) {
totalCount = blockNum_;
fileSize = getFileSize();
}
void LocalTempStore::BlockInfoTable::dumpActiveBlockId(
std::ostream &ostr, uint32_t blockExpSize) {
size_t blockNum = baseBlockInfoArray_.size();
bool found = false;
ostr << "blockNum=" << blockNum << ", freeBlockNthList=(";
for (uint64_t blockNth = 0; blockNth < blockNum; ++blockNth) {
if (freeBlockIdSet_.find(LocalTempStore::makeBlockId(blockNth, blockExpSize))
== freeBlockIdSet_.end()) {
ostr << blockNth << ", ";
found = true;
}
}
if (found) {
ostr << ")" << std::endl;
}
else {
ostr << "empty)" << std::endl;
}
}
size_t LocalTempStore::BufferManager::File::readBlock(
FileBlockId fileBlockId, size_t count, void *buffer) {
try {
assert(buffer);
assert(file_);
if (blockNum_ > count + fileBlockId - 1) {
ssize_t remain = static_cast<ssize_t>(count * blockSize_);
uint8_t* addr = static_cast<uint8_t*>(buffer);
off_t offset = static_cast<off_t>(fileBlockId * blockSize_);
const uint64_t readStartClock = util::Stopwatch::currentClock();
uint64_t retryCount = 0;
GS_FILE_READ_ALL(
IO_MONITOR, (*file_), addr, remain, offset,
ioWarningThresholdMillis_, retryCount);
const uint32_t readTime = util::Stopwatch::clockToMillis(
util::Stopwatch::currentClock() - readStartClock);
readBlockCount_ += count;
++readOperation_;
readSize_ += static_cast<uint64_t>(count * blockSize_);
readTime_ += readTime;
uint32_t currentCheckSum =
LocalTempStore::Block::Header::calcBlockCheckSum(buffer);
uint32_t headerCheckSum =
LocalTempStore::Block::Header::getHeaderCheckSum(buffer);
if (headerCheckSum != currentCheckSum) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_SWAP_IN_BLOCK_FAILED,
"Checksum error occured.");
}
return count;
}
else {
GS_THROW_USER_ERROR(GS_ERROR_LTS_SWAP_IN_BLOCK_FAILED,
"Target block is not existed: fileName=" << fileName_ <<
",blockNo," << fileBlockId);
}
}
catch (std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Swap file read failed: fileName=" << fileName_ <<
",blockNo," << fileBlockId));
}
}
size_t LocalTempStore::BufferManager::File::writeBlock(
FileBlockId fileBlockId, size_t count, void* buffer) {
LocalTempStore::Block::Header::updateHeaderCheckSum(buffer);
assert(buffer);
try {
assert(file_);
uint64_t writeOffset = fileBlockId;
ssize_t remain = static_cast<ssize_t>(count * blockSize_);
const uint8_t* addr = static_cast<const uint8_t*>(buffer);
off_t offset = static_cast<off_t>(fileBlockId * blockSize_);
const uint64_t writeStartClock = util::Stopwatch::currentClock();
uint64_t retryCount = 0;
GS_FILE_WRITE_ALL(
IO_MONITOR, (*file_), addr, remain, offset,
ioWarningThresholdMillis_, retryCount);
const uint32_t writeTime = util::Stopwatch::clockToMillis(
util::Stopwatch::currentClock() - writeStartClock);
writeBlockCount_ += count;
++writeOperation_;
writeSize_ += static_cast<uint64_t>(count * blockSize_);
writeTime_ += writeTime;
if (blockNum_ < (count + writeOffset)) {
blockNum_ = (count + writeOffset);
}
if (swapSyncInterval_ > 0 &&
(writeBlockCount_ % swapSyncInterval_ == 0)) {
file_->sync();
UTIL_TRACE_INFO(SQL_TEMP_STORE,
"sync swap file: fileName," << fileName_ <<
",writeBlockCount," << writeBlockCount_);
#ifndef WIN32
advise(POSIX_FADV_DONTNEED);
#endif
}
return count;
}
catch (std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Swap file write failed: fileName=" << fileName_ <<
",blockNo," << fileBlockId));
}
}
bool LocalTempStore::BufferManager::File::open() {
try {
if (!swapFilesTopDir_.empty()) {
util::NormalOStringStream name;
name << LocalTempStore::SWAP_FILE_BASE_NAME << blockExpSize_ <<
LocalTempStore::SWAP_FILE_EXTENSION;
util::FileSystem::createPath(
swapFilesTopDir_.c_str(), name.str().c_str(), fileName_);
}
if (util::FileSystem::exists(fileName_.c_str())) {
try {
util::FileSystem::remove(fileName_.c_str());
}
catch (std::exception &e) {
UTIL_TRACE_WARNING(SQL_TEMP_STORE, "Swap file remove failed. (reason="
<< GS_EXCEPTION_MESSAGE(e) << ")");
}
file_ = ALLOC_VAR_SIZE_NEW(varAlloc_) util::NamedFile();
file_->open(fileName_.c_str(),
util::FileFlag::TYPE_CREATE
| util::FileFlag::TYPE_TRUNCATE
| util::FileFlag::TYPE_READ_WRITE);
file_->lock();
blockNum_ = 0;
return false;
}
else {
u8string dirName;
util::FileSystem::getDirectoryName(fileName_.c_str(), dirName);
util::FileSystem::createDirectoryTree(dirName.c_str());
file_ = ALLOC_VAR_SIZE_NEW(varAlloc_) util::NamedFile();
file_->open(fileName_.c_str(),
util::FileFlag::TYPE_CREATE |
util::FileFlag::TYPE_READ_WRITE);
file_->lock();
blockNum_ = 0;
return true;
}
}
catch (std::exception &e) {
GS_RETHROW_USER_OR_SYSTEM(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Failed to create swap file"));
}
}
void LocalTempStore::BufferManager::File::close() {
if (file_) {
file_->unlock();
#ifndef WIN32
file_->sync();
advise(POSIX_FADV_DONTNEED);
#endif
file_->close();
try {
util::FileSystem::remove(fileName_.c_str());
}
catch (std::exception &e) {
UTIL_TRACE_WARNING(SQL_TEMP_STORE,
"Swap file remove failed. (reason=" <<
GS_EXCEPTION_MESSAGE(e) << ")");
}
ALLOC_VAR_SIZE_DELETE(varAlloc_, file_);
}
file_ = NULL;
}
void LocalTempStore::BufferManager::File::flush() {
if (file_) {
const uint64_t startClock = util::Stopwatch::currentClock();
file_->sync();
const uint32_t lap = util::Stopwatch::clockToMillis(util::Stopwatch::currentClock() - startClock);
if (lap > ioWarningThresholdMillis_) {
UTIL_TRACE_WARNING(IO_MONITOR,
"[LONG I/O] sync time," << lap <<
",fileName," << fileName_);
}
}
}
void LocalTempStore::BufferManager::File::trim(uint64_t size) {
if (file_) {
file_->setSize(size);
}
}
void LocalTempStore::BufferManager::File::advise(int32_t advise) {
#ifndef WIN32
if (!file_->isClosed()) {
int32_t ret = posix_fadvise(file_->getHandle(), 0, 0, advise);
if (ret > 0) {
UTIL_TRACE_WARNING(SQL_TEMP_STORE,
"fadvise failed. :" <<
"fileName," << fileName_ <<
",advise," << advise <<
",returnCode," << ret);
}
UTIL_TRACE_INFO(SQL_TEMP_STORE,
"advise(POSIX_FADV_DONTNEED) : fileName," << fileName_);
}
#endif
}
uint64_t LocalTempStore::BufferManager::File::getFileSize() {
uint64_t fileSize = 0;
try {
if (file_) {
util::FileStatus fileStatus;
file_->getStatus(&fileStatus);
fileSize = fileStatus.getSize();
}
}
catch (std::exception &e) {
GS_RETHROW_USER_ERROR(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Failed to get file status"));
}
return fileSize;
}
LocalTempStore::Block::Block() : blockInfo_(NULL) {
}
LocalTempStore::Block::Block(LocalTempStore &store, LocalTempStore::BlockId blockId)
: blockInfo_(NULL) {
store.getBufferManager().get(blockId, blockInfo_);
}
LocalTempStore::Block::Block(LocalTempStore &store, size_t blockSize, uint64_t affinity) {
static_cast<void>(blockSize);
store.getBufferManager().allocate(blockInfo_, affinity);
setSwapped(false);
}
LocalTempStore::Block::Block(LocalTempStore &store, const void *data, size_t size) {
store.getBufferManager().allocate(blockInfo_);
uint32_t blockExpSize = LocalTempStore::Block::Header::getBlockExpSize(blockInfo_->data());
assert(size == static_cast<size_t>(1 << blockExpSize));
if (size != static_cast<size_t>(1 << blockExpSize)) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid block size (blockSize=" << (1 << blockExpSize) <<
", imageSize=" << size << ")");
}
uint8_t* dest = static_cast<uint8_t*>(blockInfo_->data())
+ LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET;
const uint8_t* src = static_cast<const uint8_t*>(data)
+ LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET;
memcpy(dest, src, size - LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET);
setSwapped(false);
LocalTempStore::Block::Header::validateHeader(blockInfo_->data(), blockExpSize, false);
}
LocalTempStore::Block::Block(LocalTempStore::BlockInfo &blockInfo)
: blockInfo_(&blockInfo) {
}
void LocalTempStore::Block::dumpContents(std::ostream &ostr) const {
assert(blockInfo_);
blockInfo_->dumpContents(ostr);
ostr << std::endl;
}
void LocalTempStore::Block::Header::validateHeader(
void *block, uint32_t blockExpSize, bool verifyCheckSum) {
uint16_t magic = LocalTempStore::Block::Header::getMagic(block);
uint16_t version = LocalTempStore::Block::Header::getVersion(block);
uint32_t expSize = LocalTempStore::Block::Header::getBlockExpSize(block);
assert(magic == LocalTempStore::Block::Header::MAGIC_NUMBER);
if (magic != LocalTempStore::Block::Header::MAGIC_NUMBER) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid magic number (value=" <<
LocalTempStore::Block::Header::MAGIC_NUMBER << ")");
}
assert(version == LocalTempStore::Block::Header::VERSION_NUMBER);
if (version != LocalTempStore::Block::Header::VERSION_NUMBER) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid version number (value=" <<
LocalTempStore::Block::Header::MAGIC_NUMBER << ")");
}
assert(expSize == blockExpSize);
if (blockExpSize != 0 && expSize != blockExpSize) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid block size (expected=" << blockExpSize <<
", value=" <<
LocalTempStore::Block::Header::MAGIC_NUMBER << ")");
}
if (verifyCheckSum) {
uint32_t headerCheckSum = LocalTempStore::Block::Header::getHeaderCheckSum(block);
uint32_t blockCheckSum = LocalTempStore::Block::Header::calcBlockCheckSum(block);
assert(headerCheckSum == blockCheckSum);
if (headerCheckSum != blockCheckSum) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Checksum error (header=" << headerCheckSum <<
", actual=" << blockCheckSum <<")");
}
}
}
void LocalTempStore::BlockInfo::dumpContents(std::ostream &ostr) const {
assert(false);
ostr << std::endl;
}
bool LocalTempStore::BlockInfo::checkLocked(bool getLock) {
bool isLocked = !mutex_.tryLock();
if (!getLock) {
mutex_.unlock();
}
return isLocked;
};
LocalTempStore::BlockInfo::BlockInfo(LocalTempStore &store, BlockId blockId)
: data_(NULL), baseBlockInfo_(NULL), store_(&store)
, blockId_(0)
, refCount_(0) {
}
void LocalTempStore::BlockInfo::initialize(LocalTempStore &store, BlockId id) {
store_ = &store;
blockId_ = id;
refCount_ = 0;
}
void LocalTempStore::BlockInfo::swapOut(LocalTempStore &store, BlockId id) {
assert(data_);
bool isSwapped =
((MASK_ALREADY_SWAPPED & baseBlockInfo_->assignmentCount_) != 0);
uint64_t assignmentCount = (~MASK_ALREADY_SWAPPED) & baseBlockInfo_->assignmentCount_;
if (!isSwapped && assignmentCount > 0 && blockId_ != id) {
#ifndef NDEBUG
BlockId headerBlockId = LocalTempStore::Block::Header::getBlockId(data_);
assert(headerBlockId == blockId_);
#endif
store.getBufferManager().swapOut(*this);
}
}
void LocalTempStore::BlockInfo::setup(LocalTempStore &store, BlockId id, bool isNew) {
static const uint32_t blockExpSize = store.getDefaultBlockExpSize();
assert(data_);
BlockId headerBlockId = LocalTempStore::Block::Header::getBlockId(data_);
if (!isNew && (id != blockId_ || id != headerBlockId)) {
store.getBufferManager().swapIn(id, *this);
assert(LocalTempStore::Block::Header::getBlockId(data_) == id);
}
if (isNew) {
LocalTempStore::Block::Header::resetHeader(data_, blockExpSize);
LocalTempStore::Block::Header::setBlockId(data_, id);
}
store_ = &store;
blockId_ = id;
#ifndef NDEBUG
headerBlockId = LocalTempStore::Block::Header::getBlockId(data_);
assert(headerBlockId == id);
assert(blockId_ == id);
#endif
}
void LocalTempStore::Block::assign(const void *data, size_t size, bool isPartial) {
assert(blockInfo_);
assert(data);
blockInfo_->assign(data, size, isPartial);
}
void LocalTempStore::BlockInfo::assign(const void *data, size_t size, bool isPartial) {
assert(data_);
assert(store_);
const size_t blockSize = static_cast<size_t>(
1 << LocalTempStore::Block::Header::getBlockExpSize(data_));
assert(isPartial ? true : (size == blockSize));
if (size > blockSize) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Block size is too small (blockSize=" << blockSize <<
", imageSize=" << size << ")");
}
uint8_t* dest = data_ + LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET;
const uint8_t* src =
static_cast<const uint8_t*>(data) +
LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET;
memcpy(dest, src, size - LocalTempStore::Block::Header::CONTIGUOUS_BLOCK_NUM_OFFSET);
}
LocalTempStore::Config::Config(const ConfigTable &config)
: blockSize_(DEFAULT_BLOCK_SIZE)
, swapFileSizeLimit_(ConfigTable::megaBytesToBytes(
config.getUInt32(CONFIG_TABLE_SQL_STORE_SWAP_FILE_SIZE_LIMIT)))
, stableMemoryLimit_(ConfigTable::megaBytesToBytes(
config.getUInt32(CONFIG_TABLE_SQL_STORE_MEMORY_LIMIT)))
, swapFilesTopDir_(
config.get<const char8_t *>(CONFIG_TABLE_SQL_STORE_SWAP_FILE_PATH))
, swapSyncInterval_(config.getUInt32(CONFIG_TABLE_SQL_STORE_SWAP_SYNC_INTERVAL))
, swapSyncSize_(ConfigTable::megaBytesToBytes(
config.getUInt32(CONFIG_TABLE_SQL_STORE_SWAP_SYNC_SIZE)))
, swapReleaseInterval_(config.getUInt32(CONFIG_TABLE_SQL_STORE_SWAP_RELEASE_INTERVAL))
{
}
LocalTempStore::Config::Config()
: blockSize_(DEFAULT_BLOCK_SIZE)
, swapFileSizeLimit_(ConfigTable::megaBytesToBytes(
static_cast<size_t>(DEFAULT_STORE_SWAP_FILE_SIZE_LIMIT_MB)))
, stableMemoryLimit_(ConfigTable::megaBytesToBytes(
static_cast<size_t>(DEFAULT_STORE_MEMORY_LIMIT_MB)))
, swapFilesTopDir_(DEFAULT_SWAP_FILES_TOP_DIR)
, swapSyncInterval_(0)
, swapSyncSize_(0)
, swapReleaseInterval_(0)
{
}
LocalTempStore::Group::Group(LocalTempStore &store): store_(store), groupId_(UNDEF_GROUPID) {
groupId_ = store_.allocateGroup();
}
LocalTempStore::Group::~Group() try {
store_.deallocateGroup(groupId_);
}
catch (...) {
}
LocalTempStore& LocalTempStore::Group::getStore() const {
return store_;
}
LocalTempStore::GroupId LocalTempStore::Group::getId() const {
return groupId_;
}
LocalTempStore::ResourceInfo& LocalTempStore::getResourceInfo(ResourceId resourceId) {
return resourceInfoManager_->getResourceInfo(resourceId);
}
void LocalTempStore::incrementGroupResourceCount(GroupId groupId) {
resourceInfoManager_->incrementGroupResourceCount(groupId);
}
void LocalTempStore::decrementGroupResourceCount(GroupId groupId) {
resourceInfoManager_->decrementGroupResourceCount(groupId);
}
int64_t LocalTempStore::getGroupResourceCount(GroupId groupId) {
return resourceInfoManager_->getGroupResourceCount(groupId);
}
size_t LocalTempStore::getVariableAllocatorStat(
size_t &totalSize, size_t &freeSize, size_t &hugeCount, size_t &hugeSize) {
totalSize = varAlloc_->getTotalElementSize();
freeSize = varAlloc_->getFreeElementSize();
hugeCount = varAlloc_->getHugeElementSize();
hugeSize = varAlloc_->getHugeElementSize();
return (totalSize - freeSize) + hugeSize;
}
void LocalTempStore::countBlockInfo(
uint64_t &freeCount, uint64_t &latchedCount, uint64_t &unlatchedCount, uint64_t &noneCount) {
bufferManager_->countBlockInfo(freeCount, latchedCount, unlatchedCount, noneCount);
}
void LocalTempStore::countFileBlock(uint64_t &totalCount, uint64_t &freeCount, uint64_t &fileSize) {
totalCount = 0;
freeCount = 0;
fileSize = 0;
bufferManager_->countFileBlock(totalCount, fileSize);
}
void LocalTempStore::BufferManager::countFileBlock(uint64_t &totalCount, uint64_t &fileSize) {
file_->countFileBlock(totalCount, fileSize);
}
void LocalTempStore::dumpBlockInfo() {
bufferManager_->dumpBlockInfo();
}
void LocalTempStore::dumpBlockInfoContents() {
bufferManager_->dumpBlockInfoContents();
}
void LocalTempStore::BufferManager::countBlockInfo(
uint64_t &freeCount, uint64_t &latchedCount,
uint64_t &unlatchedCount, uint64_t &noneCount) {
util::LockGuard<util::Mutex> guard(mutex_);
blockInfoTable_.countBlockInfo(
blockExpSize_, freeCount, latchedCount, unlatchedCount, noneCount);
}
void LocalTempStore::BlockInfoTable::countBlockInfo(
uint32_t blockExpSize, uint64_t &freeCount, uint64_t &latchedCount,
uint64_t &unlatchedCount, uint64_t &noneCount) {
freeCount = 0;
latchedCount = 0;
unlatchedCount = 0;
noneCount = 0;
size_t blockNum = baseBlockInfoArray_.size();
for (uint64_t blockNth = 0; blockNth < blockNum; ++blockNth) {
BlockId id = LocalTempStore::makeBlockId(blockNth, blockExpSize);
BaseBlockInfo* baseBlockInfo = lookupBaseInfo(id);
BlockInfo* blockInfo = lookup(id);
if (blockInfo) {
if (blockInfo->getReferenceCount() > 0) {
++latchedCount;
}
else {
if (0 == ((~MASK_ALREADY_SWAPPED) & baseBlockInfo->assignmentCount_)) {
++freeCount;
}
else {
++unlatchedCount;
}
}
}
else {
if (0 == ((~MASK_ALREADY_SWAPPED) & baseBlockInfo->assignmentCount_)) {
++freeCount;
}
else {
++unlatchedCount;
}
}
}
}
void LocalTempStore::BufferManager::dumpBlockInfo() {
std::cerr << "LocalTempStore::BufferManager::dumpMemory" << std::endl;
std::cerr << std::endl;
}
void LocalTempStore::BufferManager::dumpBlockInfoContents() {
std::cerr << "LocalTempStore::BufferManager::dumpMemoryContents" << std::endl;
std::cerr << std::endl;
}
void LocalTempStore::dumpMemory(std::ostream &ostr, const void* top, size_t size) {
int64_t count = 0;
const uint8_t* startAddr = static_cast<const uint8_t*>(top);
const uint8_t* addr = startAddr;
for (; addr < startAddr + size; ++addr) {
if (count % 16 == 0) {
ostr << std::setw(8) << std::setfill('0') << std::hex
<< std::nouppercase << (uintptr_t)(addr - startAddr) << " ";
}
ostr << std::setw(2) << std::setfill('0') << std::hex
<< std::nouppercase << (uint32_t)(*addr) << " ";
if (count % 16 == 15) {
ostr << std::endl;
}
++count;
}
ostr << std::endl;
}
void LocalTempStore::dumpBackTrace() {
#ifndef WIN32
int nptrs;
const int FRAME_DEPTH = 50;
void *buffer[FRAME_DEPTH];
nptrs = backtrace(buffer, FRAME_DEPTH);
backtrace_symbols_fd(buffer, nptrs, STDOUT_FILENO);
#endif
abort();
}
LocalTempStore::ResourceInfoManager::ResourceInfoManager(LocalTempStore &store) :
store_(store),
#if UTIL_CXX11_SUPPORTED
groupInfoMap_(LocalTempStore::MIN_INITIAL_BUCKETS, GroupInfoMap::hasher(),
GroupInfoMap::key_equal(), store.getVarAllocator()),
resourceInfoMap_(LocalTempStore::MIN_INITIAL_BUCKETS, ResourceInfoMap::hasher(),
ResourceInfoMap::key_equal(), store.getVarAllocator()),
#else
groupInfoMap_(GroupInfoMap::key_compare(), store.getVarAllocator()),
resourceInfoMap_(ResourceInfoMap::key_compare(), store.getVarAllocator()),
#endif
maxGroupId_(0),
maxResourceId_(0) {
}
LocalTempStore::ResourceInfoManager::~ResourceInfoManager() {
util::LockGuard<util::Mutex> guard(mutex_);
groupInfoMap_.clear();
resourceInfoMap_.clear();
}
LocalTempStore::GroupId LocalTempStore::ResourceInfoManager::allocateGroup() {
util::LockGuard<util::Mutex> guard(mutex_);
GroupId id = ++maxGroupId_;
assert(groupInfoMap_.find(id) == groupInfoMap_.end());
GroupInfo info;
groupInfoMap_[id] = info;
GroupInfo& groupInfo = groupInfoMap_[id];
groupInfo.groupId_ = id;
groupInfo.resourceCount_ = 0;
groupInfo.status_ = GROUP_ACTIVE;
assert(groupInfo.groupId_ == id);
return id;
}
LocalTempStore::GroupInfo& LocalTempStore::ResourceInfoManager::getGroupInfo(GroupId id) {
util::LockGuard<util::Mutex> guard(mutex_);
assert(id <= maxGroupId_);
assert(groupInfoMap_.find(id) != groupInfoMap_.end());
assert(groupInfoMap_[id].groupId_ == id);
return groupInfoMap_[id];
}
void LocalTempStore::ResourceInfoManager::freeGroup(GroupId groupId) {
util::LockGuard<util::Mutex> guard(mutex_);
assert(groupId <= maxGroupId_);
if (groupInfoMap_.find(groupId) != groupInfoMap_.end()) {
GroupInfo &info = groupInfoMap_[groupId];
info.status_ = GROUP_INACTIVE;
if (0 == info.resourceCount_) {
groupInfoMap_.erase(groupId);
}
}
}
void LocalTempStore::ResourceInfoManager::incrementGroupResourceCount(
GroupId groupId) {
if (groupInfoMap_.find(groupId) == groupInfoMap_.end()) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_INVALID_GROUP_ID,
"GroupId (" << groupId <<
") is invalid. (Not allocated or already deallocated.)");
}
GroupInfo& groupInfo = groupInfoMap_[groupId];
++groupInfo.resourceCount_;
}
void LocalTempStore::ResourceInfoManager::decrementGroupResourceCount(GroupId groupId) {
if (groupInfoMap_.find(groupId) == groupInfoMap_.end()) {
return;
}
GroupInfo& groupInfo = groupInfoMap_[groupId];
--groupInfo.resourceCount_;
assert(groupInfo.resourceCount_ >= 0);
if (groupInfo.resourceCount_ == 0) {
groupInfoMap_.erase(groupId);
}
}
int64_t LocalTempStore::ResourceInfoManager::getGroupResourceCount(GroupId groupId) {
assert(groupInfoMap_.find(groupId) != groupInfoMap_.end());
GroupInfo &groupInfo = groupInfoMap_[groupId];
return groupInfo.resourceCount_;
}
LocalTempStore::ResourceId LocalTempStore::ResourceInfoManager::allocateResource(
LocalTempStore::ResourceType type, LocalTempStore::GroupId groupId) {
util::LockGuard<util::Mutex> guard(mutex_);
ResourceId id = ++maxResourceId_;
ResourceInfo info;
info.groupId_ = groupId;
info.id_ = id;
info.status_ = LocalTempStore::RESOURCE_INITIALIZED;
info.resource_ = NULL;
info.type_ = type;
assert(resourceInfoMap_.find(id) == resourceInfoMap_.end());
resourceInfoMap_[id] = info;
if (UNDEF_GROUPID != groupId) {
store_.incrementGroupResourceCount(groupId);
}
return id;
}
LocalTempStore::ResourceInfo& LocalTempStore::ResourceInfoManager::getResourceInfo(ResourceId id) {
util::LockGuard<util::Mutex> guard(mutex_);
assert(id <= maxResourceId_);
if (resourceInfoMap_.find(id) != resourceInfoMap_.end()) {
assert(resourceInfoMap_[id].id_ == id);
return resourceInfoMap_[id];
}
else {
GS_THROW_USER_ERROR(GS_ERROR_LTS_INVALID_RESOURCE_ID,
"ResourceId(" << id << ") is not found");
}
}
void LocalTempStore::ResourceInfoManager::freeResource(ResourceId resourceId) {
util::LockGuard<util::Mutex> guard(mutex_);
assert(resourceId <= maxResourceId_);
if (resourceInfoMap_.find(resourceId) != resourceInfoMap_.end()) {
ResourceInfo &resourceInfo = resourceInfoMap_[resourceId];
assert(resourceInfo.status_ != RESOURCE_NONE);
resourceInfo.status_ = RESOURCE_INACTIVE;
if (UNDEF_GROUPID != resourceInfo.groupId_) {
store_.decrementGroupResourceCount(resourceInfo.groupId_);
}
resourceInfoMap_.erase(resourceId);
}
}
void LocalTempStore::Block::encode(EventByteOutStream &out, uint32_t checkRatio) {
try {
if (blockInfo_ != NULL) {
encodeMain(out, checkRatio);
}
}
catch (std::exception &e) {
GS_RETHROW_USER_OR_SYSTEM(
e, GS_EXCEPTION_MERGE_MESSAGE(e,
"Encode binary failed"));
}
}
void LocalTempStore::Block::decode(util::StackAllocator &alloc, EventByteInStream &in) {
decodeMain(alloc, in);
}
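/*
* Serializes the block for event transport. When the used ratio is at or below
* checkRatio, only the occupied regions are written: FLAG_PARTIAL_BLOCK_1 for a
* single contiguous prefix, FLAG_PARTIAL_BLOCK_2 for a fixed-data prefix plus the
* variable-data suffix at the end of the block. Otherwise the whole block is
* written with FLAG_FULL_BLOCK. decodeMain() reverses this layout.
*/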
void LocalTempStore::Block::encodeMain(EventByteOutStream &out, uint32_t checkRatio) {
uint32_t usedRatio = 0;
uint32_t usedSize = 0;
assert((1U << Header::getBlockExpSize(data())) == DEFAULT_BLOCK_SIZE);
static const uint32_t BLOCK_SIZE = DEFAULT_BLOCK_SIZE;
if (Header::getContiguousBlockNum(data()) >= 0) {
usedSize = Header::getNextFixDataOffset(data())
+ BLOCK_SIZE - Header::getNextVarDataOffset(data()) + 1;
usedRatio = usedSize * 100 / BLOCK_SIZE;
}
else {
usedSize = Header::getNextVarDataOffset(data());
usedRatio = usedSize * 100 / BLOCK_SIZE;
}
if (usedRatio <= checkRatio) {
if (Header::getContiguousBlockNum(data()) >= 0) {
if (BLOCK_SIZE == Header::getNextVarDataOffset(data())) {
out << FLAG_PARTIAL_BLOCK_1;
uint32_t fixedPartSize = Header::getNextFixDataOffset(data());
out << std::pair<const uint8_t*, size_t>(
static_cast<const uint8_t *>(data()), fixedPartSize);
}
else {
out << FLAG_PARTIAL_BLOCK_2;
uint32_t fixedPartSize = Header::getNextFixDataOffset(data());
out << fixedPartSize;
out << std::pair<const uint8_t*, size_t>(
static_cast<const uint8_t *>(data()), fixedPartSize);
uint32_t varPartSize = BLOCK_SIZE - Header::getNextVarDataOffset(data());
out << std::pair<const uint8_t*, size_t>(
static_cast<const uint8_t *>(data())
+ Header::getNextVarDataOffset(data()), varPartSize);
}
}
else {
out << FLAG_PARTIAL_BLOCK_1;
out << std::pair<const uint8_t*, size_t>(
static_cast<const uint8_t *>(data()),
Header::getNextVarDataOffset(data()));
}
}
else {
out << FLAG_FULL_BLOCK;
out << std::pair<const uint8_t*, size_t>(
static_cast<const uint8_t *>(data()), BLOCK_SIZE);
}
}
void LocalTempStore::Block::decodeMain(
util::StackAllocator &alloc, util::ArrayByteInStream &in) {
util::StackAllocator::Scope scope(alloc);
size_t size = static_cast<size_t>(in.base().remaining());
const uint8_t temp = 0;
assert((1U << Header::getBlockExpSize(data())) == DEFAULT_BLOCK_SIZE);
static const uint32_t BLOCK_SIZE = DEFAULT_BLOCK_SIZE;
#ifndef NDEBUG
uint64_t origBlockId = LocalTempStore::Block::Header::getBlockId(data());
#endif
if (size != 0) {
uint8_t partialFlag;
in >> partialFlag;
size = static_cast<size_t>(in.base().remaining());
switch (partialFlag) {
case FLAG_FULL_BLOCK:
{
assert(BLOCK_SIZE == size);
util::XArray<uint8_t> binary(alloc);
binary.resize(size);
binary.assign(size, temp);
in >> std::make_pair(binary.data(), size);
if (size != BLOCK_SIZE) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid block size (blockSize=" << BLOCK_SIZE <<
", imageSize=" << size << ")");
}
assign(binary.data(), size, false);
}
break;
case FLAG_PARTIAL_BLOCK_1:
{
util::XArray<uint8_t> binary(alloc);
binary.resize(size);
binary.assign(size, temp);
in >> std::make_pair(binary.data(), size);
if ((1U << Header::getBlockExpSize(binary.data())) != BLOCK_SIZE) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid block size (blockSize=" << BLOCK_SIZE <<
", imageBlockSize=" << (1U << Header::getBlockExpSize(binary.data())) << ")");
}
assign(binary.data(), size, true);
}
break;
case FLAG_PARTIAL_BLOCK_2:
{
uint32_t firstPartSize;
in >> firstPartSize;
assert(firstPartSize <= BLOCK_SIZE);
util::XArray<uint8_t> binary(alloc);
binary.resize(firstPartSize);
binary.assign(firstPartSize, temp);
in >> std::make_pair(binary.data(), firstPartSize);
if ((1U << Header::getBlockExpSize(binary.data())) != BLOCK_SIZE) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Invalid block size (blockSize=" << BLOCK_SIZE <<
", imageBlockSize=" << (1U << Header::getBlockExpSize(binary.data())) << ")");
}
assign(binary.data(), firstPartSize, true);
uint32_t secondPartSize = static_cast<uint32_t>(in.base().remaining());
assert(secondPartSize <= BLOCK_SIZE);
assert((firstPartSize + secondPartSize) <= BLOCK_SIZE);
if ((firstPartSize + secondPartSize) > BLOCK_SIZE) {
GS_THROW_USER_ERROR(GS_ERROR_LTS_BLOCK_VALIDATION_FAILED,
"Block size is too small (blockSize=" << BLOCK_SIZE <<
", totalImageSize=" << (firstPartSize + secondPartSize) << ")");
}
uint8_t *addr = static_cast<uint8_t*>(data()) + (BLOCK_SIZE - secondPartSize);
in >> std::make_pair(addr, secondPartSize);
}
break;
default:
assert(false);
break;
}
assert(LocalTempStore::Block::Header::getBlockId(data()) == origBlockId);
LocalTempStore::Block::Header::validateHeader(
data(), Header::getBlockExpSize(data()), false);
}
}
LTSEventBufferManager::LTSEventBufferManager(LocalTempStore &store) :
store_(store),
group_(store_) {
}
LTSEventBufferManager::~LTSEventBufferManager() {
}
size_t LTSEventBufferManager::getUnitSize() {
assert(LocalTempStore::DEFAULT_BLOCK_SIZE >
LocalTempStore::Block::Header::BLOCK_HEADER_SIZE);
return LocalTempStore::DEFAULT_BLOCK_SIZE -
LocalTempStore::Block::Header::BLOCK_HEADER_SIZE;
}
std::pair<LTSEventBufferManager::BufferId, void*>
LTSEventBufferManager::allocate() {
LocalTempStore::BlockInfo *blockInfo;
store_.getBufferManager().allocate(blockInfo, group_.getId(), true);
store_.getBufferManager().addAssignment(blockInfo->getBlockId());
return std::make_pair(
BufferId(blockInfo->getBlockId(), 0), getBlockBody(*blockInfo));
}
void LTSEventBufferManager::deallocate(const BufferId &id) {
store_.getBufferManager().removeAssignment(id.first);
}
void* LTSEventBufferManager::latch(const BufferId &id) {
LocalTempStore::BlockInfo *blockInfo;
store_.getBufferManager().get(id.first, blockInfo);
return getBlockBody(*blockInfo);
}
void LTSEventBufferManager::unlatch(const BufferId &id) {
LocalTempStore::BlockInfo *blockInfo;
store_.getBufferManager().get(id.first, blockInfo);
for (size_t i = 0; i < 2; i++) {
store_.getBufferManager().removeReference(*blockInfo);
}
}
void* LTSEventBufferManager::getBlockBody(
LocalTempStore::BlockInfo &blockInfo) {
return static_cast<uint8_t*>(blockInfo.data()) +
LocalTempStore::Block::Header::BLOCK_HEADER_SIZE;
}
| 19,091 |
15,337 | import re
from collections import defaultdict
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For each user, if the group with the exact scope of permissions exists,
add the user to it, else create a new group with this scope of permissions
and add the user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
counter = get_counter_value(Group)
mapping = create_permissions_mapping(User)
for perms, users in mapping.items():
group = get_group_with_given_permissions(perms, groups)
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, counter, Group)
group.user_set.add(*users)
counter += 1
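# Illustrative sketch (not part of the original migration): create_permissions_mapping()
# below returns a dict keyed by tuples of permission pks, e.g.
#     {(3, 7): {12, 45}, (3,): {88}}
# meaning users 12 and 45 hold exactly permissions {3, 7}. For each key, an existing
# group with exactly that permission set is reused if found; otherwise a new
# "Group NNN" group is created, numbered from get_counter_value().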
def get_counter_value(Group):
"""Get the number of next potential group."""
pattern = r"^Group (\d+)$"
group = Group.objects.filter(name__iregex=pattern).order_by("name").last()
if not group:
return 1
return int(re.match(pattern, group.name).group(1)) + 1
def create_permissions_mapping(User):
"""Create mapping permissions to users and potential new group name."""
mapping = defaultdict(set)
users = User.objects.filter(user_permissions__isnull=False).distinct().iterator()
for user in users:
permissions = user.user_permissions.all().order_by("pk")
perm_pks = tuple([perm.pk for perm in permissions])
mapping[perm_pks].add(user.pk)
user.user_permissions.clear()
return mapping
def get_group_with_given_permissions(permissions, groups):
"""Get group with given set of permissions."""
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, counter, Group):
"""Create new group with given set of permissions."""
group_name = f"Group {counter:03d}"
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0040_auto_20200415_0443"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
| 957 |
313 | <reponame>gridgentoo/titus-control-plane
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.api.jobmanager.model.job.disruptionbudget;
import java.time.DayOfWeek;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.DateTimeExt;
/**
* Collection of helper functions for {@link TimeWindow}.
*/
public final class TimeWindowFunctions {
public static boolean isEmpty(TimeWindow timeWindow) {
return timeWindow.getDays().isEmpty() && timeWindow.getHourlyTimeWindows().isEmpty();
}
/**
* Returns a predicate which, when evaluated, checks the current time against the defined time window.
* If the time window is empty (does not define any days or hours), it matches any time.
*
* @return a supplier that yields true if the current time is within the time window, false otherwise
*/
public static Supplier<Boolean> isInTimeWindowPredicate(TitusRuntime titusRuntime, TimeWindow timeWindow) {
if (isEmpty(timeWindow)) {
return () -> true;
}
List<Function<DayOfWeek, Boolean>> dayPredicates = new ArrayList<>();
timeWindow.getDays().forEach(day -> dayPredicates.add(buildDayPredicate(day)));
List<Function<Integer, Boolean>> hourPredicates = new ArrayList<>();
timeWindow.getHourlyTimeWindows().forEach(h -> hourPredicates.add(buildHourlyTimeWindows(h)));
Function<DayOfWeek, Boolean> combinedDayPredicate = dayPredicates.isEmpty() ? day -> true : oneOf(dayPredicates);
Function<Integer, Boolean> combinedHourPredicate = hourPredicates.isEmpty() ? hour -> true : oneOf(hourPredicates);
ZoneId zoneId;
try {
zoneId = DateTimeExt.toZoneId(timeWindow.getTimeZone());
} catch (Exception e) {
titusRuntime.getCodeInvariants().unexpectedError("Unrecognized time zone (data not properly validated)", e);
return () -> false;
}
return () -> {
ZonedDateTime dateTime = Instant.ofEpochMilli(titusRuntime.getClock().wallTime()).atZone(zoneId);
return combinedDayPredicate.apply(dateTime.getDayOfWeek()) && combinedHourPredicate.apply(dateTime.getHour());
};
}
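/*
* Illustrative usage sketch (not part of the original source; titusRuntime and
* timeWindow are assumed to be supplied by the caller): the returned supplier
* re-reads the runtime clock on every call, so it can be created once per
* disruption budget and evaluated lazily, e.g.:
*
*   Supplier<Boolean> inWindow =
*       TimeWindowFunctions.isInTimeWindowPredicate(titusRuntime, timeWindow);
*   if (inWindow.get()) {
*       // a disruptive action is currently allowed by this time window
*   }
*/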
/**
* Returns a predicate that evaluates to true only when {@link #isInTimeWindowPredicate(TitusRuntime, TimeWindow)} evaluates
* to true for at least one of the provided time windows.
*/
public static Supplier<Boolean> isInTimeWindowPredicate(TitusRuntime titusRuntime, Collection<TimeWindow> timeWindows) {
if (CollectionsExt.isNullOrEmpty(timeWindows)) {
return () -> true;
}
List<Supplier<Boolean>> predicates = timeWindows.stream()
.map(t -> isInTimeWindowPredicate(titusRuntime, t))
.collect(Collectors.toList());
return () -> {
for (Supplier<Boolean> predicate : predicates) {
if (predicate.get()) {
return true;
}
}
return false;
};
}
private static <T> Function<T, Boolean> oneOf(List<Function<T, Boolean>> basicPredicates) {
return argument -> {
if (basicPredicates.isEmpty()) {
return true;
}
for (Function<T, Boolean> predicate : basicPredicates) {
if (predicate.apply(argument)) {
return true;
}
}
return false;
};
}
private static Function<DayOfWeek, Boolean> buildDayPredicate(Day expectedDay) {
DayOfWeek expectedDayOfWeek = expectedDay.toDayOfWeek();
return currentDayOfWeek -> currentDayOfWeek == expectedDayOfWeek;
}
private static Function<Integer, Boolean> buildHourlyTimeWindows(HourlyTimeWindow timeWindow) {
if (timeWindow.getEndHour() < timeWindow.getStartHour()) {
return hour -> true;
}
return currentHour -> timeWindow.getStartHour() <= currentHour && currentHour <= timeWindow.getEndHour();
}
}
| 1,829 |
335 | {
"word": "Misimpression",
"definitions": [
"A wrong or faulty impression."
],
"parts-of-speech": "Noun"
} | 59 |
12,718 | /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Upcall description for nfsdcld communication
*
* Copyright (c) 2012 Red Hat, Inc.
* Author(s): <NAME> <<EMAIL>>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _NFSD_CLD_H
#define _NFSD_CLD_H
#include <linux/types.h>
/* latest upcall version available */
#define CLD_UPCALL_VERSION 2
/* defined by RFC3530 */
#define NFS4_OPAQUE_LIMIT 1024
#ifndef SHA256_DIGEST_SIZE
#define SHA256_DIGEST_SIZE 32
#endif
enum cld_command {
Cld_Create, /* create a record for this cm_id */
Cld_Remove, /* remove record of this cm_id */
Cld_Check, /* is this cm_id allowed? */
Cld_GraceDone, /* grace period is complete */
Cld_GraceStart, /* grace start (upload client records) */
Cld_GetVersion, /* query max supported upcall version */
};
/* representation of long-form NFSv4 client ID */
struct cld_name {
__u16 cn_len; /* length of cm_id */
unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */
} __attribute__((packed));
/* sha256 hash of the kerberos principal */
struct cld_princhash {
__u8 cp_len; /* length of cp_data */
unsigned char cp_data[SHA256_DIGEST_SIZE]; /* hash of principal */
} __attribute__((packed));
struct cld_clntinfo {
struct cld_name cc_name;
struct cld_princhash cc_princhash;
} __attribute__((packed));
/* message struct for communication with userspace */
struct cld_msg {
__u8 cm_vers; /* upcall version */
__u8 cm_cmd; /* upcall command */
__s16 cm_status; /* return code */
__u32 cm_xid; /* transaction id */
union {
__s64 cm_gracetime; /* grace period start time */
struct cld_name cm_name;
__u8 cm_version; /* for getting max version */
} __attribute__((packed)) cm_u;
} __attribute__((packed));
/* version 2 message can include hash of kerberos principal */
struct cld_msg_v2 {
__u8 cm_vers; /* upcall version */
__u8 cm_cmd; /* upcall command */
__s16 cm_status; /* return code */
__u32 cm_xid; /* transaction id */
union {
struct cld_name cm_name;
__u8 cm_version; /* for getting max version */
struct cld_clntinfo cm_clntinfo; /* name & princ hash */
} __attribute__((packed)) cm_u;
} __attribute__((packed));
struct cld_msg_hdr {
__u8 cm_vers; /* upcall version */
__u8 cm_cmd; /* upcall command */
__s16 cm_status; /* return code */
__u32 cm_xid; /* transaction id */
} __attribute__((packed));
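/*
* Illustrative sketch (not part of the original header; pipe handling and the
* client_is_known() helper are assumed): a userspace nfsdcld daemon reads an
* upcall message from the rpc_pipefs pipe, dispatches on cm_cmd and writes the
* same message back with cm_status filled in, e.g.:
*
*	struct cld_msg msg;
*	if (read(pipefd, &msg, sizeof(msg)) > 0) {
*		switch (msg.cm_cmd) {
*		case Cld_Check:
*			msg.cm_status = client_is_known(&msg.cm_u.cm_name) ? 0 : -EACCES;
*			break;
*		default:
*			msg.cm_status = -EOPNOTSUPP;
*		}
*		write(pipefd, &msg, sizeof(msg));
*	}
*
* Version 2 messages use struct cld_msg_v2 after Cld_GetVersion negotiation.
*/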
#endif /* !_NFSD_CLD_H */ | 1,139 |
4,538 | <gh_stars>1000+
/*
* Copyright (C) 2015-2018 Alibaba Group Holding Limited
*/
#ifndef __WIFI_PROVISION_INTERNAL_H__
#define __WIFI_PROVISION_INTERNAL_H__
#include "linkkit/infra/infra_config.h"
#include <string.h>
#include <stdio.h>
#include "aws_lib.h"
#include "zconfig_lib.h"
#include "zconfig_utils.h"
#include "zconfig_protocol.h"
#include "zconfig_ieee80211.h"
#include "awss_event.h"
#include "awss_timer.h"
#include "awss_main.h"
#include "os.h"
#include "linkkit/infra/infra_compat.h"
#include "awss_smartconfig.h"
#include "linkkit/infra/infra_sha1.h"
#include "passwd.h"
#include "awss_utils.h"
#include "awss_statis.h"
#include "awss_packet.h"
#include "awss_notify.h"
#include "awss_cmp.h"
#include "linkkit/wifi_provision_api.h"
#include "awss_cmp.h"
#include "awss_crypt.h"
#include <stdlib.h>
#include "linkkit/infra/infra_json_parser.h"
#include "linkkit/mqtt_api.h"
#include "awss_dev_reset_internal.h"
#include "awss_info.h"
#include "awss_bind_statis.h"
#include "awss_aplist.h"
#include "connect_ap.h"
#include "linkkit/infra/infra_aes.h"
#include "linkkit/wrappers/wrappers.h"
#ifdef AWSS_SUPPORT_SMARTCONFIG_WPS
#include "awss_wps.h"
#endif
#ifdef AWSS_SUPPORT_HT40
#include "awss_ht40.h"
#endif
#if defined(AWSS_SUPPORT_AHA)
#include "awss_wifimgr.h"
#endif
#ifndef AWSS_DISABLE_ENROLLEE
#include "awss_enrollee.h"
#endif
#if defined(AWSS_SUPPORT_AHA)
#include "awss_aha.h"
#endif
#if defined(WIFI_PROVISION_ENABLED) || defined(DEV_BIND_ENABLED)
#include "linkkit/coap_api.h"
#include "iotx_coap.h"
#endif
#ifdef AWSS_SUPPORT_DISCOVER
#include "awss_discover.h"
#endif
#endif
| 736 |
11,356 | <filename>deps/aws-sdk-cpp-1.3.50/aws-cpp-sdk-core/source/utils/crypto/EncryptionMaterials.cpp
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/core/utils/crypto/EncryptionMaterials.h>
namespace Aws
{
namespace Utils
{
namespace Crypto
{
//this is here to force the linker to behave correctly since this is an interface that will need to cross the dll
//boundary.
EncryptionMaterials::~EncryptionMaterials()
{}
}
}
} | 353 |
1,100 | <reponame>WelsonAA/Intro-to-Java-Programming
/*********************************************************************************
* (Combine two lists) Write a method that returns the union of two array lists *
* of integers using the following header: *
* *
* public static ArrayList<Integer> union( *
* ArrayList<Integer> list1, ArrayList<Integer> list2) *
* *
* For example, the union of two array lists {2, 3, 1, 5} and {3, 4, 6} is *
* {2, 3, 1, 5, 3, 4, 6}. Write a test program that prompts the user to enter two *
* lists, each with five integers, and displays their union. The numbers are *
* separated by exactly one space in the output. *
*********************************************************************************/
import java.util.Scanner;
import java.util.ArrayList;
public class Exercise_11_14 {
/** Main method */
public static void main(String[] args) {
// Create two ArrayLists
ArrayList<Integer> list1 = new ArrayList<Integer>();
ArrayList<Integer> list2 = new ArrayList<Integer>();
// Prompt the user to enter two lists
// each with five integers
System.out.print("Enter five integers for list1: ");
fill(list1);
System.out.print("Enter five integers for list2: ");
fill(list2);
// Combined lists
ArrayList<Integer> list3 = union(list1, list2);
// Display combined list
System.out.print("The combined list is: ");
for (int i = 0; i < list3.size(); i++) {
System.out.print(list3.get(i) + " ");
}
System.out.println();
}
/** Returns the union of two array lists of integers */
public static ArrayList<Integer> union(
ArrayList<Integer> list1, ArrayList<Integer> list2) {
ArrayList<Integer> list3 = new ArrayList<Integer>(list1);
for (int i = 0; i < list2.size(); i++) {
list3.add(list2.get(i));
}
return list3;
}
/** Adds user input to a list */
public static void fill(ArrayList<Integer> list) {
// Create a Scanner
Scanner input = new Scanner(System.in);
for (int i = 0; i < 5; i++) {
list.add(input.nextInt());
}
}
} | 958 |
3,600 | package com.github.dreamhead.moco.verification;
import static java.lang.String.format;
public final class BetweenVerification extends AbstractTimesVerification {
private final int min;
private final int max;
public BetweenVerification(final int min, final int max) {
this.min = min;
this.max = max;
}
@Override
protected boolean meet(final int size) {
return size >= min && size <= max;
}
@Override
protected String expectedTip() {
return format("{%d, %d}", min, max);
}
}
| 197 |
1,144 | package de.metas.ui.web.pickingV2.productsToPick.rows.factory;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.compiere.model.I_C_UOM;
import com.google.common.collect.Maps;
import de.metas.handlingunits.HuId;
import de.metas.handlingunits.IHandlingUnitsBL;
import de.metas.handlingunits.model.I_M_HU;
import de.metas.handlingunits.storage.IHUProductStorage;
import de.metas.product.IProductBL;
import de.metas.product.ProductId;
import de.metas.quantity.Quantity;
import de.metas.util.Services;
import de.metas.util.collections.CollectionUtils;
import lombok.NonNull;
import lombok.Value;
/*
* #%L
* metasfresh-webui-api
* %%
* Copyright (C) 2019 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
final class ProductsToPickSourceStorage
{
private final IHandlingUnitsBL handlingUnitsBL = Services.get(IHandlingUnitsBL.class);
private final IProductBL productBL = Services.get(IProductBL.class);
private final Map<HuId, I_M_HU> husCache = new HashMap<>();
private final Map<ReservableStorageKey, ReservableStorage> storages = new HashMap<>();
public I_M_HU getHU(final HuId huId)
{
return husCache.computeIfAbsent(huId, handlingUnitsBL::getById);
}
public void warmUpCacheForHuIds(final Collection<HuId> huIds)
{
CollectionUtils.getAllOrLoad(husCache, huIds, this::retrieveHUs);
}
private Map<HuId, I_M_HU> retrieveHUs(final Collection<HuId> huIds)
{
return Maps.uniqueIndex(handlingUnitsBL.getByIds(huIds), hu -> HuId.ofRepoId(hu.getM_HU_ID()));
}
public ReservableStorage getStorage(final HuId huId, final ProductId productId)
{
final ReservableStorageKey key = ReservableStorageKey.of(huId, productId);
return storages.computeIfAbsent(key, this::retrieveStorage);
}
private ReservableStorage retrieveStorage(final ReservableStorageKey key)
{
final ProductId productId = key.getProductId();
final I_M_HU hu = getHU(key.getHuId());
final IHUProductStorage huProductStorage = handlingUnitsBL
.getStorageFactory()
.getStorage(hu)
.getProductStorageOrNull(productId);
if (huProductStorage == null)
{
final I_C_UOM uom = productBL.getStockUOM(productId);
return new ReservableStorage(productId, Quantity.zero(uom));
}
else
{
final Quantity qtyFreeToReserve = huProductStorage.getQty();
return new ReservableStorage(productId, qtyFreeToReserve);
}
}
@Value(staticConstructor = "of")
private static class ReservableStorageKey
{
@NonNull
HuId huId;
@NonNull
ProductId productId;
}
}
| 1,112 |
666 | <reponame>Sairam954/bdl-benchmarks<gh_stars>100-1000
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Uncertainty estimator for the deterministic deep model baseline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def predict(x, model, num_samples, type="entropy"):
"""Simple sigmoid uncertainty estimator.
Args:
x: `numpy.ndarray`, datapoints from input space,
      with shape [B, H, W, 3], where B is the batch size and
      H, W are the input image height and width, respectively.
model: `tensorflow.keras.Model`, a probabilistic model,
which accepts input with shape [B, H, W, 3] and
outputs sigmoid probability [0.0, 1.0], and also
accepts boolean arguments `training=False` for
disabling dropout at test time.
    num_samples: `int`, number of samples; unused by this
      deterministic estimator.
    type: (optional) `str`, type of uncertainty to return,
      one of {"entropy", "stddev"}.
  Returns:
    mean: `numpy.ndarray`, predictive mean, with shape [B].
    uncertainty: `numpy.ndarray`, uncertainty in prediction,
      with shape [B].
"""
import numpy as np
import scipy.stats
# Get shapes of data
B, _, _, _ = x.shape
# Single forward pass from the deterministic model
p = model(x, training=False)
# Bernoulli output distribution
dist = scipy.stats.bernoulli(p)
# Predictive mean calculation
mean = dist.mean()
# Use predictive entropy for uncertainty
if type == "entropy":
uncertainty = dist.entropy()
# Use predictive standard deviation for uncertainty
elif type == "stddev":
uncertainty = dist.std()
else:
raise ValueError(
"Unrecognized type={} provided, use one of {'entropy', 'stddev'}".
format(type))
return mean, uncertainty
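# Illustrative usage (a sketch added for clarity, not part of the original
# module): `model` is assumed to be a Keras binary classifier with a sigmoid
# output and `x_batch` a float array of shape [B, H, W, 3]; both names are
# placeholders, not symbols defined in this file.
#
#   mean, uncertainty = predict(x_batch, model, num_samples=1, type="entropy")
#   # mean[i] is the predicted probability for example i; uncertainty[i] is
#   # the entropy of the corresponding Bernoulli distribution.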
| 727 |
345 | """
Copyright (c) 2016-2020 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from sqlalchemy import and_
from programy.storage.stores.sql.store.sqlstore import SQLStore
from programy.storage.entities.link import LinkStore
from programy.storage.stores.sql.dao.link import Link
from programy.utils.logging.ylogger import YLogger
class SQLLinkStore(SQLStore, LinkStore):
def __init__(self, storage_engine):
SQLStore.__init__(self, storage_engine)
LinkStore.__init__(self)
def _get_all(self):
return self._storage_engine.session.query(Link)
def empty(self):
self._get_all().delete()
def create_link(self, primary_userid, provided_key, generated_key, expires, expired=False, retry_count=0):
link = Link(primary_user=primary_userid, generated_key=generated_key, provided_key=provided_key,
expires=expires, expired=expired, retry_count=retry_count)
self._storage_engine.session.add(link)
return link
def get_link(self, userid):
try:
link = self._storage_engine.session.query(Link).filter(Link.primary_user == userid).one()
return link
except Exception as e:
YLogger.exception_nostack(self, "Failed to get link", e)
return None
def _delete_link(self, userid):
result = self._storage_engine.session.query(Link).filter(Link.primary_user == userid).delete()
        return bool(result == 1)
def remove_link(self, userid):
try:
return self._delete_link(userid)
except Exception as excep:
YLogger.exception_nostack(self, "Failed to remove link", excep)
return False
def link_exists(self, userid, provided_key, generated_key):
try:
self._storage_engine.session.query(Link).filter(Link.primary_user == userid,
Link.provided_key == provided_key,
Link.generated_key == generated_key).one()
return True
except Exception as excep:
YLogger.exception_nostack(self, "Failed to check link exists", excep)
return False
def _get_link(self, id):
return self._storage_engine.session.query(Link).filter(and_(Link.id == id)).one()
def update_link(self, link):
existing = self._get_link(link.id)
if existing is not None:
existing.primary_user = link.primary_user
existing.generated_key = link.generated_key
existing.provided_key = link.provided_key
existing.expired = link.expired
existing.expires = link.expires
existing.retry_count = link.retry_count
self._storage_engine.session.commit()
return True
return False
| 1,487 |
6,989 | // Copyright (c) Facebook Inc. and Microsoft Corporation.
// Licensed under the MIT license.
#pragma once
#include "onnx/onnx_pb.h"
#ifdef ONNX_ML
#include <contrib/libs/onnx/proto/onnx_operators_ml.pb.h>
#else
//#include <contrib/libs/onnx/proto/onnx_operators.pb.h>
#error "Arcadia supports only ONNX_ML-enabled build"
#endif
| 136 |
957 | <reponame>emadurandal/Effekseer<gh_stars>100-1000
static const char metal_model_distortion_vs[] = R"(mtlcode
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#include <metal_stdlib>
#include <simd/simd.h>
using namespace metal;
struct VS_Input
{
float3 Pos;
float3 Normal;
float3 Binormal;
float3 Tangent;
float2 UV;
float4 Color;
uint Index;
};
struct VS_Output
{
float4 PosVS;
float2 UV;
float4 ProjBinormal;
float4 ProjTangent;
float4 PosP;
float4 Color;
};
struct VS_ConstantBuffer
{
float4x4 mCameraProj;
float4x4 mModel_Inst[40];
float4 fUV[40];
float4 fModelColor[40];
float4 fLightDirection;
float4 fLightColor;
float4 fLightAmbient;
float4 mUVInversed;
};
struct main0_out
{
float2 _entryPointOutput_UV [[user(locn0)]];
float4 _entryPointOutput_ProjBinormal [[user(locn1)]];
float4 _entryPointOutput_ProjTangent [[user(locn2)]];
float4 _entryPointOutput_PosP [[user(locn3)]];
float4 _entryPointOutput_Color [[user(locn4)]];
float4 gl_Position [[position]];
};
struct main0_in
{
float3 Input_Pos [[attribute(0)]];
float3 Input_Normal [[attribute(1)]];
float3 Input_Binormal [[attribute(2)]];
float3 Input_Tangent [[attribute(3)]];
float2 Input_UV [[attribute(4)]];
float4 Input_Color [[attribute(5)]];
};
static inline __attribute__((always_inline))
VS_Output _main(VS_Input Input, constant VS_ConstantBuffer& v_31)
{
uint index = Input.Index;
float4x4 mModel = transpose(v_31.mModel_Inst[index]);
float4 uv = v_31.fUV[index];
float4 modelColor = v_31.fModelColor[index] * Input.Color;
VS_Output Output = VS_Output{ float4(0.0), float2(0.0), float4(0.0), float4(0.0), float4(0.0), float4(0.0) };
float4 localPos = float4(Input.Pos.x, Input.Pos.y, Input.Pos.z, 1.0);
float4 worldPos = localPos * mModel;
Output.PosVS = v_31.mCameraProj * worldPos;
Output.Color = modelColor;
float2 outputUV = Input.UV;
outputUV.x = (outputUV.x * uv.z) + uv.x;
outputUV.y = (outputUV.y * uv.w) + uv.y;
outputUV.y = v_31.mUVInversed.x + (v_31.mUVInversed.y * outputUV.y);
Output.UV = outputUV;
float4 localNormal = float4(Input.Normal.x, Input.Normal.y, Input.Normal.z, 0.0);
float4 localBinormal = float4(Input.Binormal.x, Input.Binormal.y, Input.Binormal.z, 0.0);
float4 localTangent = float4(Input.Tangent.x, Input.Tangent.y, Input.Tangent.z, 0.0);
float4 worldNormal = localNormal * mModel;
float4 worldBinormal = localBinormal * mModel;
float4 worldTangent = localTangent * mModel;
worldNormal = normalize(worldNormal);
worldBinormal = normalize(worldBinormal);
worldTangent = normalize(worldTangent);
Output.ProjBinormal = v_31.mCameraProj * (worldPos + worldBinormal);
Output.ProjTangent = v_31.mCameraProj * (worldPos + worldTangent);
Output.PosP = Output.PosVS;
return Output;
}
vertex main0_out main0(main0_in in [[stage_in]], constant VS_ConstantBuffer& v_31 [[buffer(0)]], uint gl_InstanceIndex [[instance_id]])
{
main0_out out = {};
VS_Input Input;
Input.Pos = in.Input_Pos;
Input.Normal = in.Input_Normal;
Input.Binormal = in.Input_Binormal;
Input.Tangent = in.Input_Tangent;
Input.UV = in.Input_UV;
Input.Color = in.Input_Color;
Input.Index = gl_InstanceIndex;
VS_Output flattenTemp = _main(Input, v_31);
out.gl_Position = flattenTemp.PosVS;
out._entryPointOutput_UV = flattenTemp.UV;
out._entryPointOutput_ProjBinormal = flattenTemp.ProjBinormal;
out._entryPointOutput_ProjTangent = flattenTemp.ProjTangent;
out._entryPointOutput_PosP = flattenTemp.PosP;
out._entryPointOutput_Color = flattenTemp.Color;
return out;
}
)";
| 1,553 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_svx.hxx"
#include <com/sun/star/table/XMergeableCell.hpp>
#include <algorithm>
#include <boost/bind.hpp>
#include <vcl/svapp.hxx>
#include <vos/mutex.hxx>
#include "cell.hxx"
#include "cellcursor.hxx"
#include "tablemodel.hxx"
#include "tablerow.hxx"
#include "tablerows.hxx"
#include "tablecolumn.hxx"
#include "tablecolumns.hxx"
#include "tableundo.hxx"
#include "svx/svdotable.hxx"
#include "svx/svdmodel.hxx"
#include "svx/svdstr.hrc"
#include "svx/svdglob.hxx"
//#define PLEASE_DEBUG_THE_TABLES 1
using ::rtl::OUString;
using namespace ::osl;
using namespace ::vos;
using namespace ::com::sun::star::uno;
using namespace ::com::sun::star::table;
using namespace ::com::sun::star::lang;
using namespace ::com::sun::star::container;
using namespace ::com::sun::star::beans;
using namespace ::com::sun::star::util;
// -----------------------------------------------------------------------------
namespace sdr { namespace table {
// -----------------------------------------------------------------------------
// removes the given range from a vector
template< class Vec, class Iter > void remove_range( Vec& rVector, sal_Int32 nIndex, sal_Int32 nCount )
{
const sal_Int32 nSize = static_cast<sal_Int32>(rVector.size());
if( nCount && (nIndex >= 0) && (nIndex < nSize) )
{
if( (nIndex + nCount) >= nSize )
{
// remove at end
rVector.resize( nIndex );
}
else
{
Iter aBegin( rVector.begin() );
while( nIndex-- )
aBegin++;
if( nCount == 1 )
{
rVector.erase( aBegin );
}
else
{
Iter aEnd( aBegin );
while( nCount-- )
aEnd++;
rVector.erase( aBegin, aEnd );
}
}
}
}
// -----------------------------------------------------------------------------
/** inserts a range into a vector */
template< class Vec, class Iter, class Entry > sal_Int32 insert_range( Vec& rVector, sal_Int32 nIndex, sal_Int32 nCount )
{
if( nCount )
{
if( nIndex >= static_cast< sal_Int32 >( rVector.size() ) )
{
// append at end
nIndex = static_cast< sal_Int32 >( rVector.size() ); // cap to end
rVector.resize( nIndex + nCount );
}
else
{
// insert
sal_Int32 nFind = nIndex;
Iter aIter( rVector.begin() );
while( nFind-- )
aIter++;
Entry aEmpty;
rVector.insert( aIter, nCount, aEmpty );
}
}
return nIndex;
}
// -----------------------------------------------------------------------------
TableModel::TableModel( SdrTableObj* pTableObj )
: TableModelBase( m_aMutex )
, mpTableObj( pTableObj )
, mbModified( sal_False )
, mbNotifyPending( false )
, mnNotifyLock( 0 )
{
}
TableModel::TableModel( SdrTableObj* pTableObj, const TableModelRef& xSourceTable )
: TableModelBase( m_aMutex )
, mpTableObj( pTableObj )
, mbModified( sal_False )
, mbNotifyPending( false )
, mnNotifyLock( 0 )
{
if( xSourceTable.is() )
{
const sal_Int32 nColCount = xSourceTable->getColumnCountImpl();
const sal_Int32 nRowCount = xSourceTable->getRowCountImpl();
init( nColCount, nRowCount );
sal_Int32 nRows = nRowCount;
while( nRows-- )
(*maRows[nRows]) = (*xSourceTable->maRows[nRows]);
sal_Int32 nColumns = nColCount;
while( nColumns-- )
(*maColumns[nColumns]) = (*xSourceTable->maColumns[nColumns]);
// copy cells
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
for( sal_Int32 nRow = 0; nRow < nRowCount; ++nRow )
{
CellRef xTargetCell( getCell( nCol, nRow ) );
if( xTargetCell.is() )
xTargetCell->cloneFrom( xSourceTable->getCell( nCol, nRow ) );
}
}
}
}
// -----------------------------------------------------------------------------
TableModel::~TableModel()
{
}
// -----------------------------------------------------------------------------
void TableModel::init( sal_Int32 nColumns, sal_Int32 nRows )
{
if( nRows < 20 )
maRows.reserve( 20 );
if( nColumns < 20 )
maColumns.reserve( 20 );
if( nRows && nColumns )
{
maColumns.resize( nColumns );
maRows.resize( nRows );
while( nRows-- )
maRows[nRows].set( new TableRow( this, nRows, nColumns ) );
while( nColumns-- )
maColumns[nColumns].set( new TableColumn( this, nColumns ) );
}
}
// -----------------------------------------------------------------------------
// ICellRange
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getLeft()
{
return 0;
}
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getTop()
{
return 0;
}
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getRight()
{
return getColumnCount();
}
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getBottom()
{
return getRowCount();
}
// -----------------------------------------------------------------------------
Reference< XTable > TableModel::getTable()
{
return this;
}
// -----------------------------------------------------------------------------
void TableModel::UndoInsertRows( sal_Int32 nIndex, sal_Int32 nCount )
{
TableModelNotifyGuard aGuard( this );
// remove the rows
remove_range<RowVector,RowVector::iterator>( maRows, nIndex, nCount );
updateRows();
setModified(sal_True);
}
// -----------------------------------------------------------------------------
void TableModel::UndoRemoveRows( sal_Int32 nIndex, RowVector& aRows )
{
TableModelNotifyGuard aGuard( this );
const sal_Int32 nCount = sal::static_int_cast< sal_Int32 >( aRows.size() );
nIndex = insert_range<RowVector,RowVector::iterator,TableRowRef>( maRows, nIndex, nCount );
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
maRows[nIndex+nOffset] = aRows[nOffset];
updateRows();
setModified(sal_True);
}
// -----------------------------------------------------------------------------
void TableModel::UndoInsertColumns( sal_Int32 nIndex, sal_Int32 nCount )
{
TableModelNotifyGuard aGuard( this );
// now remove the columns
remove_range<ColumnVector,ColumnVector::iterator>( maColumns, nIndex, nCount );
sal_Int32 nRows = getRowCountImpl();
while( nRows-- )
maRows[nRows]->removeColumns( nIndex, nCount );
updateColumns();
setModified(sal_True);
}
// -----------------------------------------------------------------------------
void TableModel::UndoRemoveColumns( sal_Int32 nIndex, ColumnVector& aCols, CellVector& aCells )
{
TableModelNotifyGuard aGuard( this );
const sal_Int32 nCount = sal::static_int_cast< sal_Int32 >( aCols.size() );
// assert if there are not enough cells saved
DBG_ASSERT( (aCols.size() * maRows.size()) == aCells.size(), "sdr::table::TableModel::UndoRemoveColumns(), invalid undo data!" );
nIndex = insert_range<ColumnVector,ColumnVector::iterator,TableColumnRef>( maColumns, nIndex, nCount );
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
maColumns[nIndex+nOffset] = aCols[nOffset];
CellVector::iterator aIter( aCells.begin() );
sal_Int32 nRows = getRowCountImpl();
for( sal_Int32 nRow = 0; nRow < nRows; ++nRow )
{
CellVector::iterator aIter2 = aIter + nRow * nCount;
DBG_ASSERT(aIter2 < aCells.end(), "sdr::table::TableModel::UndoRemoveColumns(), invalid iterator!");
maRows[nRow]->insertColumns( nIndex, nCount, &aIter2 );
}
updateColumns();
setModified(sal_True);
}
// -----------------------------------------------------------------------------
// XTable
// -----------------------------------------------------------------------------
Reference< XCellCursor > SAL_CALL TableModel::createCursor() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
return createCursorByRange( Reference< XCellRange >( this ) );
}
// -----------------------------------------------------------------------------
Reference< XCellCursor > SAL_CALL TableModel::createCursorByRange( const Reference< XCellRange >& Range ) throw (IllegalArgumentException, RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
ICellRange* pRange = dynamic_cast< ICellRange* >( Range.get() );
if( (pRange == 0) || (pRange->getTable().get() != this) )
throw IllegalArgumentException();
TableModelRef xModel( this );
return new CellCursor( xModel, pRange->getLeft(), pRange->getTop(), pRange->getRight(), pRange->getBottom() );
}
// -----------------------------------------------------------------------------
sal_Int32 SAL_CALL TableModel::getRowCount() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
return getRowCountImpl();
}
// -----------------------------------------------------------------------------
sal_Int32 SAL_CALL TableModel::getColumnCount() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
return getColumnCountImpl();
}
// -----------------------------------------------------------------------------
// XComponent
// -----------------------------------------------------------------------------
void TableModel::dispose() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
TableModelBase::dispose();
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::addEventListener( const Reference< XEventListener >& xListener ) throw (RuntimeException)
{
TableModelBase::addEventListener( xListener );
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::removeEventListener( const Reference< XEventListener >& xListener ) throw (RuntimeException)
{
TableModelBase::removeEventListener( xListener );
}
// -----------------------------------------------------------------------------
// XModifiable
// -----------------------------------------------------------------------------
sal_Bool SAL_CALL TableModel::isModified( ) throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
return mbModified;
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::setModified( sal_Bool bModified ) throw (PropertyVetoException, RuntimeException)
{
{
OGuard aGuard( Application::GetSolarMutex() );
mbModified = bModified;
}
if( bModified )
notifyModification();
}
// -----------------------------------------------------------------------------
// XModifyBroadcaster
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::addModifyListener( const Reference< XModifyListener >& xListener ) throw (RuntimeException)
{
rBHelper.addListener( XModifyListener::static_type() , xListener );
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::removeModifyListener( const Reference< XModifyListener >& xListener ) throw (RuntimeException)
{
rBHelper.removeListener( XModifyListener::static_type() , xListener );
}
// -----------------------------------------------------------------------------
// XColumnRowRange
// -----------------------------------------------------------------------------
Reference< XTableColumns > SAL_CALL TableModel::getColumns() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
if( !mxTableColumns.is() )
mxTableColumns.set( new TableColumns( this ) );
return mxTableColumns.get();
}
// -----------------------------------------------------------------------------
Reference< XTableRows > SAL_CALL TableModel::getRows() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
if( !mxTableRows.is() )
mxTableRows.set( new TableRows( this ) );
return mxTableRows.get();
}
// -----------------------------------------------------------------------------
// XCellRange
// -----------------------------------------------------------------------------
Reference< XCell > SAL_CALL TableModel::getCellByPosition( sal_Int32 nColumn, sal_Int32 nRow ) throw ( IndexOutOfBoundsException, RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
CellRef xCell( getCell( nColumn, nRow ) );
if( xCell.is() )
return xCell.get();
throw IndexOutOfBoundsException();
}
// -----------------------------------------------------------------------------
Reference< XCellRange > SAL_CALL TableModel::getCellRangeByPosition( sal_Int32 nLeft, sal_Int32 nTop, sal_Int32 nRight, sal_Int32 nBottom ) throw (IndexOutOfBoundsException, RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
if( (nLeft >= 0) && (nTop >= 0) && (nRight >= nLeft) && (nBottom >= nTop) && (nRight < getColumnCountImpl()) && (nBottom < getRowCountImpl() ) )
{
TableModelRef xModel( this );
return new CellRange( xModel, nLeft, nTop, nRight, nBottom );
}
throw IndexOutOfBoundsException();
}
// -----------------------------------------------------------------------------
Reference< XCellRange > SAL_CALL TableModel::getCellRangeByName( const OUString& /*aRange*/ ) throw (RuntimeException)
{
return Reference< XCellRange >();
}
// -----------------------------------------------------------------------------
// XPropertySet
// -----------------------------------------------------------------------------
Reference< XPropertySetInfo > SAL_CALL TableModel::getPropertySetInfo( ) throw (RuntimeException)
{
Reference< XPropertySetInfo > xInfo;
return xInfo;
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::setPropertyValue( const ::rtl::OUString& /*aPropertyName*/, const Any& /*aValue*/ ) throw (UnknownPropertyException, PropertyVetoException, IllegalArgumentException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
Any SAL_CALL TableModel::getPropertyValue( const OUString& /*PropertyName*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
return Any();
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::addPropertyChangeListener( const OUString& /*aPropertyName*/, const Reference< XPropertyChangeListener >& /*xListener*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::removePropertyChangeListener( const OUString& /*aPropertyName*/, const Reference< XPropertyChangeListener >& /*xListener*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::addVetoableChangeListener( const OUString& /*aPropertyName*/, const Reference< XVetoableChangeListener >& /*xListener*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::removeVetoableChangeListener( const OUString& /*aPropertyName*/, const Reference< XVetoableChangeListener >& /*xListener*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
// XFastPropertySet
// -----------------------------------------------------------------------------
void SAL_CALL TableModel::setFastPropertyValue( ::sal_Int32 /*nHandle*/, const Any& /*aValue*/ ) throw (UnknownPropertyException, PropertyVetoException, IllegalArgumentException, WrappedTargetException, RuntimeException)
{
}
// -----------------------------------------------------------------------------
Any SAL_CALL TableModel::getFastPropertyValue( ::sal_Int32 /*nHandle*/ ) throw (UnknownPropertyException, WrappedTargetException, RuntimeException)
{
Any aAny;
return aAny;
}
// -----------------------------------------------------------------------------
// internals
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getRowCountImpl() const
{
return static_cast< sal_Int32 >( maRows.size() );
}
// -----------------------------------------------------------------------------
sal_Int32 TableModel::getColumnCountImpl() const
{
return static_cast< sal_Int32 >( maColumns.size() );
}
// -----------------------------------------------------------------------------
void TableModel::disposing()
{
if( !maRows.empty() )
{
RowVector::iterator aIter( maRows.begin() );
while( aIter != maRows.end() )
(*aIter++)->dispose();
RowVector().swap(maRows);
}
if( !maColumns.empty() )
{
ColumnVector::iterator aIter( maColumns.begin() );
while( aIter != maColumns.end() )
(*aIter++)->dispose();
ColumnVector().swap(maColumns);
}
if( mxTableColumns.is() )
{
mxTableColumns->dispose();
mxTableColumns.clear();
}
if( mxTableRows.is() )
{
mxTableRows->dispose();
mxTableRows.clear();
}
mpTableObj = 0;
}
// -----------------------------------------------------------------------------
// XBroadcaster
// -----------------------------------------------------------------------------
void TableModel::lockBroadcasts() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
++mnNotifyLock;
}
// -----------------------------------------------------------------------------
void TableModel::unlockBroadcasts() throw (RuntimeException)
{
OGuard aGuard( Application::GetSolarMutex() );
--mnNotifyLock;
if( mnNotifyLock <= 0 )
{
mnNotifyLock = 0;
if( mbNotifyPending )
notifyModification();
}
}
// -----------------------------------------------------------------------------
#ifdef PLEASE_DEBUG_THE_TABLES
#include <stdio.h>
#endif
void TableModel::notifyModification()
{
::osl::MutexGuard guard( m_aMutex );
if( (mnNotifyLock == 0) && mpTableObj && mpTableObj->GetModel() )
{
mbNotifyPending = false;
::cppu::OInterfaceContainerHelper * pModifyListeners = rBHelper.getContainer( XModifyListener::static_type() );
if( pModifyListeners )
{
EventObject aSource;
aSource.Source = static_cast< ::cppu::OWeakObject* >(this);
pModifyListeners->notifyEach( &XModifyListener::modified, aSource);
}
}
else
{
mbNotifyPending = true;
}
#ifdef PLEASE_DEBUG_THE_TABLES
FILE* file = fopen( "c:\\table.xml","w" );
const sal_Int32 nColCount = getColumnCountImpl();
const sal_Int32 nRowCount = getRowCountImpl();
fprintf( file, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\r" );
fprintf( file, "<table columns=\"%ld\" rows=\"%ld\" updated=\"%s\">\n\r", nColCount, nRowCount, mbNotifyPending ? "false" : "true");
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
fprintf( file, "<column this=\"%lx\"/>\n\r", maColumns[nCol].get() );
}
    // dump each row and its cells
for( sal_Int32 nRow = 0; nRow < nRowCount; ++nRow )
{
fprintf( file, "<row this=\"%lx\">\n\r", maRows[nRow].get() );
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
CellRef xCell( getCell( nCol, nRow ) );
fprintf( file, "<cell this=\"%lx\"", xCell.get() );
sal_Int32 nRowSpan = xCell->getRowSpan();
sal_Int32 nColSpan = xCell->getColumnSpan();
sal_Bool bMerged = xCell->isMerged();
if( nColSpan != 1 )
fprintf( file, " column-span=\"%ld\"", nColSpan );
if( nRowSpan != 1 )
fprintf( file, " row-span=\"%ld\"", nRowSpan );
if( bMerged )
fprintf( file, " merged=\"true\"" );
fprintf( file, "/>" );
}
fprintf( file, "\n\r</row>\n\r" );
}
fprintf( file, "</table>\n\r" );
fclose( file );
#endif
}
// -----------------------------------------------------------------------------
CellRef TableModel::getCell( sal_Int32 nCol, sal_Int32 nRow ) const
{
if( ((nRow >= 0) && (nRow < getRowCountImpl())) && (nCol >= 0) && (nCol < getColumnCountImpl()) )
{
return maRows[nRow]->maCells[nCol];
}
else
{
CellRef xRet;
return xRet;
}
}
// -----------------------------------------------------------------------------
/*
bool TableModel::getCellPos( const CellRef& xCell, ::sal_Int32& rnCol, ::sal_Int32& rnRow ) const
{
const sal_Int32 nRowCount = getRowCount();
const sal_Int32 nColCount = getColumnCount();
for( rnRow = 0; rnRow < nRowCount; rnRow++ )
{
for( rnCol = 0; rnCol < nColCount; rnCol++ )
{
if( maRows[rnRow]->maCells[rnCol] == xCell )
{
return true;
}
}
}
return false;
}
*/
// -----------------------------------------------------------------------------
CellRef TableModel::createCell()
{
CellRef xCell;
if( mpTableObj )
mpTableObj->createCell( xCell );
return xCell;
}
// -----------------------------------------------------------------------------
void TableModel::insertColumns( sal_Int32 nIndex, sal_Int32 nCount )
{
if( nCount && mpTableObj )
{
try
{
SdrModel* pModel = mpTableObj->GetModel();
TableModelNotifyGuard aGuard( this );
nIndex = insert_range<ColumnVector,ColumnVector::iterator,TableColumnRef>( maColumns, nIndex, nCount );
sal_Int32 nRows = getRowCountImpl();
while( nRows-- )
maRows[nRows]->insertColumns( nIndex, nCount );
ColumnVector aNewColumns(nCount);
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
{
TableColumnRef xNewCol( new TableColumn( this, nIndex+nOffset ) );
maColumns[nIndex+nOffset] = xNewCol;
aNewColumns[nOffset] = xNewCol;
}
const bool bUndo = pModel && mpTableObj->IsInserted() && pModel->IsUndoEnabled();
if( bUndo )
{
pModel->BegUndo( ImpGetResStr(STR_TABLE_INSCOL) );
pModel->AddUndo( pModel->GetSdrUndoFactory().CreateUndoGeoObject(*mpTableObj) );
TableModelRef xThis( this );
nRows = getRowCountImpl();
CellVector aNewCells( nCount * nRows );
CellVector::iterator aCellIter( aNewCells.begin() );
nRows = getRowCountImpl();
for( sal_Int32 nRow = 0; nRow < nRows; ++nRow )
{
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
(*aCellIter++) = getCell( nIndex + nOffset, nRow );
}
pModel->AddUndo( new InsertColUndo( xThis, nIndex, aNewColumns, aNewCells ) );
}
const sal_Int32 nRowCount = getRowCountImpl();
// check if cells merge over new columns
for( sal_Int32 nCol = 0; nCol < nIndex; ++nCol )
{
for( sal_Int32 nRow = 0; nRow < nRowCount; ++nRow )
{
CellRef xCell( getCell( nCol, nRow ) );
sal_Int32 nColSpan = (xCell.is() && !xCell->isMerged()) ? xCell->getColumnSpan() : 1;
if( (nColSpan != 1) && ((nColSpan + nCol ) > nIndex) )
{
// cell merges over newly created columns, so add the new columns to the merged cell
const sal_Int32 nRowSpan = xCell->getRowSpan();
nColSpan += nCount;
merge( nCol, nRow, nColSpan, nRowSpan );
}
}
}
if( bUndo )
pModel->EndUndo();
if( pModel )
pModel->SetChanged();
}
catch( Exception& )
{
DBG_ERROR("sdr::table::TableModel::insertColumns(), exception caught!");
}
updateColumns();
setModified(sal_True);
}
}
// -----------------------------------------------------------------------------
void TableModel::removeColumns( sal_Int32 nIndex, sal_Int32 nCount )
{
sal_Int32 nColCount = getColumnCountImpl();
if( mpTableObj && nCount && (nIndex >= 0) && (nIndex < nColCount) )
{
try
{
TableModelNotifyGuard aGuard( this );
            // clip removed columns to columns actually available
if( (nIndex + nCount) > nColCount )
nCount = nColCount - nIndex;
sal_Int32 nRows = getRowCountImpl();
SdrModel* pModel = mpTableObj->GetModel();
const bool bUndo = pModel && mpTableObj->IsInserted() && pModel->IsUndoEnabled();
if( bUndo )
{
pModel->BegUndo( ImpGetResStr(STR_UNDO_COL_DELETE) );
pModel->AddUndo( pModel->GetSdrUndoFactory().CreateUndoGeoObject(*mpTableObj) );
TableModelRef xThis( this );
ColumnVector aRemovedCols( nCount );
sal_Int32 nOffset;
for( nOffset = 0; nOffset < nCount; ++nOffset )
{
aRemovedCols[nOffset] = maColumns[nIndex+nOffset];
}
CellVector aRemovedCells( nCount * nRows );
CellVector::iterator aCellIter( aRemovedCells.begin() );
for( sal_Int32 nRow = 0; nRow < nRows; ++nRow )
{
for( nOffset = 0; nOffset < nCount; ++nOffset )
(*aCellIter++) = getCell( nIndex + nOffset, nRow );
}
pModel->AddUndo( new RemoveColUndo( xThis, nIndex, aRemovedCols, aRemovedCells ) );
}
            // only columns before and inside the removed columns are considered
nColCount = nIndex + nCount + 1;
const sal_Int32 nRowCount = getRowCountImpl();
            // first check merged cells before and inside the removed columns
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
for( sal_Int32 nRow = 0; nRow < nRowCount; ++nRow )
{
CellRef xCell( getCell( nCol, nRow ) );
sal_Int32 nColSpan = (xCell.is() && !xCell->isMerged()) ? xCell->getColumnSpan() : 1;
if( nColSpan <= 1 )
continue;
if( nCol >= nIndex )
{
// current cell is inside the removed columns
if( (nCol + nColSpan) > ( nIndex + nCount ) )
{
                            // current cell merges with columns after the removed columns
const sal_Int32 nRemove = nCount - nCol + nIndex;
CellRef xTargetCell( getCell( nIndex + nCount, nRow ) );
if( xTargetCell.is() )
{
if( bUndo )
xTargetCell->AddUndo();
xTargetCell->merge( nColSpan - nRemove, xCell->getRowSpan() );
xTargetCell->replaceContentAndFormating( xCell );
}
}
}
else if( nColSpan > (nIndex - nCol) )
{
                        // current cell spans inside the removed columns, so adjust
const sal_Int32 nRemove = ::std::min( nCount, nCol + nColSpan - nIndex );
if( bUndo )
xCell->AddUndo();
xCell->merge( nColSpan - nRemove, xCell->getRowSpan() );
}
}
}
// now remove the columns
remove_range<ColumnVector,ColumnVector::iterator>( maColumns, nIndex, nCount );
while( nRows-- )
maRows[nRows]->removeColumns( nIndex, nCount );
if( bUndo )
pModel->EndUndo();
if( pModel )
pModel->SetChanged();
}
catch( Exception& )
{
DBG_ERROR("sdr::table::TableModel::removeColumns(), exception caught!");
}
updateColumns();
setModified(sal_True);
}
}
// -----------------------------------------------------------------------------
void TableModel::insertRows( sal_Int32 nIndex, sal_Int32 nCount )
{
if( nCount && mpTableObj )
{
SdrModel* pModel = mpTableObj->GetModel();
const bool bUndo = pModel && mpTableObj->IsInserted() && pModel->IsUndoEnabled();
try
{
TableModelNotifyGuard aGuard( this );
nIndex = insert_range<RowVector,RowVector::iterator,TableRowRef>( maRows, nIndex, nCount );
RowVector aNewRows(nCount);
const sal_Int32 nColCount = getColumnCountImpl();
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
{
TableRowRef xNewRow( new TableRow( this, nIndex+nOffset, nColCount ) );
maRows[nIndex+nOffset] = xNewRow;
aNewRows[nOffset] = xNewRow;
}
if( bUndo )
{
pModel->BegUndo( ImpGetResStr(STR_TABLE_INSROW) );
pModel->AddUndo( pModel->GetSdrUndoFactory().CreateUndoGeoObject(*mpTableObj) );
TableModelRef xThis( this );
pModel->AddUndo( new InsertRowUndo( xThis, nIndex, aNewRows ) );
}
            // check if cells merge over new rows
for( sal_Int32 nRow = 0; nRow < nIndex; ++nRow )
{
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
CellRef xCell( getCell( nCol, nRow ) );
sal_Int32 nRowSpan = (xCell.is() && !xCell->isMerged()) ? xCell->getRowSpan() : 1;
if( (nRowSpan > 1) && ((nRowSpan + nRow) > nIndex) )
{
                        // cell merges over newly created rows, so add the new rows to the merged cell
const sal_Int32 nColSpan = xCell->getColumnSpan();
nRowSpan += nCount;
merge( nCol, nRow, nColSpan, nRowSpan );
}
}
}
}
catch( Exception& )
{
DBG_ERROR("sdr::table::TableModel::insertRows(), exception caught!");
}
if( bUndo )
pModel->EndUndo();
if( pModel )
pModel->SetChanged();
updateRows();
setModified(sal_True);
}
}
// -----------------------------------------------------------------------------
void TableModel::removeRows( sal_Int32 nIndex, sal_Int32 nCount )
{
sal_Int32 nRowCount = getRowCountImpl();
if( mpTableObj && nCount && (nIndex >= 0) && (nIndex < nRowCount) )
{
SdrModel* pModel = mpTableObj->GetModel();
const bool bUndo = pModel && mpTableObj->IsInserted()&& pModel->IsUndoEnabled();
try
{
TableModelNotifyGuard aGuard( this );
            // clip removed rows to rows actually available
if( (nIndex + nCount) > nRowCount )
nCount = nRowCount - nIndex;
if( bUndo )
{
pModel->BegUndo( ImpGetResStr(STR_UNDO_ROW_DELETE) );
pModel->AddUndo( pModel->GetSdrUndoFactory().CreateUndoGeoObject(*mpTableObj) );
TableModelRef xThis( this );
RowVector aRemovedRows( nCount );
for( sal_Int32 nOffset = 0; nOffset < nCount; ++nOffset )
aRemovedRows[nOffset] = maRows[nIndex+nOffset];
pModel->AddUndo( new RemoveRowUndo( xThis, nIndex, aRemovedRows ) );
}
// only rows before and inside the removed rows are considered
nRowCount = nIndex + nCount + 1;
const sal_Int32 nColCount = getColumnCountImpl();
// first check merged cells before and inside the removed rows
for( sal_Int32 nRow = 0; nRow < nRowCount; ++nRow )
{
for( sal_Int32 nCol = 0; nCol < nColCount; ++nCol )
{
CellRef xCell( getCell( nCol, nRow ) );
sal_Int32 nRowSpan = (xCell.is() && !xCell->isMerged()) ? xCell->getRowSpan() : 1;
if( nRowSpan <= 1 )
continue;
if( nRow >= nIndex )
{
// current cell is inside the removed rows
if( (nRow + nRowSpan) > (nIndex + nCount) )
{
                            // current cell merges with rows after the removed rows
const sal_Int32 nRemove = nCount - nRow + nIndex;
CellRef xTargetCell( getCell( nCol, nIndex + nCount ) );
if( xTargetCell.is() )
{
if( bUndo )
xTargetCell->AddUndo();
xTargetCell->merge( xCell->getColumnSpan(), nRowSpan - nRemove );
xTargetCell->replaceContentAndFormating( xCell );
}
}
}
else if( nRowSpan > (nIndex - nRow) )
{
                        // current cell spans inside the removed rows, so adjust
const sal_Int32 nRemove = ::std::min( nCount, nRow + nRowSpan - nIndex );
if( bUndo )
xCell->AddUndo();
xCell->merge( xCell->getColumnSpan(), nRowSpan - nRemove );
}
}
}
// now remove the rows
remove_range<RowVector,RowVector::iterator>( maRows, nIndex, nCount );
if( bUndo )
pModel->EndUndo();
if( pModel )
pModel->SetChanged();
}
catch( Exception& )
{
DBG_ERROR("sdr::table::TableModel::removeRows(), exception caught!");
}
updateRows();
setModified(sal_True);
}
}
// -----------------------------------------------------------------------------
TableRowRef TableModel::getRow( sal_Int32 nRow ) const throw (IndexOutOfBoundsException)
{
if( (nRow >= 0) && (nRow < getRowCountImpl()) )
return maRows[nRow];
throw IndexOutOfBoundsException();
}
// -----------------------------------------------------------------------------
TableColumnRef TableModel::getColumn( sal_Int32 nColumn ) const throw (IndexOutOfBoundsException)
{
if( (nColumn >= 0) && (nColumn < getColumnCountImpl()) )
return maColumns[nColumn];
throw IndexOutOfBoundsException();
}
// -----------------------------------------------------------------------------
/** deletes rows and columns that are completely merged. Must be called between BegUndo/EndUndo! */
void TableModel::optimize()
{
TableModelNotifyGuard aGuard( this );
bool bWasModified = false;
if( !maRows.empty() && !maColumns.empty() )
{
sal_Int32 nCol = getColumnCountImpl() - 1;
while( nCol > 0 )
{
bool bEmpty = true;
for( sal_Int32 nRow = 0; (nRow < getRowCountImpl()) && bEmpty; nRow++ )
{
Reference< XMergeableCell > xCell( getCellByPosition( nCol, nRow ), UNO_QUERY );
if( xCell.is() && !xCell->isMerged() )
bEmpty = false;
}
if( bEmpty )
{
if( nCol > 0 ) try
{
const OUString sWidth( RTL_CONSTASCII_USTRINGPARAM("Width") );
sal_Int32 nWidth1 = 0, nWidth2 = 0;
Reference< XPropertySet > xSet1( static_cast< XCellRange* >( maColumns[nCol].get() ), UNO_QUERY_THROW );
Reference< XPropertySet > xSet2( static_cast< XCellRange* >( maColumns[nCol-1].get() ), UNO_QUERY_THROW );
xSet1->getPropertyValue( sWidth ) >>= nWidth1;
xSet2->getPropertyValue( sWidth ) >>= nWidth2;
nWidth1 += nWidth2;
xSet2->setPropertyValue( sWidth, Any( nWidth1 ) );
}
catch( Exception& e )
{
(void)e;
DBG_ERROR("svx::TableModel::optimize(), exception caught!");
}
removeColumns( nCol, 1 );
bWasModified = true;
}
nCol--;
}
sal_Int32 nRow = getRowCountImpl() - 1;
while( nRow > 0 )
{
bool bEmpty = true;
for( nCol = 0; (nCol < getColumnCountImpl()) && bEmpty; nCol++ )
{
Reference< XMergeableCell > xCell( getCellByPosition( nCol, nRow ), UNO_QUERY );
if( xCell.is() && !xCell->isMerged() )
bEmpty = false;
}
if( bEmpty )
{
if( nRow > 0 ) try
{
const OUString sHeight( RTL_CONSTASCII_USTRINGPARAM("Height") );
sal_Int32 nHeight1 = 0, nHeight2 = 0;
Reference< XPropertySet > xSet1( static_cast< XCellRange* >( maRows[nRow].get() ), UNO_QUERY_THROW );
Reference< XPropertySet > xSet2( static_cast< XCellRange* >( maRows[nRow-1].get() ), UNO_QUERY_THROW );
xSet1->getPropertyValue( sHeight ) >>= nHeight1;
xSet2->getPropertyValue( sHeight ) >>= nHeight2;
nHeight1 += nHeight2;
xSet2->setPropertyValue( sHeight, Any( nHeight1 ) );
}
catch( Exception& e )
{
(void)e;
DBG_ERROR("svx::TableModel::optimize(), exception caught!");
}
removeRows( nRow, 1 );
bWasModified = true;
}
nRow--;
}
}
if( bWasModified )
setModified(sal_True);
}
// -----------------------------------------------------------------------------
void TableModel::merge( sal_Int32 nCol, sal_Int32 nRow, sal_Int32 nColSpan, sal_Int32 nRowSpan )
{
SdrModel* pModel = mpTableObj->GetModel();
const bool bUndo = pModel && mpTableObj->IsInserted() && pModel->IsUndoEnabled();
const sal_Int32 nLastRow = nRow + nRowSpan;
const sal_Int32 nLastCol = nCol + nColSpan;
    if( (nLastRow > getRowCount()) || (nLastCol > getColumnCount()) )
    {
        DBG_ERROR("TableModel::merge(), merge beyond the table!");
}
// merge first cell
CellRef xOriginCell( dynamic_cast< Cell* >( getCellByPosition( nCol, nRow ).get() ) );
if( xOriginCell.is() )
{
if( bUndo )
xOriginCell->AddUndo();
xOriginCell->merge( nColSpan, nRowSpan );
}
sal_Int32 nTempCol = nCol + 1;
// merge remaining cells
for( ; nRow < nLastRow; nRow++ )
{
for( ; nTempCol < nLastCol; nTempCol++ )
{
CellRef xCell( dynamic_cast< Cell* >( getCellByPosition( nTempCol, nRow ).get() ) );
if( xCell.is() && !xCell->isMerged() )
{
if( bUndo )
xCell->AddUndo();
xCell->setMerged();
xOriginCell->mergeContent( xCell );
}
}
nTempCol = nCol;
}
}
// -----------------------------------------------------------------------------
void TableModel::updateRows()
{
sal_Int32 nRow = 0;
RowVector::iterator iter = maRows.begin();
while( iter != maRows.end() )
{
(*iter++)->mnRow = nRow++;
}
}
// -----------------------------------------------------------------------------
void TableModel::updateColumns()
{
sal_Int32 nColumn = 0;
ColumnVector::iterator iter = maColumns.begin();
while( iter != maColumns.end() )
{
(*iter++)->mnColumn = nColumn++;
}
}
// -----------------------------------------------------------------------------
} }
| 12,951 |
844 | {
"github-username": "somadattareddy",
"favourite-emoji": "😃",
"favourite-music": "https://soundcloud.com/user-218216676/dont-look-back-feat-kotomi-ryan-elderslowed-down",
"favourite-color": "#808080"
} | 100 |
344 | // Copyright 2012 Google Inc. All Rights Reserved.
// Author: <EMAIL> (<NAME>)
// Class to build AutoFDO profile.
#ifndef AUTOFDO_PROFILE_WRITER_H_
#define AUTOFDO_PROFILE_WRITER_H_
#include <cstdint>
#include "symbol_map.h"
namespace devtools_crosstool_autofdo {
class SymbolMap;
class ProfileWriter {
public:
explicit ProfileWriter(const SymbolMap *symbol_map)
: symbol_map_(symbol_map) {}
explicit ProfileWriter() : symbol_map_(nullptr) {}
virtual ~ProfileWriter() {}
virtual bool WriteToFile(const std::string &output_file) = 0;
void setSymbolMap(const SymbolMap *symbol_map) { symbol_map_ = symbol_map; }
void Dump();
protected:
const SymbolMap *symbol_map_;
};
class AutoFDOProfileWriter : public ProfileWriter {
public:
explicit AutoFDOProfileWriter(const SymbolMap *symbol_map,
uint32_t gcov_version)
: ProfileWriter(symbol_map), gcov_version_(gcov_version) {}
explicit AutoFDOProfileWriter(uint32_t gcov_version)
: gcov_version_(gcov_version) {}
bool WriteToFile(const std::string &output_file) override;
private:
// Opens the output file, and writes the header.
bool WriteHeader(const std::string &output_file);
// Finishes writing, closes the output file.
bool WriteFinish();
// Writes the function profile to the gcda file. The profile has two parts:
// GCOV_TAG_AFDO_FILE_NAMES:
// String table that stores all the file names.
// GCOV_TAG_AFDO_FUNCTION:
// Function table that stores all the function info:
// TAG
// Length of the section
// Number of functions
// Function_1: function name
// Function_1: file name
// ...
// Function_n: ...
//
// The new function profile format:
// GCOV_TAG_AFDO_FILE_NAMES:
// String table that stores all the file names.
// GCOV_TAG_AFDO_FUNCTION:
// Function table that stores all the function info:
// TAG
// Length of the section
// Number of functions
// symbol profile 1
// symbol profile 2
// ...
// symbol profile num_functions
//
// Symbol profile is an iterative structure:
//
// symbol profile: func_name, file_name, start_line,
// num_pos_counts, num_callsites
// offset_1: num_targets, count
// target_1: count
// offset_2: num_targets, count
// ...
// offset_num_pos_counts:
// callsite_offset_1: symbol profile
// callsite_offset_2: symbol profile
// ...
// callsite_offset_num_callsites: symbol profile
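  //
  // A purely illustrative (hypothetical) instance of the layout above, for a
  // function "main" whose only callsite inlines "helper" -- the names and
  // counts are made up and only show how the fields nest:
  //
  //   symbol profile: "main", "main.cc", 10, num_pos_counts=2, num_callsites=1
  //     offset_1: num_targets=0, count=120
  //     offset_2: num_targets=1, count=40
  //       target "helper": 40
  //     callsite_offset_1 ("helper"): nested symbol profile of the inlined
  //       callee, encoded recursively with the same structure.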
void WriteFunctionProfile();
// Writes the module grouping info into the gcda file.
// TODO(b/132437226): LIPO has been deprecated so no module grouping info
// is needed in the gcda file. However, even if no LIPO is used, gcc used
// by chromeOS kernel will still check the module grouping fields whenever
// it reads a gcda file. To be compatible, we keep the minimum fields which
// are necessary for gcc to be able to read a gcda file and remove the
// rest of LIPO stuff.
// We can remove the leftover if chromeOS kernel starts using llvm or can
// change their gcc in sync with autofdo tool.
//
// The minimum fields to keep:
// TAG
// Length of the section (will always be 0)
// Number of modules (will always be 0)
void WriteModuleGroup();
// Writes working set to gcov file.
void WriteWorkingSet();
uint32_t gcov_version_;
};
class SymbolTraverser {
public:
virtual ~SymbolTraverser() {}
protected:
SymbolTraverser() : level_(0) {}
virtual void Start(const SymbolMap &symbol_map) {
for (const auto &name_symbol : symbol_map.map()) {
if (!symbol_map.ShouldEmit(name_symbol.second->total_count)) {
continue;
}
VisitTopSymbol(name_symbol.first, name_symbol.second);
Traverse(name_symbol.second);
}
}
virtual void VisitTopSymbol(const std::string &name, const Symbol *node) {}
virtual void Visit(const Symbol *node) = 0;
virtual void VisitCallsite(const Callsite &offset) {}
int level_;
private:
void Traverse(const Symbol *node) {
level_++;
Visit(node);
for (const auto &callsite_symbol : node->callsites) {
VisitCallsite(callsite_symbol.first);
Traverse(callsite_symbol.second);
}
level_--;
}
DISALLOW_COPY_AND_ASSIGN(SymbolTraverser);
};
typedef std::map<std::string, int> StringIndexMap;
class StringTableUpdater: public SymbolTraverser {
public:
static void Update(const SymbolMap &symbol_map, StringIndexMap *map) {
StringTableUpdater updater(map);
updater.Start(symbol_map);
}
protected:
void Visit(const Symbol *node) override {
for (const auto &pos_count : node->pos_counts) {
for (const auto &name_count : pos_count.second.target_map) {
(*map_)[name_count.first] = 0;
}
}
}
  void VisitCallsite(const Callsite &callsite) override {
(*map_)[Symbol::Name(callsite.second)] = 0;
}
void VisitTopSymbol(const std::string &name, const Symbol *node) override {
(*map_)[Symbol::Name(name.c_str())] = 0;
}
private:
explicit StringTableUpdater(StringIndexMap *map) : map_(map) {}
StringIndexMap *map_;
DISALLOW_COPY_AND_ASSIGN(StringTableUpdater);
};
} // namespace devtools_crosstool_autofdo
#endif // AUTOFDO_PROFILE_WRITER_H_
| 1,910 |
1,442 | <reponame>VersiraSec/epsilon-cfw
#ifndef PROBABILITY_CALCULATION_CONTROLLER_H
#define PROBABILITY_CALCULATION_CONTROLLER_H
#include "distribution/distribution.h"
#include "distribution_curve_view.h"
#include "calculation_cell.h"
#include "responder_image_cell.h"
#include "calculation/calculation.h"
#include "../shared/parameter_text_field_delegate.h"
namespace Probability {
class CalculationController : public Escher::ViewController, public Escher::TableViewDataSource, public Escher::SelectableTableViewDataSource, public Shared::ParameterTextFieldDelegate {
public:
CalculationController(Escher::Responder * parentResponder, Escher::InputEventHandlerDelegate * inputEventHandlerDelegate, Distribution * distribution, Calculation * calculation);
/* Responder */
void didEnterResponderChain(Escher::Responder * previousResponder) override;
void didBecomeFirstResponder() override;
/* ViewController */
Escher::View * view() override;
const char * title() override;
void viewWillAppear() override;
void viewDidDisappear() override;
TELEMETRY_ID("Calculation");
/* TableViewDataSource */
int numberOfRows() const override;
int numberOfColumns() const override;
KDCoordinate columnWidth(int i) override;
KDCoordinate rowHeight(int j) override;
KDCoordinate cumulatedHeightFromIndex(int j) override;
int indexFromCumulatedHeight(KDCoordinate offsetY) override;
Escher::HighlightCell * reusableCell(int index, int type) override;
int reusableCellCount(int type) override;
int typeAtLocation(int i, int j) override;
void willDisplayCellAtLocation(Escher::HighlightCell * cell, int i, int j) override;
/* TextField delegate */
bool textFieldDidHandleEvent(Escher::TextField * textField, bool returnValue, bool textSizeDidChange) override;
bool textFieldShouldFinishEditing(Escher::TextField * textField, Ion::Events::Event event) override;
bool textFieldDidFinishEditing(Escher::TextField * textField, const char * text, Ion::Events::Event event) override;
void reloadDistributionCurveView();
void reload();
void setCalculationAccordingToIndex(int index, bool forceReinitialisation = false);
private:
constexpr static int k_numberOfCalculationCells = 3;
constexpr static KDCoordinate k_tableMargin = 3;
void updateTitle();
class ContentView : public Escher::View {
public:
ContentView(Escher::SelectableTableView * selectableTableView, Distribution * distribution, Calculation * calculation);
DistributionCurveView * distributionCurveView() {
return &m_distributionCurveView;
}
private:
int numberOfSubviews() const override;
Escher::View * subviewAtIndex(int index) override;
void layoutSubviews(bool force = false) override;
Escher::SelectableTableView * m_selectableTableView;
DistributionCurveView m_distributionCurveView;
};
ContentView m_contentView;
Escher::SelectableTableView m_selectableTableView;
ResponderImageCell m_imageCell;
CalculationCell m_calculationCells[k_numberOfCalculationCells];
Calculation * m_calculation;
Distribution * m_distribution;
constexpr static int k_titleBufferSize = 30;
char m_titleBuffer[k_titleBufferSize];
};
}
#endif
| 955 |
11,010 | package com.google.inject.servlet;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.Singleton;
import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import junit.framework.TestCase;
/**
 * This tests that the filter stage of the pipeline dispatches correctly to guice-managed filters with
* multiple modules.
*
* <p>WARNING(dhanji): Non-parallelizable test =(
*
* @author <EMAIL> (<NAME>)
*/
public class MultiModuleDispatchIntegrationTest extends TestCase {
private static int inits, doFilters, destroys;
@Override
public final void setUp() {
inits = 0;
doFilters = 0;
destroys = 0;
GuiceFilter.reset();
}
public final void testDispatchRequestToManagedPipeline() throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
filter("/*").through(TestFilter.class);
// These filters should never fire
filter("*.jsp").through(Key.get(TestFilter.class));
}
},
new ServletModule() {
@Override
protected void configureServlets() {
filter("*.html").through(TestFilter.class);
filter("/*").through(Key.get(TestFilter.class));
// These filters should never fire
filter("/index/*").through(Key.get(TestFilter.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
//create ourselves a mock request with test URI
HttpServletRequest requestMock = createMock(HttpServletRequest.class);
expect(requestMock.getRequestURI()).andReturn("/index.html").anyTimes();
expect(requestMock.getContextPath()).andReturn("").anyTimes();
//dispatch request
replay(requestMock);
pipeline.dispatch(requestMock, null, createMock(FilterChain.class));
pipeline.destroyPipeline();
verify(requestMock);
assertTrue(
"lifecycle states did not"
+ " fire correct number of times-- inits: "
+ inits
+ "; dos: "
+ doFilters
+ "; destroys: "
+ destroys,
inits == 1 && doFilters == 3 && destroys == 1);
}
@Singleton
public static class TestFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
inits++;
}
@Override
public void doFilter(
ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
throws IOException, ServletException {
doFilters++;
filterChain.doFilter(servletRequest, servletResponse);
}
@Override
public void destroy() {
destroys++;
}
}
}
| 1,310 |
407 | <filename>saas/dataops/api/metric-flink/metric-alarm/src/main/java/com/elasticsearch/cloud/monitor/metric/alarm/blink/utils/cache/RuleConditionTsdbCache.java
package com.elasticsearch.cloud.monitor.metric.alarm.blink.utils.cache;
import com.elasticsearch.cloud.monitor.commons.core.PlottQueryClient;
import com.elasticsearch.cloud.monitor.commons.datapoint.DataPoint;
import com.elasticsearch.cloud.monitor.commons.rule.Rule;
import com.elasticsearch.cloud.monitor.commons.rule.expression.SelectedMetric;
import com.elasticsearch.cloud.monitor.commons.rule.filter.TagVFilter;
import com.elasticsearch.cloud.monitor.commons.utils.TimeUtils;
import com.elasticsearch.cloud.monitor.metric.alarm.blink.constant.MetricConstants;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.*;
/**
 * Rule condition cache backed by a TSDB source.
*
* @author: fangzong.ly
* @date: 2021/08/31 19:58
*/
public class RuleConditionTsdbCache extends RuleConditionCache {
private String tsdbAddr;
public RuleConditionTsdbCache(Rule rule, long interval, String tsdbAddr) {
super(rule, interval);
this.tsdbAddr = tsdbAddr;
}
@SuppressWarnings("Duplicates")
@Override
public void recovery(DataPoint dataPoint) throws IOException {
        //already cached, just return
if (stateMap.get(dataPoint.getTags()) != null) {
return;
}
long currentEventTimeMs = TimeUtils.toMillisecond(dataPoint.getTimestamp());
Preconditions.checkArgument(currentEventTimeMs % interval == 0, "currentEventTimeMs(" + currentEventTimeMs + ") must be a multiple of " + interval);
long queryStartMs = currentEventTimeMs - rule.getDurationCondition().getCrossSpan() + interval;
if (queryStartMs < currentEventTimeMs) {
try {
queryAndCache(queryStartMs, currentEventTimeMs, rule, dataPoint);
} catch (Exception e) {
throw e;
}
}
}
@SuppressWarnings("Duplicates")
private void queryAndCache(final long queryStartMs, final long queryEndMs, final Rule rule, DataPoint dataPoint) throws IOException {
long start = System.currentTimeMillis();
if (queryClient == null) {
queryClient = new PlottQueryClient(tsdbAddr, "application/x-www-form-urlencoded");
}
Map<String, TagVFilter> filters = new HashMap<>();
List<TagVFilter> tagsFilterList = new LinkedList<>();
TagVFilter.tagsToFilters(dataPoint.getTags(), tagsFilterList);
for (TagVFilter tagVFilter : tagsFilterList) {
filters.put(tagVFilter.getTagk(), tagVFilter);
}
if (!rule.isCompose()) {
List<DataPoint> dps = queryClient.query(queryStartMs, queryEndMs, dataPoint.getName(), filters, interval,
rule.getDsAggregator(), rule.getAggregator(), rule.isRate());
for (DataPoint d : dps) {
put(d);
}
} else {
Map<String, List<DataPoint>> dmap = new HashMap<>();
for (SelectedMetric selectedMetric : rule.getMetricCompose().getMetrics()) {
List<DataPoint> dps;
if (rule.getMetricCompose().isCrossJoin()) {
dps = queryClient.query(queryStartMs, queryEndMs, dataPoint.getName(), selectedMetric.getFilterMap(), interval,
selectedMetric.getDsAggregator(), selectedMetric.getAggregator(), selectedMetric.isRate());
} else {
dps = queryClient.query(queryStartMs, queryEndMs, dataPoint.getName(), filters, interval,
selectedMetric.getDsAggregator(), selectedMetric.getAggregator(), rule.isRate());
}
dmap.put(selectedMetric.getId(), dps);
}
mergePut(dmap);
}
if (monitor != null) {
monitor.reportLatency(MetricConstants.ALARM_CACHE_QUERY_LATENCY, start, globalTags);
monitor.increment(MetricConstants.ALARM_CACHE_QUERY_QPS, 1, globalTags);
}
}
private void mergePut(Map<String, List<DataPoint>> dmap) {
int size = rule.getMetricCompose().getMetrics().size();
Map<Long, List<DataPoint>> mergeMap = new HashMap<>();
for (List<DataPoint> dps : dmap.values()) {
for (DataPoint dataPoint : dps) {
List<DataPoint> list = mergeMap.get(dataPoint.getTimestamp());
if (list == null) {
list = new ArrayList<>(size);
}
list.add(dataPoint);
mergeMap.put(dataPoint.getTimestamp(), list);
}
}
for (List<DataPoint> dps : mergeMap.values()) {
DataPoint compose = rule.evaluate(dps);
put(compose);
}
}
}
| 2,112 |
1,178 | <gh_stars>1000+
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network client for reading (and eventually writing) parameters."""
from makani.avionics.common import aio
from makani.avionics.common import pack_avionics_messages
from makani.avionics.firmware.params import codec
SECTION_CONFIG = pack_avionics_messages.kParamSectionConfig
SECTION_CALIB = pack_avionics_messages.kParamSectionCalib
SECTION_SERIAL = pack_avionics_messages.kParamSectionSerial
SECTION_CARRIER_SERIAL = pack_avionics_messages.kParamSectionCarrierSerial
class Client(object):
"""Network client for reading (and eventually writing) parameters."""
def __init__(self, timeout=None):
self.aio_client = aio.AioClient(['kMessageTypeParamRequest',
'kMessageTypeParamResponse'],
timeout=timeout)
def _SendBlockRequest(self, node_id, section, offset):
"""Fill out and send a ParamRequestMessage."""
request = pack_avionics_messages.ParamRequestMessage()
request.node_id = node_id
request.section = section
request.offset = offset
self.aio_client.Send(request, 'kMessageTypeParamRequest',
'kAioNodeOperator')
def _GetBlock(self, node_id, section, data, offset):
"""Query a node for a block of parameters from the specified section."""
self._SendBlockRequest(node_id, section, offset)
while True:
(_, _, msg) = self.aio_client.Recv()
if isinstance(msg, pack_avionics_messages.ParamResponseMessage):
break
# TODO: Verify section, offset, and length.
if msg.length > 0:
data[offset:offset + msg.length] = msg.data[0:msg.length]
return msg.length
def GetSection(self, node_id, section):
"""Obtain parameters from the specified section in the node node_id.
Args:
node_id: AIO node number integer.
section: Parameter section identifier, e.g. SECTION_CALIB.
Returns:
A parameter object for the particular node.
Raises:
socket.timeout if a timeout was specified in the constructor and the
timeout was exceeded while querying parameters.
"""
offset = 0
data = bytearray(64 * 1024) # TODO: Define a max param size.
while offset < len(data):
length = self._GetBlock(node_id, section, data, offset)
offset += length
if length < 1024:
break
return codec.DecodeBin(data)
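# Illustrative usage (assumes an AIO network is reachable and that the chosen node id
# actually hosts parameters; the node id below is a placeholder, not a project constant):
#   client = Client(timeout=1.0)
#   calib = client.GetSection(node_id, SECTION_CALIB)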
| 1,026 |
1,577 | <reponame>hoaihuongbk/jvm-profiler<filename>src/main/java/com/uber/profiling/reporters/GraphiteOutputReporter.java
package com.uber.profiling.reporters;
import com.uber.profiling.Reporter;
import com.uber.profiling.util.AgentLogger;
import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.Socket;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Metrics reporter class for Graphite.
*
* Check the "host" and "port" properties for Graphite and update accordingly.
*
 * You can also pass Graphite connection properties from a yaml file and those properties will be used
* by this reporter.
*
 * To use GraphiteOutputReporter with its default properties, pass it on the command line:
*
* reporter=com.uber.profiling.reporters.GraphiteOutputReporter
*
 * To use properties from a yaml file, use the command below:
*
* reporter=com.uber.profiling.reporters.GraphiteOutputReporter,configProvider=com.uber.profiling
* .YamlConfigProvider,configFile=/opt/graphite.yaml
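 *
 * Whatever the config provider, the reporter reads these keys in updateArguments() below
 * (the values shown are illustrative, not taken from this project):
 *
 *   graphite.host      e.g. "graphite.example.com"
 *   graphite.port      e.g. "2003"
 *   graphite.prefix    e.g. "jvm"
 *   graphite.whiteList e.g. "processCpuLoad,heapMemoryTotalUsed"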
*/
public class GraphiteOutputReporter implements Reporter {
private static final AgentLogger logger = AgentLogger
.getLogger(GraphiteOutputReporter.class.getName());
private String host = "127.0.0.1";
private int port = 2003;
private String prefix = "jvm";
private Socket socket = null;
private PrintWriter out = null;
  private Set<String> whiteList = new HashSet<>();
@Override
public void report(String profilerName, Map<String, Object> metrics) {
// get DB connection
ensureGraphiteConnection();
// format metrics
logger.debug("Profiler Name : " + profilerName);
String tag = ((String) metrics.computeIfAbsent("tag", v -> "default_tag"))
.replaceAll("\\.", "-");
String appId = ((String) metrics.computeIfAbsent("appId", v -> "default_app"))
.replaceAll("\\.", "-");
String host = ((String) metrics.computeIfAbsent("host", v -> "unknown_host"))
.replaceAll("\\.", "-");
String process = ((String) metrics.computeIfAbsent("processUuid", v -> "unknown_process"))
.replaceAll("\\.", "-");
String newPrefix = String.join(".", prefix, tag, appId, host, process);
Map<String, Object> formattedMetrics = getFormattedMetrics(metrics);
formattedMetrics.remove("tag");
formattedMetrics.remove("appId");
formattedMetrics.remove("host");
formattedMetrics.remove("processUuid");
long timestamp = System.currentTimeMillis() / 1000;
for (Map.Entry<String, Object> entry : formattedMetrics.entrySet()) {
try {
if (whiteList.contains(entry.getKey())) {
out.printf(
newPrefix + "." + entry.getKey() + " " + entry.getValue() + " " + timestamp + "%n");
}
} catch (Exception e) {
logger.warn("Unable to print metrics, newPrefix=" + newPrefix
+ ", entry.getKey()= " + entry.getKey()
+ ", entry.getValue()= " + entry.getValue()
+ ", timestamp= " + timestamp);
}
}
}
// Format metrics in key=value (line protocol)
public Map<String, Object> getFormattedMetrics(Map<String, Object> metrics) {
Map<String, Object> formattedMetrics = new HashMap<>();
for (Map.Entry<String, Object> entry : metrics.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
logger.debug("Raw Metric-Name = " + key + ", Metric-Value = " + value);
if (value != null) {
if (value instanceof List) {
List listValue = (List) value;
addListMetrics(formattedMetrics, listValue, key);
} else if (value instanceof Map) {
Map<String, Object> metricMap = (Map<String, Object>) value;
addMapMetrics(formattedMetrics, metricMap, key);
} else {
formattedMetrics.put(key, value);
}
}
}
return formattedMetrics;
}
private void addMapMetrics(Map<String, Object> formattedMetrics, Map<String, Object> metricMap,
String keyPrefix) {
for (Map.Entry<String, Object> entry1 : metricMap.entrySet()) {
String key1 = entry1.getKey();
Object value1 = entry1.getValue();
if (value1 != null) {
if (value1 instanceof List) {
addListMetrics(formattedMetrics, (List) value1, keyPrefix + "." + key1);
} else if (value1 instanceof Map) {
addMapMetrics(formattedMetrics, (Map<String, Object>) value1, keyPrefix + "." + key1);
} else {
formattedMetrics.put(keyPrefix + "." + key1, value1);
}
}
}
}
private void addListMetrics(Map<String, Object> formattedMetrics,
List listValue, String keyPrefix) {
if (listValue != null && !listValue.isEmpty()) {
if (listValue.get(0) instanceof List) {
for (int i = 0; i < listValue.size(); i++) {
addListMetrics(formattedMetrics, (List) listValue.get(i), keyPrefix + "." + i);
}
} else if (listValue.get(0) instanceof Map) {
for (int i = 0; i < listValue.size(); i++) {
Map<String, Object> metricMap = (Map<String, Object>) listValue.get(i);
if (metricMap != null) {
String name = null;
Object nameValue = metricMap.get("name");
if (nameValue != null && nameValue instanceof String) {
name = ((String) nameValue).replaceAll("\\s", "");
}
if (StringUtils.isNotEmpty(name)) {
metricMap.remove("name");
addMapMetrics(formattedMetrics, metricMap, keyPrefix + "." + name);
} else {
addMapMetrics(formattedMetrics, metricMap, keyPrefix + "." + i);
}
}
}
} else {
List<String> metricList = (List<String>) listValue;
formattedMetrics.put(keyPrefix, String.join(",", metricList));
}
}
}
private void ensureGraphiteConnection() {
if (socket == null) {
synchronized (this) {
if (socket == null) {
try {
logger.info("connecting to graphite(" + host + ":" + port + ")!");
socket = new Socket(host, port);
OutputStream s = socket.getOutputStream();
out = new PrintWriter(s, true);
} catch (IOException e) {
logger.warn("connect to graphite error!", e);
}
}
}
}
}
@Override
public void close() {
try {
if (out != null) {
out.close();
}
if (socket != null) {
socket.close();
}
} catch (IOException e) {
logger.warn("close connection to graphite error!", e);
}
}
// properties from yaml file
@Override
public void updateArguments(Map<String, List<String>> connectionProperties) {
for (Map.Entry<String, List<String>> entry : connectionProperties.entrySet()) {
String key = entry.getKey();
List<String> value = entry.getValue();
if (StringUtils.isNotEmpty(key) && value != null && !value.isEmpty()) {
String stringValue = value.get(0);
if (key.equals("graphite.host")) {
logger.info("Got value for host = " + stringValue);
this.host = stringValue;
} else if (key.equals("graphite.port")) {
logger.info("Got value for port = " + stringValue);
this.port = Integer.parseInt(stringValue);
} else if (key.equals("graphite.prefix")) {
logger.info("Got value for database = " + stringValue);
this.prefix = stringValue;
} else if (key.equals("graphite.whiteList")) {
logger.info("Got value for whiteList = " + stringValue);
if (stringValue != null && stringValue.length() > 0) {
for (String pattern : stringValue.split(",")) {
this.whiteList.add(pattern.trim());
}
}
}
}
}
}
}
| 3,154 |
1,253 | <reponame>CarbonDDR/al-go-rithms<filename>operating_system/File-Organization/C/file.c
/*
 * These are the implementations of 3 file organization strategies:
 * single-level organization, two-level organization, and hierarchical file organization.
 * This file provides the menu for the 3 types of strategies.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "single.c"
#include "double.c"
#include "hierach.c"
int main() {
system("clear");
printf("File organization stratagies\n" );
printf(" 1. Single Level \n");
printf(" 2. Two Level \n");
printf(" 3. Hierarichal\n");
printf(" 0. Exit\n");
printf("\t Choose :");
int opt;
scanf("%d", &opt);
system("clear");
switch (opt) {
case 1: single();break;
case 2: two();break;
case 3: hierarichal();break;
case 0: exit(0);
default: printf("Invalid option!! TRY AGAIN\n" );main();break;
}
}
| 335 |
852 | <filename>PhysicsTools/PatAlgos/plugins/PATPackedGenParticleProducer.cc
#include <string>
#include "DataFormats/Candidate/interface/Candidate.h"
#include "DataFormats/HepMCCandidate/interface/GenParticle.h"
#include "DataFormats/HepMCCandidate/interface/GenParticleFwd.h"
#include "DataFormats/VertexReco/interface/Vertex.h"
#include "DataFormats/VertexReco/interface/VertexFwd.h"
#include "DataFormats/PatCandidates/interface/PackedGenParticle.h"
#include "DataFormats/PatCandidates/interface/Jet.h"
#include "DataFormats/Common/interface/Association.h"
#include "FWCore/Framework/interface/global/EDProducer.h"
#include "DataFormats/Common/interface/View.h"
#include "FWCore/Framework/interface/ESHandle.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/EventSetup.h"
#include "FWCore/Framework/interface/Frameworkfwd.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/Utilities/interface/Exception.h"
#include "DataFormats/GsfTrackReco/interface/GsfTrack.h"
#include "DataFormats/MuonReco/interface/Muon.h"
#include "TrackingTools/IPTools/interface/IPTools.h"
#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h"
#include "TrackingTools/Records/interface/TransientTrackRecord.h"
#include "TrackingTools/TrajectoryState/interface/TrajectoryStateOnSurface.h"
#include "TrackingTools/GeomPropagators/interface/AnalyticalTrajectoryExtrapolatorToLine.h"
#include "TrackingTools/GeomPropagators/interface/AnalyticalImpactPointExtrapolator.h"
#include "DataFormats/GeometryCommonDetAlgo/interface/Measurement1D.h"
#include "TrackingTools/TransientTrack/interface/TransientTrack.h"
#include "TrackingTools/IPTools/interface/IPTools.h"
#include "CLHEP/Vector/ThreeVector.h"
#include "CLHEP/Vector/LorentzVector.h"
#include "CLHEP/Matrix/Vector.h"
#include <string>
namespace pat {
class PATPackedGenParticleProducer : public edm::global::EDProducer<> {
public:
explicit PATPackedGenParticleProducer(const edm::ParameterSet&);
~PATPackedGenParticleProducer() override;
void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override;
private:
const edm::EDGetTokenT<reco::GenParticleCollection> Cands_;
const edm::EDGetTokenT<reco::GenParticleCollection> GenOrigs_;
const edm::EDGetTokenT<edm::Association<reco::GenParticleCollection>> Asso_;
const edm::EDGetTokenT<edm::Association<reco::GenParticleCollection>> AssoOriginal_;
const double maxRapidity_;
};
} // namespace pat
pat::PATPackedGenParticleProducer::PATPackedGenParticleProducer(const edm::ParameterSet& iConfig)
: Cands_(consumes<reco::GenParticleCollection>(iConfig.getParameter<edm::InputTag>("inputCollection"))),
GenOrigs_(consumes<reco::GenParticleCollection>(iConfig.getParameter<edm::InputTag>("inputOriginal"))),
Asso_(consumes<edm::Association<reco::GenParticleCollection>>(iConfig.getParameter<edm::InputTag>("map"))),
AssoOriginal_(consumes<edm::Association<reco::GenParticleCollection>>(
iConfig.getParameter<edm::InputTag>("inputCollection"))),
maxRapidity_(iConfig.getParameter<double>("maxRapidity")) {
produces<std::vector<pat::PackedGenParticle>>();
produces<edm::Association<std::vector<pat::PackedGenParticle>>>();
}
pat::PATPackedGenParticleProducer::~PATPackedGenParticleProducer() {}
void pat::PATPackedGenParticleProducer::produce(edm::StreamID,
edm::Event& iEvent,
const edm::EventSetup& iSetup) const {
edm::Handle<reco::GenParticleCollection> cands;
iEvent.getByToken(Cands_, cands);
//from prunedGenParticlesWithStatusOne to prunedGenParticles
edm::Handle<edm::Association<reco::GenParticleCollection>> asso;
iEvent.getByToken(Asso_, asso);
edm::Handle<edm::Association<reco::GenParticleCollection>> assoOriginal;
iEvent.getByToken(AssoOriginal_, assoOriginal);
edm::Handle<reco::GenParticleCollection> genOrigs;
iEvent.getByToken(GenOrigs_, genOrigs);
std::vector<int> mapping(genOrigs->size(), -1);
//invert the value map from Orig2New to New2Orig
std::map<edm::Ref<reco::GenParticleCollection>, edm::Ref<reco::GenParticleCollection>> reverseMap;
for (unsigned int ic = 0, nc = genOrigs->size(); ic < nc; ++ic) {
edm::Ref<reco::GenParticleCollection> originalRef = edm::Ref<reco::GenParticleCollection>(genOrigs, ic);
edm::Ref<reco::GenParticleCollection> newRef = (*assoOriginal)[originalRef];
reverseMap.insert(
std::pair<edm::Ref<reco::GenParticleCollection>, edm::Ref<reco::GenParticleCollection>>(newRef, originalRef));
}
auto outPtrP = std::make_unique<std::vector<pat::PackedGenParticle>>();
unsigned int packed = 0;
for (unsigned int ic = 0, nc = cands->size(); ic < nc; ++ic) {
const reco::GenParticle& cand = (*cands)[ic];
if (cand.status() == 1 && std::abs(cand.y()) < maxRapidity_) {
// Obtain original gen particle collection reference from input reference and map
edm::Ref<reco::GenParticleCollection> inputRef = edm::Ref<reco::GenParticleCollection>(cands, ic);
edm::Ref<reco::GenParticleCollection> originalRef = reverseMap[inputRef];
edm::Ref<reco::GenParticleCollection> finalPrunedRef = (*asso)[inputRef];
mapping[originalRef.key()] = packed;
packed++;
if (finalPrunedRef.isNonnull()) { //this particle exists also in the final pruned
outPtrP->push_back(pat::PackedGenParticle(cand, finalPrunedRef));
} else {
if (cand.numberOfMothers() > 0) {
edm::Ref<reco::GenParticleCollection> newRef = (*asso)[cand.motherRef(0)];
outPtrP->push_back(pat::PackedGenParticle(cand, newRef));
} else {
outPtrP->push_back(pat::PackedGenParticle(cand, edm::Ref<reco::GenParticleCollection>()));
}
}
}
}
edm::OrphanHandle<std::vector<pat::PackedGenParticle>> oh = iEvent.put(std::move(outPtrP));
auto gp2pgp = std::make_unique<edm::Association<std::vector<pat::PackedGenParticle>>>(oh);
edm::Association<std::vector<pat::PackedGenParticle>>::Filler gp2pgpFiller(*gp2pgp);
gp2pgpFiller.insert(genOrigs, mapping.begin(), mapping.end());
gp2pgpFiller.fill();
iEvent.put(std::move(gp2pgp));
}
using pat::PATPackedGenParticleProducer;
#include "FWCore/Framework/interface/MakerMacros.h"
DEFINE_FWK_MODULE(PATPackedGenParticleProducer);
| 2,410 |
6,098 | <filename>h2o-core/src/test/java/water/rapids/ast/prims/advmath/AstKFoldTest.java
package water.rapids.ast.prims.advmath;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import water.TestUtil;
import water.fvec.Frame;
import water.fvec.TestFrameBuilder;
import water.fvec.Vec;
import water.rapids.Rapids;
import water.rapids.Val;
import java.util.Random;
import static org.junit.Assert.assertTrue;
public class AstKFoldTest extends TestUtil {
@BeforeClass
static public void setup() { stall_till_cloudsize(1); }
private Frame fr = null;
@Test public void basicKFoldTest() {
fr = new TestFrameBuilder()
.withName("testFrame")
.withColNames("ColA")
.withVecTypes(Vec.T_NUM)
.withDataForCol(0, ard(1, 2, 3, 4, 5))
.build();
int numberOfFolds = 5;
int randomSeed = new Random().nextInt();
String tree = String.format("(kfold_column testFrame %d %d )", numberOfFolds, randomSeed);
Val val = Rapids.exec(tree);
Frame results = val.getFrame();
fr = fr.add(results);
assertTrue(fr.vec(1).at(0) < 5);
assertTrue(fr.vec(1).at(1) < 5);
assertTrue(fr.vec(1).at(2) < 5);
assertTrue(fr.vec(1).at(3) < 5);
assertTrue(fr.vec(1).at(4) < 5);
results.delete();
}
@After
public void afterEach() {
fr.delete();
}
}
| 667 |
585 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.util;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import org.noggit.CharArr;
/** A mutable byte[]-backed Utf8CharSequence. This is quite similar to Lucene's BytesRef.
 * Do not alter the contents of the byte[]; it may become inconsistent with the cached String.
 * This is designed for single-threaded use.
*
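 * Typical use (illustrative): wrap an existing UTF-8 byte range via
 * new ByteArrayUtf8CharSequence(bytes, offset, length) to defer UTF-16 decoding;
 * toString() materialises and caches the decoded String lazily.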
*/
public class ByteArrayUtf8CharSequence implements Utf8CharSequence {
protected byte[] buf;
protected int offset;
protected int hashCode = Integer.MIN_VALUE;
protected int length;
protected volatile String utf16;
public Function<ByteArrayUtf8CharSequence, String> stringProvider;
public ByteArrayUtf8CharSequence(String utf16) {
buf = new byte[Math.multiplyExact(utf16.length(), 3)];
offset = 0;
length = ByteUtils.UTF16toUTF8(utf16, 0, utf16.length(), buf, 0);
if (buf.length > length) {
byte[] copy = new byte[length];
System.arraycopy(buf, 0, copy, 0, length);
buf = copy;
}
assert isValid();
}
public byte[] getBuf() {
return buf;
}
public int offset() {
return offset;
}
public ByteArrayUtf8CharSequence(byte[] buf, int offset, int length) {
this.buf = buf;
this.offset = offset;
this.length = length;
}
@Override
public byte byteAt(int idx) {
if (idx >= length || idx < 0) throw new ArrayIndexOutOfBoundsException("idx must be >=0 and < " + length);
return buf[offset + idx];
}
/**
* this is for internal use to get a cached string value.
* returns null if There is no cached String value
*/
public String getStringOrNull() {
return utf16;
}
@Override
public int write(int start, byte[] buffer, int pos) {
return _writeBytes(buf, offset, length, start, buffer, pos);
}
static int _writeBytes(byte[] src, int srcOffset, int srcLength, int start, byte[] buffer, int pos) {
if (srcOffset == -1 || start >= srcLength) return -1;
int writableBytes = Math.min(srcLength - start, buffer.length - pos);
System.arraycopy(src, srcOffset + start, buffer, pos, writableBytes);
return writableBytes;
}
@Override
public int size() {
return length;
}
private ByteArrayUtf8CharSequence(byte[] buf, int offset, int length, String utf16, int hashCode) {
this.buf = buf;
this.offset = offset;
this.length = length;
this.utf16 = utf16;
this.hashCode = hashCode;
}
@Override
public int hashCode() {
if (hashCode == Integer.MIN_VALUE) {
hashCode = MurmurHash2.hash32(buf, offset, length);
}
return hashCode;
}
@Override
public int length() {
return _getStr().length();
}
@Override
public boolean equals(Object other) {
if (other instanceof Utf8CharSequence) {
if (size() != ((Utf8CharSequence) other).size()) return false;
if (other instanceof ByteArrayUtf8CharSequence) {
if (this.length != ((ByteArrayUtf8CharSequence) other).length) return false;
ByteArrayUtf8CharSequence that = (ByteArrayUtf8CharSequence) other;
return _equals(this.buf, this.offset, this.offset + this.length,
that.buf, that.offset, that.offset + that.length);
}
return utf8Equals(this, (Utf8CharSequence) other);
} else {
return false;
}
}
public static boolean utf8Equals(Utf8CharSequence utf8_1, Utf8CharSequence utf8_2) {
if (utf8_1.size() != utf8_2.size()) return false;
for (int i = 0; i < utf8_1.size(); i++) {
if (utf8_1.byteAt(i) != utf8_2.byteAt(i)) return false;
}
return true;
}
@Override
public char charAt(int index) {
return _getStr().charAt(index);
}
private String _getStr() {
String utf16 = this.utf16;
if (utf16 == null) {
if (stringProvider != null) {
this.utf16 = utf16 = stringProvider.apply(this);
} else {
CharArr arr = new CharArr();
ByteUtils.UTF8toUTF16(buf, offset, length, arr);
this.utf16 = utf16 = arr.toString();
}
}
return utf16;
}
@Override
public CharSequence subSequence(int start, int end) {
return new ByteArrayUtf8CharSequence(_getStr().subSequence(start, end).toString());
}
@Override
public ByteArrayUtf8CharSequence clone() {
return new ByteArrayUtf8CharSequence(buf, offset, length, utf16, hashCode);
}
public ByteArrayUtf8CharSequence deepCopy() {
byte[] bytes = new byte[length];
System.arraycopy(buf, offset, bytes, 0, length);
return new ByteArrayUtf8CharSequence(bytes, 0, length, utf16, hashCode);
}
@SuppressWarnings({"rawtypes"})
public static Map.Entry convertCharSeq(Map.Entry e) {
if (e.getKey() instanceof Utf8CharSequence || e.getValue() instanceof Utf8CharSequence) {
return new AbstractMap.SimpleEntry<>(convertCharSeq(e.getKey()), convertCharSeq(e.getValue()));
}
return e;
}
@SuppressWarnings("rawtypes")
public static Collection convertCharSeq(Collection<?> vals) {
if (vals == null) return null;
boolean needsCopy = false;
for (Object o : vals) {
if (o instanceof Utf8CharSequence) {
needsCopy = true;
break;
}
}
if (needsCopy) {
Collection<Object> copy = null;
if (vals instanceof Set){
copy = new HashSet<>(vals.size());
} else {
copy = new ArrayList<>(vals.size());
}
for (Object o : vals) copy.add(convertCharSeq(o));
return copy;
}
return vals;
}
public static Object convertCharSeq(Object o) {
if (o == null) return null;
if (o instanceof Utf8CharSequence) return ((Utf8CharSequence) o).toString();
if (o instanceof Collection) return convertCharSeq((Collection<?>) o);
return o;
}
// methods in Arrays are defined stupid: they cannot use Objects.checkFromToIndex
// they throw IAE (vs IOOBE) in the case of fromIndex > toIndex.
// so this method works just like checkFromToIndex, but with that stupidity added.
private static void checkFromToIndex(int fromIndex, int toIndex, int length) {
if (fromIndex > toIndex) {
throw new IllegalArgumentException("fromIndex " + fromIndex + " > toIndex " + toIndex);
}
if (fromIndex < 0 || toIndex > length) {
throw new IndexOutOfBoundsException("Range [" + fromIndex + ", " + toIndex + ") out-of-bounds for length " + length);
}
}
@Override
public String toString() {
return _getStr();
}
/**
* Behaves like Java 9's Arrays.equals
*
* @see <a href="https://docs.oracle.com/javase/9/docs/api/java/util/Arrays.html#equals-byte:A-int-int-byte:A-int-int-">Arrays.equals</a>
*/
public static boolean _equals(byte[] a, int aFromIndex, int aToIndex, byte[] b, int bFromIndex, int bToIndex) {
checkFromToIndex(aFromIndex, aToIndex, a.length);
checkFromToIndex(bFromIndex, bToIndex, b.length);
int aLen = aToIndex - aFromIndex;
int bLen = bToIndex - bFromIndex;
// lengths differ: cannot be equal
if (aLen != bLen) {
return false;
}
for (int i = 0; i < aLen; i++) {
if (a[i + aFromIndex] != b[i + bFromIndex]) {
return false;
}
}
return true;
}
public ByteArrayUtf8CharSequence reset(byte[] bytes, int offset, int length, String str) {
this.buf = bytes;
this.offset = offset;
this.length = length;
this.utf16 = str;
this.hashCode = Integer.MIN_VALUE;
return this;
}
/**
* Performs internal consistency checks.
* Always returns true (or throws IllegalStateException)
*/
public boolean isValid() {
if (buf == null) {
throw new IllegalStateException("bytes is null");
}
if (length < 0) {
throw new IllegalStateException("length is negative: " + length);
}
if (length > buf.length) {
throw new IllegalStateException("length is out of bounds: " + length + ",bytes.length=" + buf.length);
}
if (offset < 0) {
throw new IllegalStateException("offset is negative: " + offset);
}
if (offset > buf.length) {
throw new IllegalStateException("offset out of bounds: " + offset + ",bytes.length=" + buf.length);
}
if (offset + length < 0) {
throw new IllegalStateException("offset+length is negative: offset=" + offset + ",length=" + length);
}
if (offset + length > buf.length) {
throw new IllegalStateException("offset+length out of bounds: offset=" + offset + ",length=" + length + ",bytes.length=" + buf.length);
}
return true;
}
}
| 3,398 |
664 | /*
* Copyright 2013-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jms.annotation.JmsListener;
import org.springframework.jms.core.JmsTemplate;
import org.springframework.stereotype.Service;
@Service
public class BookService {
private static final Logger log = LoggerFactory.getLogger(BookService.class);
private final JmsTemplate jmsTemplate;
public BookService(JmsTemplate jmsTemplate) {
this.jmsTemplate = jmsTemplate;
}
/**
	 * Scenario for "should generate tests triggered by a method": on the client side it must be
	 * possible to "trigger" sending of a message to the given messageFrom; on the server side
	 * the method is run and a message is awaited on the output messageFrom. The method triggers
	 * sending a message to a source.
*/
@JmsListener(destination = "input2")
public void returnBook() {
BookReturned bookReturned = new BookReturned("foo");
jmsTemplate.convertAndSend("output2", "{\"bookName\":\"foo\"}", message -> {
message.setStringProperty("BOOK-NAME", bookReturned.bookName);
return message;
});
}
}
| 493 |
695 | <gh_stars>100-1000
package model.builders.entity.actors;
import model.battlefield.actors.Actor;
import model.battlefield.actors.SoundActor;
import model.builders.entity.definitions.DefElement;
import model.builders.entity.definitions.Definition;
/**
* @author Benoît
*/
public class SoundActorBuilder extends ActorBuilder {
private static final String SOUND_PATH = "SoundPath";
private static final String VOLUME = "Volume";
private static final String LOOPING = "Looping";
private static final String POSITIONAL = "Positional";
private String soundPath;
private double volume = 1;
private boolean positional = false;
private boolean looping = false;
public SoundActorBuilder(Definition def) {
super(def);
for (DefElement de : def.getElements()) {
switch (de.name) {
case TYPE:
case TRIGGER:
case ACTOR_LIST:
break;
case SOUND_PATH:
soundPath = de.getVal();
break;
case VOLUME:
volume = de.getDoubleVal();
break;
case LOOPING:
looping = de.getBoolVal();
break;
case POSITIONAL:
positional = de.getBoolVal();
break;
default:
printUnknownElement(de.name);
}
}
}
@Override
public Actor build(String trigger, Actor parent) {
Actor res = new SoundActor(parent, trigger, childrenTriggers, childrenActorBuilders, soundPath, looping,
volume, positional);
res.debbug_id = getId();
return res;
}
}
| 568 |
803 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ================================================================
class DataBinding
// ================================================================
//
// A data binding specifies how a variable can be serialized. It
// provides a column name, column type and the ability to set/get
// the value from pv/cb pairs.
//
//-
{
public:
virtual ~DataBinding() {}
// name of the column to store the information in
const char * const SzColumn() const { return m_szColumn; }
// type of the column
virtual JET_COLTYP Coltyp() const = 0;
// Get the contents of the bound variable as a pv/cb pair,
// suitable for storage
virtual void GetPvCb(__out const void ** ppv, __out size_t * pcb) const = 0;
// Set the bound variable from a stored pv/cb pair
virtual ERR ErrSetFromPvCb(const void * const pv, const size_t cb) = 0;
// Set the bound variable to its default value
virtual void SetToDefault() = 0;
// Print the data binding
void Print(CPRINTF * const pcprintf) const;
protected:
DataBinding(const char * const szColumn) : m_szColumn(szColumn) {}
private:
const char * const m_szColumn;
};
// ================================================================
template<class T>
class DataBindingOf : public DataBinding
// ================================================================
//
// Data binding for a specific type. This converts between the
// type T and pv/cb pairs.
//
// This template can be used for simple data types (int, long). There
// is a separate specialization for strings.
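//
// For example (illustrative): DataBindingOf<LONG>( &lCount, "Count" ) binds the LONG
// variable lCount to a column named "Count" stored as JET_coltypLong.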
//
//-
{
public:
// Create an object/name binding
DataBindingOf(T * const pt, const char * const szColumn) :
DataBinding(szColumn),
m_pt(pt)
{
Assert(m_pt);
Assert(SzColumn());
}
virtual ~DataBindingOf() {}
// this is the default column type, it can be specialized
virtual JET_COLTYP Coltyp() const { return JET_coltypLongBinary; }
virtual void GetPvCb(__out const void ** ppv, __out size_t * pcb) const
{
Assert(ppv);
Assert(pcb);
*ppv = m_pt;
*pcb = sizeof(T);
}
virtual ERR ErrSetFromPvCb(const void * const pv, const size_t cb)
{
Assert(pv);
ERR err = JET_errSuccess;
if(cb != sizeof(T))
{
Call(ErrERRCheck(JET_errInvalidBufferSize));
}
*m_pt = *((const T *)pv);
HandleError:
return err;
}
virtual void SetToDefault()
{
*m_pt = T();
}
private:
T * const m_pt;
};
// Specialized templates for different types of bindings
template<>
JET_COLTYP DataBindingOf<unsigned char>::Coltyp() const { return JET_coltypUnsignedByte; }
template<>
JET_COLTYP DataBindingOf<SHORT>::Coltyp() const { return JET_coltypShort; }
template<>
JET_COLTYP DataBindingOf<USHORT>::Coltyp() const { return JET_coltypUnsignedShort; }
template<>
JET_COLTYP DataBindingOf<INT>::Coltyp() const { return JET_coltypLong; }
template<>
JET_COLTYP DataBindingOf<UINT>::Coltyp() const { return JET_coltypUnsignedLong; }
template<>
JET_COLTYP DataBindingOf<LONG>::Coltyp() const { return JET_coltypLong; }
template<>
JET_COLTYP DataBindingOf<ULONG>::Coltyp() const { return JET_coltypUnsignedLong; }
template<>
JET_COLTYP DataBindingOf<__int64>::Coltyp() const { return JET_coltypLongLong; }
// ================================================================
template<>
class DataBindingOf<char*> : public DataBinding
// ================================================================
//
// DataBindingOf specialization for strings.
//
//-
{
public:
// Create a char* binding to the given variable with the given maximum size.
// The maximum size includes the terminating NULL
DataBindingOf(char * const sz, const size_t cchMax, const char * const szColumn) :
DataBinding(szColumn),
m_sz(sz),
m_cchMax(cchMax)
{
Assert(m_sz);
Assert(0 != m_cchMax);
Assert(SzColumn());
}
virtual ~DataBindingOf() {}
virtual JET_COLTYP Coltyp() const { return JET_coltypLongText; }
virtual void GetPvCb(__out const void ** ppv, __out size_t * pcb) const
{
Assert(ppv);
Assert(pcb);
*ppv = m_sz;
(VOID) StringCbLengthA(m_sz, m_cchMax, pcb);
}
virtual ERR ErrSetFromPvCb(const void * const pv, const size_t cb)
{
Assert(pv);
ERR err = JET_errSuccess;
if(cb >= m_cchMax) // need space for a NULL terminator
{
Call(ErrERRCheck(JET_errInvalidBufferSize));
}
memcpy(m_sz, pv, cb);
m_sz[cb] = 0;
HandleError:
return err;
}
virtual void SetToDefault()
{
m_sz[0] = 0;
}
private:
char * const m_sz;
const size_t m_cchMax;
};
// ================================================================
class DataBindings
// ================================================================
//
// This is a set of column bindings, which can be iterated over.
//
//-
{
public:
DataBindings();
~DataBindings();
void AddBinding(DataBinding * const pbinding);
typedef DataBinding* const * iterator;
iterator begin() const;
iterator end() const;
private:
static const INT m_cbindingsMax = 64;
INT m_cbindings;
DataBinding* m_rgpbindings[m_cbindingsMax];
};
// ================================================================
class IDataStore
// ================================================================
//
//  An interface which sets and retrieves pv/cb pairs by name.
//
//-
{
public:
virtual ~IDataStore() {}
// DDL
virtual ERR ErrColumnExists(
const char * const szColumn,
__out bool * const pfExists,
__out JET_COLTYP * const pcoltyp) const = 0;
virtual ERR ErrCreateColumn(const char * const szColumn, const JET_COLTYP coltyp) = 0;
// DML
// Get the column data. Returns JET_wrnColumnNull if the column isn't present or has no data
virtual ERR ErrLoadDataFromColumn(
const char * const szColumn,
__out void * pv,
__out size_t * const pcbActual,
const size_t cbMax) const = 0;
virtual ERR ErrStoreDataToColumn(const char * const szColumn, const void * const pv, const size_t cb) = 0;
virtual ERR ErrPrepareUpdate() = 0;
virtual ERR ErrCancelUpdate() = 0;
virtual ERR ErrUpdate() = 0;
ERR ErrDataStoreUnavailable() const { return m_errDataStoreUnavailable; }
protected:
IDataStore() : m_errDataStoreUnavailable(JET_errSuccess) { }
ERR m_errDataStoreUnavailable;
};
// ================================================================
class DataSerializer
// ================================================================
//
// Serialize/deserialize a set of variables. The variables, and their
// serialization mechanisms are specified by the given DataBindings.
// Storage for the serialized data is given by the IDataStore object.
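//
//  Illustrative usage (names are hypothetical): populate a DataBindings with one
//  DataBindingOf per variable, then call ErrSaveBindings( pstore ) to persist the
//  current values and ErrLoadBindings( pstore ) to restore them from the same store.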
//
//-
{
public:
// Create a serializer to serialize/deserialize the given bindings
DataSerializer(const DataBindings& bindings);
~DataSerializer();
// Sets all bindings to default values
void SetBindingsToDefault();
// Save the bindings to the given store
ERR ErrSaveBindings(IDataStore * const pstore);
// Load the bindings from the given store
ERR ErrLoadBindings(const IDataStore * const pstore);
// Print all the variables
void Print(CPRINTF * const pcprintf);
private:
ERR ErrCreateAllColumns_(IDataStore * const pstore);
private:
const DataBindings& m_bindings;
};
// ================================================================
namespace TableDataStoreFactory
// ================================================================
//
// Used to create a TableDataStore. The table is created if necessary
// and a new record inserted if it is empty. A new SESID and TABLEID
// are created, which are closed in the destructor of the
// TableDataStore.
//
//-
{
ERR ErrOpenOrCreate(
INST * const pinst,
const wchar_t * wszDatabase,
const char * const szTable,
IDataStore ** ppstore);
ERR ErrOpenExisting(
INST * const pinst,
const wchar_t * wszDatabase,
const char * const szTable,
IDataStore ** ppstore);
}
// ================================================================
class MemoryDataStore : public IDataStore
// ================================================================
//
// Used for testing serialization. This simply stores data to an
// in-memory object.
//
//-
{
public:
MemoryDataStore();
~MemoryDataStore();
ERR ErrColumnExists(
const char * const szColumn,
__out bool * const pfExists,
__out JET_COLTYP * const pcoltyp) const;
ERR ErrCreateColumn(const char * const szColumn, const JET_COLTYP coltyp);
ERR ErrLoadDataFromColumn(
const char * const szColumn,
__out void * pv, __out size_t * const pcbActual,
const size_t cbMax) const;
ERR ErrPrepareUpdate();
ERR ErrStoreDataToColumn(const char * const szColumn, const void * const pv, const size_t cb);
ERR ErrCancelUpdate();
ERR ErrUpdate();
private:
// find the index of the given column, -1 if the column doesn't exist
INT IColumn(const char * const szColumn) const;
private:
bool m_fInUpdate;
static const INT m_ccolumnsMax = 64;
const char * m_rgszColumns[m_ccolumnsMax];
JET_COLTYP m_rgcoltyps[m_ccolumnsMax];
unique_ptr<BYTE> m_rgpbData[m_ccolumnsMax];
size_t m_rgcbData[m_ccolumnsMax];
INT m_ccolumns;
private: // not implemented
MemoryDataStore(const MemoryDataStore&);
MemoryDataStore& operator=(const MemoryDataStore&);
};
| 3,643 |
3,428 | <reponame>ghalimi/stdlib
{"id":"01638","group":"easy-ham-1","checksum":{"type":"MD5","value":"1025c8d81a3ce398f65fb401537214fb"},"text":"Return-Path: [email protected]\nDelivery-Date: Fri Sep 6 15:23:19 2002\nFrom: <EMAIL> (<NAME>)\nDate: Fri, 6 Sep 2002 10:23:19 -0400\nSubject: [Spambayes] test sets?\nReferences: <[email protected]>\n\t<<EMAIL>>\nMessage-ID: <<EMAIL>>\n\n\n>>>>> \"TP\" == <NAME> <<EMAIL>> writes:\n\n >> Any thought to wrapping up your spam and ham test sets for\n >> inclusion w/ the spambayes project?\n\n TP> I gave it all the thought it deserved <wink>. It would be\n TP> wonderful to get several people cranking on the same test\n TP> data, and I'm all in favor of that. OTOH, my Data/ subtree\n TP> currently has more than 35,000 files slobbering over 134\n TP> million bytes -- even if I had a place to put that much stuff,\n TP> I'm not sure my ISP would let me email it in one msg <wink>.\n\nCheck it into the spambayes project. SF's disks are cheap <wink>.\n\n-Barry\n"} | 401 |
480 | <filename>polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/core/datatype/IntervalType.java<gh_stars>100-1000
/*
* Copyright [2013-2021], Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.polardbx.optimizer.core.datatype;
import com.alibaba.polardbx.common.utils.time.old.DateUtils;
import com.taobao.tddl.common.utils.TddlToStringStyle;
import org.apache.commons.lang.builder.ToStringBuilder;
import java.time.LocalDateTime;
import java.util.Calendar;
/**
 * MySQL INTERVAL time type.
*
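 * For example (illustrative): an interval of 1 day and 2 hours corresponds to setDay(1)
 * and setHour(2); process(cal, 1) then adds that interval to the calendar, while
 * process(cal, -1) subtracts it.
 *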
* @author jianghang 2014-4-16 下午1:34:00
* @since 5.0.7
*/
public class IntervalType extends TimestampType {
private int year = 0;
private boolean isYearSet = false;
private int month = 0;
private boolean isMonthSet = false;
private int day = 0;
private boolean isDaySet = false;
private int hour = 0;
private boolean isHourSet = false;
private int minute = 0;
private boolean isMinuteSet = false;
private int second = 0;
private boolean isSecondSet = false;
private int microsecond = 0;
private boolean isMicrosecondSet = false;
private int milliSeconds = 0;
private boolean isMilliSecondSet = false;
private int factor = 1;
public void process(Calendar cal, int factor) {
factor = this.factor * factor;
if (isYearSet) {
cal.add(Calendar.YEAR, year * factor);
}
if (isMonthSet) {
cal.add(Calendar.MONTH, month * factor);
}
if (isDaySet) {
cal.add(Calendar.DAY_OF_YEAR, day * factor);
}
if (isHourSet) {
cal.add(Calendar.HOUR_OF_DAY, hour * factor);
}
if (isMinuteSet) {
cal.add(Calendar.MINUTE, minute * factor);
}
if (isSecondSet) {
cal.add(Calendar.SECOND, second * factor);
}
if (isMilliSecondSet) {
cal.add(Calendar.MILLISECOND, milliSeconds * factor);
}
        // Microsecond support is problematic here; only millisecond precision is applied.
if (isMicrosecondSet) {
cal.add(Calendar.MILLISECOND, (microsecond / 1000) * factor);
}
}
public LocalDateTime process(LocalDateTime time, int factor) {
factor = this.factor * factor;
if (isYearSet) {
time = time.plusYears(year * factor);
}
if (isMonthSet) {
time = time.plusMonths(month * factor);
}
if (isDaySet) {
time = time.plusDays(day * factor);
}
if (isHourSet) {
time = time.plusHours(hour * factor);
}
if (isMinuteSet) {
time = time.plusMinutes(minute * factor);
}
if (isSecondSet) {
time = time.plusSeconds(second * factor);
}
if (isMilliSecondSet) {
time = time.plusNanos(milliSeconds * factor * 1000000);
}
if (isMicrosecondSet) {
time = time.plusNanos(microsecond * factor * 1000);
}
return time;
}
public java.sql.Time process(java.sql.Time time, int factor) {
factor = this.factor * factor;
long ms = microsecond;
while (ms > 999999) {
ms = ms / 10L;
}
int maxHour = (day * 24 + hour);
boolean outRange = maxHour > DateUtils.MAX_HOUR;
if (outRange) {
maxHour = DateUtils.MAX_HOUR;
second = 59;
minute = 59;
microsecond = 0;
ms = 0;
}
long diff = factor * (maxHour * 60 * 60 * 1000L + minute * 60 * 1000L + second * 1000L + ms / 1000L);
time.setTime(time.getTime() + diff);
return time;
}
public int getYear() {
return year;
}
public void setYear(int year) {
this.year = year;
this.isYearSet = true;
}
public int getMonth() {
return month;
}
public void setMonth(int month) {
this.month = month;
this.isMonthSet = true;
}
public int getDay() {
return day;
}
public void setDay(int day) {
this.day = day;
this.isDaySet = true;
}
public int getHour() {
return hour;
}
public void setHour(int hour) {
this.hour = hour;
this.isHourSet = true;
}
public int getMinute() {
return minute;
}
public void setMinute(int minute) {
this.minute = minute;
this.isMinuteSet = true;
}
public int getSecond() {
return second;
}
public void setSecond(int second) {
this.second = second;
this.isSecondSet = true;
}
public int getMicrosecond() {
return microsecond;
}
public void setMicrosecond(int microsecond) {
this.microsecond = microsecond;
this.isMicrosecondSet = true;
}
public void setMilliSecond(int milliSeconds) {
this.milliSeconds = milliSeconds;
this.isMilliSecondSet = true;
}
public void setFactor(int factor) {
this.factor = factor;
}
@Override
public String toString() {
return ToStringBuilder.reflectionToString(this, TddlToStringStyle.DEFAULT_STYLE);
}
public boolean isFineGrainedTimeUintSet() {
return isMilliSecondSet || isHourSet || isMinuteSet || isSecondSet || isMicrosecondSet;
}
}
| 2,598 |
3,508 | package com.fishercoder.solutions;
public class _1134 {
public static class Solution1 {
public boolean isArmstrong(int N) {
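            // An Armstrong (narcissistic) number equals the sum of its own digits each raised
            // to the power of the number of digits, e.g. 153 = 1^3 + 5^3 + 3^3.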
int numOfDigits = 0;
int copyN = N;
while (copyN > 0) {
copyN /= 10;
numOfDigits++;
}
int sum = 0;
copyN = N;
while (N > 0) {
int digit = N % 10;
sum += Math.pow(digit, numOfDigits);
N /= 10;
}
return sum == copyN;
}
}
}
| 336 |
2,151 | <gh_stars>1000+
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/paint/paint_timing.h"
#include <memory>
#include <utility>
#include "third_party/blink/public/platform/web_layer_tree_view.h"
#include "third_party/blink/renderer/core/dom/document.h"
#include "third_party/blink/renderer/core/frame/local_dom_window.h"
#include "third_party/blink/renderer/core/frame/local_frame.h"
#include "third_party/blink/renderer/core/frame/local_frame_view.h"
#include "third_party/blink/renderer/core/loader/document_loader.h"
#include "third_party/blink/renderer/core/loader/interactive_detector.h"
#include "third_party/blink/renderer/core/loader/progress_tracker.h"
#include "third_party/blink/renderer/core/page/chrome_client.h"
#include "third_party/blink/renderer/core/page/page.h"
#include "third_party/blink/renderer/core/probe/core_probes.h"
#include "third_party/blink/renderer/core/timing/dom_window_performance.h"
#include "third_party/blink/renderer/core/timing/window_performance.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/histogram.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
#include "third_party/blink/renderer/platform/scheduler/public/frame_scheduler.h"
namespace blink {
namespace {
WindowPerformance* GetPerformanceInstance(LocalFrame* frame) {
WindowPerformance* performance = nullptr;
if (frame && frame->DomWindow()) {
performance = DOMWindowPerformance::performance(*frame->DomWindow());
}
return performance;
}
} // namespace
// static
const char PaintTiming::kSupplementName[] = "PaintTiming";
// static
PaintTiming& PaintTiming::From(Document& document) {
PaintTiming* timing = Supplement<Document>::From<PaintTiming>(document);
if (!timing) {
timing = new PaintTiming(document);
ProvideTo(document, timing);
}
return *timing;
}
void PaintTiming::MarkFirstPaint() {
// Test that m_firstPaint is non-zero here, as well as in setFirstPaint, so
// we avoid invoking monotonicallyIncreasingTime() on every call to
// markFirstPaint().
if (!first_paint_.is_null())
return;
SetFirstPaint(CurrentTimeTicks());
}
void PaintTiming::MarkFirstContentfulPaint() {
// Test that m_firstContentfulPaint is non-zero here, as well as in
// setFirstContentfulPaint, so we avoid invoking
// monotonicallyIncreasingTime() on every call to
// markFirstContentfulPaint().
if (!first_contentful_paint_.is_null())
return;
SetFirstContentfulPaint(CurrentTimeTicks());
}
void PaintTiming::MarkFirstTextPaint() {
if (!first_text_paint_.is_null())
return;
first_text_paint_ = CurrentTimeTicks();
SetFirstContentfulPaint(first_text_paint_);
RegisterNotifySwapTime(PaintEvent::kFirstTextPaint);
}
void PaintTiming::MarkFirstImagePaint() {
if (!first_image_paint_.is_null())
return;
first_image_paint_ = CurrentTimeTicks();
SetFirstContentfulPaint(first_image_paint_);
RegisterNotifySwapTime(PaintEvent::kFirstImagePaint);
}
void PaintTiming::SetFirstMeaningfulPaintCandidate(TimeTicks timestamp) {
if (!first_meaningful_paint_candidate_.is_null())
return;
first_meaningful_paint_candidate_ = timestamp;
if (GetFrame() && GetFrame()->View() && !GetFrame()->View()->IsAttached()) {
GetFrame()->GetFrameScheduler()->OnFirstMeaningfulPaint();
}
}
void PaintTiming::SetFirstMeaningfulPaint(
TimeTicks stamp,
TimeTicks swap_stamp,
FirstMeaningfulPaintDetector::HadUserInput had_input) {
DCHECK(first_meaningful_paint_.is_null());
DCHECK(first_meaningful_paint_swap_.is_null());
DCHECK(!stamp.is_null());
DCHECK(!swap_stamp.is_null());
TRACE_EVENT_MARK_WITH_TIMESTAMP2(
"loading,rail,devtools.timeline", "firstMeaningfulPaint", swap_stamp,
"frame", ToTraceValue(GetFrame()), "afterUserInput", had_input);
InteractiveDetector* interactive_detector(
InteractiveDetector::From(*GetSupplementable()));
if (interactive_detector) {
interactive_detector->OnFirstMeaningfulPaintDetected(swap_stamp, had_input);
}
// Notify FMP for UMA only if there's no user input before FMP, so that layout
// changes caused by user interactions wouldn't be considered as FMP.
if (had_input == FirstMeaningfulPaintDetector::kNoUserInput) {
first_meaningful_paint_ = stamp;
first_meaningful_paint_swap_ = swap_stamp;
NotifyPaintTimingChanged();
}
ReportUserInputHistogram(had_input);
}
void PaintTiming::ReportUserInputHistogram(
FirstMeaningfulPaintDetector::HadUserInput had_input) {
DEFINE_STATIC_LOCAL(EnumerationHistogram, had_user_input_histogram,
("PageLoad.Internal.PaintTiming."
"HadUserInputBeforeFirstMeaningfulPaint",
FirstMeaningfulPaintDetector::kHadUserInputEnumMax));
if (GetFrame() && GetFrame()->IsMainFrame())
had_user_input_histogram.Count(had_input);
}
void PaintTiming::NotifyPaint(bool is_first_paint,
bool text_painted,
bool image_painted) {
if (is_first_paint)
MarkFirstPaint();
if (text_painted)
MarkFirstTextPaint();
if (image_painted)
MarkFirstImagePaint();
fmp_detector_->NotifyPaint();
}
void PaintTiming::Trace(blink::Visitor* visitor) {
visitor->Trace(fmp_detector_);
Supplement<Document>::Trace(visitor);
}
PaintTiming::PaintTiming(Document& document)
: Supplement<Document>(document),
fmp_detector_(new FirstMeaningfulPaintDetector(this, document)) {}
LocalFrame* PaintTiming::GetFrame() const {
return GetSupplementable()->GetFrame();
}
void PaintTiming::NotifyPaintTimingChanged() {
if (GetSupplementable()->Loader())
GetSupplementable()->Loader()->DidChangePerformanceTiming();
}
void PaintTiming::SetFirstPaint(TimeTicks stamp) {
if (!first_paint_.is_null())
return;
first_paint_ = stamp;
RegisterNotifySwapTime(PaintEvent::kFirstPaint);
}
void PaintTiming::SetFirstContentfulPaint(TimeTicks stamp) {
if (!first_contentful_paint_.is_null())
return;
SetFirstPaint(stamp);
first_contentful_paint_ = stamp;
RegisterNotifySwapTime(PaintEvent::kFirstContentfulPaint);
}
void PaintTiming::RegisterNotifySwapTime(PaintEvent event) {
RegisterNotifySwapTime(
event, CrossThreadBind(&PaintTiming::ReportSwapTime,
WrapCrossThreadWeakPersistent(this), event));
}
void PaintTiming::RegisterNotifySwapTime(PaintEvent event,
ReportTimeCallback callback) {
// ReportSwapTime on layerTreeView will queue a swap-promise, the callback is
// called when the swap for current render frame completes or fails to happen.
if (!GetFrame() || !GetFrame()->GetPage())
return;
if (WebLayerTreeView* layerTreeView =
GetFrame()->GetPage()->GetChromeClient().GetWebLayerTreeView(
GetFrame())) {
layerTreeView->NotifySwapTime(ConvertToBaseCallback(std::move(callback)));
}
}
void PaintTiming::ReportSwapTime(PaintEvent event,
WebLayerTreeView::SwapResult result,
base::TimeTicks timestamp) {
// If the swap fails for any reason, we use the timestamp when the SwapPromise
// was broken. |result| == WebLayerTreeView::SwapResult::kDidNotSwapSwapFails
// usually means the compositor decided not swap because there was no actual
// damage, which can happen when what's being painted isn't visible. In this
// case, the timestamp will be consistent with the case where the swap
// succeeds, as they both capture the time up to swap. In other failure cases
// (aborts during commit), this timestamp is an improvement over the blink
// paint time, but does not capture some time we're interested in, e.g. image
// decoding.
//
// TODO(crbug.com/738235): Consider not reporting any timestamp when failing
// for reasons other than kDidNotSwapSwapFails.
ReportSwapResultHistogram(result);
switch (event) {
case PaintEvent::kFirstPaint:
SetFirstPaintSwap(timestamp);
return;
case PaintEvent::kFirstContentfulPaint:
SetFirstContentfulPaintSwap(timestamp);
return;
case PaintEvent::kFirstTextPaint:
SetFirstTextPaintSwap(timestamp);
return;
case PaintEvent::kFirstImagePaint:
SetFirstImagePaintSwap(timestamp);
return;
default:
NOTREACHED();
}
}
void PaintTiming::SetFirstPaintSwap(TimeTicks stamp) {
DCHECK(first_paint_swap_.is_null());
first_paint_swap_ = stamp;
probe::paintTiming(GetSupplementable(), "firstPaint",
TimeTicksInSeconds(first_paint_swap_));
WindowPerformance* performance = GetPerformanceInstance(GetFrame());
if (performance)
performance->AddFirstPaintTiming(first_paint_swap_);
NotifyPaintTimingChanged();
}
void PaintTiming::SetFirstContentfulPaintSwap(TimeTicks stamp) {
DCHECK(first_contentful_paint_swap_.is_null());
first_contentful_paint_swap_ = stamp;
probe::paintTiming(GetSupplementable(), "firstContentfulPaint",
TimeTicksInSeconds(first_contentful_paint_swap_));
WindowPerformance* performance = GetPerformanceInstance(GetFrame());
if (performance)
performance->AddFirstContentfulPaintTiming(first_contentful_paint_swap_);
if (GetFrame())
GetFrame()->Loader().Progress().DidFirstContentfulPaint();
NotifyPaintTimingChanged();
fmp_detector_->NotifyFirstContentfulPaint(first_contentful_paint_swap_);
}
void PaintTiming::SetFirstTextPaintSwap(TimeTicks stamp) {
DCHECK(first_text_paint_swap_.is_null());
first_text_paint_swap_ = stamp;
probe::paintTiming(GetSupplementable(), "firstTextPaint",
TimeTicksInSeconds(first_text_paint_swap_));
NotifyPaintTimingChanged();
}
void PaintTiming::SetFirstImagePaintSwap(TimeTicks stamp) {
DCHECK(first_image_paint_swap_.is_null());
first_image_paint_swap_ = stamp;
probe::paintTiming(GetSupplementable(), "firstImagePaint",
TimeTicksInSeconds(first_image_paint_swap_));
NotifyPaintTimingChanged();
}
void PaintTiming::ReportSwapResultHistogram(
const WebLayerTreeView::SwapResult result) {
DEFINE_STATIC_LOCAL(EnumerationHistogram, did_swap_histogram,
("PageLoad.Internal.Renderer.PaintTiming.SwapResult",
WebLayerTreeView::SwapResult::kSwapResultMax));
did_swap_histogram.Count(result);
}
} // namespace blink
| 3,924 |
335 | {
"word": "Testament",
"definitions": [
"A person's will, especially the part relating to personal property.",
"Something that serves as a sign or evidence of a specified fact, event, or quality.",
"(in biblical use) a covenant or dispensation.",
"A division of the Bible.",
"A copy of the New Testament."
],
"parts-of-speech": "Noun"
} | 142 |
365 | /*******************************************************************************
* Copyright 2009-2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
#pragma once
#include "IReader.h"
#ifdef LIBSNDFILE_PLUGIN
#define AUD_BUILD_PLUGIN
#endif
/**
* @file SndFileReader.h
* @ingroup plugin
* The SndFileReader class.
*/
#include <string>
#include <sndfile.h>
#include <memory>
AUD_NAMESPACE_BEGIN
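// Forward declaration; Buffer is only needed for the in-memory reading path.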
class Buffer;
/**
* This class reads a sound file via libsndfile.
*/
class AUD_PLUGIN_API SndFileReader : public IReader
{
private:
/**
* The current position in samples.
*/
int m_position;
/**
* The sample count in the file.
*/
int m_length;
/**
* Whether the file is seekable.
*/
bool m_seekable;
/**
* The specification of the audio data.
*/
Specs m_specs;
/**
* The sndfile.
*/
SNDFILE* m_sndfile;
/**
* The virtual IO structure for memory file reading.
*/
SF_VIRTUAL_IO m_vio;
/**
* The pointer to the memory file.
*/
std::shared_ptr<Buffer> m_membuffer;
/**
* The current reading pointer of the memory file.
*/
int m_memoffset;
// Functions for libsndfile virtual IO functionality
AUD_LOCAL static sf_count_t vio_get_filelen(void* user_data);
AUD_LOCAL static sf_count_t vio_seek(sf_count_t offset, int whence, void* user_data);
AUD_LOCAL static sf_count_t vio_read(void* ptr, sf_count_t count, void* user_data);
AUD_LOCAL static sf_count_t vio_tell(void* user_data);
// delete copy constructor and operator=
SndFileReader(const SndFileReader&) = delete;
SndFileReader& operator=(const SndFileReader&) = delete;
public:
/**
* Creates a new reader.
* \param filename The path to the file to be read.
* \exception Exception Thrown if the file specified does not exist or
* cannot be read with libsndfile.
*/
SndFileReader(std::string filename);
/**
* Creates a new reader.
* \param buffer The buffer to read from.
* \exception Exception Thrown if the buffer specified cannot be read
* with libsndfile.
*/
SndFileReader(std::shared_ptr<Buffer> buffer);
/**
* Destroys the reader and closes the file.
*/
virtual ~SndFileReader();
virtual bool isSeekable() const;
virtual void seek(int position);
virtual int getLength() const;
virtual int getPosition() const;
virtual Specs getSpecs() const;
virtual void read(int& length, bool& eos, sample_t* buffer);
};
AUD_NAMESPACE_END
| 994 |
852 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
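# Harvesting module that produces the RPC DCS (Detector Control System) summary.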
rpcDCSSummary = DQMEDHarvester("RPCDCSSummary",
NumberOfEndcapDisks = cms.untracked.int32(4),
)
| 142 |