import FWCore.ParameterSet.Config as cms
from RecoBTag.Skimming.btagDijet_EventContent_cff import *
btagDijetOutputModuleAODSIM = cms.OutputModule("PoolOutputModule",
AODSIMbtagDijetEventContent,
btagDijetEventSelection,
dataset = cms.untracked.PSet(
filterName = cms.untracked.string('btagDijetAODSIM'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('btagDijetAODSIM.root')
)
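# Hedged usage sketch (not part of the original fragment): a cms.Process would
# typically attach this output module through an EndPath. The process label and
# path name below are illustrative assumptions.
#
#   process = cms.Process("SKIM")
#   process.btagDijetOutputModuleAODSIM = btagDijetOutputModuleAODSIM
#   process.btagDijetOutPath = cms.EndPath(process.btagDijetOutputModuleAODSIM)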
package com.gitblit.wicket.pages;
import java.io.Serializable;
public class Language implements Serializable {
private static final long serialVersionUID = 1L;
final String name;
final String code;
public Language(String name, String code) {
this.name = name;
this.code = code;
}
@Override
public String toString() {
return name + " (" + code + ")";
}
}
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package ifc.text;
import lib.MultiMethodTest;
import com.sun.star.text.XTextField;
/**
* Testing <code>com.sun.star.text.XTextField</code>
* interface methods :
* <ul>
* <li><code> getPresentation()</code></li>
* </ul> <p>
* Test is multithread compliant. <p>
* @see com.sun.star.text.XTextField
*/
public class _XTextField extends MultiMethodTest {
public XTextField oObj = null;
/**
* Calls the method with <code>true</code> and <code>false</code>
* parameter. <p>
* Has <b>OK</b> status if a non-<code>null</code> value is
* returned in both cases.
*/
public void _getPresentation() {
boolean result = true;
// begin test here
log.println("getting presentetion with bShowCommand flag...");
result &= oObj.getPresentation(true) != null;
log.println("getting presentetion without bShowCommand flag...");
result &= oObj.getPresentation(false) != null;
tRes.tested( "getPresentation()", result );
} // end getPresentation()
}
package com.github.rawls238.scientist4j.metrics;
import io.dropwizard.metrics5.MetricRegistry;
import io.dropwizard.metrics5.Timer.Context;
import java.util.Arrays;
public class DropwizardMetricsProvider implements MetricsProvider<MetricRegistry> {
private MetricRegistry registry;
public DropwizardMetricsProvider() {
this(new MetricRegistry());
}
public DropwizardMetricsProvider(MetricRegistry metricRegistry) {
this.registry = metricRegistry;
}
@Override
public Timer timer(String... nameComponents) {
final io.dropwizard.metrics5.Timer timer = registry.timer(MetricRegistry.name(nameComponents[0], Arrays.copyOfRange(nameComponents, 1, nameComponents.length)));
return new Timer() {
long duration;
@Override
public void record(Runnable runnable) {
final Context context = timer.time();
try {
runnable.run();
} finally {
duration = context.stop();
}
}
@Override
public long getDuration() {
return duration;
}
};
}
@Override
public Counter counter(String... nameComponents) {
final io.dropwizard.metrics5.Counter counter = registry.counter(MetricRegistry.name(nameComponents[0], Arrays.copyOfRange(nameComponents, 1, nameComponents.length)));
return new Counter() {
@Override
public void increment() {
counter.inc();
}
};
}
@Override
public MetricRegistry getRegistry() {
return this.registry;
}
@Override
public void setRegistry(MetricRegistry registry) {
this.registry = registry;
}
}
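// Hedged usage sketch (assumed, not part of this file): timing a block of work
// and bumping a counter through the abstractions above; doControlWork() is an
// illustrative placeholder.
//
//   MetricsProvider<MetricRegistry> provider = new DropwizardMetricsProvider();
//   Timer timer = provider.timer("scientist", "control");
//   timer.record(() -> doControlWork());
//   long elapsedNanos = timer.getDuration(); // nanoseconds, per Dropwizard's Context.stop()
//   provider.counter("scientist", "mismatch").increment();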
# File: parlai/crowdsourcing/tasks/model_chat/utils.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import base64
import datetime
import json
import os
import random
import threading
import time
import unittest
from io import BytesIO
from typing import Any, Dict, Optional, Tuple
from PIL import Image
from parlai.core.message import Message
from parlai.core.metrics import Metric
from parlai.core.params import ParlaiParser
from parlai.crowdsourcing.utils.tests import AbstractParlAIChatTest
from parlai.tasks.blended_skill_talk.agents import ContextGenerator
class Compatibility(object):
"""
Class to address backward compatibility issues with older ParlAI models.
"""
@staticmethod
def backward_compatible_force_set(act, key, value):
if isinstance(act, Message):
act.force_set(key, value)
elif isinstance(act, dict):
act[key] = value
else:
raise Exception(f'Unknown type of act: {type(act)}')
return act
@staticmethod
def maybe_fix_act(incompatible_act):
if 'id' not in incompatible_act:
new_act = Compatibility.backward_compatible_force_set(
incompatible_act, 'id', 'NULL_ID'
)
return new_act
return incompatible_act
@staticmethod
def serialize_bot_message(bot_message):
if 'metrics' in bot_message:
metric_report = bot_message['metrics']
bot_message['metrics'] = {
k: v.value() if isinstance(v, Metric) else v
for k, v in metric_report.items()
}
return bot_message
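# Hedged usage sketch (not in the original file): normalizing a raw act dict
# before serialization; the literal act below is illustrative.
#
#   act = {'text': 'hello'}
#   act = Compatibility.maybe_fix_act(act)          # fills in id='NULL_ID'
#   act = Compatibility.serialize_bot_message(act)  # unwraps Metric objects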
class ImageStack:
"""
Represents a stack of images to run through.
Each element of the stack contains a list of the workers who have seen the given
image for a given model. The stack ensures that no worker will see the same image
twice.
"""
def __init__(self, opt):
# Input params
self.num_images = opt['num_images']
self.models = opt['models']
self.evals_per_combo = opt.get('evals_per_image_model_combo', 1)
# Paths
self.save_folder = opt['stack_folder']
self.save_name = 'stack.json'
self.backup_save_folder = os.path.join(self.save_folder, '_stack_backups')
for folder in [self.save_folder, self.backup_save_folder]:
os.makedirs(folder, exist_ok=True)
self.save_path = os.path.join(self.save_folder, self.save_name)
# Saving params
self.save_stack_interval = 60
self.last_save_time = time.time()
self.save_lock = threading.RLock()
self.next_image_lock = threading.RLock()
# Things that will be defined later
self.stack = None
self.pointer = self.build_or_load_stack()
self.conditionally_save_stack()
def load_stack(self) -> int:
print(f'[ Loading stack from file... {self.save_path}]')
with open(self.save_path, 'r') as f:
self.stack = json.load(f)
pointer = self.get_pointer()
# Check that the number of images is the same as before
if len(self.stack) != self.num_images:
raise ValueError(
f'The loaded stack has {len(self.stack):d} images instead of the '
f'desired {self.num_images:d}!'
)
# Make sure that the set of models is correct (i.e. in case we are loading in an
# older obsolete version of the stack)
if set(self.stack[0].keys()) == set(self.models):
return pointer
else:
input_ = input(
'\n\nWARNING: the currently saved stack has a different set of test '
'cases than what is currently being used. Do you want to back up this '
'stack file and stretch the stack to fit the new set of models? '
'(y/n) '
)
if input_.lower().strip() == 'y':
self.save_stack_backup()
return self.stretch_stack()
else:
raise ValueError('Mismatch in set of models in stack!')
def get_pointer(self) -> int:
"""
Return the index of the first entry in the stack that needs more conversations.
"""
pointer = 0
found = False
while not found:
if self._need_more_convos(self.stack[pointer]):
found = True
else:
pointer += 1
return pointer
def stretch_stack(self) -> int:
"""
"Stretch" the stack to handle the current set of models.
The goal is to preserve as many existing stack entries as possible while
matching the set of models in the stack with the new set of models in
self.models:
- (1) All stack entries belonging to models that are still in self.models will
be kept
- (2) All models not in self.models will be removed from the stack
- (3) All models in self.models not in the stack will be added to the stack
Return the new pointer value.
"""
# Stretch the stack
existing_models = set(self.stack[0].keys())
new_models = set(self.models)
models_to_add = new_models.difference(existing_models)
models_to_remove = existing_models.difference(new_models)
print('\nStarting to stretch the stack.')
print('Models to add: ', models_to_add)
print('Models to remove: ', models_to_remove)
models_to_add_list = sorted(list(models_to_add))
for stack_idx, orig_workers_by_model in enumerate(self.stack):
surviving_workers_by_model = {
model: workers
for model, workers in orig_workers_by_model.items()
if model in new_models
}
new_workers_by_model = {model: [] for model in models_to_add_list}
self.stack[stack_idx] = {
**surviving_workers_by_model,
**new_workers_by_model,
}
assert set(self.stack[stack_idx]) == new_models
pointer = self.get_pointer()
return pointer
def conditionally_save_stack(self):
if time.time() - self.last_save_time > self.save_stack_interval:
self.save_stack()
def save_stack(self):
"""
Save the stack to its regular location.
Mark down the save time.
"""
self._save_stack_to_path(self.save_path)
self.last_save_time = time.time()
def save_stack_backup(self):
"""
Save a backup copy of the stack to a path with a datetime suffix.
"""
suffix = datetime.datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
backup_path = os.path.join(
self.backup_save_folder, f'{self.save_name}.{suffix}'
)
self._save_stack_to_path(backup_path)
def _save_stack_to_path(self, path: str):
"""
Save stack to the specified path.
"""
with self.save_lock:
print(f'Saving all data to {path}.')
data = json.dumps(self.stack)
with open(path, 'w') as f:
f.write(data)
def _need_more_convos(self, workers_by_model: Dict[str, list]) -> bool:
"""
Returns True if, for the given image, we need at least 1 more conversation with
any of the models that we're testing.
"""
return any(
len(workers) < self.evals_per_combo for workers in workers_by_model.values()
)
def build_stack(self) -> int:
print('[ Building stack... ]')
self.stack = [
{model: [] for model in self.models} for _ in range(self.num_images)
]
return 0 # The pointer starts at 0
def build_or_load_stack(self) -> int:
# Check if this stack has been partially completed
if os.path.isfile(self.save_path):
return self.load_stack()
else:
return self.build_stack()
def _get_stack_entry(self, idx: int) -> Optional[Dict[str, Any]]:
"""
Return a stack entry if the input index is less than the length of the stack;
return None otherwise.
"""
if idx < len(self.stack):
return self.stack[idx]
else:
return None
def get_next_image(self, worker: str) -> Tuple[int, str, bool]:
"""
Returns the image name, persona strings, model name, etc. for the next HIT.
Finds an image that we don't currently have enough conversations for, ensuring
that the given worker will not have had a conversation employing this image
before, with any model. Returns the index of the given image, the name of the
model with which to have a conversation, and a flag indicating whether there are
no more image pairs to show this worker.
"""
with self.next_image_lock:
no_more_work = False
# Find the next entry in the stack that needs more workers
workers_by_model = self._get_stack_entry(self.pointer)
while workers_by_model is not None and not self._need_more_convos(
workers_by_model
):
self.pointer += 1
print(f'Pointer at {self.pointer}')
workers_by_model = self._get_stack_entry(self.pointer)
# Find the next entry in the stack that the worker hasn't completed before
worker_pointer = self.pointer
while workers_by_model is not None and (
any(worker in workers for workers in workers_by_model.values())
or not self._need_more_convos(workers_by_model)
):
print(f'Pointer for worker {worker} at {self.pointer}')
worker_pointer += 1
workers_by_model = self._get_stack_entry(worker_pointer)
# Deal with the case in which no entry is suitable for the worker
if workers_by_model is None:
print(f'WARNING: getting a random stack for worker {worker}.')
worker_pointer = random.randrange(len(self.stack))
workers_by_model = self.stack[worker_pointer]
no_more_work = True
# We'll want to assign this worker a qualification to prevent more work
self.conditionally_save_stack()
# Pick out a model for this worker, among the ones that we need more
# conversations for
available_models = [
model
for model, workers in workers_by_model.items()
if len(workers) < self.evals_per_combo
]
if len(available_models) == 0:
print(
f'WARNING: no more convos needed for any model for '
f'{worker_pointer:d}. Picking a random model for worker '
f'{worker}.'
)
available_models = list(workers_by_model.keys())
print('Available models: ' + ', '.join(available_models))
chosen_model = random.choice(available_models)
print(
f'Retrieving stack {worker_pointer:d} for worker {worker} and test '
f'case {chosen_model}.'
)
workers_by_model[chosen_model].append(worker)
return worker_pointer, chosen_model, no_more_work
def remove_worker_from_stack(self, worker: str, stack_idx: int):
if any(worker in workers for workers in self.stack[stack_idx].values()):
removed = False
print(f'Removing worker {worker} from stack {stack_idx:d}.')
for this_models_workers in self.stack[stack_idx].values():
if worker in this_models_workers:
this_models_workers.remove(worker)
removed = True
assert removed is True
if stack_idx < self.pointer:
print(f'Moving pointer from {self.pointer:d} to {stack_idx:d}.')
self.pointer = stack_idx
else:
raise ValueError(f'Worker {worker} not found in stack {stack_idx:d}!')
class AbstractModelChatTest(AbstractParlAIChatTest, unittest.TestCase):
"""
Abstract test class for testing model chat code.
"""
def _check_output_key(self, key: str, actual_value: Any, expected_value: Any):
"""
Special logic for handling the 'final_chat_data' key.
"""
if key == 'final_chat_data':
self._check_final_chat_data(
actual_value=actual_value, expected_value=expected_value
)
else:
super()._check_output_key(
key=key, actual_value=actual_value, expected_value=expected_value
)
def _check_final_chat_data(
self, actual_value: Dict[str, Any], expected_value: Dict[str, Any]
):
"""
Check the actual and expected values of the final chat data.
"""
for key_inner, expected_value_inner in expected_value.items():
if key_inner == 'dialog':
assert len(actual_value[key_inner]) == len(expected_value_inner)
for actual_message, expected_message in zip(
actual_value[key_inner], expected_value_inner
):
self.assertEqual(
{k: v for k, v in actual_message.items() if k != 'message_id'},
{
k: v
for k, v in expected_message.items()
if k != 'message_id'
},
)
elif key_inner == 'task_description':
for (key_inner2, expected_value_inner2) in expected_value_inner.items():
if key_inner2 == 'model_file':
pass
# The path to the model file depends on the random
# tmpdir
elif key_inner2 == 'model_opt':
keys_to_ignore = [
'datapath',
'dict_file',
'model_file',
'override',
'parlai_home',
'starttime',
]
# These paths depend on the random tmpdir and the host
# machine
for (
key_inner3,
expected_value_inner3,
) in expected_value_inner2.items():
if key_inner3 in keys_to_ignore:
pass
else:
self.assertEqual(
actual_value[key_inner][key_inner2][key_inner3],
expected_value_inner3,
f'Error in key {key_inner3}!',
)
else:
self.assertEqual(
actual_value[key_inner][key_inner2],
expected_value_inner2,
f'Error in key {key_inner2}!',
)
else:
self.assertEqual(
actual_value[key_inner],
expected_value_inner,
f'Error in key {key_inner}!',
)
def get_context_generator(
override_opt: Optional[Dict[str, Any]] = None
) -> ContextGenerator:
"""
Return an object to return BlendedSkillTalk-style context info (personas, etc.).
"""
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
if override_opt is not None:
argparser.set_params(**override_opt)
opt = argparser.parse_args([])
context_generator = ContextGenerator(opt, datatype='test', seed=0)
# We pull from the test set so that the model can't regurgitate
# memorized conversations
return context_generator
def get_image_src(
image: Optional[Image.Image] = None, path: Optional[str] = None
) -> str:
"""
Given an image or the path to an image, return a string of the encoded image that
can be used as the src field in an HTML img tag.
"""
if image is None:
image = Image.open(path)
rgb_image = image.convert('RGB')
buffered = BytesIO()
rgb_image.save(buffered, format='JPEG')
encoded = str(base64.b64encode(buffered.getvalue()).decode('ascii'))
image_src = 'data:image/jpeg;base64,' + encoded
return image_src
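# Hedged usage sketch (not part of the original module): embedding a local
# image in an HTML <img> tag; 'photo.jpg' is an illustrative path.
#
#   src = get_image_src(path='photo.jpg')
#   html = f'<img src="{src}">'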
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: statistic-meta.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='statistic-meta.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\022StatisticMetaProto',
serialized_pb=b'\n\x14statistic-meta.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"e\n\rStatisticMeta\x12\x12\n\nstatistics\x18\x01 \x03(\t\x12\x16\n\x0estatic_columns\x18\x02 \x03(\t\x12\x16\n\x0equantile_error\x18\x03 \x01(\x01\x12\x10\n\x08need_run\x18\x04 \x01(\x08\x42\x14\x42\x12StatisticMetaProtob\x06proto3'
)
_STATISTICMETA = _descriptor.Descriptor(
name='StatisticMeta',
full_name='com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='statistics', full_name='com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta.statistics', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='static_columns', full_name='com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta.static_columns', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantile_error', full_name='com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta.quantile_error', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='need_run', full_name='com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta.need_run', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=165,
)
DESCRIPTOR.message_types_by_name['StatisticMeta'] = _STATISTICMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StatisticMeta = _reflection.GeneratedProtocolMessageType('StatisticMeta', (_message.Message,), {
'DESCRIPTOR' : _STATISTICMETA,
'__module__' : 'statistic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.StatisticMeta)
})
_sym_db.RegisterMessage(StatisticMeta)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
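# Hedged usage sketch (assumed, not part of the generated file): building and
# round-tripping the message defined above; the field values are illustrative.
#
#   meta = StatisticMeta(statistics=['mean', 'stddev'],
#                        static_columns=['x1'],
#                        quantile_error=0.001,
#                        need_run=True)
#   payload = meta.SerializeToString()
#   restored = StatisticMeta.FromString(payload)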
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/md5_helper.h"
#include "test/util.h"
#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
#endif
namespace {
#if CONFIG_WEBM_IO
const int kLegacyByteAlignment = 0;
const int kLegacyYPlaneByteAlignment = 32;
const int kNumPlanesToCheck = 3;
const char kVP9TestFile[] = "vp90-2-02-size-lf-1920x1080.webm";
const char kVP9Md5File[] = "vp90-2-02-size-lf-1920x1080.webm.md5";
struct ByteAlignmentTestParam {
int byte_alignment;
vpx_codec_err_t expected_value;
bool decode_remaining;
};
const ByteAlignmentTestParam kBaTestParams[] = {
{ kLegacyByteAlignment, VPX_CODEC_OK, true },
{ 32, VPX_CODEC_OK, true },
{ 64, VPX_CODEC_OK, true },
{ 128, VPX_CODEC_OK, true },
{ 256, VPX_CODEC_OK, true },
{ 512, VPX_CODEC_OK, true },
{ 1024, VPX_CODEC_OK, true },
{ 1, VPX_CODEC_INVALID_PARAM, false },
{ -2, VPX_CODEC_INVALID_PARAM, false },
{ 4, VPX_CODEC_INVALID_PARAM, false },
{ 16, VPX_CODEC_INVALID_PARAM, false },
{ 255, VPX_CODEC_INVALID_PARAM, false },
{ 2048, VPX_CODEC_INVALID_PARAM, false },
};
// Class for testing byte alignment of reference buffers.
class ByteAlignmentTest
: public ::testing::TestWithParam<ByteAlignmentTestParam> {
protected:
ByteAlignmentTest() : video_(NULL), decoder_(NULL), md5_file_(NULL) {}
virtual void SetUp() {
video_ = new libvpx_test::WebMVideoSource(kVP9TestFile);
ASSERT_TRUE(video_ != NULL);
video_->Init();
video_->Begin();
const vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
OpenMd5File(kVP9Md5File);
}
virtual void TearDown() {
if (md5_file_ != NULL) fclose(md5_file_);
delete decoder_;
delete video_;
}
void SetByteAlignment(int byte_alignment, vpx_codec_err_t expected_value) {
decoder_->Control(VP9_SET_BYTE_ALIGNMENT, byte_alignment, expected_value);
}
vpx_codec_err_t DecodeOneFrame(int byte_alignment_to_check) {
const vpx_codec_err_t res =
decoder_->DecodeFrame(video_->cxdata(), video_->frame_size());
CheckDecodedFrames(byte_alignment_to_check);
if (res == VPX_CODEC_OK) video_->Next();
return res;
}
vpx_codec_err_t DecodeRemainingFrames(int byte_alignment_to_check) {
for (; video_->cxdata() != NULL; video_->Next()) {
const vpx_codec_err_t res =
decoder_->DecodeFrame(video_->cxdata(), video_->frame_size());
if (res != VPX_CODEC_OK) return res;
CheckDecodedFrames(byte_alignment_to_check);
}
return VPX_CODEC_OK;
}
private:
// Check if |data| is aligned to |byte_alignment_to_check|.
// |byte_alignment_to_check| must be a power of 2.
void CheckByteAlignment(const uint8_t *data, int byte_alignment_to_check) {
ASSERT_EQ(0u, reinterpret_cast<size_t>(data) % byte_alignment_to_check);
}
// Iterate through the planes of the decoded frames and check for
// alignment based off |byte_alignment_to_check|.
void CheckDecodedFrames(int byte_alignment_to_check) {
libvpx_test::DxDataIterator dec_iter = decoder_->GetDxData();
const vpx_image_t *img;
// Get decompressed data
while ((img = dec_iter.Next()) != NULL) {
if (byte_alignment_to_check == kLegacyByteAlignment) {
CheckByteAlignment(img->planes[0], kLegacyYPlaneByteAlignment);
} else {
for (int i = 0; i < kNumPlanesToCheck; ++i) {
CheckByteAlignment(img->planes[i], byte_alignment_to_check);
}
}
CheckMd5(*img);
}
}
// TODO(fgalligan): Move the MD5 testing code into another class.
void OpenMd5File(const std::string &md5_file_name_) {
md5_file_ = libvpx_test::OpenTestDataFile(md5_file_name_);
ASSERT_TRUE(md5_file_ != NULL)
<< "MD5 file open failed. Filename: " << md5_file_name_;
}
void CheckMd5(const vpx_image_t &img) {
ASSERT_TRUE(md5_file_ != NULL);
char expected_md5[33];
char junk[128];
// Read correct md5 checksums.
const int res = fscanf(md5_file_, "%s %s", expected_md5, junk);
ASSERT_NE(EOF, res) << "Read md5 data failed";
expected_md5[32] = '\0';
::libvpx_test::MD5 md5_res;
md5_res.Add(&img);
const char *const actual_md5 = md5_res.Get();
// Check md5 match.
ASSERT_STREQ(expected_md5, actual_md5) << "MD5 checksums don't match";
}
libvpx_test::WebMVideoSource *video_;
libvpx_test::VP9Decoder *decoder_;
FILE *md5_file_;
};
TEST_F(ByteAlignmentTest, SwitchByteAlignment) {
const int num_elements = 14;
const int byte_alignments[] = { 0, 32, 64, 128, 256, 512, 1024,
0, 1024, 32, 512, 64, 256, 128 };
for (int i = 0; i < num_elements; ++i) {
SetByteAlignment(byte_alignments[i], VPX_CODEC_OK);
ASSERT_EQ(VPX_CODEC_OK, DecodeOneFrame(byte_alignments[i]));
}
SetByteAlignment(byte_alignments[0], VPX_CODEC_OK);
ASSERT_EQ(VPX_CODEC_OK, DecodeRemainingFrames(byte_alignments[0]));
}
TEST_P(ByteAlignmentTest, TestAlignment) {
const ByteAlignmentTestParam t = GetParam();
SetByteAlignment(t.byte_alignment, t.expected_value);
if (t.decode_remaining) {
ASSERT_EQ(VPX_CODEC_OK, DecodeRemainingFrames(t.byte_alignment));
}
}
INSTANTIATE_TEST_CASE_P(Alignments, ByteAlignmentTest,
::testing::ValuesIn(kBaTestParams));
#endif // CONFIG_WEBM_IO
} // namespace
import _plotly_utils.basevalidators
class ChoroplethmapboxValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self,
plotly_name="choroplethmapbox",
parent_name="layout.template.data",
**kwargs
):
super(ChoroplethmapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Choroplethmapbox"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
// test/common/buffer/zero_copy_input_stream_test.cc
#include "source/common/buffer/buffer_impl.h"
#include "source/common/buffer/zero_copy_input_stream_impl.h"
#include "test/common/buffer/utility.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace Envoy {
namespace Buffer {
namespace {
class ZeroCopyInputStreamTest : public testing::Test {
public:
ZeroCopyInputStreamTest() {
Buffer::OwnedImpl buffer{"abcd"};
stream_.move(buffer);
}
std::string slice_data_{"abcd"};
ZeroCopyInputStreamImpl stream_;
const void* data_;
int size_;
};
TEST_F(ZeroCopyInputStreamTest, Move) {
Buffer::OwnedImpl buffer{"abcd"};
stream_.move(buffer);
EXPECT_EQ(0, buffer.length());
}
TEST_F(ZeroCopyInputStreamTest, Next) {
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, size_);
EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_));
}
TEST_F(ZeroCopyInputStreamTest, TwoSlices) {
// Make content larger than 512 bytes so it would not be coalesced when
// moved into the stream_ buffer.
Buffer::OwnedImpl buffer(std::string(1024, 'A'));
stream_.move(buffer);
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, size_);
EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_));
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(1024, size_);
EXPECT_THAT(absl::string_view(static_cast<const char*>(data_), size_),
testing::Each(testing::AllOf('A')));
}
TEST_F(ZeroCopyInputStreamTest, BackUp) {
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, size_);
stream_.BackUp(3);
EXPECT_EQ(1, stream_.ByteCount());
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(3, size_);
EXPECT_EQ(0, memcmp("bcd", data_, size_));
EXPECT_EQ(4, stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamTest, BackUpFull) {
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, size_);
stream_.BackUp(4);
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, size_);
EXPECT_EQ(0, memcmp("abcd", data_, size_));
EXPECT_EQ(4, stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamTest, ByteCount) {
EXPECT_EQ(0, stream_.ByteCount());
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(4, stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamTest, Finish) {
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(0, size_);
stream_.finish();
EXPECT_FALSE(stream_.Next(&data_, &size_));
}
class ZeroCopyInputStreamSkipTest : public testing::Test {
public:
ZeroCopyInputStreamSkipTest() {
Buffer::OwnedImpl buffer;
buffer.addBufferFragment(buffer1_);
buffer.addBufferFragment(buffer2_);
buffer.addBufferFragment(buffer3_);
buffer.addBufferFragment(buffer4_);
stream_.move(buffer);
}
const std::string slice1_{"This is the first slice of the message."};
const std::string slice2_{"This is the second slice of the message."};
const std::string slice3_{"This is the third slice of the message."};
const std::string slice4_{"This is the fourth slice of the message."};
BufferFragmentImpl buffer1_{slice1_.data(), slice1_.size(), nullptr};
BufferFragmentImpl buffer2_{slice2_.data(), slice2_.size(), nullptr};
BufferFragmentImpl buffer3_{slice3_.data(), slice3_.size(), nullptr};
BufferFragmentImpl buffer4_{slice4_.data(), slice4_.size(), nullptr};
const size_t total_bytes_{slice1_.size() + slice2_.size() + slice3_.size() + slice4_.size()};
ZeroCopyInputStreamImpl stream_;
const void* data_;
int size_;
// Convert data_ buffer into a string
absl::string_view dataString() const {
return absl::string_view{reinterpret_cast<const char*>(data_), static_cast<size_t>(size_)};
}
};
TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstPartialSlice) {
// Only skip the 10 bytes in the first slice.
constexpr int skip_count = 10;
EXPECT_TRUE(stream_.Skip(skip_count));
EXPECT_EQ(skip_count, stream_.ByteCount());
// Read the first slice
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice1_.size() - skip_count, size_);
EXPECT_EQ(slice1_.substr(skip_count), dataString());
EXPECT_EQ(slice1_.size(), stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstFullSlice) {
// Skip the full first slice
EXPECT_TRUE(stream_.Skip(slice1_.size()));
EXPECT_EQ(slice1_.size(), stream_.ByteCount());
// Read the second slice
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice2_.size(), size_);
EXPECT_EQ(slice2_, dataString());
EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamSkipTest, BackUpAndSkipToEndOfSlice) {
// Read the first slice, back up 10 bytes, skip 10 bytes to the end of the first slice.
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice1_.size(), size_);
EXPECT_EQ(slice1_, dataString());
constexpr int backup_count = 10;
stream_.BackUp(backup_count);
EXPECT_TRUE(stream_.Skip(backup_count));
EXPECT_EQ(slice1_.size(), stream_.ByteCount());
// Next read is the second slice
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice2_.size(), size_);
EXPECT_EQ(slice2_, dataString());
EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossTwoSlices) {
// Read the first slice, back up 10 bytes, skip 15 bytes; 5 bytes into the second slice.
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice1_.size(), size_);
EXPECT_EQ(slice1_, dataString());
constexpr int backup_count = 10; // the backup bytes to the end of first slice.
constexpr int skip_count = 5; // The skip bytes in the second slice
stream_.BackUp(backup_count);
EXPECT_TRUE(stream_.Skip(backup_count + skip_count));
EXPECT_EQ(slice1_.size() + skip_count, stream_.ByteCount());
// Read the remaining second slice
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice2_.size() - skip_count, size_);
EXPECT_EQ(slice2_.substr(skip_count), dataString());
EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossThreeSlices) {
// Read the first slice, back up 10 bytes, skip 10 + slice2.size + 5 bytes; 5 bytes into the third slice.
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice1_.size(), size_);
EXPECT_EQ(slice1_, dataString());
constexpr int backup_count = 10; // the backup bytes to the end of first slice.
constexpr int skip_count = 5; // The skip bytes in the third slice
stream_.BackUp(backup_count);
EXPECT_TRUE(stream_.Skip(backup_count + slice2_.size() + skip_count));
EXPECT_EQ(slice1_.size() + slice2_.size() + skip_count, stream_.ByteCount());
// Read the remaining third slice
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice3_.size() - skip_count, size_);
EXPECT_EQ(slice3_.substr(skip_count), dataString());
EXPECT_EQ(slice1_.size() + slice2_.size() + slice3_.size(), stream_.ByteCount());
}
TEST_F(ZeroCopyInputStreamSkipTest, SkipToEndOfBuffer) {
// Failed to skip one extra byte
EXPECT_FALSE(stream_.Skip(total_bytes_ + 1));
EXPECT_TRUE(stream_.Skip(total_bytes_));
EXPECT_EQ(total_bytes_, stream_.ByteCount());
// Failed to skip one extra byte
EXPECT_FALSE(stream_.Skip(1));
}
TEST_F(ZeroCopyInputStreamSkipTest, ReadFirstSkipToTheEnd) {
// Read the first slice, back up 10 bytes, skip to the end of the buffer
EXPECT_TRUE(stream_.Next(&data_, &size_));
EXPECT_EQ(slice1_.size(), size_);
EXPECT_EQ(slice1_, dataString());
constexpr int backup_count = 10; // the backup bytes to the end of first slice.
stream_.BackUp(backup_count);
EXPECT_TRUE(stream_.Skip(total_bytes_ - slice1_.size() + backup_count));
EXPECT_EQ(total_bytes_, stream_.ByteCount());
// Failed to skip one extra byte
EXPECT_FALSE(stream_.Skip(1));
}
} // namespace
} // namespace Buffer
} // namespace Envoy
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_MARKER_MARKER_CONTROLLER_H_
#define ASH_MARKER_MARKER_CONTROLLER_H_
#include "ash/ash_export.h"
#include "ash/fast_ink/fast_ink_pointer_controller.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "base/observer_list_types.h"
#include "ui/views/widget/unique_widget_ptr.h"
namespace ash {
class HighlighterView;
// A checked observer which receives notification of changes to the marker
// activation state.
class ASH_EXPORT MarkerObserver : public base::CheckedObserver {
public:
virtual void OnMarkerStateChanged(bool enabled) {}
};
// Controller for the Marker functionality. Enables/disables Marker as well as
// receives points and passes them off to be rendered.
class ASH_EXPORT MarkerController : public fast_ink::FastInkPointerController {
public:
MarkerController();
MarkerController(const MarkerController&) = delete;
MarkerController& operator=(const MarkerController&) = delete;
~MarkerController() override;
static MarkerController* Get();
// Adds/removes the specified `observer`.
void AddObserver(MarkerObserver* observer);
void RemoveObserver(MarkerObserver* observer);
// Clears marker pointer.
void Clear();
// fast_ink::FastInkPointerController:
void SetEnabled(bool enabled) override;
private:
friend class MarkerControllerTestApi;
// Destroys `marker_view_widget_`, if it exists.
void DestroyMarkerView();
// Returns the marker view in use, or nullptr.
// TODO(llin): Consider renaming HighlighterView to DrawingView.
HighlighterView* GetMarkerView();
// Notifies observers when state changed.
void NotifyStateChanged(bool enabled);
// fast_ink::FastInkPointerController:
views::View* GetPointerView() const override;
void CreatePointerView(base::TimeDelta presentation_delay,
aura::Window* root_window) override;
void UpdatePointerView(ui::TouchEvent* event) override;
void UpdatePointerView(ui::MouseEvent* event) override;
void DestroyPointerView() override;
bool CanStartNewGesture(ui::LocatedEvent* event) override;
bool ShouldProcessEvent(ui::LocatedEvent* event) override;
// `marker_view_widget_` will only hold an instance when the Marker is enabled
// and activated (pressed or dragged) and until cleared.
views::UniqueWidgetPtr marker_view_widget_;
HighlighterView* marker_view_ = nullptr;
base::ObserverList<MarkerObserver> observers_;
base::WeakPtrFactory<MarkerController> weak_factory_{this};
};
} // namespace ash
#endif // ASH_MARKER_MARKER_CONTROLLER_H_
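// Hedged usage sketch (assumed, not part of this header): enabling the marker
// and observing activation-state changes; LogObserver is an illustrative class.
//
//   class LogObserver : public ash::MarkerObserver {
//    public:
//     void OnMarkerStateChanged(bool enabled) override { /* react here */ }
//   };
//   LogObserver observer;
//   ash::MarkerController::Get()->AddObserver(&observer);
//   ash::MarkerController::Get()->SetEnabled(true);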
// sagan-site/src/main/java/sagan/site/webapi/repository/RepositoryMetadataController.java
package sagan.site.webapi.repository;
import java.util.Arrays;
import sagan.site.projects.Repository;
import sagan.site.support.ResourceNotFoundException;
import org.springframework.hateoas.CollectionModel;
import org.springframework.hateoas.EntityModel;
import org.springframework.hateoas.MediaTypes;
import org.springframework.hateoas.server.ExposesResourceFor;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * Exposes {@link Repository} metadata as HAL+JSON resources.
 */
@RestController
@RequestMapping(path = "/api/repositories", produces = MediaTypes.HAL_JSON_VALUE)
@ExposesResourceFor(RepositoryMetadata.class)
public class RepositoryMetadataController {
private final RepositoryMetadataAssembler resourceAssembler;
public RepositoryMetadataController(RepositoryMetadataAssembler resourceAssembler) {
this.resourceAssembler = resourceAssembler;
}
@GetMapping("")
public CollectionModel<RepositoryMetadata> listRepositories() {
return this.resourceAssembler.toCollectionModel(Arrays.asList(Repository.values()));
}
@GetMapping("/{id}")
public EntityModel<RepositoryMetadata> showRepository(@PathVariable String id) {
Repository repository = Arrays.stream(Repository.values())
.filter(r -> r.getId().equals(id))
.findFirst()
.orElseThrow(() -> new ResourceNotFoundException("No artifact repository found with id: " + id));
return EntityModel.of(this.resourceAssembler.toModel(repository));
}
}
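// Hedged request sketch (assumed from the mappings above): the controller
// serves HAL+JSON at these paths; 'spring-releases' is an illustrative id.
//
//   GET /api/repositories                  -> collection of all repositories
//   GET /api/repositories/spring-releases  -> one repository, or a
//       ResourceNotFoundException when no Repository matches the id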
#include <Access/SettingsProfilesCache.h>
#include <Access/AccessControlManager.h>
#include <Access/SettingsProfile.h>
#include <Access/SettingsProfilesInfo.h>
#include <Common/quoteString.h>
namespace DB
{
namespace ErrorCodes
{
extern const int THERE_IS_NO_PROFILE;
}
SettingsProfilesCache::SettingsProfilesCache(const AccessControlManager & manager_)
: manager(manager_) {}
SettingsProfilesCache::~SettingsProfilesCache() = default;
void SettingsProfilesCache::ensureAllProfilesRead()
{
/// `mutex` is already locked.
if (all_profiles_read)
return;
all_profiles_read = true;
subscription = manager.subscribeForChanges<SettingsProfile>(
[&](const UUID & id, const AccessEntityPtr & entity)
{
if (entity)
profileAddedOrChanged(id, typeid_cast<SettingsProfilePtr>(entity));
else
profileRemoved(id);
});
for (const UUID & id : manager.findAll<SettingsProfile>())
{
auto profile = manager.tryRead<SettingsProfile>(id);
if (profile)
{
all_profiles.emplace(id, profile);
profiles_by_name[profile->getName()] = id;
}
}
}
void SettingsProfilesCache::profileAddedOrChanged(const UUID & profile_id, const SettingsProfilePtr & new_profile)
{
std::lock_guard lock{mutex};
auto it = all_profiles.find(profile_id);
if (it == all_profiles.end())
{
all_profiles.emplace(profile_id, new_profile);
profiles_by_name[new_profile->getName()] = profile_id;
}
else
{
auto old_profile = it->second;
it->second = new_profile;
if (old_profile->getName() != new_profile->getName())
profiles_by_name.erase(old_profile->getName());
profiles_by_name[new_profile->getName()] = profile_id;
}
profile_infos_cache.clear();
mergeSettingsAndConstraints();
}
void SettingsProfilesCache::profileRemoved(const UUID & profile_id)
{
std::lock_guard lock{mutex};
auto it = all_profiles.find(profile_id);
if (it == all_profiles.end())
return;
profiles_by_name.erase(it->second->getName());
all_profiles.erase(it);
profile_infos_cache.clear();
mergeSettingsAndConstraints();
}
void SettingsProfilesCache::setDefaultProfileName(const String & default_profile_name)
{
std::lock_guard lock{mutex};
ensureAllProfilesRead();
if (default_profile_name.empty())
{
default_profile_id = {};
return;
}
auto it = profiles_by_name.find(default_profile_name);
if (it == profiles_by_name.end())
throw Exception("Settings profile " + backQuote(default_profile_name) + " not found", ErrorCodes::THERE_IS_NO_PROFILE);
default_profile_id = it->second;
}
void SettingsProfilesCache::mergeSettingsAndConstraints()
{
/// `mutex` is already locked.
for (auto i = enabled_settings.begin(), e = enabled_settings.end(); i != e;)
{
auto enabled = i->second.lock();
if (!enabled)
i = enabled_settings.erase(i);
else
{
mergeSettingsAndConstraintsFor(*enabled);
++i;
}
}
}
void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & enabled) const
{
SettingsProfileElements merged_settings;
if (default_profile_id)
{
SettingsProfileElement new_element;
new_element.parent_profile = *default_profile_id;
merged_settings.emplace_back(new_element);
}
for (const auto & [profile_id, profile] : all_profiles)
if (profile->to_roles.match(enabled.params.user_id, enabled.params.enabled_roles))
{
SettingsProfileElement new_element;
new_element.parent_profile = profile_id;
merged_settings.emplace_back(new_element);
}
merged_settings.merge(enabled.params.settings_from_enabled_roles);
merged_settings.merge(enabled.params.settings_from_user);
auto info = std::make_shared<SettingsProfilesInfo>(manager);
info->profiles = enabled.params.settings_from_user.toProfileIDs();
substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
info->settings = merged_settings.toSettingsChanges();
info->constraints = merged_settings.toSettingsConstraints(manager);
enabled.setInfo(std::move(info));
}
void SettingsProfilesCache::substituteProfiles(
SettingsProfileElements & elements,
std::vector<UUID> & substituted_profiles,
std::unordered_map<UUID, String> & names_of_substituted_profiles) const
{
/// We should substitute profiles in reverse order because the same profile can occur
/// in `elements` multiple times (with some other settings in between) and in this case
/// the last occurrence should override all the previous ones.
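/// Illustrative example (hedged, not from the original source): with
/// elements = [P, max_threads=4, P] and profile P = {max_threads=8}, only the
/// trailing P is expanded, so P's settings are applied last and override the
/// earlier occurrence.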
boost::container::flat_set<UUID> substituted_profiles_set;
size_t i = elements.size();
while (i != 0)
{
auto & element = elements[--i];
if (!element.parent_profile)
continue;
auto profile_id = *element.parent_profile;
element.parent_profile.reset();
if (substituted_profiles_set.count(profile_id))
continue;
auto profile_it = all_profiles.find(profile_id);
if (profile_it == all_profiles.end())
continue;
const auto & profile = profile_it->second;
const auto & profile_elements = profile->elements;
elements.insert(elements.begin() + i, profile_elements.begin(), profile_elements.end());
i += profile_elements.size();
substituted_profiles.push_back(profile_id);
substituted_profiles_set.insert(profile_id);
names_of_substituted_profiles.emplace(profile_id, profile->getName());
}
std::reverse(substituted_profiles.begin(), substituted_profiles.end());
}
std::shared_ptr<const EnabledSettings> SettingsProfilesCache::getEnabledSettings(
const UUID & user_id,
const SettingsProfileElements & settings_from_user,
const boost::container::flat_set<UUID> & enabled_roles,
const SettingsProfileElements & settings_from_enabled_roles)
{
std::lock_guard lock{mutex};
ensureAllProfilesRead();
EnabledSettings::Params params;
params.user_id = user_id;
params.settings_from_user = settings_from_user;
params.enabled_roles = enabled_roles;
params.settings_from_enabled_roles = settings_from_enabled_roles;
auto it = enabled_settings.find(params);
if (it != enabled_settings.end())
{
auto from_cache = it->second.lock();
if (from_cache)
return from_cache;
enabled_settings.erase(it);
}
std::shared_ptr<EnabledSettings> res(new EnabledSettings(params));
enabled_settings.emplace(std::move(params), res);
mergeSettingsAndConstraintsFor(*res);
return res;
}
std::shared_ptr<const SettingsProfilesInfo> SettingsProfilesCache::getSettingsProfileInfo(const UUID & profile_id)
{
std::lock_guard lock{mutex};
ensureAllProfilesRead();
if (auto pos = this->profile_infos_cache.get(profile_id))
return *pos;
SettingsProfileElements elements = all_profiles[profile_id]->elements;
auto info = std::make_shared<SettingsProfilesInfo>(manager);
info->profiles.push_back(profile_id);
info->profiles_with_implicit.push_back(profile_id);
substituteProfiles(elements, info->profiles_with_implicit, info->names_of_profiles);
info->settings = elements.toSettingsChanges();
info->constraints.merge(elements.toSettingsConstraints(manager));
profile_infos_cache.add(profile_id, info);
return info;
}
}
#include "../../../tools/designer/src/lib/shared/gridpanel_p.h"
// DogeCoding/iOSCompiledRuntime: xnu-4903.241.1/osfmk/arm/machine_routines_common.c
/*
* Copyright (c) 2007-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <pexpert/pexpert.h>
#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */
#include <mach/machine.h>
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif
extern uint64_t mach_absolutetime_asleep;
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}
static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}
static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}
static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}
static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}
static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
perfcontrol_work_interval_t work_interval __unused)
{
}
static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
perfcontrol_work_interval_instance_t instance __unused)
{
}
static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}
static void
sched_perfcontrol_csw_default(
__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
__unused struct perfcontrol_thread_data *oncore,
__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}
static void
sched_perfcontrol_state_update_default(
__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
__unused void *unused)
{
}
sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);
if (size_of_state > sizeof(struct perfcontrol_state)) {
panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
}
if (callbacks) {
if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
if (callbacks->work_interval_ctl != NULL) {
sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
} else {
sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
}
}
if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
if (callbacks->csw != NULL) {
sched_perfcontrol_csw = callbacks->csw;
} else {
sched_perfcontrol_csw = sched_perfcontrol_csw_default;
}
if (callbacks->state_update != NULL) {
sched_perfcontrol_state_update = callbacks->state_update;
} else {
sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
}
}
if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
if (callbacks->deadline_passed != NULL) {
sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
} else {
sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
}
}
if (callbacks->offcore != NULL) {
sched_perfcontrol_offcore = callbacks->offcore;
} else {
sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
}
if (callbacks->context_switch != NULL) {
sched_perfcontrol_switch = callbacks->context_switch;
} else {
sched_perfcontrol_switch = sched_perfcontrol_switch_default;
}
if (callbacks->oncore != NULL) {
sched_perfcontrol_oncore = callbacks->oncore;
} else {
sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
}
if (callbacks->max_runnable_latency != NULL) {
sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
} else {
sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
}
if (callbacks->work_interval_notify != NULL) {
sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
} else {
sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
}
} else {
/* reset to defaults */
sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
}
}
static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
thread_t thread,
uint64_t same_pri_latency)
{
bzero(data, sizeof(struct perfcontrol_thread_data));
data->perfctl_class = thread_get_perfcontrol_class(thread);
data->energy_estimate_nj = 0;
data->thread_id = thread->thread_id;
data->scheduling_latency_at_same_basepri = same_pri_latency;
data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}
static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
cpu_counters->instructions = 0;
cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}
int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];
#if MONOTONIC
static inline
bool perfcontrol_callout_counters_begin(uint64_t *counters)
{
if (!perfcontrol_callout_stats_enabled)
return false;
mt_fixed_counts(counters);
return true;
}
static inline
void perfcontrol_callout_counters_end(uint64_t *start_counters,
perfcontrol_callout_type_t type)
{
uint64_t end_counters[MT_CORE_NFIXED];
mt_fixed_counts(end_counters);
atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */
uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
perfcontrol_callout_stat_t stat)
{
if (!perfcontrol_callout_stats_enabled)
return 0;
return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]);
}
void
machine_switch_perfcontrol_context(perfcontrol_event event,
uint64_t timestamp,
uint32_t flags,
uint64_t new_thread_same_pri_latency,
thread_t old,
thread_t new)
{
if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
}
if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
uint32_t cpu_id = (uint32_t)cpu_number();
struct perfcontrol_cpu_counters cpu_counters;
struct perfcontrol_thread_data offcore, oncore;
machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
machine_switch_populate_perfcontrol_thread_data(&oncore, new,
new_thread_same_pri_latency);
machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);
#if MONOTONIC
uint64_t counters[MT_CORE_NFIXED];
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
&offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
#endif /* MONOTONIC */
#if __arm64__
old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
}
}
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
uint64_t timestamp,
uint32_t flags,
thread_t thread)
{
if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default)
return;
uint32_t cpu_id = (uint32_t)cpu_number();
struct perfcontrol_thread_data data;
machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);
#if MONOTONIC
uint64_t counters[MT_CORE_NFIXED];
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
&data, NULL);
#if MONOTONIC
if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
#endif /* MONOTONIC */
#if __arm64__
thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}
void
machine_thread_going_on_core(thread_t new_thread,
int urgency,
uint64_t sched_latency,
uint64_t same_pri_latency,
uint64_t timestamp)
{
if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default)
return;
struct going_on_core on_core;
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);
on_core.thread_id = new_thread->thread_id;
on_core.energy_estimate_nj = 0;
on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
on_core.urgency = urgency;
on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
on_core.is_kernel_thread = new_thread->task == kernel_task;
on_core.scheduling_latency = sched_latency;
on_core.start_time = timestamp;
on_core.scheduling_latency_at_same_basepri = same_pri_latency;
#if MONOTONIC
uint64_t counters[MT_CORE_NFIXED];
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
#endif /* MONOTONIC */
#if __arm64__
new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch)
{
if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default)
return;
struct going_off_core off_core;
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);
off_core.thread_id = old_thread->thread_id;
off_core.energy_estimate_nj = 0;
off_core.end_time = last_dispatch;
#if MONOTONIC
uint64_t counters[MT_CORE_NFIXED];
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
#endif /* MONOTONIC */
#if __arm64__
old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}
void
machine_max_runnable_latency(uint64_t bg_max_latency,
uint64_t default_max_latency,
uint64_t realtime_max_latency)
{
if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default)
return;
struct perfcontrol_max_runnable_latency latencies = {
.max_scheduling_latencies = {
[THREAD_URGENCY_NONE] = 0,
[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
[THREAD_URGENCY_NORMAL] = default_max_latency,
[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
}
};
sched_perfcontrol_max_runnable_latency(&latencies);
}
void
machine_work_interval_notify(thread_t thread,
struct kern_work_interval_args* kwi_args)
{
if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default)
return;
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
struct perfcontrol_work_interval work_interval = {
.thread_id = thread->thread_id,
.qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
.urgency = kwi_args->urgency,
.flags = kwi_args->notify_flags,
.work_interval_id = kwi_args->work_interval_id,
.start = kwi_args->start,
.finish = kwi_args->finish,
.deadline = kwi_args->deadline,
.next_start = kwi_args->next_start,
.create_flags = kwi_args->create_flags,
};
sched_perfcontrol_work_interval_notify(state, &work_interval);
}
void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default)
sched_perfcontrol_deadline_passed(deadline);
}
#if INTERRUPT_MASKED_DEBUG
/*
* ml_spin_debug_reset()
* Reset the timestamp on a thread that has been unscheduled
* to avoid false alarms. Alarm will go off if interrupts are held
* disabled for too long, starting from now.
*/
void
ml_spin_debug_reset(thread_t thread)
{
thread->machine.intmask_timestamp = mach_absolute_time();
}
/*
* ml_spin_debug_clear()
* Clear the timestamp on a thread that has been unscheduled
* to avoid false alarms
*/
void
ml_spin_debug_clear(thread_t thread)
{
thread->machine.intmask_timestamp = 0;
}
/*
* ml_spin_debug_clear_self()
* Clear the timestamp on the current thread to prevent
* false alarms
*/
void
ml_spin_debug_clear_self()
{
ml_spin_debug_clear(current_thread());
}
void
ml_check_interrupts_disabled_duration(thread_t thread)
{
uint64_t start;
uint64_t now;
start = thread->machine.intmask_timestamp;
if (start != 0) {
now = mach_absolute_time();
if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
mach_timebase_info_data_t timebase;
clock_timebase_info(&timebase);
#ifndef KASAN
/*
* Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
* mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
*/
panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer)/timebase.denom));
#endif
}
}
return;
}
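/*
 * Worked example: with the 24MHz timebase typical of Apple ARM SoCs,
 * clock_timebase_info() reports numer = 125, denom = 3, so a span of
 * 24000 ticks converts to (24000 * 125) / 3 = 1,000,000 ns = 1 ms of
 * interrupts held disabled.
 */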
#endif // INTERRUPT_MASKED_DEBUG
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
thread_t thread;
uint64_t state;
#if __arm__
#define INTERRUPT_MASK PSR_IRQF
state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
state = __builtin_arm_rsr("DAIF");
#endif
if (enable && (state & INTERRUPT_MASK)) {
#if INTERRUPT_MASKED_DEBUG
if (interrupt_masked_debug) {
// Interrupts are currently masked, we will enable them (after finishing this check)
thread = current_thread();
ml_check_interrupts_disabled_duration(thread);
thread->machine.intmask_timestamp = 0;
}
#endif // INTERRUPT_MASKED_DEBUG
if (get_preemption_level() == 0) {
thread = current_thread();
while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
uintptr_t up = arm_user_protect_begin(thread);
#endif
ast_taken_kernel();
#if __ARM_USER_PROTECT__
arm_user_protect_end(thread, up, FALSE);
#endif
}
}
#if __arm__
__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
if (interrupt_masked_debug) {
// Interrupts were enabled, we just masked them
current_thread()->machine.intmask_timestamp = mach_absolute_time();
}
#endif
}
return ((state & INTERRUPT_MASK) == 0);
}
/*
* Routine: ml_at_interrupt_context
* Function: Check if running at interrupt context
*/
boolean_t
ml_at_interrupt_context(void)
{
/* Do not use a stack-based check here, as the top-level exception handler
* is free to use some other stack besides the per-CPU interrupt stack.
* Interrupts should always be disabled if we're at interrupt context.
* Check that first, as we may be in a preemptible non-interrupt context, in
* which case we could be migrated to a different CPU between obtaining
* the per-cpu data pointer and loading cpu_int_state. We then might end
* up checking the interrupt state of a different CPU, resulting in a false
* positive. But if interrupts are disabled, we also know we cannot be
* preempted. */
return (!ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL));
}
vm_offset_t
ml_stack_remaining(void)
{
uintptr_t local = (uintptr_t) &local;
vm_offset_t intstack_top_ptr;
/* Since this is a stack-based check, we don't need to worry about
* preemption as we do in ml_at_interrupt_context(). If we are preemptible,
* then the sp should never be within any CPU's interrupt stack unless
* something has gone horribly wrong. */
intstack_top_ptr = getCpuDatap()->intstack_top;
if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
} else {
return (local - current_thread()->kernel_stack);
}
}
static boolean_t ml_quiescing;
void ml_set_is_quiescing(boolean_t quiescing)
{
assert(FALSE == ml_get_interrupts_enabled());
ml_quiescing = quiescing;
}
boolean_t ml_is_quiescing(void)
{
assert(FALSE == ml_get_interrupts_enabled());
return (ml_quiescing);
}
uint64_t ml_get_booter_memory_size(void)
{
uint64_t size;
uint64_t roundsize = 512*1024*1024ULL;
size = BootArgs->memSizeActual;
if (!size) {
size = BootArgs->memSize;
if (size < (2 * roundsize)) roundsize >>= 1;
size = (size + roundsize - 1) & ~(roundsize - 1);
size -= BootArgs->memSize;
}
return (size);
}
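/*
 * Worked example (hypothetical boot args): with memSizeActual == 0 and
 * memSize == 896MB, roundsize halves to 256MB since 896MB < 2 * 512MB;
 * 896MB then rounds up to 1024MB, and the function returns
 * 1024MB - 896MB = 128MB of booter-reserved memory.
 */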
uint64_t
ml_get_abstime_offset(void)
{
return rtclock_base_abstime;
}
uint64_t
ml_get_conttime_offset(void)
{
return (rtclock_base_abstime + mach_absolutetime_asleep);
}
uint64_t
ml_get_time_since_reset(void)
{
/* The timebase resets across S2R, so just return the raw value. */
return ml_get_hwclock();
}
uint64_t
ml_get_conttime_wake_time(void)
{
/* The wake time is simply our continuous time offset. */
return ml_get_conttime_offset();
}
| 8,960 |
473 | #ifndef DIFFER_H_
#define DIFFER_H_
void cgc_compare_files(SFILE *lfile, SFILE *rfile, int ignore_ws, int treat_as_ascii);
void cgc_clear_cache(int file_num);
#endif
| 75 |
3,102 | <gh_stars>1000+
//===-- DiagnosticsYaml.h -- Serialization for Diagnostics -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the structure of a YAML document for serializing
/// diagnostics.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLING_DIAGNOSTICSYAML_H
#define LLVM_CLANG_TOOLING_DIAGNOSTICSYAML_H
#include "clang/Tooling/Core/Diagnostic.h"
#include "clang/Tooling/ReplacementsYaml.h"
#include "llvm/Support/YAMLTraits.h"
#include <string>
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::Diagnostic)
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::DiagnosticMessage)
namespace llvm {
namespace yaml {
template <> struct MappingTraits<clang::tooling::DiagnosticMessage> {
static void mapping(IO &Io, clang::tooling::DiagnosticMessage &M) {
Io.mapRequired("Message", M.Message);
Io.mapOptional("FilePath", M.FilePath);
Io.mapOptional("FileOffset", M.FileOffset);
std::vector<clang::tooling::Replacement> Fixes;
for (auto &Replacements : M.Fix) {
for (auto &Replacement : Replacements.second)
Fixes.push_back(Replacement);
}
Io.mapRequired("Replacements", Fixes);
for (auto &Fix : Fixes) {
llvm::Error Err = M.Fix[Fix.getFilePath()].add(Fix);
if (Err) {
// FIXME: Implement better conflict handling.
llvm::errs() << "Fix conflicts with existing fix: "
<< llvm::toString(std::move(Err)) << "\n";
}
}
}
};
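// For reference, a mapped DiagnosticMessage round-trips through YAML roughly
// as below (a sketch; the field values are hypothetical and the Replacements
// layout comes from ReplacementsYaml.h):
//
//   Message: 'use nullptr'
//   FilePath: '/path/to/a.cc'
//   FileOffset: 12
//   Replacements: []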
template <> struct MappingTraits<clang::tooling::Diagnostic> {
/// Helper to (de)serialize a Diagnostic since we don't have direct
/// access to its data members.
class NormalizedDiagnostic {
public:
NormalizedDiagnostic(const IO &)
: DiagLevel(clang::tooling::Diagnostic::Level::Warning) {}
NormalizedDiagnostic(const IO &, const clang::tooling::Diagnostic &D)
: DiagnosticName(D.DiagnosticName), Message(D.Message), Notes(D.Notes),
DiagLevel(D.DiagLevel), BuildDirectory(D.BuildDirectory) {}
clang::tooling::Diagnostic denormalize(const IO &) {
return clang::tooling::Diagnostic(DiagnosticName, Message, Notes,
DiagLevel, BuildDirectory);
}
std::string DiagnosticName;
clang::tooling::DiagnosticMessage Message;
llvm::StringMap<clang::tooling::Replacements> Fix;
SmallVector<clang::tooling::DiagnosticMessage, 1> Notes;
clang::tooling::Diagnostic::Level DiagLevel;
std::string BuildDirectory;
};
static void mapping(IO &Io, clang::tooling::Diagnostic &D) {
MappingNormalization<NormalizedDiagnostic, clang::tooling::Diagnostic> Keys(
Io, D);
Io.mapRequired("DiagnosticName", Keys->DiagnosticName);
Io.mapRequired("DiagnosticMessage", Keys->Message);
Io.mapOptional("Notes", Keys->Notes);
// FIXME: Export properly all the different fields.
}
};
/// Specialized MappingTraits to describe how a
/// TranslationUnitDiagnostics is (de)serialized.
template <> struct MappingTraits<clang::tooling::TranslationUnitDiagnostics> {
static void mapping(IO &Io, clang::tooling::TranslationUnitDiagnostics &Doc) {
Io.mapRequired("MainSourceFile", Doc.MainSourceFile);
Io.mapRequired("Diagnostics", Doc.Diagnostics);
}
};
} // end namespace yaml
} // end namespace llvm
#endif // LLVM_CLANG_TOOLING_DIAGNOSTICSYAML_H
| 1,348 |
348 | <filename>docs/data/leg-t2/050/05003184.json
{"nom":"Flamanville","circ":"3ème circonscription","dpt":"Manche","inscrits":1255,"abs":800,"votants":455,"blancs":51,"nuls":15,"exp":389,"res":[{"nuance":"REM","nom":"<NAME>","voix":252},{"nuance":"LR","nom":"<NAME>","voix":137}]} | 110 |
314 | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "CDStructures.h"
#import "DVTPageControllerDots-Protocol.h"
@class DVTPagingDotView, NSString;
@interface DVTPageController : NSPageController <DVTPageControllerDots>
{
DVTPagingDotView *_pagingDotView;
}
- (void)setSelectedIndex:(long long)arg1;
- (void)setArrangedObjects:(id)arg1;
@property(retain) DVTPagingDotView *pagingDotView; // @synthesize pagingDotView=_pagingDotView;
- (void)updatePagingDotView;
- (BOOL)shouldShowPagingDots;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| 300 |
1,511 | <gh_stars>1000+
package com.privatecompany.somepackage;
import java.io.File;
import java.io.FilenameFilter;
import javax.swing.filechooser.FileFilter;
/**
* <p>Title: CustomFileFilter</p>
*
* <p>Description: </p>
*
* <p>Copyright: Copyright (c) 2006</p>
*
* <p>Company: Private Company</p>
*
* @author <NAME>
* @version 1.0
*/
public class CustomFileFilter extends FileFilter
{
// private static final member variable
private static final int TYPE_LENGTH = 2;
    // the file type (extension) accepted by this filter
private final String fileType;
public CustomFileFilter(final String fileType)
    {
        this.fileType = fileType;
    }
    @Override
    public boolean accept(final File file)
    {
        // Reconstructed to satisfy the abstract FileFilter API (the original
        // class body was left empty); assumes fileType names a file extension.
        return file.isDirectory()
            || file.getName().toLowerCase().endsWith("." + fileType);
    }
    @Override
    public String getDescription()
    {
        return "*." + fileType;
    }
}
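// Hypothetical usage with a Swing file chooser:
//   JFileChooser chooser = new JFileChooser();
//   chooser.setFileFilter(new CustomFileFilter("xml"));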
| 241 |
1,443 | {
"copyright": "TehBrian",
"url": "https://tehbrian.xyz",
"email": "<EMAIL>",
"format": "html",
"license": "mit",
"theme": "default",
"gravatar": false
} | 72 |
314 | <filename>Multiplex/IDEHeaders/IDEHeaders/DVTFoundation/DVTSourceFileLineCoverageData.h
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "CDStructures.h"
@class NSArray;
@interface DVTSourceFileLineCoverageData : NSObject <NSCoding>
{
BOOL _executable;
int _executionCount;
NSArray *_subRanges;
}
@property(readonly, nonatomic) NSArray *subRanges; // @synthesize subRanges=_subRanges;
@property(readonly, nonatomic, getter=isExecutable) BOOL executable; // @synthesize executable=_executable;
@property(readonly, nonatomic) int executionCount; // @synthesize executionCount=_executionCount;
- (id)description;
- (void)updateWithExecutionCount:(int)arg1 executable:(BOOL)arg2 subRanges:(id)arg3;
- (void)encodeWithCoder:(id)arg1;
- (id)initWithCoder:(id)arg1;
- (id)initWithExecutionCount:(int)arg1 executable:(BOOL)arg2 subRanges:(id)arg3;
@end
| 358 |
771 | """Benchmarks the file handler"""
from logbook import Logger, FileHandler
from tempfile import NamedTemporaryFile
log = Logger('Test logger')
def run():
f = NamedTemporaryFile()
with FileHandler(f.name) as handler:
        for x in range(500):
log.warning('this is handled')
| 104 |
967 | <filename>Mac/Echo/Template/Outline/ECOTemplateOutlineViewController.h
//
// ECOTemplateOutlineViewController.h
// Echo
//
// Created by 陈爱彬 on 2019/6/21. Maintained by 陈爱彬
// Description
//
#import <Cocoa/Cocoa.h>
#import "ECOPluginUIProtocol.h"
NS_ASSUME_NONNULL_BEGIN
@interface ECOTemplateOutlineViewController : NSViewController
<ECOPluginUIProtocol>
@property (nonatomic, weak) __kindof ECOBasePlugin *plugin;
@end
NS_ASSUME_NONNULL_END
| 188 |
487 | <filename>common/analysis/matcher/inner_match_handlers.h<gh_stars>100-1000
// Copyright 2017-2020 The Verible Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VERIBLE_COMMON_ANALYSIS_MATCHER_INNER_MATCH_HANDLERS_H_
#define VERIBLE_COMMON_ANALYSIS_MATCHER_INNER_MATCH_HANDLERS_H_
#include <vector>
#include "common/analysis/matcher/bound_symbol_manager.h"
#include "common/analysis/matcher/matcher.h"
#include "common/text/symbol.h"
namespace verible {
namespace matcher {
// These are a collection of inner matcher handlers, which are used by
// the matcher class to describe how to process inner matchers.
//
// Broadly speaking, this behavior covers how many inner matchers need to
// match in order for the handler to return true, and how the inner matchers'
// bound symbols are bound into the manager.
//
// Usage:
// InnerMatchHandler handler = ... select one of the functions ...
// Matcher matcher(some_predicate, handler);
// Returns true if all inner_matchers match
//
// If all inner matchers match, each inner matcher binds its symbols to
// manager. The order of these binds is the order in which matchers appear in
// inner_matchers.
// If not all inner matchers match, then nothing is bound to manager.
//
bool InnerMatchAll(const Symbol& symbol,
const std::vector<Matcher>& inner_matchers,
BoundSymbolManager* manager);
// Returns true if one of inner_matchers matches
//
// Only the first matching inner matcher in inner_matchers gets to bind.
// Subsequent matchers are not run.
// If no inner matchers match, then nothing is bound to manager.
//
bool InnerMatchAny(const Symbol& symbol,
const std::vector<Matcher>& inner_matchers,
BoundSymbolManager* manager);
// Returns true if one of inner_matchers matches
//
// Every matching inner_matcher binds symbols to manager. The order of these
// binds is the order in which matchers appear in inner_matchers.
// If no inner matchers match, then nothing is bound to manager.
//
bool InnerMatchEachOf(const Symbol& symbol,
const std::vector<Matcher>& inner_matchers,
BoundSymbolManager* manager);
// Returns true if inner_matcher does not match.
// Returns false if inner_matcher does match.
//
// inner_matchers should contain exactly one matcher.
//
// No symbols are bound to manager regardless of outcome.
//
bool InnerMatchUnless(const Symbol& symbol,
const std::vector<Matcher>& inner_matchers,
BoundSymbolManager* manager);
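// Sketch, following the "Usage" note above (predicate name hypothetical):
//
//   InnerMatchHandler handler = InnerMatchAll; // require every inner matcher
//   Matcher matcher(some_predicate, handler);  // binds all inner symbols on success
//
// InnerMatchUnless is the negation form: it expects exactly one inner
// matcher and succeeds only when that matcher fails, binding nothing.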
} // namespace matcher
} // namespace verible
#endif // VERIBLE_COMMON_ANALYSIS_MATCHER_INNER_MATCH_HANDLERS_H_
| 1,031 |
1,144 | from app.flask_app import celery
from lib.celery.task_decorator import throttled_task
@throttled_task(countdown=10)
@celery.task(bind=True)
def poll_engine_status(self, checker_name, engine_id):
from lib.engine_status_checker import get_engine_checker_class
get_engine_checker_class(checker_name).get_server_status_task(engine_id)
| 124 |
3,218 | //
// WeiBoMineController.h
// CodeDemo
//
// Created by wangrui on 2017/4/11.
// Copyright © 2017 wangrui. All rights reserved.
//
// GitHub repo: https://github.com/wangrui460/WRNavigationBar
#import <UIKit/UIKit.h>
@interface AllTransparent : UIViewController
@end
| 110 |
3,012 | <filename>MdePkg/Library/PeiIoLibCpuIo/IoHighLevel.c
/** @file
High-level Io/Mmio functions.
All assertions for bit field operations are handled bit field functions in the
Base Library.
Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include <PiPei.h>
#include <Library/IoLib.h>
#include <Library/DebugLib.h>
#include <Library/BaseLib.h>
#include <Library/PeiServicesTablePointerLib.h>
/**
Reads an 8-bit I/O port, performs a bitwise OR, and writes the
result back to the 8-bit I/O port.
Reads the 8-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 8-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized.
If 8-bit I/O port operations are not supported, then ASSERT().
@param Port The I/O port to write.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoOr8 (
IN UINTN Port,
IN UINT8 OrData
)
{
return IoWrite8 (Port, (UINT8)(IoRead8 (Port) | OrData));
}
/**
Reads an 8-bit I/O port, performs a bitwise AND, and writes the result back
to the 8-bit I/O port.
Reads the 8-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 8-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized.
If 8-bit I/O port operations are not supported, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoAnd8 (
IN UINTN Port,
IN UINT8 AndData
)
{
return IoWrite8 (Port, (UINT8)(IoRead8 (Port) & AndData));
}
/**
Reads an 8-bit I/O port, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 8-bit I/O port.
Reads the 8-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, performs a bitwise OR
between the result of the AND operation and the value specified by OrData,
and writes the result to the 8-bit I/O port specified by Port. The value
written to the I/O port is returned. This function must guarantee that all
I/O read and write operations are serialized.
If 8-bit I/O port operations are not supported, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoAndThenOr8 (
IN UINTN Port,
IN UINT8 AndData,
IN UINT8 OrData
)
{
return IoWrite8 (Port, (UINT8)((IoRead8 (Port) & AndData) | OrData));
}
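/*
  Worked example (hypothetical values): if IoRead8 (Port) returns 0xAB, then
  IoAndThenOr8 (Port, 0xF0, 0x05) writes back
  (0xAB & 0xF0) | 0x05 = 0xA0 | 0x05 = 0xA5.
*/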
/**
Reads a bit field of an I/O register.
Reads the bit field in an 8-bit I/O register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 8-bit I/O port operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Port The I/O port to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@return The value read.
**/
UINT8
EFIAPI
IoBitFieldRead8 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead8 (IoRead8 (Port), StartBit, EndBit);
}
/**
Writes a bit field to an I/O register.
Writes Value to the bit field of the I/O register. The bit field is specified
by the StartBit and the EndBit. All other bits in the destination I/O
register are preserved. The value written to the I/O port is returned.
If 8-bit I/O port operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param Value The new value of the bit field.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoBitFieldWrite8 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 Value
)
{
return IoWrite8 (
Port,
BitFieldWrite8 (IoRead8 (Port), StartBit, EndBit, Value)
);
}
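/*
  Worked example (hypothetical values): with StartBit = 4 and EndBit = 6 the
  field mask is 0x70, so if IoRead8 (Port) returns 0xFF, then
  IoBitFieldWrite8 (Port, 4, 6, 0x5) writes back
  (0xFF & ~0x70) | (0x5 << 4) = 0x8F | 0x50 = 0xDF.
*/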
/**
Reads a bit field in an 8-bit port, performs a bitwise OR, and writes the
result back to the bit field in the 8-bit port.
Reads the 8-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 8-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized. Extra left bits in OrData are stripped.
If 8-bit I/O port operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoBitFieldOr8 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 OrData
)
{
return IoWrite8 (
Port,
BitFieldOr8 (IoRead8 (Port), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in an 8-bit port, performs a bitwise AND, and writes the
result back to the bit field in the 8-bit port.
Reads the 8-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 8-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized. Extra left bits in AndData are stripped.
If 8-bit I/O port operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoBitFieldAnd8 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 AndData
)
{
return IoWrite8 (
Port,
BitFieldAnd8 (IoRead8 (Port), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in an 8-bit port, performs a bitwise AND followed by a
bitwise OR, and writes the result back to the bit field in the
8-bit port.
Reads the 8-bit I/O port specified by Port, performs a bitwise AND followed
by a bitwise OR between the read result and the value specified by
AndData, and writes the result to the 8-bit I/O port specified by Port. The
value written to the I/O port is returned. This function must guarantee that
all I/O read and write operations are serialized. Extra left bits in both
AndData and OrData are stripped.
If 8-bit I/O port operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT8
EFIAPI
IoBitFieldAndThenOr8 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 AndData,
IN UINT8 OrData
)
{
return IoWrite8 (
Port,
BitFieldAndThenOr8 (IoRead8 (Port), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads a 16-bit I/O port, performs a bitwise OR, and writes the
result back to the 16-bit I/O port.
Reads the 16-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 16-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoOr16 (
IN UINTN Port,
IN UINT16 OrData
)
{
return IoWrite16 (Port, (UINT16)(IoRead16 (Port) | OrData));
}
/**
Reads a 16-bit I/O port, performs a bitwise AND, and writes the result back
to the 16-bit I/O port.
Reads the 16-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 16-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoAnd16 (
IN UINTN Port,
IN UINT16 AndData
)
{
return IoWrite16 (Port, (UINT16)(IoRead16 (Port) & AndData));
}
/**
Reads a 16-bit I/O port, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 16-bit I/O port.
Reads the 16-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, performs a bitwise OR
between the result of the AND operation and the value specified by OrData,
and writes the result to the 16-bit I/O port specified by Port. The value
written to the I/O port is returned. This function must guarantee that all
I/O read and write operations are serialized.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoAndThenOr16 (
IN UINTN Port,
IN UINT16 AndData,
IN UINT16 OrData
)
{
return IoWrite16 (Port, (UINT16)((IoRead16 (Port) & AndData) | OrData));
}
/**
Reads a bit field of an I/O register.
Reads the bit field in a 16-bit I/O register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Port The I/O port to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@return The value read.
**/
UINT16
EFIAPI
IoBitFieldRead16 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead16 (IoRead16 (Port), StartBit, EndBit);
}
/**
Writes a bit field to an I/O register.
Writes Value to the bit field of the I/O register. The bit field is specified
by the StartBit and the EndBit. All other bits in the destination I/O
register are preserved. The value written to the I/O port is returned. Extra
left bits in Value are stripped.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param Value The new value of the bit field.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoBitFieldWrite16 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 Value
)
{
return IoWrite16 (
Port,
BitFieldWrite16 (IoRead16 (Port), StartBit, EndBit, Value)
);
}
/**
Reads a bit field in a 16-bit port, performs a bitwise OR, and writes the
result back to the bit field in the 16-bit port.
Reads the 16-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 16-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized. Extra left bits in OrData are stripped.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoBitFieldOr16 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 OrData
)
{
return IoWrite16 (
Port,
BitFieldOr16 (IoRead16 (Port), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 16-bit port, performs a bitwise AND, and writes the
result back to the bit field in the 16-bit port.
Reads the 16-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 16-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized. Extra left bits in AndData are stripped.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoBitFieldAnd16 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 AndData
)
{
return IoWrite16 (
Port,
BitFieldAnd16 (IoRead16 (Port), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 16-bit port, performs a bitwise AND followed by a
bitwise OR, and writes the result back to the bit field in the
16-bit port.
Reads the 16-bit I/O port specified by Port, performs a bitwise AND followed
by a bitwise OR between the read result and the value specified by
AndData, and writes the result to the 16-bit I/O port specified by Port. The
value written to the I/O port is returned. This function must guarantee that
all I/O read and write operations are serialized. Extra left bits in both
AndData and OrData are stripped.
If 16-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT16
EFIAPI
IoBitFieldAndThenOr16 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 AndData,
IN UINT16 OrData
)
{
return IoWrite16 (
Port,
BitFieldAndThenOr16 (IoRead16 (Port), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads a 32-bit I/O port, performs a bitwise OR, and writes the
result back to the 32-bit I/O port.
Reads the 32-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 32-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoOr32 (
IN UINTN Port,
IN UINT32 OrData
)
{
return IoWrite32 (Port, IoRead32 (Port) | OrData);
}
/**
Reads a 32-bit I/O port, performs a bitwise AND, and writes the result back
to the 32-bit I/O port.
Reads the 32-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 32-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoAnd32 (
IN UINTN Port,
IN UINT32 AndData
)
{
return IoWrite32 (Port, IoRead32 (Port) & AndData);
}
/**
Reads a 32-bit I/O port, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 32-bit I/O port.
Reads the 32-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, performs a bitwise OR
between the result of the AND operation and the value specified by OrData,
and writes the result to the 32-bit I/O port specified by Port. The value
written to the I/O port is returned. This function must guarantee that all
I/O read and write operations are serialized.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoAndThenOr32 (
IN UINTN Port,
IN UINT32 AndData,
IN UINT32 OrData
)
{
return IoWrite32 (Port, (IoRead32 (Port) & AndData) | OrData);
}
/**
Reads a bit field of an I/O register.
Reads the bit field in a 32-bit I/O register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Port The I/O port to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@return The value read.
**/
UINT32
EFIAPI
IoBitFieldRead32 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead32 (IoRead32 (Port), StartBit, EndBit);
}
/**
Writes a bit field to an I/O register.
Writes Value to the bit field of the I/O register. The bit field is specified
by the StartBit and the EndBit. All other bits in the destination I/O
register are preserved. The value written to the I/O port is returned. Extra
left bits in Value are stripped.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param Value The new value of the bit field.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoBitFieldWrite32 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 Value
)
{
return IoWrite32 (
Port,
BitFieldWrite32 (IoRead32 (Port), StartBit, EndBit, Value)
);
}
/**
Reads a bit field in a 32-bit port, performs a bitwise OR, and writes the
result back to the bit field in the 32-bit port.
Reads the 32-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 32-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized. Extra left bits in OrData are stripped.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoBitFieldOr32 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 OrData
)
{
return IoWrite32 (
Port,
BitFieldOr32 (IoRead32 (Port), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 32-bit port, performs a bitwise AND, and writes the
result back to the bit field in the 32-bit port.
Reads the 32-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 32-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized. Extra left bits in AndData are stripped.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoBitFieldAnd32 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 AndData
)
{
return IoWrite32 (
Port,
BitFieldAnd32 (IoRead32 (Port), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 32-bit port, performs a bitwise AND followed by a
bitwise OR, and writes the result back to the bit field in the
32-bit port.
Reads the 32-bit I/O port specified by Port, performs a bitwise AND followed
by a bitwise OR between the read result and the value specified by
AndData, and writes the result to the 32-bit I/O port specified by Port. The
value written to the I/O port is returned. This function must guarantee that
all I/O read and write operations are serialized. Extra left bits in both
AndData and OrData are stripped.
If 32-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT32
EFIAPI
IoBitFieldAndThenOr32 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 AndData,
IN UINT32 OrData
)
{
return IoWrite32 (
Port,
BitFieldAndThenOr32 (IoRead32 (Port), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads a 64-bit I/O port, performs a bitwise OR, and writes the
result back to the 64-bit I/O port.
Reads the 64-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 64-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoOr64 (
IN UINTN Port,
IN UINT64 OrData
)
{
return IoWrite64 (Port, IoRead64 (Port) | OrData);
}
/**
Reads a 64-bit I/O port, performs a bitwise AND, and writes the result back
to the 64-bit I/O port.
Reads the 64-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 64-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoAnd64 (
IN UINTN Port,
IN UINT64 AndData
)
{
return IoWrite64 (Port, IoRead64 (Port) & AndData);
}
/**
Reads a 64-bit I/O port, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 64-bit I/O port.
Reads the 64-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, performs a bitwise OR
between the result of the AND operation and the value specified by OrData,
and writes the result to the 64-bit I/O port specified by Port. The value
written to the I/O port is returned. This function must guarantee that all
I/O read and write operations are serialized.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
@param Port The I/O port to write.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoAndThenOr64 (
IN UINTN Port,
IN UINT64 AndData,
IN UINT64 OrData
)
{
return IoWrite64 (Port, (IoRead64 (Port) & AndData) | OrData);
}
/**
Reads a bit field of an I/O register.
Reads the bit field in a 64-bit I/O register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Port The I/O port to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@return The value read.
**/
UINT64
EFIAPI
IoBitFieldRead64 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead64 (IoRead64 (Port), StartBit, EndBit);
}
/**
Writes a bit field to an I/O register.
Writes Value to the bit field of the I/O register. The bit field is specified
by the StartBit and the EndBit. All other bits in the destination I/O
register are preserved. The value written to the I/O port is returned. Extra
left bits in Value are stripped.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param Value The new value of the bit field.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoBitFieldWrite64 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 Value
)
{
return IoWrite64 (
Port,
BitFieldWrite64 (IoRead64 (Port), StartBit, EndBit, Value)
);
}
/**
Reads a bit field in a 64-bit port, performs a bitwise OR, and writes the
result back to the bit field in the 64-bit port.
Reads the 64-bit I/O port specified by Port, performs a bitwise OR
between the read result and the value specified by OrData, and writes the
result to the 64-bit I/O port specified by Port. The value written to the I/O
port is returned. This function must guarantee that all I/O read and write
operations are serialized. Extra left bits in OrData are stripped.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param OrData The value to OR with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoBitFieldOr64 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 OrData
)
{
return IoWrite64 (
Port,
BitFieldOr64 (IoRead64 (Port), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 64-bit port, performs a bitwise AND, and writes the
result back to the bit field in the 64-bit port.
Reads the 64-bit I/O port specified by Port, performs a bitwise AND between
the read result and the value specified by AndData, and writes the result to
the 64-bit I/O port specified by Port. The value written to the I/O port is
returned. This function must guarantee that all I/O read and write operations
are serialized. Extra left bits in AndData are stripped.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param AndData The value to AND with the read value from the I/O port.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoBitFieldAnd64 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 AndData
)
{
return IoWrite64 (
Port,
BitFieldAnd64 (IoRead64 (Port), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 64-bit port, performs a bitwise AND followed by a
bitwise OR, and writes the result back to the bit field in the
64-bit port.
Reads the 64-bit I/O port specified by Port, performs a bitwise AND followed
by a bitwise OR between the read result and the value specified by
AndData, and writes the result to the 64-bit I/O port specified by Port. The
value written to the I/O port is returned. This function must guarantee that
all I/O read and write operations are serialized. Extra left bits in both
AndData and OrData are stripped.
If 64-bit I/O port operations are not supported, then ASSERT().
If Port is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Port The I/O port to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param AndData The value to AND with the read value from the I/O port.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the I/O port.
**/
UINT64
EFIAPI
IoBitFieldAndThenOr64 (
IN UINTN Port,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 AndData,
IN UINT64 OrData
)
{
return IoWrite64 (
Port,
BitFieldAndThenOr64 (IoRead64 (Port), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads an 8-bit MMIO register, performs a bitwise OR, and writes the
result back to the 8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 8-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized.
If 8-bit MMIO register operations are not supported, then ASSERT().
@param Address The MMIO register to write.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioOr8 (
IN UINTN Address,
IN UINT8 OrData
)
{
return MmioWrite8 (Address, (UINT8)(MmioRead8 (Address) | OrData));
}
/**
Reads an 8-bit MMIO register, performs a bitwise AND, and writes the result
back to the 8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 8-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized.
If 8-bit MMIO register operations are not supported, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioAnd8 (
IN UINTN Address,
IN UINT8 AndData
)
{
return MmioWrite8 (Address, (UINT8)(MmioRead8 (Address) & AndData));
}
/**
Reads an 8-bit MMIO register, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 8-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized.
If 8-bit MMIO register operations are not supported, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioAndThenOr8 (
IN UINTN Address,
IN UINT8 AndData,
IN UINT8 OrData
)
{
return MmioWrite8 (Address, (UINT8)((MmioRead8 (Address) & AndData) | OrData));
}
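//
// Usage sketch (illustrative): preserve the high nibble of an 8-bit MMIO
// register and set its low nibble to 0x5 in one serialized read-modify-write.
// The address below is an assumed example, not a value defined by this
// library.
//
//   MmioAndThenOr8 (0xFED00040, 0xF0, 0x05);
//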
/**
Reads a bit field of a MMIO register.
Reads the bit field in an 8-bit MMIO register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 8-bit MMIO register operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Address The MMIO register to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@return The value read.
**/
UINT8
EFIAPI
MmioBitFieldRead8 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead8 (MmioRead8 (Address), StartBit, EndBit);
}
/**
Writes a bit field to a MMIO register.
Writes Value to the bit field of the MMIO register. The bit field is
specified by the StartBit and the EndBit. All other bits in the destination
MMIO register are preserved. The new value of the 8-bit register is returned.
If 8-bit MMIO register operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param Value The new value of the bit field.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioBitFieldWrite8 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 Value
)
{
return MmioWrite8 (
Address,
BitFieldWrite8 (MmioRead8 (Address), StartBit, EndBit, Value)
);
}
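//
// Usage sketch (illustrative): write 0x5 into bits 4..6 of an 8-bit MMIO
// register, leaving every other bit unchanged. The address is an assumed
// example value.
//
//   MmioBitFieldWrite8 (0xFED00041, 4, 6, 0x5);
//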
/**
Reads a bit field in an 8-bit MMIO register, performs a bitwise OR, and
writes the result back to the bit field in the 8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 8-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized. Extra left bits in OrData
are stripped.
If 8-bit MMIO register operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioBitFieldOr8 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 OrData
)
{
return MmioWrite8 (
Address,
BitFieldOr8 (MmioRead8 (Address), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in an 8-bit MMIO register, performs a bitwise AND, and
writes the result back to the bit field in the 8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 8-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized. Extra left bits in AndData are
stripped.
If 8-bit MMIO register operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioBitFieldAnd8 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 AndData
)
{
return MmioWrite8 (
Address,
BitFieldAnd8 (MmioRead8 (Address), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in an 8-bit MMIO register, performs a bitwise AND followed
by a bitwise OR, and writes the result back to the bit field in the
8-bit MMIO register.
Reads the 8-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 8-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized. Extra
left bits in both AndData and OrData are stripped.
If 8-bit MMIO register operations are not supported, then ASSERT().
If StartBit is greater than 7, then ASSERT().
If EndBit is greater than 7, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..7.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..7.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT8
EFIAPI
MmioBitFieldAndThenOr8 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT8 AndData,
IN UINT8 OrData
)
{
return MmioWrite8 (
Address,
BitFieldAndThenOr8 (MmioRead8 (Address), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads a 16-bit MMIO register, performs a bitwise OR, and writes the
result back to the 16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 16-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioOr16 (
IN UINTN Address,
IN UINT16 OrData
)
{
return MmioWrite16 (Address, (UINT16)(MmioRead16 (Address) | OrData));
}
/**
Reads a 16-bit MMIO register, performs a bitwise AND, and writes the result
back to the 16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 16-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioAnd16 (
IN UINTN Address,
IN UINT16 AndData
)
{
return MmioWrite16 (Address, (UINT16)(MmioRead16 (Address) & AndData));
}
/**
Reads a 16-bit MMIO register, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 16-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioAndThenOr16 (
IN UINTN Address,
IN UINT16 AndData,
IN UINT16 OrData
)
{
return MmioWrite16 (Address, (UINT16)((MmioRead16 (Address) & AndData) | OrData));
}
/**
Reads a bit field of a MMIO register.
Reads the bit field in a 16-bit MMIO register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Address The MMIO register to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@return The value read.
**/
UINT16
EFIAPI
MmioBitFieldRead16 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead16 (MmioRead16 (Address), StartBit, EndBit);
}
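//
// Usage sketch (illustrative): extract bits 0..3 of a 16-bit MMIO register
// without modifying it. The address is an assumed example value.
//
//   UINT16 Field;
//
//   Field = MmioBitFieldRead16 (0xFED00050, 0, 3);
//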
/**
Writes a bit field to a MMIO register.
Writes Value to the bit field of the MMIO register. The bit field is
specified by the StartBit and the EndBit. All other bits in the destination
MMIO register are preserved. The new value of the 16-bit register is returned.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param Value The new value of the bit field.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioBitFieldWrite16 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 Value
)
{
return MmioWrite16 (
Address,
BitFieldWrite16 (MmioRead16 (Address), StartBit, EndBit, Value)
);
}
/**
Reads a bit field in a 16-bit MMIO register, performs a bitwise OR, and
writes the result back to the bit field in the 16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 16-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized. Extra left bits in OrData
are stripped.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioBitFieldOr16 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 OrData
)
{
return MmioWrite16 (
Address,
BitFieldOr16 (MmioRead16 (Address), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 16-bit MMIO register, performs a bitwise AND, and
writes the result back to the bit field in the 16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 16-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized. Extra left bits in AndData are
stripped.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioBitFieldAnd16 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 AndData
)
{
return MmioWrite16 (
Address,
BitFieldAnd16 (MmioRead16 (Address), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 16-bit MMIO register, performs a bitwise AND followed
by a bitwise OR, and writes the result back to the bit field in the
16-bit MMIO register.
Reads the 16-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 16-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized. Extra
left bits in both AndData and OrData are stripped.
If 16-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 16-bit boundary, then ASSERT().
If StartBit is greater than 15, then ASSERT().
If EndBit is greater than 15, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..15.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..15.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT16
EFIAPI
MmioBitFieldAndThenOr16 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT16 AndData,
IN UINT16 OrData
)
{
return MmioWrite16 (
Address,
BitFieldAndThenOr16 (MmioRead16 (Address), StartBit, EndBit, AndData, OrData)
);
}
/**
Reads a 32-bit MMIO register, performs a bitwise OR, and writes the
result back to the 32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 32-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioOr32 (
IN UINTN Address,
IN UINT32 OrData
)
{
return MmioWrite32 (Address, MmioRead32 (Address) | OrData);
}
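//
// Usage sketch (illustrative): set a single enable bit (bit 0) in a 32-bit
// MMIO register without disturbing the other bits. BIT0 is the usual EDK II
// macro for 0x00000001; the address is an assumed example value.
//
//   MmioOr32 (0xFED00060, BIT0);
//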
/**
Reads a 32-bit MMIO register, performs a bitwise AND, and writes the result
back to the 32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 32-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioAnd32 (
IN UINTN Address,
IN UINT32 AndData
)
{
return MmioWrite32 (Address, MmioRead32 (Address) & AndData);
}
/**
Reads a 32-bit MMIO register, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 32-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioAndThenOr32 (
IN UINTN Address,
IN UINT32 AndData,
IN UINT32 OrData
)
{
return MmioWrite32 (Address, (MmioRead32 (Address) & AndData) | OrData);
}
/**
Reads a bit field of a MMIO register.
Reads the bit field in a 32-bit MMIO register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Address The MMIO register to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@return The value read.
**/
UINT32
EFIAPI
MmioBitFieldRead32 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead32 (MmioRead32 (Address), StartBit, EndBit);
}
/**
Writes a bit field to a MMIO register.
Writes Value to the bit field of the MMIO register. The bit field is
specified by the StartBit and the EndBit. All other bits in the destination
MMIO register are preserved. The new value of the 32-bit register is returned.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param Value The new value of the bit field.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioBitFieldWrite32 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 Value
)
{
return MmioWrite32 (
Address,
BitFieldWrite32 (MmioRead32 (Address), StartBit, EndBit, Value)
);
}
/**
Reads a bit field in a 32-bit MMIO register, performs a bitwise OR, and
writes the result back to the bit field in the 32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 32-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized. Extra left bits in OrData
are stripped.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioBitFieldOr32 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 OrData
)
{
return MmioWrite32 (
Address,
BitFieldOr32 (MmioRead32 (Address), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 32-bit MMIO register, performs a bitwise AND, and
writes the result back to the bit field in the 32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 32-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized. Extra left bits in AndData are
stripped.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioBitFieldAnd32 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 AndData
)
{
return MmioWrite32 (
Address,
BitFieldAnd32 (MmioRead32 (Address), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 32-bit MMIO register, performs a bitwise AND followed
by a bitwise OR, and writes the result back to the bit field in the
32-bit MMIO register.
Reads the 32-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 32-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized. Extra
left bits in both AndData and OrData are stripped.
If 32-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 32-bit boundary, then ASSERT().
If StartBit is greater than 31, then ASSERT().
If EndBit is greater than 31, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..31.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..31.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT32
EFIAPI
MmioBitFieldAndThenOr32 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT32 AndData,
IN UINT32 OrData
)
{
return MmioWrite32 (
Address,
BitFieldAndThenOr32 (MmioRead32 (Address), StartBit, EndBit, AndData, OrData)
);
}
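//
// Usage sketch (illustrative): update bits 8..11 of a 32-bit MMIO register to
// 0xA. Passing AndData = 0 clears the field so OrData fully defines its new
// value; the address is an assumed example.
//
//   MmioBitFieldAndThenOr32 (0xFED00070, 8, 11, 0, 0xA);
//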
/**
Reads a 64-bit MMIO register, performs a bitwise OR, and writes the
result back to the 64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 64-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioOr64 (
IN UINTN Address,
IN UINT64 OrData
)
{
return MmioWrite64 (Address, MmioRead64 (Address) | OrData);
}
/**
Reads a 64-bit MMIO register, performs a bitwise AND, and writes the result
back to the 64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 64-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioAnd64 (
IN UINTN Address,
IN UINT64 AndData
)
{
return MmioWrite64 (Address, MmioRead64 (Address) & AndData);
}
/**
Reads a 64-bit MMIO register, performs a bitwise AND followed by a bitwise
OR, and writes the result back to the 64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 64-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
@param Address The MMIO register to write.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioAndThenOr64 (
IN UINTN Address,
IN UINT64 AndData,
IN UINT64 OrData
)
{
return MmioWrite64 (Address, (MmioRead64 (Address) & AndData) | OrData);
}
/**
Reads a bit field of a MMIO register.
Reads the bit field in a 64-bit MMIO register. The bit field is specified by
the StartBit and the EndBit. The value of the bit field is returned.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
@param Address The MMIO register to read.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@return The value read.
**/
UINT64
EFIAPI
MmioBitFieldRead64 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit
)
{
return BitFieldRead64 (MmioRead64 (Address), StartBit, EndBit);
}
/**
Writes a bit field to a MMIO register.
Writes Value to the bit field of the MMIO register. The bit field is
specified by the StartBit and the EndBit. All other bits in the destination
MMIO register are preserved. The new value of the 64-bit register is returned.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If Value is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param Value The new value of the bit field.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioBitFieldWrite64 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 Value
)
{
return MmioWrite64 (
Address,
BitFieldWrite64 (MmioRead64 (Address), StartBit, EndBit, Value)
);
}
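//
// Usage sketch (illustrative): program the 16-bit field spanning bits 16..31
// of a 64-bit MMIO register with 0x1234, preserving the rest of the register.
// The address is an assumed example value.
//
//   MmioBitFieldWrite64 (0xFED00080, 16, 31, 0x1234);
//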
/**
Reads a bit field in a 64-bit MMIO register, performs a bitwise OR, and
writes the result back to the bit field in the 64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise
OR between the read result and the value specified by OrData, and
writes the result to the 64-bit MMIO register specified by Address. The value
written to the MMIO register is returned. This function must guarantee that
all MMIO read and write operations are serialized. Extra left bits in OrData
are stripped.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param OrData The value to OR with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioBitFieldOr64 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 OrData
)
{
return MmioWrite64 (
Address,
BitFieldOr64 (MmioRead64 (Address), StartBit, EndBit, OrData)
);
}
/**
Reads a bit field in a 64-bit MMIO register, performs a bitwise AND, and
writes the result back to the bit field in the 64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, and writes the
result to the 64-bit MMIO register specified by Address. The value written to
the MMIO register is returned. This function must guarantee that all MMIO
read and write operations are serialized. Extra left bits in AndData are
stripped.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param AndData The value to AND with the read value from the MMIO register.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioBitFieldAnd64 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 AndData
)
{
return MmioWrite64 (
Address,
BitFieldAnd64 (MmioRead64 (Address), StartBit, EndBit, AndData)
);
}
/**
Reads a bit field in a 64-bit MMIO register, performs a bitwise AND followed
by a bitwise OR, and writes the result back to the bit field in the
64-bit MMIO register.
Reads the 64-bit MMIO register specified by Address, performs a bitwise AND
between the read result and the value specified by AndData, performs a
bitwise OR between the result of the AND operation and the value specified by
OrData, and writes the result to the 64-bit MMIO register specified by
Address. The value written to the MMIO register is returned. This function
must guarantee that all MMIO read and write operations are serialized. Extra
left bits in both AndData and OrData are stripped.
If 64-bit MMIO register operations are not supported, then ASSERT().
If Address is not aligned on a 64-bit boundary, then ASSERT().
If StartBit is greater than 63, then ASSERT().
If EndBit is greater than 63, then ASSERT().
If EndBit is less than StartBit, then ASSERT().
If AndData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
If OrData is larger than the bitmask value range specified by StartBit and EndBit, then ASSERT().
@param Address The MMIO register to write.
@param StartBit The ordinal of the least significant bit in the bit field.
Range 0..63.
@param EndBit The ordinal of the most significant bit in the bit field.
Range 0..63.
@param AndData The value to AND with the read value from the MMIO register.
@param OrData The value to OR with the result of the AND operation.
@return The value written back to the MMIO register.
**/
UINT64
EFIAPI
MmioBitFieldAndThenOr64 (
IN UINTN Address,
IN UINTN StartBit,
IN UINTN EndBit,
IN UINT64 AndData,
IN UINT64 OrData
)
{
return MmioWrite64 (
Address,
BitFieldAndThenOr64 (MmioRead64 (Address), StartBit, EndBit, AndData, OrData)
);
}
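//
// Usage sketch (illustrative): rewrite bits 32..35 of a 64-bit MMIO register
// by clearing the field (AndData = 0) and ORing in 0x7. The read-modify-write
// is serialized but not atomic with respect to other bus agents; the address
// is an assumed example value.
//
//   MmioBitFieldAndThenOr64 (0xFED00090, 32, 35, 0, 0x7);
//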
| 30,634 |
2,151 | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.setupwizard.navigationbar;
import android.app.Activity;
import android.app.Fragment;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.os.Bundle;
import android.util.AttributeSet;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.view.ViewTreeObserver;
import android.view.ViewTreeObserver.OnPreDrawListener;
import android.widget.Button;
/**
* Fragment class for controlling the custom navigation bar shown during setup wizard. Apps in the
* Android tree can use this by including the common.mk makefile. Apps outside of the tree can
* create a library project out of the source.
*/
public class SetupWizardNavBar extends Fragment implements OnPreDrawListener, OnClickListener {
private static final String TAG = "SetupWizardNavBar";
private static final int IMMERSIVE_FLAGS =
View.SYSTEM_UI_FLAG_HIDE_NAVIGATION | View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY;
private int mSystemUiFlags = IMMERSIVE_FLAGS | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION;
private ViewGroup mNavigationBarView;
private Button mNextButton;
private Button mBackButton;
private NavigationBarListener mCallback;
public interface NavigationBarListener {
public void onNavigationBarCreated(SetupWizardNavBar bar);
public void onNavigateBack();
public void onNavigateNext();
}
public SetupWizardNavBar() {
// no-arg constructor for fragments
}
@Override
public void onAttach(Activity activity) {
super.onAttach(activity);
mCallback = (NavigationBarListener) activity;
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
Context context = new ContextThemeWrapper(getActivity(), getNavbarTheme());
inflater = LayoutInflater.from(context);
mNavigationBarView = (ViewGroup) inflater.inflate(R.layout.setup_wizard_navbar_layout,
container, false);
mNextButton = (Button) mNavigationBarView.findViewById(R.id.setup_wizard_navbar_next);
mBackButton = (Button) mNavigationBarView.findViewById(R.id.setup_wizard_navbar_back);
mNextButton.setOnClickListener(this);
mBackButton.setOnClickListener(this);
return mNavigationBarView;
}
@Override
public void onViewCreated(View view, Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
mCallback.onNavigationBarCreated(this);
mNavigationBarView.setSystemUiVisibility(mSystemUiFlags);
// Set the UI flags before the draw because the visibility might change at
// unexpected / undetectable times, e.g. when transitioning from a finishing
// activity that had a keyboard
ViewTreeObserver viewTreeObserver = mNavigationBarView.getViewTreeObserver();
viewTreeObserver.addOnPreDrawListener(this);
}
@Override
public boolean onPreDraw() {
// View.setSystemUiVisibility checks whether the visibility value has changed
// before applying it, so the performance impact is contained
mNavigationBarView.setSystemUiVisibility(mSystemUiFlags);
return true;
}
/**
* Sets whether the system navigation bar should be hidden.
* @param useImmersiveMode True to activate immersive mode and hide the system navigation bar
*/
public void setUseImmersiveMode(boolean useImmersiveMode) {
// By default, enable layoutHideNavigation if immersive mode is used
setUseImmersiveMode(useImmersiveMode, useImmersiveMode);
}
public void setUseImmersiveMode(boolean useImmersiveMode, boolean layoutHideNavigation) {
if (useImmersiveMode) {
mSystemUiFlags |= IMMERSIVE_FLAGS;
if (layoutHideNavigation) {
mSystemUiFlags |= View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION;
}
} else {
mSystemUiFlags &= ~(IMMERSIVE_FLAGS | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION);
}
mNavigationBarView.setSystemUiVisibility(mSystemUiFlags);
}
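// Usage sketch (illustrative): an activity hosting this fragment can toggle
// immersive mode once the bar is available. "navBar" stands for the instance
// delivered through NavigationBarListener.onNavigationBarCreated().
//
//   navBar.setUseImmersiveMode(true);         // hide nav bar, lay out under it
//   navBar.setUseImmersiveMode(true, false);  // hide nav bar, keep layout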
private int getNavbarTheme() {
// Normally we can automatically guess the theme by comparing the foreground color against
// the background color. But we also allow specifying explicitly using
// setup_wizard_navbar_theme.
TypedArray attributes = getActivity().obtainStyledAttributes(
new int[] {
R.attr.setup_wizard_navbar_theme,
android.R.attr.colorForeground,
android.R.attr.colorBackground });
int theme = attributes.getResourceId(0, 0);
if (theme == 0) {
// Compare the value of the foreground against the background color to see if current
// theme is light-on-dark or dark-on-light.
float[] foregroundHsv = new float[3];
float[] backgroundHsv = new float[3];
Color.colorToHSV(attributes.getColor(1, 0), foregroundHsv);
Color.colorToHSV(attributes.getColor(2, 0), backgroundHsv);
boolean isDarkBg = foregroundHsv[2] > backgroundHsv[2];
theme = isDarkBg ? R.style.setup_wizard_navbar_theme_dark :
R.style.setup_wizard_navbar_theme_light;
}
attributes.recycle();
return theme;
}
@Override
public void onClick(View v) {
if (v == mBackButton) {
mCallback.onNavigateBack();
} else if (v == mNextButton) {
mCallback.onNavigateNext();
}
}
public Button getBackButton() {
return mBackButton;
}
public Button getNextButton() {
return mNextButton;
}
public static class NavButton extends Button {
public NavButton(Context context, AttributeSet attrs, int defStyleAttr, int defStyleRes) {
super(context, attrs, defStyleAttr, defStyleRes);
}
public NavButton(Context context, AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
}
public NavButton(Context context, AttributeSet attrs) {
super(context, attrs);
}
public NavButton(Context context) {
super(context);
}
@Override
public void setEnabled(boolean enabled) {
super.setEnabled(enabled);
// The color of the button is #de000000 / #deffffff when enabled. When disabled, apply
// additional 23% alpha, so the overall opacity is 20%.
setAlpha(enabled ? 1.0f : 0.23f);
}
}
}
| 2,785 |
1,998 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import datetime
import asyncio
import warnings
from typing import List, Callable
from botbuilder.schema import (
Activity,
ActivityTypes,
ChannelAccount,
ConversationAccount,
ResourceResponse,
ConversationReference,
)
from botbuilder.core.turn_context import TurnContext
from botbuilder.core.bot_adapter import BotAdapter
class ConsoleAdapter(BotAdapter):
"""
Lets a user communicate with a bot from a console window.
:Example:
import asyncio
from botbuilder.core import ConsoleAdapter
async def logic(context):
await context.send_activity('Hello World!')
adapter = ConsoleAdapter()
loop = asyncio.get_event_loop()
if __name__ == "__main__":
try:
loop.run_until_complete(adapter.process_activity(logic))
except KeyboardInterrupt:
pass
finally:
loop.stop()
loop.close()
"""
def __init__(self, reference: ConversationReference = None):
super(ConsoleAdapter, self).__init__()
self.reference = ConversationReference(
channel_id="console",
user=ChannelAccount(id="user", name="User1"),
bot=ChannelAccount(id="bot", name="Bot"),
conversation=ConversationAccount(id="convo1", name="", is_group=False),
service_url="",
)
# Warn users to pass in an instance of ConversationReference; otherwise the parameter will be ignored.
if reference is not None and not isinstance(reference, ConversationReference):
warnings.warn(
"ConsoleAdapter: `reference` argument is not an instance of ConversationReference and will "
"be ignored."
)
else:
self.reference.channel_id = getattr(
reference, "channel_id", self.reference.channel_id
)
self.reference.user = getattr(reference, "user", self.reference.user)
self.reference.bot = getattr(reference, "bot", self.reference.bot)
self.reference.conversation = getattr(
reference, "conversation", self.reference.conversation
)
self.reference.service_url = getattr(
reference, "service_url", self.reference.service_url
)
# The only attribute on self.reference without an initial value is activity_id, so if reference does not
# have a value for activity_id, default self.reference.activity_id to None
self.reference.activity_id = getattr(reference, "activity_id", None)
self._next_id = 0
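# Usage sketch (illustrative): a custom ConversationReference changes how the
# console user and bot are identified. The field names below are real
# ConversationReference attributes; the values are examples only.
#
#   reference = ConversationReference(
#       channel_id="console",
#       user=ChannelAccount(id="alice", name="Alice"),
#       bot=ChannelAccount(id="helpdesk", name="HelpDesk"),
#       conversation=ConversationAccount(id="support-1", name="", is_group=False),
#       service_url="",
#   )
#   adapter = ConsoleAdapter(reference)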
async def process_activity(self, logic: Callable):
"""
Begins listening to console input.
:param logic:
:return:
"""
while True:
msg = input()
if msg is None:
pass
else:
self._next_id += 1
activity = Activity(
text=msg,
channel_id="console",
from_property=ChannelAccount(id="user", name="User1"),
recipient=ChannelAccount(id="bot", name="Bot"),
conversation=ConversationAccount(id="Convo1"),
type=ActivityTypes.message,
timestamp=datetime.datetime.now(),
id=str(self._next_id),
)
activity = TurnContext.apply_conversation_reference(
activity, self.reference, True
)
context = TurnContext(self, activity)
await self.run_pipeline(context, logic)
async def send_activities(self, context: TurnContext, activities: List[Activity]) -> List[ResourceResponse]:
"""
Logs a series of activities to the console.
:param context:
:param activities:
:return:
"""
if context is None:
raise TypeError(
"ConsoleAdapter.send_activities(): `context` argument cannot be None."
)
if not isinstance(activities, list):
raise TypeError(
"ConsoleAdapter.send_activities(): `activities` argument must be a list."
)
if len(activities) == 0:
raise ValueError(
"ConsoleAdapter.send_activities(): `activities` argument cannot have a length of 0."
)
# Walk the activities in order, collecting one ResourceResponse per activity
# so the declared List[ResourceResponse] return type is honored.
responses = []
for activity in activities:
    responses.append(ResourceResponse())
    if activity.type == "delay":
        await asyncio.sleep(activity.delay)
    elif activity.type == ActivityTypes.message:
        if activity.attachments is not None and len(activity.attachments) > 0:
            append = (
                "(1 attachment)"
                if len(activity.attachments) == 1
                else f"({len(activity.attachments)} attachments)"
            )
            print(f"{activity.text} {append}")
        else:
            print(activity.text)
    else:
        print(f"[{activity.type}]")
return responses
async def delete_activity(
self, context: TurnContext, reference: ConversationReference
):
"""
Not supported for the ConsoleAdapter. Calling this method or `TurnContext.delete_activity()`
will result in an error being raised.
:param context:
:param reference:
:return:
"""
raise NotImplementedError("ConsoleAdapter.delete_activity(): not supported.")
async def update_activity(self, context: TurnContext, activity: Activity):
"""
Not supported for the ConsoleAdapter. Calling this method or `TurnContext.update_activity()`
will result in an error being raised.
:param context:
:param activity:
:return:
"""
raise NotImplementedError("ConsoleAdapter.update_activity(): not supported.")
| 2,989 |
666 | <gh_stars>100-1000
package com.orhanobut.wasp.http;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* Make a POST request to a REST path relative to the base URL
*/
@Documented
@Target(METHOD)
@Retention(RUNTIME)
@RestMethod(value = "POST", hasBody = true)
public @interface POST {
String value();
}
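// Usage sketch (illustrative): declaring a POST endpoint on a Wasp service
// interface. The service and method names are made up; @Body and Callback are
// assumed to come from the same com.orhanobut.wasp package.
//
//   public interface UserService {
//     @POST("/users")
//     void createUser(@Body User user, Callback<User> callback);
//   }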
| 159 |
1,350 | <filename>sdk/security/azure-resourcemanager-security/src/main/java/com/azure/resourcemanager/security/models/ScanningFunctionality.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.security.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/** Defines values for ScanningFunctionality. */
public final class ScanningFunctionality extends ExpandableStringEnum<ScanningFunctionality> {
/** Static value ScannerDevice for ScanningFunctionality. */
public static final ScanningFunctionality SCANNER_DEVICE = fromString("ScannerDevice");
/** Static value NotScannerDevice for ScanningFunctionality. */
public static final ScanningFunctionality NOT_SCANNER_DEVICE = fromString("NotScannerDevice");
/**
* Creates or finds a ScanningFunctionality from its string representation.
*
* @param name a name to look for.
* @return the corresponding ScanningFunctionality.
*/
@JsonCreator
public static ScanningFunctionality fromString(String name) {
return fromString(name, ScanningFunctionality.class);
}
/** @return known ScanningFunctionality values. */
public static Collection<ScanningFunctionality> values() {
return values(ScanningFunctionality.class);
}
}
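// Usage sketch (illustrative): values can be referenced directly or parsed
// from their wire representation. ExpandableStringEnum caches instances per
// name, so repeated lookups typically return the same object.
//
//   ScanningFunctionality functionality = ScanningFunctionality.fromString("ScannerDevice");
//   boolean isScanner = ScanningFunctionality.SCANNER_DEVICE.equals(functionality);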
| 434 |
335 | <reponame>Safal08/Hacktoberfest-1
{
"word": "Naturopathy",
"definitions": [
"A system of alternative medicine based on the theory that diseases can be successfully treated or prevented without the use of drugs, by techniques such as control of diet, exercise, and massage."
],
"parts-of-speech": "Noun"
} | 106 |
2,137 | package com.publiccms.common.handler;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.context.annotation.AnnotationBeanNameGenerator;
import org.springframework.util.Assert;
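/**
 * Bean name generator that defaults to the fully-qualified class name rather
 * than Spring's usual short-name convention, avoiding bean name collisions
 * between classes that share a simple name across packages.
 */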
public class FullBeanNameGenerator extends AnnotationBeanNameGenerator {
@Override
protected String buildDefaultBeanName(BeanDefinition definition) {
String beanClassName = definition.getBeanClassName();
Assert.state(beanClassName != null, "No bean class name set");
return beanClassName;
}
} | 179 |
303 | <reponame>rgacogne/bgpq3
#ifndef SX_SLENTRY_H_
#define SX_SLENTRY_H_
#if HAVE_SYS_QUEUE_H && HAVE_STAILQ_IN_SYS_QUEUE
#include <sys/queue.h>
#else
#include "sys_queue.h"
#endif
#if HAVE_SYS_TREE_H
#include <sys/tree.h>
#else
#include "sys_tree.h"
#endif
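/* Singly-linked list entry (STAILQ) carrying one line of text. */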
struct sx_slentry {
STAILQ_ENTRY(sx_slentry) next;
char* text;
};
struct sx_slentry* sx_slentry_new(char* text);
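/* Red-black tree entry (RB) carrying one line of text. */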
struct sx_tentry {
RB_ENTRY(sx_tentry) entry;
char* text;
};
struct sx_tentry* sx_tentry_new(char* text);
#endif
| 248 |
12,278 | //---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 <NAME> <<EMAIL>>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#define BOOST_TEST_MODULE TestInteropVTK
#include <boost/test/unit_test.hpp>
#include <vtkFloatArray.h>
#include <vtkMatrix4x4.h>
#include <vtkNew.h>
#include <vtkPoints.h>
#include <vtkSmartPointer.h>
#include <vtkUnsignedCharArray.h>
#include <boost/compute/system.hpp>
#include <boost/compute/algorithm/sort.hpp>
#include <boost/compute/container/vector.hpp>
#include <boost/compute/detail/is_contiguous_iterator.hpp>
#include <boost/compute/interop/vtk.hpp>
#include "check_macros.hpp"
#include "context_setup.hpp"
namespace compute = boost::compute;
BOOST_AUTO_TEST_CASE(bounds)
{
using compute::float4_;
// create vtk points
vtkNew<vtkPoints> points;
points->InsertNextPoint(0.0, 0.0, 0.0);
points->InsertNextPoint(1.0, 2.0, 1.0);
points->InsertNextPoint(-1.0, -3.0, -1.0);
points->InsertNextPoint(0.5, 2.5, 1.5);
// copy points to vector on gpu
compute::vector<float4_> vector(points->GetNumberOfPoints(), context);
compute::vtk_copy_points_to_buffer(points.GetPointer(), vector.begin(), queue);
// compute bounds
double bounds[6];
compute::vtk_compute_bounds(vector.begin(), vector.end(), bounds, queue);
// check bounds
BOOST_CHECK_CLOSE(bounds[0], -1.0, 1e-8);
BOOST_CHECK_CLOSE(bounds[1], 1.0, 1e-8);
BOOST_CHECK_CLOSE(bounds[2], -3.0, 1e-8);
BOOST_CHECK_CLOSE(bounds[3], 2.5, 1e-8);
BOOST_CHECK_CLOSE(bounds[4], -1.0, 1e-8);
BOOST_CHECK_CLOSE(bounds[5], 1.5, 1e-8);
}
BOOST_AUTO_TEST_CASE(copy_uchar_array)
{
// create a vtk unsigned char array containing 3 RGBA colors
vtkNew<vtkUnsignedCharArray> array;
array->SetNumberOfComponents(4);
unsigned char red[4] = { 255, 0, 0, 255 };
array->InsertNextTupleValue(red);
unsigned char green[4] = { 0, 255, 0, 255 };
array->InsertNextTupleValue(green);
unsigned char blue[4] = { 0, 0, 255, 255 };
array->InsertNextTupleValue(blue);
// create vector<uchar4> on device and copy values from vtk array
compute::vector<compute::uchar4_> vector(3, context);
compute::vtk_copy_data_array_to_buffer(
array.GetPointer(),
compute::make_buffer_iterator<compute::uchar_>(vector.get_buffer(), 0),
queue
);
// check values
std::vector<compute::uchar4_> host_vector(3);
compute::copy(
vector.begin(), vector.end(), host_vector.begin(), queue
);
BOOST_CHECK(host_vector[0] == compute::uchar4_(255, 0, 0, 255));
BOOST_CHECK(host_vector[1] == compute::uchar4_(0, 255, 0, 255));
BOOST_CHECK(host_vector[2] == compute::uchar4_(0, 0, 255, 255));
}
BOOST_AUTO_TEST_CASE(sort_float_array)
{
// create vtk float array
vtkNew<vtkFloatArray> array;
array->InsertNextValue(2.5f);
array->InsertNextValue(1.0f);
array->InsertNextValue(6.5f);
array->InsertNextValue(4.0f);
// create vector on device and copy values from vtk array
compute::vector<float> vector(4, context);
compute::vtk_copy_data_array_to_buffer(array.GetPointer(), vector.begin(), queue);
// sort values on the gpu
compute::sort(vector.begin(), vector.end(), queue);
CHECK_RANGE_EQUAL(float, 4, vector, (1.0f, 2.5f, 4.0f, 6.5f));
// copy sorted values back to the vtk array
compute::vtk_copy_buffer_to_data_array(
vector.begin(), vector.end(), array.GetPointer(), queue
);
BOOST_CHECK_EQUAL(array->GetValue(0), 1.0f);
BOOST_CHECK_EQUAL(array->GetValue(1), 2.5f);
BOOST_CHECK_EQUAL(array->GetValue(2), 4.0f);
BOOST_CHECK_EQUAL(array->GetValue(3), 6.5f);
}
BOOST_AUTO_TEST_SUITE_END()
| 1,620 |
543 | <filename>tests/test_functional.py
import json
import os
from unittest.mock import call, patch
import pandas as pd
import pytest
from mlblocks import MLPipeline
from mlblocks.discovery import load_pipeline
from orion import functional
from orion.core import Orion
class TestLoadData:
@classmethod
def setup_class(cls):
cls.data = pd.DataFrame({
'timestamp': list(range(100)),
'value': [1] * 100,
})
def test_load_data_df(self):
data = functional._load_data(self.data)
assert data is self.data
def test_load_data_path(self, tmpdir):
path = os.path.join(tmpdir, 'data.csv')
self.data.to_csv(path, index=False)
data = functional._load_data(path)
pd.testing.assert_frame_equal(data, self.data)
def test_load_data_none(self):
data = functional._load_data(None)
assert data is None
class TestLoadDict:
def test_load_dict_dict(self):
a_dict = {'a': 'dict'}
returned = functional._load_dict(a_dict)
assert returned is a_dict
def test_load_dict_json(self, tmpdir):
a_dict = {'a': 'dict'}
path = os.path.join(tmpdir, 'a.json')
with open(path, 'w') as json_file:
json.dump(a_dict, json_file)
returned = functional._load_dict(path)
assert returned == a_dict
def test_load_dict_none(self):
returned = functional._load_dict(None)
assert returned is None
class TestLoadOrion:
@classmethod
def setup_class(cls):
data = pd.DataFrame({
'timestamp': list(range(100)),
'value': [1] * 100,
})
cls.orion = Orion('dummy')
cls.orion.fit(data)
def test_load_orion_orion(self):
orion = functional._load_orion(self.orion)
assert orion is self.orion
def test_load_orion_pickle(self, tmpdir):
path = os.path.join(tmpdir, 'orion.pkl')
self.orion.save(path)
orion = functional._load_orion(path)
assert orion is not self.orion
assert orion == self.orion
def test_load_orion_name(self):
orion = functional._load_orion('dummy')
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert not orion._fitted
assert orion._hyperparameters is None
def test_load_orion_json_path(self, tmpdir):
pipeline = load_pipeline('dummy')
path = os.path.join(tmpdir, 'pipeline.json')
with open(path, 'w') as json_file:
json.dump(pipeline, json_file)
orion = functional._load_orion(path)
assert isinstance(orion, Orion)
assert orion._pipeline == path
assert not orion._fitted
assert orion._hyperparameters is None
def test_load_orion_dict(self):
pipeline = load_pipeline('dummy')
orion = functional._load_orion(pipeline)
assert isinstance(orion, Orion)
assert orion._pipeline == pipeline
assert not orion._fitted
assert orion._hyperparameters is None
def test_load_orion_mlpipeline(self, tmpdir):
pipeline = MLPipeline('dummy')
orion = functional._load_orion(pipeline)
assert isinstance(orion, Orion)
assert orion._pipeline == pipeline
assert not orion._fitted
assert orion._hyperparameters is None
def test_load_orion_hyperparams(self):
hyperparams = {
"orion.primitives.detectors.ThresholdDetector#1": {
"ratio": 0.9
}
}
orion = functional._load_orion('dummy', hyperparams)
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert not orion._fitted
assert orion._hyperparameters == hyperparams
def test_load_orion_invalid(self):
with pytest.raises(ValueError):
functional._load_orion('invalid')
class TestFitPipeline:
@classmethod
def setup_class(cls):
cls.data = pd.DataFrame({
'timestamp': list(range(100)),
'value': [1] * 100,
})
@patch('orion.core.Orion.DEFAULT_PIPELINE', new='dummy')
def test_fit_pipeline_default(self):
orion = functional.fit_pipeline(self.data)
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert orion._fitted
assert orion._hyperparameters is None
def test_fit_pipeline_dict(self):
pipeline = load_pipeline('dummy')
orion = functional.fit_pipeline(self.data, pipeline)
assert isinstance(orion, Orion)
assert orion._pipeline == pipeline
assert orion._fitted
assert orion._hyperparameters is None
def test_fit_pipeline_name(self):
orion = functional.fit_pipeline(self.data, 'dummy')
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert orion._fitted
assert orion._hyperparameters is None
def test_fit_pipeline_csv(self, tmpdir):
data_path = os.path.join(tmpdir, 'data.csv')
self.data.to_csv(data_path, index=False)
orion = functional.fit_pipeline(data_path, 'dummy')
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert orion._fitted
assert orion._hyperparameters is None
def test_fit_pipeline_hyperparams_dict(self):
hyperparams = {
"orion.primitives.detectors.ThresholdDetector#1": {
"ratio": 0.9
}
}
orion = functional.fit_pipeline(self.data, 'dummy', hyperparams)
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert orion._fitted
assert orion._hyperparameters == hyperparams
def test_fit_pipeline_hyperparams_json(self, tmpdir):
hyperparams = {
"orion.primitives.detectors.ThresholdDetector#1": {
"ratio": 0.9
}
}
hyperparams_path = os.path.join(tmpdir, 'hyperparams.json')
with open(hyperparams_path, 'w') as json_file:
json.dump(hyperparams, json_file)
orion = functional.fit_pipeline(self.data, 'dummy', hyperparams_path)
assert isinstance(orion, Orion)
assert orion._pipeline == 'dummy'
assert orion._fitted
assert orion._hyperparameters == hyperparams
def test_fit_pipeline_save_path(self, tmpdir):
path = os.path.join(tmpdir, 'some/path.pkl')
returned = functional.fit_pipeline(self.data, 'dummy', save_path=path)
assert returned is None
assert os.path.isfile(path)
class TestDetectAnomalies:
@classmethod
def setup_class(cls):
cls.clean = pd.DataFrame({
'timestamp': list(range(100)),
'value': [1] * 100,
})
cls.anomalous = pd.DataFrame({
'timestamp': list(range(100, 200)),
'value': [1] * 45 + [10] * 10 + [1] * 45
})
cls.events = pd.DataFrame([
{'start': 145, 'end': 155, 'severity': 9.0}
], columns=['start', 'end', 'severity'])
cls.all_data = pd.concat((cls.clean, cls.anomalous))
cls.all_events = pd.DataFrame([
{'start': 145, 'end': 155, 'severity': 4.275}
], columns=['start', 'end', 'severity'])
@patch('orion.core.Orion.DEFAULT_PIPELINE', new='dummy')
def test_detect_anomalies_fit_default(self):
anomalies = functional.detect_anomalies(
data=self.anomalous,
train_data=self.clean
)
pd.testing.assert_frame_equal(self.events, anomalies)
def test_detect_anomalies_fit_pipeline(self):
anomalies = functional.detect_anomalies(
data=self.anomalous,
pipeline='dummy',
train_data=self.clean
)
pd.testing.assert_frame_equal(self.events, anomalies)
@patch('orion.core.Orion.DEFAULT_PIPELINE', new='dummy')
def test_detect_anomalies_fit_hyperparams(self):
hyperparams = {
"orion.primitives.detectors.ThresholdDetector#1": {
"ratio": 0.9
}
}
anomalies = functional.detect_anomalies(
data=self.anomalous,
hyperparameters=hyperparams,
train_data=self.clean
)
pd.testing.assert_frame_equal(self.events, anomalies)
    def test_detect_anomalies_fit_pipeline_dict(self):
pipeline = load_pipeline('dummy')
anomalies = functional.detect_anomalies(
data=self.anomalous,
pipeline=pipeline,
train_data=self.clean
)
pd.testing.assert_frame_equal(self.events, anomalies)
def test_detect_anomalies_fitted_orion(self):
orion = functional.fit_pipeline(self.clean, 'dummy')
anomalies = functional.detect_anomalies(
data=self.anomalous,
pipeline=orion,
)
pd.testing.assert_frame_equal(self.events, anomalies)
def test_detect_anomalies_saved_orion(self, tmpdir):
orion_path = os.path.join(tmpdir, 'orion.pkl')
functional.fit_pipeline(self.clean, 'dummy', save_path=orion_path)
anomalies = functional.detect_anomalies(
data=self.anomalous,
pipeline=orion_path,
)
pd.testing.assert_frame_equal(self.events, anomalies)
class TestEvaluatePipeline:
@patch('orion.functional._load_orion')
@patch('orion.functional._load_data')
def test_evaluate_pipeline_no_fit(self, load_data_mock, load_orion_mock):
load_data_mock.side_effect = lambda x: x
ret = functional.evaluate_pipeline('data', 'truth', 'pipeline', 'hyperparams', 'metrics')
load_data_calls = [
call('data'),
call('truth'),
]
assert load_data_calls == load_data_mock.call_args_list
load_orion_mock.assert_called_once_with('pipeline', 'hyperparams')
orion = load_orion_mock.return_value
orion.detect.assert_called_once_with('data', 'truth', False, None, 'metrics')
assert ret == orion.detect.return_value
@patch('orion.functional._load_orion')
@patch('orion.functional._load_data')
def test_evaluate_pipeline_fit(self, load_data_mock, load_orion_mock):
load_data_mock.side_effect = lambda x: x
ret = functional.evaluate_pipeline(
'data', 'truth', 'pipeline', 'hyperparams', 'metrics', 'train_data')
load_data_calls = [
call('data'),
call('truth'),
call('train_data'),
]
assert load_data_calls == load_data_mock.call_args_list
load_orion_mock.assert_called_once_with('pipeline', 'hyperparams')
orion = load_orion_mock.return_value
orion.detect.assert_called_once_with('data', 'truth', True, 'train_data', 'metrics')
assert ret == orion.detect.return_value
| 5,023 |
6,240 | {
"schema-version": "0.2",
"changes": [
{
"category": "packaging",
"description": "Fix packaging multiple local directories as dependencies\n(`#1047 <https://github.com/aws/chalice/pull/1047>`__)",
"type": "bugfix"
},
{
"category": "event-source",
"description": "Add support for passing SNS ARNs to ``on_sns_message``\n(`#1048 <https://github.com/aws/chalice/pull/1048>`__)",
"type": "feature"
},
{
"category": "blueprint",
"description": "Add support for Blueprints\n(`#1023 <https://github.com/aws/chalice/pull/1023>`__)",
"type": "feature"
},
{
"category": "config",
"description": "Add support for opting-in to experimental features\n(`#1053 <https://github.com/aws/chalice/pull/1053>`__)",
"type": "feature"
},
{
"category": "event-source",
"description": "Provide Lambda context in event object\n(`#856 <https://github.com/aws/chalice/issues/856>`__)",
"type": "feature"
}
]
}
| 434 |
831 | /*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.profilers.memory.adapters.classifiers;
import static com.android.tools.profilers.memory.adapters.ClassDb.INVALID_CLASS_ID;
import com.android.tools.adtui.model.filter.Filter;
import com.android.tools.profilers.memory.adapters.ClassDb;
import com.android.tools.profilers.memory.adapters.InstanceObject;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Classifies {@link InstanceObject}s based on their {@link Class}.
*/
public class ClassSet extends ClassifierSet {
public static final ClassSet EMPTY_SET = new ClassSet(new ClassDb.ClassEntry(INVALID_CLASS_ID, INVALID_CLASS_ID, "null"));
@NotNull private final ClassDb.ClassEntry myClassEntry;
@NotNull
public static Classifier createDefaultClassifier() {
return new ClassClassifier();
}
public ClassSet(@NotNull ClassDb.ClassEntry classEntry) {
super(classEntry.getSimpleClassName());
myClassEntry = classEntry;
}
@NotNull
public ClassDb.ClassEntry getClassEntry() {
return myClassEntry;
}
@NotNull
@Override
public Classifier createSubClassifier() {
// Do nothing, as this is a leaf node (presently).
return Classifier.IDENTITY_CLASSIFIER;
}
@Override
protected void applyFilter(@NotNull Filter filter, boolean hasMatchedAncestor, boolean filterChanged) {
if (!filterChanged && !myNeedsRefiltering) {
return;
}
myIsMatched = matches(filter);
myFilterMatchCount = myIsMatched ? 1 : 0;
myIsFiltered = filter != null && !myIsMatched && !hasMatchedAncestor;
myNeedsRefiltering = false;
}
@Override
protected boolean matches(@NotNull Filter filter) {
return filter.matches(myClassEntry.getClassName());
}
private static final class ClassClassifier extends Classifier {
@NotNull private final Map<ClassDb.ClassEntry, ClassSet> myClassMap = new LinkedHashMap<>();
@Nullable
@Override
public ClassifierSet getClassifierSet(@NotNull InstanceObject instance, boolean createIfAbsent) {
ClassDb.ClassEntry classEntry = instance.getClassEntry();
ClassSet classSet = myClassMap.get(classEntry);
if (classSet == null && createIfAbsent) {
classSet = new ClassSet(classEntry);
myClassMap.put(classEntry, classSet);
}
return classSet;
}
@NotNull
@Override
public List<ClassifierSet> getFilteredClassifierSets() {
return myClassMap.values().stream().filter(child -> !child.getIsFiltered()).collect(Collectors.toList());
}
@NotNull
@Override
protected List<ClassifierSet> getAllClassifierSets() {
return myClassMap.values().stream().collect(Collectors.toList());
}
}
}
| 1,104 |
389 | <filename>gosu-core-api/src/main/java/gw/lang/reflect/ITypeRefFactory.java
/*
* Copyright 2014 <NAME>, Inc.
*/
package gw.lang.reflect;
import java.util.List;
public interface ITypeRefFactory
{
public static final String SYSTEM_PROXY_SUFFIX = "_Proxy";
public static final String USER_PROXY_SUFFIX = "_TypeProxy";
ITypeRef create( IType type );
ITypeRef get( IType type );
ITypeRef get( String strTypeName );
void clearCaches();
boolean isClearing();
List<String> getTypesWithPrefix(String namespace, String prefix);
List<ITypeRef> getSubordinateRefs(String topLevelTypeName);
}
| 200 |
1,398 | # -*- coding: utf-8 -*-
import mock
import completor
import re
from completers.common import Omni # noqa
from completor.compat import to_unicode
def test_has_omnifunc(vim_mod):
vim_mod.vars = {
'completor_css_omni_trigger': br'([\w-]+|@[\w-]*|[\w-]+:\s*[\w-]*)$'
}
vim_mod.current.buffer.options['omnifunc'] = b''
omni = completor.get('omni')
assert omni.has_omnifunc('css') is False
omni.trigger_cache = {}
vim_mod.current.buffer.options['omnifunc'] = b'csscomplete#CompleteCSS'
assert omni.has_omnifunc('css') is True
def test_on_data(vim_mod):
omnifunc = mock.Mock()
vim_mod.current.buffer.options['omnifunc'] = b'csscomplete#CompleteCSS'
vim_mod.funcs[b'csscomplete#CompleteCSS'] = omnifunc
vim_mod.current.window.cursor = (1, 2)
vim_mod.buffers = []
omni = completor.get('omni')
omni.trigger_cache = {}
omni.ft = 'css'
assert omni.on_data(b'complete', b'text') == []
omni.trigger_cache = {
'css': re.compile(r'([\w-]+|@[\w-]*|[\w-]+:\s*[\w-]*)$', re.X | re.U)}
omnifunc.side_effect = [1, [b'text-transform']]
assert omni.on_data(b'complete', b'#') == []
omnifunc.side_effect = [0, [b'text-transform']]
vim_mod.current.window.cursor = (1, 2)
omni.input_data = 'text'
    assert omni.on_data(b'complete', b'text') == [{'word': b'text-transform', 'offset': 0}]  # noqa
omnifunc.assert_called_with(0, b'text')
omnifunc.side_effect = [17, [b'text-transform']]
vim_mod.current.window.cursor = (1, 2)
omni.input_data = to_unicode('które się nią opiekują', 'utf-8')
omni.on_data(b'complete', omni.input_data)
omnifunc.assert_called_with(0, b'opiekuj\xc4\x85')
| 783 |
462 | <gh_stars>100-1000
#include <stdint.h>
#include <gb/drawing.h>
/* Print a long number in any radix */
extern char *digits;
void gprintln(int16_t number, int8_t radix, int8_t signed_value) NONBANKED
{
uint16_t l;
if(number < 0 && signed_value) {
wrtchr('-');
number = -number;
}
if((l = (uint16_t)number / (uint16_t)radix) != 0)
gprintln(l, radix, UNSIGNED);
wrtchr(digits[(uint16_t)number % (uint16_t)radix]);
}
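/* Illustrative calls (the SIGNED/UNSIGNED flags are assumed to come from
   <gb/drawing.h>): gprintln(-255, 16, SIGNED) draws "-FF", since the sign is
   emitted first and the recursion prints digits most-significant-first;
   gprintln(255, 10, UNSIGNED) draws "255". */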
| 196 |
2,279 | <filename>ime/addons/src/main/java/com/anysoftkeyboard/addons/Support.java
package com.anysoftkeyboard.addons;
import android.content.Context;
import android.content.res.Resources;
import android.util.SparseIntArray;
import androidx.annotation.NonNull;
import com.anysoftkeyboard.base.utils.Logger;
import java.util.ArrayList;
import java.util.List;
class Support {
private static final String TAG = Support.class.getName();
/**
     * Creates a mapping between the local styleable and the remote. NOTE: the return value may
     * have a different length; this can happen if a certain attr is not available in the
     * remote-context and therefore cannot be queried.
*
* @param localStyleableArray the local styleable to map against
* @param localContext local APK's Context
* @param remoteContext remote package's Context
* @param attributeIdMap a mapping between the remote-id -> local-id
* @return Always returns the remote version of localStyleableArray
*/
public static int[] createBackwardCompatibleStyleable(
@NonNull int[] localStyleableArray,
@NonNull Context localContext,
@NonNull Context remoteContext,
@NonNull SparseIntArray attributeIdMap) {
final String remotePackageName = remoteContext.getPackageName();
if (localContext.getPackageName().equals(remotePackageName)) {
Logger.d(
TAG,
"This is a local context ("
+ remotePackageName
+ "), optimization will be done.");
// optimization
for (int attrId : localStyleableArray) {
attributeIdMap.put(attrId, attrId);
}
return localStyleableArray;
}
final Resources localRes = localContext.getResources();
final Resources remoteRes = remoteContext.getResources();
List<Integer> styleableIdList = new ArrayList<>(localStyleableArray.length);
for (int attrId : localStyleableArray) {
final boolean isAndroidAttribute =
localRes.getResourcePackageName(attrId).equals("android");
final int remoteAttrId;
if (isAndroidAttribute) {
// android attribute IDs are the same always. So, I can optimize.
remoteAttrId = attrId;
} else {
final String attributeName = localRes.getResourceEntryName(attrId);
remoteAttrId = remoteRes.getIdentifier(attributeName, "attr", remotePackageName);
Logger.d(
TAG,
"attr "
+ attributeName
+ ", local id "
+ attrId
+ ", remote id "
+ remoteAttrId);
}
if (remoteAttrId != 0) {
attributeIdMap.put(remoteAttrId, attrId);
styleableIdList.add(remoteAttrId);
}
}
final int[] remoteMappedStyleable = new int[styleableIdList.size()];
for (int i = 0; i < remoteMappedStyleable.length; i++) {
remoteMappedStyleable[i] = styleableIdList.get(i);
}
return remoteMappedStyleable;
}
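    // Hypothetical usage sketch (identifiers below are illustrative, not part of
    // this class): map a local styleable against a remote theme package before
    // querying attributes, remembering that the returned array may be shorter.
    //
    //   SparseIntArray idMap = new SparseIntArray();
    //   int[] remoteStyleable = Support.createBackwardCompatibleStyleable(
    //           R.styleable.Keyboard, localContext, remoteContext, idMap);
    //   // remoteContext.obtainStyledAttributes(..., remoteStyleable) can then be
    //   // used, translating each remote attr id back through idMap.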
}
| 1,506 |
2,729 |
#pragma once
#include "arithmetics.hpp"
namespace msdfgen {
typedef unsigned char byte;
inline byte pixelFloatToByte(float x) {
return byte(clamp(256.f*x, 255.f));
}
inline float pixelByteToFloat(byte x) {
return 1.f/255.f*float(x);
}
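// Round-trip sanity check (worked out by hand, assuming clamp() from
// arithmetics.hpp clamps to [0, 255]): pixelFloatToByte(0.5f) yields 128, and
// pixelByteToFloat(128) gives ~0.50196f, so the conversions are inverse up to
// a quantization error of at most 1/255.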
}
| 106 |
1,338 | /*
* EncryptionUtils.h
* Copyright (C) 2019 <NAME> <<EMAIL>>
*
* Distributed under terms of the MIT license.
*/
#ifndef ENCRYPTIONUTILS_H
#define ENCRYPTIONUTILS_H
const char* EncryptionType(const char* path);
#endif /* !ENCRYPTIONUTILS_H */
| 96 |
333 | <filename>src/main/java/com/alipay/api/domain/OuterTargetingItem.java<gh_stars>100-1000
package com.alipay.api.domain;
import java.util.List;
import com.alipay.api.AlipayObject;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.internal.mapping.ApiListField;
/**
 * Ad group targeting
*
* @author auto create
* @since 1.0, 2019-10-16 10:23:25
*/
public class OuterTargetingItem extends AlipayObject {
private static final long serialVersionUID = 7196911786156722266L;
/**
	 * Targeting type:
REGION_LIST: real-time location
AD_POS_LIST: ad slot targeting
OUTER_KOUBEI_INTEREST_TAG_LIST: Koubei interest preference
OUTER_KOUBEI_CROWD_TAG_LIST: Koubei audience
*/
@ApiField("type")
private String type;
/**
     * Targeting values
*/
@ApiListField("value_list")
@ApiField("string")
private List<String> valueList;
public String getType() {
return this.type;
}
public void setType(String type) {
this.type = type;
}
public List<String> getValueList() {
return this.valueList;
}
public void setValueList(List<String> valueList) {
this.valueList = valueList;
}
}
| 515 |
1,389 | <reponame>zbmain/PGL
#-*- coding: utf-8 -*-
from .dataset import Dataset, StreamDataset, HadoopDataset
from .dataloader import Dataloader
| 59 |
377 | <reponame>platschi/trustroots<filename>public/locales/id/contacts.json
{
"Cancel": "Batal",
"(additional {{count}} pending)": "(tambahan {{count}} menunggu keputusan)",
"{{count}} contacts in common": "{{count}} kontak yang serupa",
"{{count}} contacts": "{{count}} kontak",
"You received a contact request.": "Anda menerima permintaan kontak.",
"Yes, revoke request": "Ya, cabut permintaan",
"Yes, remove contact": "Ya, hapus kontak",
"Yes, decline request": "Ya, tolak permintaan",
"Wait a moment…": "Tunggu sebentar…",
"Since {{created, MMM D, YYYY}}": "Sejak {{created, MMM D, YYYY}}",
"Search contacts": "Cari kontak",
"Revoke contact request?": "Cabut permintaan kontak?",
"Revoke Request": "Cabut Permintaan",
"Requested {{created, MMM D, YYYY}}": "Diminta {{created, MMM D, YYYY}}",
"Remove contact?": "Hapus kontak?",
"No contacts yet.": "Belum ada kontak.",
"Lives in <2>{{locationLiving}}</2>": "Tinggal di <2>{{locationLiving}}</2>",
"From <2>{{locationFrom}}</2>": "Dari <2>{{locationFrom}}</2>",
"Decline contact request?": "Tolak permintaan kontak?",
"Decline Request": "Tolak Permintaan",
"Contact request sent and pending.": "Permintaan kontak terkirim dan menunggu keputusan.",
"Connected since {{created, MMM D, YYYY}}": "Terhubung sejak {{created, MMM D, YYYY}}",
"Confirm Request": "Terima Permintaan",
"Since {{created, LL}}": "Sejak {{created, LL}}",
"Requested {{date, LL}}": "Diminta {{date, LL}}",
"Requested {{created, LL}}": "Diminta {{created, LL}}",
"Connected since {{date, LL}}": "Terhubung sejak {{date, LL}}"
}
| 668 |
391 | package com.sparrowwallet.sparrow.io;
import com.sparrowwallet.drongo.wallet.WalletModel;
public interface Export {
String getName();
WalletModel getWalletModel();
}
| 59 |
777 | <filename>cc/output/begin_frame_args_unittest.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string>
#include "cc/output/begin_frame_args.h"
#include "cc/test/begin_frame_args_test.h"
#include "testing/gtest/include/gtest/gtest-spi.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cc {
namespace {
TEST(BeginFrameArgsTest, Helpers) {
// Quick create methods work
BeginFrameArgs args0 =
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 0, 1);
EXPECT_TRUE(args0.IsValid()) << args0;
BeginFrameArgs args1 =
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 0, 1, 0, 0, -1);
EXPECT_FALSE(args1.IsValid()) << args1;
BeginFrameArgs args2 =
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 10, 1, 2, 3);
EXPECT_TRUE(args2.IsValid()) << args2;
EXPECT_EQ(123u, args2.source_id);
EXPECT_EQ(10u, args2.sequence_number);
EXPECT_EQ(1, args2.frame_time.ToInternalValue());
EXPECT_EQ(2, args2.deadline.ToInternalValue());
EXPECT_EQ(3, args2.interval.ToInternalValue());
EXPECT_EQ(BeginFrameArgs::NORMAL, args2.type);
BeginFrameArgs args4 = CreateBeginFrameArgsForTesting(
BEGINFRAME_FROM_HERE, 234, 20, 1, 2, 3, BeginFrameArgs::MISSED);
EXPECT_TRUE(args4.IsValid()) << args4;
EXPECT_EQ(234u, args4.source_id);
EXPECT_EQ(20u, args4.sequence_number);
EXPECT_EQ(1, args4.frame_time.ToInternalValue());
EXPECT_EQ(2, args4.deadline.ToInternalValue());
EXPECT_EQ(3, args4.interval.ToInternalValue());
EXPECT_EQ(BeginFrameArgs::MISSED, args4.type);
// operator==
EXPECT_EQ(
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 20, 4, 5, 6),
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 20, 4, 5, 6));
EXPECT_NONFATAL_FAILURE(
EXPECT_EQ(CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 7,
8, 9, BeginFrameArgs::MISSED),
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 7,
8, 9)),
"");
EXPECT_NONFATAL_FAILURE(
EXPECT_EQ(CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 4,
5, 6),
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 7,
8, 9)),
"");
EXPECT_NONFATAL_FAILURE(
EXPECT_EQ(CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 7,
8, 9),
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 40, 7,
8, 9)),
"");
EXPECT_NONFATAL_FAILURE(
EXPECT_EQ(CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 123, 30, 7,
8, 9),
CreateBeginFrameArgsForTesting(BEGINFRAME_FROM_HERE, 234, 30, 7,
8, 9)),
"");
// operator<<
std::stringstream out1;
out1 << args1;
EXPECT_EQ("BeginFrameArgs(NORMAL, 0, 1, 0, 0, -1us)", out1.str());
std::stringstream out2;
out2 << args2;
EXPECT_EQ("BeginFrameArgs(NORMAL, 123, 10, 1, 2, 3us)", out2.str());
// PrintTo
EXPECT_EQ(std::string("BeginFrameArgs(NORMAL, 0, 1, 0, 0, -1us)"),
::testing::PrintToString(args1));
EXPECT_EQ(std::string("BeginFrameArgs(NORMAL, 123, 10, 1, 2, 3us)"),
::testing::PrintToString(args2));
}
TEST(BeginFrameArgsTest, Create) {
// BeginFrames are not valid by default
BeginFrameArgs args1;
EXPECT_FALSE(args1.IsValid()) << args1;
EXPECT_TRUE(args1.on_critical_path);
BeginFrameArgs args2 = BeginFrameArgs::Create(
BEGINFRAME_FROM_HERE, 123, 10, base::TimeTicks::FromInternalValue(1),
base::TimeTicks::FromInternalValue(2),
base::TimeDelta::FromInternalValue(3), BeginFrameArgs::NORMAL);
EXPECT_TRUE(args2.IsValid()) << args2;
EXPECT_EQ(123u, args2.source_id) << args2;
EXPECT_EQ(10u, args2.sequence_number) << args2;
EXPECT_EQ(1, args2.frame_time.ToInternalValue()) << args2;
EXPECT_EQ(2, args2.deadline.ToInternalValue()) << args2;
EXPECT_EQ(3, args2.interval.ToInternalValue()) << args2;
EXPECT_EQ(BeginFrameArgs::NORMAL, args2.type) << args2;
}
#ifndef NDEBUG
TEST(BeginFrameArgsTest, Location) {
tracked_objects::Location expected_location = BEGINFRAME_FROM_HERE;
BeginFrameArgs args = CreateBeginFrameArgsForTesting(expected_location, 0, 1);
EXPECT_EQ(expected_location.ToString(), args.created_from.ToString());
}
#endif
} // namespace
} // namespace cc
| 2,182 |
337 | package com.edotassi.amazmod.update;
import java.io.File;
public interface Updater {
void updateCheckFailed();
void updateAvailable(int version);
void updateDownloadProgress(String filename, int progress);
void updateDownloadFailed();
void updateDownloadCompleted(File updateFile, String filename);
}
| 94 |
446 | <gh_stars>100-1000
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ratis.examples.arithmetic.cli;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.conf.Parameters;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.examples.common.SubCommandBase;
import org.apache.ratis.grpc.GrpcFactory;
import org.apache.ratis.protocol.ClientId;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import java.io.IOException;
/**
* Client to connect arithmetic example cluster.
*/
public abstract class Client extends SubCommandBase {
@Override
public void run() throws Exception {
RaftProperties raftProperties = new RaftProperties();
final RaftGroup raftGroup = RaftGroup.valueOf(RaftGroupId.valueOf(ByteString.copyFromUtf8(getRaftGroupId())),
getPeers());
RaftClient.Builder builder =
RaftClient.newBuilder().setProperties(raftProperties);
builder.setRaftGroup(raftGroup);
builder.setClientRpc(new GrpcFactory(new Parameters()).newRaftClientRpc(ClientId.randomId(), raftProperties));
RaftClient client = builder.build();
operation(client);
}
protected abstract void operation(RaftClient client) throws IOException;
}
| 625 |
512 | import responses
from binance.spot import Spot as Client
from tests.util import mock_http_response
from tests.util import random_str
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
def test_subscribe_blvt_without_tokenName():
"""Tests the API endpoint to subscribe BLVT without tokenName"""
client = Client(key, secret)
client.subscribe_blvt.when.called_with("", "10").should.throw(
ParameterRequiredError
)
def test_subscribe_blvt_without_cost():
"""Tests the API endpoint to subscribe BLVT without cost"""
client = Client(key, secret)
client.subscribe_blvt.when.called_with("BTCUP", "").should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.POST, "/sapi/v1/blvt/subscribe\\?tokenName=BTCUP&cost=10", mock_item, 200
)
def test_subscribe_blvt():
"""Tests the API endpoint to subscribe BLVT"""
client = Client(key, secret)
response = client.subscribe_blvt("BTCUP", "10")
response.should.equal(mock_item)
| 383 |
1,010 | /*
* The Shadow Simulator
* See LICENSE for licensing information
*/
#ifndef SRC_MAIN_HOST_STATUS_LISTENER_H_
#define SRC_MAIN_HOST_STATUS_LISTENER_H_
#include "main/host/status.h"
/* Opaque object to store the state needed to implement the module. */
typedef struct _StatusListener StatusListener;
/* Indicates when the listener should trigger a callback, i.e.,
* when the status bits that we are monitoring flip from off to on,
* from on to off, always (on any flip), or never. */
typedef enum _StatusListenerFilter StatusListenerFilter;
enum _StatusListenerFilter {
SLF_NEVER,
SLF_OFF_TO_ON,
SLF_ON_TO_OFF,
SLF_ALWAYS
};
/* Function definitions used by the module. */
typedef void (*StatusCallbackFunc)(void* callbackObject, void* callbackArgument);
typedef void (*StatusObjectFreeFunc)(void* data);
typedef void (*StatusArgumentFreeFunc)(void* data);
/* Create an object that can be set to listen to a status
* and execute a callback whenever a state transition (bit flips) occurs
* on one of the status bits that are requested in setMonitorStatus.
* Note that the callback will never be called unless setMonitorStatus is first
* used to specify which status bits this listener should monitor. */
StatusListener* statuslistener_new(StatusCallbackFunc notifyFunc, void* callbackObject,
StatusObjectFreeFunc objectFreeFunc, void* callbackArgument,
StatusArgumentFreeFunc argumentFreeFunc);
/* Increment the reference count for this listener. */
void statuslistener_ref(StatusListener* listener);
/* Decrement the reference count and free the listener if no refs remain. */
void statuslistener_unref(StatusListener* listener);
/* Called when a transition (bit flip) occurred on
* at least one of its status bits. (This function should only be called
* by status owners, i.e., the descriptor or futex base classes.)
* If this listener is monitoring (via setMonitorStatus) any of the status bits
* that just transitioned, then this function will trigger a notification
* via the callback supplied to the new func.*/
void statuslistener_onStatusChanged(StatusListener* listener, Status currentStatus,
Status transitions);
/* Set the status bits that we should monitor for transitions (flips),
* and a filter that specifies which flips should cause the callback
* to be invoked. */
void statuslistener_setMonitorStatus(StatusListener* listener, Status status,
StatusListenerFilter filter);
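/* Usage sketch (the callback and status-bit names are illustrative; the
 * concrete Status bits come from main/host/status.h):
 *   StatusListener* l = statuslistener_new(notifyFn, obj, freeObj, arg, freeArg);
 *   statuslistener_setMonitorStatus(l, STATUS_DESCRIPTOR_READABLE, SLF_OFF_TO_ON);
 *   ...
 *   statuslistener_unref(l);
 */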
#endif /* SRC_MAIN_HOST_STATUS_LISTENER_H_ */
| 810 |
359 | <gh_stars>100-1000
/*
Copyright 2018 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#import "MXHTTPClient.h"
/**
The `MXHTTPClient_Private` extension exposes internal operations like methods
required for testing.
Note: These methods are too intrusive. We should implement our own NSURLProtocol
a la OHHTTPStubs and manage only a delay, not stub data.
The issue is that AFNetworking does not use the shared NSURLSessionConfiguration so
that [NSURLProtocol registerClass:] does not work which makes things longer to implement.
FTR, OHHTTPStubs solves that by doing some swizzling (https://github.com/AliSoftware/OHHTTPStubs/blob/c7c96546db35d5bb15f027b42b9208e57f6c4289/OHHTTPStubs/Sources/NSURLSession/OHHTTPStubs%2BNSURLSessionConfiguration.m#L54).
*/
@interface MXHTTPClient ()
/**
Set a delay in the reponse of requests containing `string` in their path.
@param delayMs the delay in milliseconds. 0 to remove it.
@param string a pattern in the request path.
*/
+ (void)setDelay:(NSUInteger)delayMs toRequestsContainingString:(NSString*)string;
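// Hypothetical test usage: delay every request whose path contains "/sync" by
// two seconds, then clear all delays during teardown.
//
//   [MXHTTPClient setDelay:2000 toRequestsContainingString:@"/sync"];
//   // ... exercise the timeout-sensitive code path ...
//   [MXHTTPClient removeAllDelays];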
/**
Remove all created delays.
*/
+ (void)removeAllDelays;
@end
| 451 |
36,552 |
// Copyright 2020 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_CORE_LIB_SECURITY_AUTHORIZATION_CEL_AUTHORIZATION_ENGINE_H
#define GRPC_CORE_LIB_SECURITY_AUTHORIZATION_CEL_AUTHORIZATION_ENGINE_H
#include <grpc/support/port_platform.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "envoy/config/rbac/v3/rbac.upb.h"
#include "google/api/expr/v1alpha1/syntax.upb.h"
#include "upb/upb.hpp"
#include <grpc/support/log.h>
#include "src/core/lib/security/authorization/evaluate_args.h"
#include "src/core/lib/security/authorization/mock_cel/activation.h"
namespace grpc_core {
// CelAuthorizationEngine makes an AuthorizationDecision to ALLOW or DENY the
// current action based on the condition fields in provided RBAC policies.
// The engine may be constructed with one or two policies. If two policies are
// given, the first is deny-if-matched and the second is allow-if-matched.
// The engine returns an UNDECIDED decision if it fails to find a match in any
// policy. This engine ignores the principal and permission fields in RBAC
// policies. It is the caller's responsibility to provide RBAC policies that
// are compatible with this engine.
//
// Example:
// CelAuthorizationEngine* engine =
// CelAuthorizationEngine::CreateCelAuthorizationEngine(rbac_policies);
// engine->Evaluate(evaluate_args); // returns authorization decision.
class CelAuthorizationEngine {
public:
// rbac_policies must be a vector containing either a single policy of any
// kind, or one deny policy and one allow policy, in that order.
static std::unique_ptr<CelAuthorizationEngine> CreateCelAuthorizationEngine(
const std::vector<envoy_config_rbac_v3_RBAC*>& rbac_policies);
// Users should use the CreateCelAuthorizationEngine factory function
// instead of calling the CelAuthorizationEngine constructor directly.
explicit CelAuthorizationEngine(
const std::vector<envoy_config_rbac_v3_RBAC*>& rbac_policies);
// TODO(<EMAIL>): add an Evaluate member function.
private:
enum Action {
kAllow,
kDeny,
};
std::unique_ptr<mock_cel::Activation> CreateActivation(
const EvaluateArgs& args);
std::map<const std::string, const google_api_expr_v1alpha1_Expr*>
deny_if_matched_;
std::map<const std::string, const google_api_expr_v1alpha1_Expr*>
allow_if_matched_;
upb::Arena arena_;
absl::flat_hash_set<std::string> envoy_attributes_;
absl::flat_hash_set<std::string> header_keys_;
std::unique_ptr<mock_cel::CelMap> headers_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_SECURITY_AUTHORIZATION_CEL_AUTHORIZATION_ENGINE_H */
| 1,049 |
435 | {
"description": "Blueshoe was founded in 2014. Since the beginning of our company we used Python and the web framework Django to build web pages and applications. With respect to technology, there have been many ups and downs in the past 2 \u00bd years. This is an experience report about building a startup with Django.",
"language": "eng",
"recorded": "2017-05-28",
"related_urls": [
{
"label": "schedule",
"url": "https://pyconweb.com/#schedule"
}
],
"speakers": [
"<NAME>"
],
"tags": [
"django"
],
"thumbnail_url": "https://i.ytimg.com/vi/_GEAYcmkJhw/hqdefault.jpg",
"title": "Building a startup with Django",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=_GEAYcmkJhw"
}
]
}
| 289 |
791 | #include "editing_system.h"
#include <core/graphics/texture.h>
#include <core/system/subsystem.h>
#include <runtime/assets/asset_manager.h>
#include <runtime/ecs/components/audio_listener_component.h>
#include <runtime/ecs/components/camera_component.h>
#include <runtime/ecs/components/transform_component.h>
#include <runtime/ecs/constructs/utils.h>
#include <runtime/rendering/material.h>
#include <runtime/rendering/mesh.h>
#include <runtime/rendering/render_window.h>
#include <runtime/system/events.h>
namespace editor
{
editing_system::editing_system()
{
auto& am = core::get_subsystem<runtime::asset_manager>();
icons["translate"] = am.load<gfx::texture>("editor:/data/icons/translate.png").get();
icons["rotate"] = am.load<gfx::texture>("editor:/data/icons/rotate.png").get();
icons["scale"] = am.load<gfx::texture>("editor:/data/icons/scale.png").get();
icons["local"] = am.load<gfx::texture>("editor:/data/icons/local.png").get();
icons["global"] = am.load<gfx::texture>("editor:/data/icons/global.png").get();
icons["play"] = am.load<gfx::texture>("editor:/data/icons/play.png").get();
icons["pause"] = am.load<gfx::texture>("editor:/data/icons/pause.png").get();
icons["stop"] = am.load<gfx::texture>("editor:/data/icons/stop.png").get();
icons["next"] = am.load<gfx::texture>("editor:/data/icons/next.png").get();
icons["material"] = am.load<gfx::texture>("editor:/data/icons/material.png").get();
icons["mesh"] = am.load<gfx::texture>("editor:/data/icons/mesh.png").get();
icons["export"] = am.load<gfx::texture>("editor:/data/icons/export.png").get();
icons["grid"] = am.load<gfx::texture>("editor:/data/icons/grid.png").get();
icons["wireframe"] = am.load<gfx::texture>("editor:/data/icons/wireframe.png").get();
icons["prefab"] = am.load<gfx::texture>("editor:/data/icons/prefab.png").get();
icons["scene"] = am.load<gfx::texture>("editor:/data/icons/scene.png").get();
icons["shader"] = am.load<gfx::texture>("editor:/data/icons/shader.png").get();
icons["loading"] = am.load<gfx::texture>("editor:/data/icons/loading.png").get();
icons["folder"] = am.load<gfx::texture>("editor:/data/icons/folder.png").get();
icons["animation"] = am.load<gfx::texture>("editor:/data/icons/animation.png").get();
icons["sound"] = am.load<gfx::texture>("editor:/data/icons/sound.png").get();
}
void editing_system::save_editor_camera()
{
if(camera)
ecs::utils::save_entity_to_file(fs::resolve_protocol("app:/settings/editor_camera.cfg"), camera);
}
void editing_system::load_editor_camera()
{
runtime::entity object;
if(!ecs::utils::try_load_entity_from_file(fs::resolve_protocol("app:/settings/editor_camera.cfg"),
object))
{
auto& ecs = core::get_subsystem<runtime::entity_component_system>();
object = ecs.create();
}
object.set_name("EDITOR CAMERA");
if(!object.has_component<transform_component>())
{
auto transform_comp = object.assign<transform_component>().lock();
transform_comp->set_local_position({0.0f, 2.0f, -5.0f});
}
if(!object.has_component<camera_component>())
{
object.assign<camera_component>();
}
if(!object.has_component<audio_listener_component>())
{
object.assign<audio_listener_component>();
}
camera = object;
}
void editing_system::select(rttr::variant object)
{
selection_data.object = object;
}
void editing_system::unselect()
{
selection_data = {};
imguizmo::enable(false);
imguizmo::enable(true);
}
void editing_system::close_project()
{
save_editor_camera();
unselect();
scene.clear();
}
}
| 1,301 |
1,253 | #include <bits/stdc++.h>
using namespace std;
const int MAX_NODES = 1000000;//maximum nodes in the trie (the sum of lengths of the dictionary words is a common upper bound)
const int ALPHABET_SIZE = 26;//Implementation works for english lowercase letters
int cntNodes = 1;//the root counts as one node (the node 1). The node 0 is left as null reference
int nodes[ MAX_NODES ][ ALPHABET_SIZE ];
int fail[ MAX_NODES ];
vector< vector< int > > matches( MAX_NODES );//matches[i] contains the indexes of patterns matched when reach the i node.
string t, p;//string t is the text when the patterns are going to be search
int k;//number of patterns
vector<string> patterns;
void insert( int node, int idx, string pat ){
int cur = node;
char next;
for( int i = 0; i < pat.size(); i++ ){
next = pat[i] - 'a';
if( !nodes[cur][next] ) nodes[cur][next] = ++cntNodes;
cur = nodes[cur][next];
}
matches[ cur ].push_back( idx );
}
//Reads k patterns and inserts each of them into the Trie
void buildTrie(){
for( int i = 0; i < k; i++ ){
cin >> p;
patterns.push_back( p );
insert( 1, i, p );
}
}
int move( int curNode, int next ){
while( !nodes[curNode][next] )
curNode = fail[ curNode ];
return nodes[curNode][next];
}
//Do a BFS to build the fail function for the Trie nodes
void buildAho(){
queue<int> q;
for( int i = 0; i < ALPHABET_SIZE; i++ ){
if( !nodes[1][i] )
nodes[1][i] = 1;//sentinel
else{
fail[ nodes[1][i] ] = 1;
q.push( nodes[1][i] );
}
}
while( !q.empty() ){
int curNode = q.front(); q.pop();
int failCh;
for( int i = 0; i < ALPHABET_SIZE; i++ ){
if( !nodes[curNode][i] ) continue;
failCh = fail[ nodes[curNode][i] ] = move( fail[ curNode ], i );
if( matches[ failCh ].size() ){
matches[ nodes[curNode][i] ].insert( matches[ nodes[curNode][i] ].end(), matches[ failCh ].begin(), matches[ failCh ].end() );
}
q.push( nodes[curNode][i] );
}
}
}
//Search for patterns matching in string t
void match( ){
int curNode = 1;
for( int i = 0; i < t.size(); i++ ){
curNode = move( curNode, t[i] - 'a' );
for( int j = 0; j < matches[curNode].size(); j++ ){
cout << "Pattern " << patterns[ matches[curNode][j] ];
cout << " find in range [" << (i - patterns[ matches[curNode][j] ].size() + 1 ) << "-" << i << "]\n";
}
}
}
int main(){
cin >> k;//read number of patterns
buildTrie();//read k patterns and buildTrie with them
buildAho();//build fail function for trie nodes
cin >> t;//read text to search for patterns matching
match();//search and print matches
}
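/* Worked example (stdin -> stdout), following the I/O format read by main():
   input:   2
            he
            she
            ushers
   output:  Pattern she found in range [1-3]
            Pattern he found in range [2-3]
   Both patterns end at index 3 of "ushers"; the fail-link chain from the node
   for "she" is what also reports the suffix match "he". */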
| 990 |
3,651 | /**
* Copyright 2010-2016 OrientDB LTD (http://orientdb.com)
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
* <p>For more information: http://www.orientdb.com
*/
package com.orientechnologies.spatial;
import com.orientechnologies.common.serialization.types.OBinarySerializer;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.storage.impl.local.paginated.wal.OWALChanges;
import java.nio.ByteBuffer;
/** Created by <NAME> on 04/09/15. */
public class OLuceneMockSpatialSerializer implements OBinarySerializer<ODocument> {
protected static OLuceneMockSpatialSerializer INSTANCE = new OLuceneMockSpatialSerializer();
protected OLuceneMockSpatialSerializer() {}
@Override
public int getObjectSize(ODocument object, Object... hints) {
return 0;
}
@Override
public int getObjectSize(byte[] stream, int startPosition) {
return 0;
}
@Override
public void serialize(ODocument object, byte[] stream, int startPosition, Object... hints) {}
@Override
public ODocument deserialize(byte[] stream, int startPosition) {
return null;
}
@Override
public byte getId() {
return -10;
}
@Override
public boolean isFixedLength() {
return false;
}
@Override
public int getFixedLength() {
return 0;
}
@Override
public void serializeNativeObject(
ODocument object, byte[] stream, int startPosition, Object... hints) {}
@Override
public ODocument deserializeNativeObject(byte[] stream, int startPosition) {
return null;
}
@Override
public int getObjectSizeNative(byte[] stream, int startPosition) {
return 0;
}
@Override
public ODocument preprocess(ODocument value, Object... hints) {
return null;
}
@Override
public void serializeInByteBufferObject(ODocument object, ByteBuffer buffer, Object... hints) {}
@Override
public ODocument deserializeFromByteBufferObject(ByteBuffer buffer) {
return null;
}
@Override
public int getObjectSizeInByteBuffer(ByteBuffer buffer) {
return 0;
}
@Override
public ODocument deserializeFromByteBufferObject(
ByteBuffer buffer, OWALChanges walChanges, int offset) {
return null;
}
@Override
public int getObjectSizeInByteBuffer(ByteBuffer buffer, OWALChanges walChanges, int offset) {
return 0;
}
}
| 858 |
589 | <filename>inspectit.shared.cs/src/main/java/rocks/inspectit/shared/cs/ci/sensor/method/impl/ExecutorClientSensorConfig.java
package rocks.inspectit.shared.cs.ci.sensor.method.impl;
import javax.xml.bind.annotation.XmlRootElement;
import rocks.inspectit.shared.cs.ci.sensor.method.AbstractRemoteSensorConfig;
/**
* Configuration for the executor client sensor.
*
* @author <NAME>
*
*/
@XmlRootElement(name = "executor-client-sensor-config")
public class ExecutorClientSensorConfig extends AbstractRemoteSensorConfig {
/**
* Sensor name.
*/
public static final String SENSOR_NAME = "Executor Client Sensor";
/**
* Implementing class name.
*/
public static final String CLASS_NAME = "rocks.inspectit.agent.java.sensor.method.async.executor.ExecutorClientSensor";
/**
* {@inheritDoc}
*/
@Override
public String getClassName() {
return CLASS_NAME;
}
/**
* {@inheritDoc}
*/
@Override
public String getName() {
return SENSOR_NAME;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isServerSide() {
return false;
}
}
| 373 |
645 | <reponame>morenn520/TonY
#
# Copyright 2019 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
#
import os
tb_port = None
if 'TB_PORT' in os.environ:
tb_port = os.environ['TB_PORT']
job_name = os.environ['JOB_NAME']
print('TB_PORT is ' + str(tb_port))
print('JOB_NAME is ' + job_name)
if tb_port and job_name != 'chief':
    raise ValueError('TB_PORT should only be set for the chief task')
737 | from . import functions
def aggregation(input, weight, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1):
assert input.shape[0] == weight.shape[0] and (input.shape[1] % weight.shape[1] == 0) and pad_mode in [0, 1]
if input.is_cuda:
if pad_mode == 0:
out = functions.aggregation_zeropad(input, weight, kernel_size, stride, padding, dilation)
elif pad_mode == 1:
out = functions.aggregation_refpad(input, weight, kernel_size, stride, padding, dilation)
else:
raise NotImplementedError
return out
def subtraction(input, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1):
assert input.dim() == 4 and pad_mode in [0, 1]
if input.is_cuda:
if pad_mode == 0:
out = functions.subtraction_zeropad(input, kernel_size, stride, padding, dilation)
elif pad_mode == 1:
out = functions.subtraction_refpad(input, kernel_size, stride, padding, dilation)
else:
raise NotImplementedError
return out
def subtraction2(input1, input2, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1):
assert input1.dim() == 4 and input2.dim() == 4 and pad_mode in [0, 1]
if input1.is_cuda:
if pad_mode == 0:
out = functions.subtraction2_zeropad(input1, input2, kernel_size, stride, padding, dilation)
elif pad_mode == 1:
out = functions.subtraction2_refpad(input1, input2, kernel_size, stride, padding, dilation)
else:
raise NotImplementedError
return out
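# Minimal usage sketch (shapes are assumptions for illustration; the CUDA
# extensions above only accept GPU tensors):
#
#   import torch
#   x = torch.randn(2, 16, 8, 8, device='cuda')
#   d = subtraction(x, kernel_size=3, padding=1, pad_mode=1)  # pairwise diffs
#   # aggregation(x, w, ...) additionally needs a weight tensor w whose batch
#   # size matches x and whose channel count divides x's channels.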
| 646 |
2,137 | <filename>publiccms-parent/publiccms-trade/src/main/java/com/publiccms/logic/component/paymentgateway/AccountGatewayComponent.java
package com.publiccms.logic.component.paymentgateway;
import java.io.IOException;
import javax.servlet.http.HttpServletResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.publiccms.common.base.AbstractPaymentGateway;
import com.publiccms.entities.sys.SysSite;
import com.publiccms.entities.trade.TradeAccountHistory;
import com.publiccms.entities.trade.TradePayment;
import com.publiccms.entities.trade.TradeRefund;
import com.publiccms.logic.service.trade.TradeAccountHistoryService;
import com.publiccms.logic.service.trade.TradeAccountService;
import com.publiccms.logic.service.trade.TradePaymentService;
@Component
public class AccountGatewayComponent extends AbstractPaymentGateway {
/**
     * Payment service used to look up and update payment records.
*/
@Autowired
private TradePaymentService service;
@Autowired
private TradeAccountService accountService;
@Override
public String getAccountType() {
return "account";
}
@Override
public boolean pay(SysSite site, TradePayment payment, String callbackUrl, HttpServletResponse response) {
if (null != payment && payment.getStatus() == TradePaymentService.STATUS_PENDING_PAY) {
TradeAccountHistory history = accountService.change(site.getId(), payment.getSerialNumber(), payment.getUserId(),
payment.getUserId(), TradeAccountHistoryService.STATUS_PAY, payment.getAmount().negate(),
payment.getDescription());
if (null != history) {
if (service.paid(site.getId(), payment.getId(), history.getId().toString())) {
if (confirmPay(site.getId(), payment, response)) {
try {
response.sendRedirect(callbackUrl);
} catch (IOException e) {
}
}
return true;
} else {
accountService.change(site.getId(), payment.getSerialNumber(), payment.getUserId(), payment.getUserId(),
TradeAccountHistoryService.STATUS_REFUND, payment.getAmount(), payment.getDescription());
}
}
}
return false;
}
@Override
public boolean refund(short siteId, TradePayment payment, TradeRefund refund) {
if (null != payment && service.refunded(siteId, payment.getId())) {
TradeAccountHistory history = accountService.change(siteId, payment.getSerialNumber(), payment.getUserId(),
payment.getUserId(), TradeAccountHistoryService.STATUS_REFUND, payment.getAmount().negate(),
payment.getDescription());
if (null == history) {
service.pendingRefund(siteId, payment.getId());
} else {
return true;
}
}
return false;
}
@Override
public boolean enable(short siteId) {
return true;
}
}
| 1,381 |
324 | <gh_stars>100-1000
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import argparse
import numpy as np
import os
import copy
import time
import math
import torch
import torch.utils
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
from os.path import join as pjoin
print('using torch', torch.__version__)
# Experiment parameters
parser = argparse.ArgumentParser(description='Graph Convolutional Networks')
parser.add_argument('-D', '--dataset', type=str, default='PROTEINS')
parser.add_argument('-M', '--model', type=str, default='gcn', choices=['gcn', 'unet', 'mgcn'])
parser.add_argument('--lr', type=float, default=0.005, help='learning rate')
parser.add_argument('--lr_decay_steps', type=str, default='25,35', help='learning rate')
parser.add_argument('--wd', type=float, default=1e-4, help='weight decay')
parser.add_argument('-d', '--dropout', type=float, default=0.1, help='dropout rate')
parser.add_argument('-f', '--filters', type=str, default='64,64,64', help='number of filters in each layer')
parser.add_argument('-K', '--filter_scale', type=int, default=1, help='filter scale (receptive field size), must be > 0; 1 for GCN, >1 for ChebNet')
parser.add_argument('--n_hidden', type=int, default=0,
help='number of hidden units in a fully connected layer after the last conv layer')
parser.add_argument('--n_hidden_edge', type=int, default=32,
help='number of hidden units in a fully connected layer of the edge prediction network')
parser.add_argument('--degree', action='store_true', default=False, help='use one-hot node degree features')
parser.add_argument('--epochs', type=int, default=40, help='number of epochs')
parser.add_argument('-b', '--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--bn', action='store_true', default=False, help='use BatchNorm layer')
parser.add_argument('--folds', type=int, default=10, help='number of cross-validation folds (1 for COLORS and TRIANGLES and 10 for other datasets)')
parser.add_argument('-t', '--threads', type=int, default=0, help='number of threads to load data')
parser.add_argument('--log_interval', type=int, default=10, help='interval (number of batches) of logging')
parser.add_argument('--device', type=str, default='cuda', choices=['cuda', 'cpu'])
parser.add_argument('--seed', type=int, default=111, help='random seed')
parser.add_argument('--shuffle_nodes', action='store_true', default=False, help='shuffle nodes for debugging')
parser.add_argument('-g', '--torch_geom', action='store_true', default=False, help='use PyTorch Geometric')
parser.add_argument('-a', '--adj_sq', action='store_true', default=False,
help='use A^2 instead of A as an adjacency matrix')
parser.add_argument('-s', '--scale_identity', action='store_true', default=False,
help='use 2I instead of I for self connections')
parser.add_argument('-v', '--visualize', action='store_true', default=False,
help='only for unet: save some adjacency matrices and other data as images')
parser.add_argument('-c', '--use_cont_node_attr', action='store_true', default=False,
help='use continuous node attributes in addition to discrete ones')
args = parser.parse_args()
if args.torch_geom:
from torch_geometric.datasets import TUDataset
import torch_geometric.transforms as T
args.filters = list(map(int, args.filters.split(',')))
args.lr_decay_steps = list(map(int, args.lr_decay_steps.split(',')))
for arg in vars(args):
print(arg, getattr(args, arg))
n_folds = args.folds # train,val,test splits for COLORS and TRIANGLES and 10-fold cross validation for other datasets
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
rnd_state = np.random.RandomState(args.seed)
def split_ids(ids, folds=10):
if args.dataset == 'COLORS-3':
assert folds == 1, 'this dataset has train, val and test splits'
train_ids = [np.arange(500)]
val_ids = [np.arange(500, 3000)]
test_ids = [np.arange(3000, 10500)]
elif args.dataset == 'TRIANGLES':
assert folds == 1, 'this dataset has train, val and test splits'
train_ids = [np.arange(30000)]
val_ids = [np.arange(30000, 35000)]
test_ids = [np.arange(35000, 45000)]
else:
n = len(ids)
stride = int(np.ceil(n / float(folds)))
test_ids = [ids[i: i + stride] for i in range(0, n, stride)]
assert np.all(
np.unique(np.concatenate(test_ids)) == sorted(ids)), 'some graphs are missing in the test sets'
assert len(test_ids) == folds, 'invalid test sets'
train_ids = []
for fold in range(folds):
train_ids.append(np.array([e for e in ids if e not in test_ids[fold]]))
assert len(train_ids[fold]) + len(test_ids[fold]) == len(
np.unique(list(train_ids[fold]) + list(test_ids[fold]))) == n, 'invalid splits'
return train_ids, test_ids
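# For instance (hand-checked), split_ids(rnd_state.permutation(10), folds=5)
# produces 5 disjoint test folds of 2 graphs each, and each train fold holds
# the remaining 8 indices.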
if not args.torch_geom:
    # Universal data loader and reader (can be used for other graph datasets from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets)
class GraphData(torch.utils.data.Dataset):
def __init__(self,
datareader,
fold_id,
split):
self.fold_id = fold_id
self.split = split
self.rnd_state = datareader.rnd_state
self.set_fold(datareader.data, fold_id)
def set_fold(self, data, fold_id):
self.total = len(data['targets'])
self.N_nodes_max = data['N_nodes_max']
self.num_classes = data['num_classes']
self.num_features = data['num_features']
self.idx = data['splits'][fold_id][self.split]
# use deepcopy to make sure we don't alter objects in folds
self.labels = copy.deepcopy([data['targets'][i] for i in self.idx])
self.adj_list = copy.deepcopy([data['adj_list'][i] for i in self.idx])
self.features_onehot = copy.deepcopy([data['features_onehot'][i] for i in self.idx])
print('%s: %d/%d' % (self.split.upper(), len(self.labels), len(data['targets'])))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
# convert to torch
return [torch.from_numpy(self.features_onehot[index]).float(), # node_features
torch.from_numpy(self.adj_list[index]).float(), # adjacency matrix
int(self.labels[index])]
class DataReader():
'''
Class to read the txt files containing all data of the dataset.
Should work for any dataset from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets
'''
def __init__(self,
data_dir, # folder with txt files
rnd_state=None,
use_cont_node_attr=False,
                     # whether to use additional float-valued node attributes available in some datasets
folds=10):
self.data_dir = data_dir
self.rnd_state = np.random.RandomState() if rnd_state is None else rnd_state
self.use_cont_node_attr = use_cont_node_attr
files = os.listdir(self.data_dir)
data = {}
nodes, graphs = self.read_graph_nodes_relations(
list(filter(lambda f: f.find('graph_indicator') >= 0, files))[0])
data['adj_list'] = self.read_graph_adj(list(filter(lambda f: f.find('_A') >= 0, files))[0], nodes, graphs)
node_labels_file = list(filter(lambda f: f.find('node_labels') >= 0, files))
if len(node_labels_file) == 1:
data['features'] = self.read_node_features(node_labels_file[0], nodes, graphs, fn=lambda s: int(s.strip()))
else:
data['features'] = None
data['targets'] = np.array(
self.parse_txt_file(list(filter(lambda f: f.find('graph_labels') >= 0 or f.find('graph_attributes') >= 0, files))[0],
line_parse_fn=lambda s: int(float(s.strip()))))
if self.use_cont_node_attr:
data['attr'] = self.read_node_features(list(filter(lambda f: f.find('node_attributes') >= 0, files))[0],
nodes, graphs,
fn=lambda s: np.array(list(map(float, s.strip().split(',')))))
features, n_edges, degrees = [], [], []
for sample_id, adj in enumerate(data['adj_list']):
N = len(adj) # number of nodes
if data['features'] is not None:
assert N == len(data['features'][sample_id]), (N, len(data['features'][sample_id]))
if not np.allclose(adj, adj.T):
print(sample_id, 'not symmetric')
n = np.sum(adj) # total sum of edges
assert n % 2 == 0, n
n_edges.append(int(n / 2)) # undirected edges, so need to divide by 2
degrees.extend(list(np.sum(adj, 1)))
if data['features'] is not None:
features.append(np.array(data['features'][sample_id]))
# Create features over graphs as one-hot vectors for each node
if data['features'] is not None:
features_all = np.concatenate(features)
features_min = features_all.min()
num_features = int(features_all.max() - features_min + 1) # number of possible values
max_degree = np.max(degrees)
features_onehot = []
for sample_id, adj in enumerate(data['adj_list']):
N = adj.shape[0]
if data['features'] is not None:
x = data['features'][sample_id]
feature_onehot = np.zeros((len(x), num_features))
for node, value in enumerate(x):
feature_onehot[node, value - features_min] = 1
else:
feature_onehot = np.empty((N, 0))
if self.use_cont_node_attr:
if args.dataset in ['COLORS-3', 'TRIANGLES']:
# first column corresponds to node attention and shouldn't be used as node features
feature_attr = np.array(data['attr'][sample_id])[:, 1:]
else:
feature_attr = np.array(data['attr'][sample_id])
else:
feature_attr = np.empty((N, 0))
if args.degree:
degree_onehot = np.zeros((N, max_degree + 1))
degree_onehot[np.arange(N), np.sum(adj, 1).astype(np.int32)] = 1
else:
degree_onehot = np.empty((N, 0))
node_features = np.concatenate((feature_onehot, feature_attr, degree_onehot), axis=1)
if node_features.shape[1] == 0:
# dummy features for datasets without node labels/attributes
# node degree features can be used instead
node_features = np.ones((N, 1))
features_onehot.append(node_features)
num_features = features_onehot[0].shape[1]
shapes = [len(adj) for adj in data['adj_list']]
labels = data['targets'] # graph class labels
labels -= np.min(labels) # to start from 0
classes = np.unique(labels)
num_classes = len(classes)
if not np.all(np.diff(classes) == 1):
print('making labels sequential, otherwise pytorch might crash')
labels_new = np.zeros(labels.shape, dtype=labels.dtype) - 1
for lbl in range(num_classes):
labels_new[labels == classes[lbl]] = lbl
labels = labels_new
classes = np.unique(labels)
assert len(np.unique(labels)) == num_classes, np.unique(labels)
def stats(x):
return (np.mean(x), np.std(x), np.min(x), np.max(x))
print('N nodes avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(shapes))
print('N edges avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(n_edges))
print('Node degree avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(degrees))
print('Node features dim: \t\t%d' % num_features)
print('N classes: \t\t\t%d' % num_classes)
print('Classes: \t\t\t%s' % str(classes))
for lbl in classes:
print('Class %d: \t\t\t%d samples' % (lbl, np.sum(labels == lbl)))
if data['features'] is not None:
for u in np.unique(features_all):
print('feature {}, count {}/{}'.format(u, np.count_nonzero(features_all == u), len(features_all)))
N_graphs = len(labels) # number of samples (graphs) in data
assert N_graphs == len(data['adj_list']) == len(features_onehot), 'invalid data'
# Create train/test sets first
train_ids, test_ids = split_ids(rnd_state.permutation(N_graphs), folds=folds)
# Create train sets
splits = []
for fold in range(len(train_ids)):
splits.append({'train': train_ids[fold],
'test': test_ids[fold]})
data['features_onehot'] = features_onehot
data['targets'] = labels
data['splits'] = splits
data['N_nodes_max'] = np.max(shapes) # max number of nodes
data['num_features'] = num_features
data['num_classes'] = num_classes
self.data = data
def parse_txt_file(self, fpath, line_parse_fn=None):
with open(pjoin(self.data_dir, fpath), 'r') as f:
lines = f.readlines()
data = [line_parse_fn(s) if line_parse_fn is not None else s for s in lines]
return data
def read_graph_adj(self, fpath, nodes, graphs):
edges = self.parse_txt_file(fpath, line_parse_fn=lambda s: s.split(','))
adj_dict = {}
for edge in edges:
node1 = int(edge[0].strip()) - 1 # -1 because of zero-indexing in our code
node2 = int(edge[1].strip()) - 1
graph_id = nodes[node1]
assert graph_id == nodes[node2], ('invalid data', graph_id, nodes[node2])
if graph_id not in adj_dict:
n = len(graphs[graph_id])
adj_dict[graph_id] = np.zeros((n, n))
ind1 = np.where(graphs[graph_id] == node1)[0]
ind2 = np.where(graphs[graph_id] == node2)[0]
assert len(ind1) == len(ind2) == 1, (ind1, ind2)
adj_dict[graph_id][ind1, ind2] = 1
adj_list = [adj_dict[graph_id] for graph_id in sorted(list(graphs.keys()))]
return adj_list
def read_graph_nodes_relations(self, fpath):
graph_ids = self.parse_txt_file(fpath, line_parse_fn=lambda s: int(s.rstrip()))
nodes, graphs = {}, {}
for node_id, graph_id in enumerate(graph_ids):
if graph_id not in graphs:
graphs[graph_id] = []
graphs[graph_id].append(node_id)
nodes[node_id] = graph_id
graph_ids = np.unique(list(graphs.keys()))
for graph_id in graph_ids:
graphs[graph_id] = np.array(graphs[graph_id])
return nodes, graphs
def read_node_features(self, fpath, nodes, graphs, fn):
node_features_all = self.parse_txt_file(fpath, line_parse_fn=fn)
node_features = {}
for node_id, x in enumerate(node_features_all):
graph_id = nodes[node_id]
if graph_id not in node_features:
node_features[graph_id] = [None] * len(graphs[graph_id])
ind = np.where(graphs[graph_id] == node_id)[0]
assert len(ind) == 1, ind
assert node_features[graph_id][ind[0]] is None, node_features[graph_id][ind[0]]
node_features[graph_id][ind[0]] = x
node_features_lst = [node_features[graph_id] for graph_id in sorted(list(graphs.keys()))]
return node_features_lst
# NN layers and models
class GraphConv(nn.Module):
'''
Graph Convolution Layer according to (<NAME> and <NAME>, ICLR 2017) if K<=1
Chebyshev Graph Convolution Layer according to (<NAME>, <NAME>, and <NAME>, NIPS 2016) if K>1
Additional tricks (power of adjacency matrix and weighted self connections) as in the Graph U-Net paper
'''
def __init__(self,
in_features,
out_features,
n_relations=1, # number of relation types (adjacency matrices)
K=1, # GCN is K<=1, else ChebNet
activation=None,
bnorm=False,
adj_sq=False,
scale_identity=False):
super(GraphConv, self).__init__()
self.fc = nn.Linear(in_features=in_features * K * n_relations, out_features=out_features)
self.n_relations = n_relations
assert K > 0, ('filter scale must be greater than 0', K)
self.K = K
self.activation = activation
self.bnorm = bnorm
if self.bnorm:
self.bn = nn.BatchNorm1d(out_features)
self.adj_sq = adj_sq
self.scale_identity = scale_identity
def chebyshev_basis(self, L, X, K):
if K > 1:
Xt = [X]
Xt.append(torch.bmm(L, X)) # B,N,F
for k in range(2, K):
Xt.append(2 * torch.bmm(L, Xt[k - 1]) - Xt[k - 2]) # B,N,F
Xt = torch.cat(Xt, dim=2)  # B,N,K*F
return Xt
else:
# GCN
assert K == 1, K
return torch.bmm(L, X)  # B,N,F
def laplacian_batch(self, A):
batch, N = A.shape[:2]
if self.adj_sq:
A = torch.bmm(A, A) # use A^2 to increase graph connectivity
A_hat = A
if self.K < 2 or self.scale_identity:
I = torch.eye(N).unsqueeze(0).to(args.device)
if self.scale_identity:
I = 2 * I # increase weight of self connections
if self.K < 2:
A_hat = A + I
D_hat = (torch.sum(A_hat, 1) + 1e-5) ** (-0.5)
L = D_hat.view(batch, N, 1) * A_hat * D_hat.view(batch, 1, N)
return L
def forward(self, data):
x, A, mask = data[:3]
# print('in', x.shape, torch.sum(torch.abs(torch.sum(x, 2)) > 0))
if len(A.shape) == 3:
A = A.unsqueeze(3)
x_hat = []
for rel in range(self.n_relations):
L = self.laplacian_batch(A[:, :, :, rel])
x_hat.append(self.chebyshev_basis(L, x, self.K))
x = self.fc(torch.cat(x_hat, 2))
if len(mask.shape) == 2:
mask = mask.unsqueeze(2)
x = x * mask # to make values of dummy nodes zeros again, otherwise the bias is added after applying self.fc which affects node embeddings in the following layers
if self.bnorm:
x = self.bn(x.permute(0, 2, 1)).permute(0, 2, 1)
if self.activation is not None:
x = self.activation(x)
return (x, A, mask)
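# Shape sketch for GraphConv.forward (illustrative; note that laplacian_batch
# reads the global `args.device`, so tensors must live on that device):
# conv = GraphConv(in_features=7, out_features=64, activation=nn.ReLU())
# x = torch.rand(32, 25, 7)      # B,N,F node features
# A = torch.rand(32, 25, 25)     # B,N,N adjacency matrices
# mask = torch.ones(32, 25, 1)   # 1 for real nodes, 0 for padded ones
# x_out, A_out, mask_out = conv((x, A, mask))  # x_out: B,N,64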
class GCN(nn.Module):
'''
Baseline Graph Convolutional Network with a stack of Graph Convolution Layers and global pooling over nodes.
'''
def __init__(self,
in_features,
out_features,
filters=[64, 64, 64],
K=1,
bnorm=False,
n_hidden=0,
dropout=0.2,
adj_sq=False,
scale_identity=False):
super(GCN, self).__init__()
# Graph convolution layers
self.gconv = nn.Sequential(*([GraphConv(in_features=in_features if layer == 0 else filters[layer - 1],
out_features=f,
K=K,
activation=nn.ReLU(inplace=True),
bnorm=bnorm,
adj_sq=adj_sq,
scale_identity=scale_identity) for layer, f in enumerate(filters)]))
# Fully connected layers
fc = []
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
if n_hidden > 0:
fc.append(nn.Linear(filters[-1], n_hidden))
fc.append(nn.ReLU(inplace=True))
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
n_last = n_hidden
else:
n_last = filters[-1]
fc.append(nn.Linear(n_last, out_features))
self.fc = nn.Sequential(*fc)
def forward(self, data):
x = self.gconv(data)[0]
x = torch.max(x, dim=1)[0].squeeze() # max pooling over nodes (usually performs better than average)
x = self.fc(x)
return x
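# Minimal forward sketch for GCN (illustrative; x, A, mask as produced by
# collate_batch further below; B is the batch size):
# model = GCN(in_features=7, out_features=2, filters=[64, 64, 64])
# logits = model([x, A, mask])  # -> (B, 2)
# Max pooling over the node dimension makes the readout invariant to the
# number of nodes per graph.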
class GraphUnet(nn.Module):
def __init__(self,
in_features,
out_features,
filters=[64, 64, 64],
K=1,
bnorm=False,
n_hidden=0,
dropout=0.2,
adj_sq=False,
scale_identity=False,
shuffle_nodes=False,
visualize=False,
pooling_ratios=[0.8, 0.8]):
super(GraphUnet, self).__init__()
self.shuffle_nodes = shuffle_nodes
self.visualize = visualize
self.pooling_ratios = pooling_ratios
# Graph convolution layers
self.gconv = nn.ModuleList([GraphConv(in_features=in_features if layer == 0 else filters[layer - 1],
out_features=f,
K=K,
activation=nn.ReLU(inplace=True),
bnorm=bnorm,
adj_sq=adj_sq,
scale_identity=scale_identity) for layer, f in enumerate(filters)])
# Pooling layers
self.proj = []
for layer, f in enumerate(filters[:-1]):
# Initialize projection vectors similar to weight/bias initialization in nn.Linear
fan_in = filters[layer]
p = Parameter(torch.Tensor(fan_in, 1))
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(p, -bound, bound)
self.proj.append(p)
self.proj = nn.ParameterList(self.proj)
# Fully connected layers
fc = []
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
if n_hidden > 0:
fc.append(nn.Linear(filters[-1], n_hidden))
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
n_last = n_hidden
else:
n_last = filters[-1]
fc.append(nn.Linear(n_last, out_features))
self.fc = nn.Sequential(*fc)
def forward(self, data):
# data: [node_features, A, graph_support, N_nodes, label]
if self.shuffle_nodes:
# shuffle nodes to make sure that the model does not adapt to nodes order (happens in some cases)
N = data[0].shape[1]
idx = torch.randperm(N)
data = (data[0][:, idx], data[1][:, idx, :][:, :, idx], data[2][:, idx], data[3])
sample_id_vis, N_nodes_vis = -1, -1
for layer, gconv in enumerate(self.gconv):
N_nodes = data[3]
# TODO: remove dummy or dropped nodes for speeding up forward/backward passes
# data = (data[0][:, :N_nodes_max], data[1][:, :N_nodes_max, :N_nodes_max], data[2][:, :N_nodes_max], data[3])
x, A = data[:2]
B, N, _ = x.shape
# visualize data
if self.visualize and layer < len(self.gconv) - 1:
for b in range(B):
if (layer == 0 and N_nodes[b] < 20 and N_nodes[b] > 10) or sample_id_vis > -1:
if sample_id_vis > -1 and sample_id_vis != b:
continue
if N_nodes_vis < 0:
N_nodes_vis = N_nodes[b]
plt.figure()
plt.imshow(A[b][:N_nodes_vis, :N_nodes_vis].data.cpu().numpy())
plt.title('layer %d, Input adjacency matrix' % (layer))
plt.savefig('input_adjacency_%d.png' % layer)
sample_id_vis = b
break
mask = data[2].clone() # clone as we are going to make inplace changes
x = gconv(data)[0] # graph convolution
if layer < len(self.gconv) - 1:
B, N, C = x.shape
y = torch.mm(x.view(B * N, C), self.proj[layer]).view(B, N) # project features
y = y / (torch.sum(self.proj[layer] ** 2).view(1, 1) ** 0.5) # node scores used for ranking below
idx = torch.sort(y, dim=1)[1] # get indices of y values in the ascending order
N_remove = (N_nodes.float() * (1 - self.pooling_ratios[layer])).long() # number of removed nodes
# sanity checks
assert torch.all(
N_nodes > N_remove), 'the number of nodes must be larger than the number of removed nodes'
for b in range(B):
# check that mask corresponds to the actual (non-dummy) nodes
assert torch.sum(mask[b]) == float(N_nodes[b]), (torch.sum(mask[b]), N_nodes[b])
N_nodes_prev = N_nodes
N_nodes = N_nodes - N_remove
for b in range(B):
idx_b = idx[b, mask[b, idx[b]] == 1] # take indices of non-dummy nodes for current data example
assert len(idx_b) >= N_nodes[b], (
len(idx_b), N_nodes[b])  # the number of remaining indices must be at least the number of kept nodes
mask[b, idx_b[:N_remove[b]]] = 0 # set mask values corresponding to the smallest y-values to 0
# sanity checks
for b in range(B):
# check that the new mask corresponds to the actual (non-dummy) nodes
assert torch.sum(mask[b]) == float(N_nodes[b]), (
b, torch.sum(mask[b]), N_nodes[b], N_remove[b], N_nodes_prev[b])
# make sure that y-values of selected nodes are larger than of dropped nodes
s = torch.sum(y[b] >= torch.min((y * mask.float())[b]))
assert s >= float(N_nodes[b]), (s, N_nodes[b], (y * mask.float())[b])
mask = mask.unsqueeze(2)
x = x * torch.tanh(y).unsqueeze(2) * mask # propagate only part of nodes using the mask
A = mask * A * mask.view(B, 1, N)
mask = mask.squeeze()
data = (x, A, mask, N_nodes)
# visualize data
if self.visualize and sample_id_vis > -1:
b = sample_id_vis
plt.figure()
plt.imshow(y[b].view(N, 1).expand(N, 2)[:N_nodes_vis].data.cpu().numpy())
plt.title('Node ranking')
plt.colorbar()
plt.savefig('nodes_ranking_%d.png' % layer)
plt.figure()
plt.imshow(mask[b].view(N, 1).expand(N, 2)[:N_nodes_vis].data.cpu().numpy())
plt.title('Pooled nodes (%d/%d)' % (mask[b].sum(), N_nodes_prev[b]))
plt.savefig('pooled_nodes_mask_%d.png' % layer)
plt.figure()
plt.imshow(A[b][:N_nodes_vis, :N_nodes_vis].data.cpu().numpy())
plt.title('Pooled adjacency matrix')
plt.savefig('pooled_adjacency_%d.png' % layer)
print('layer %d: visualizations saved ' % layer)
if self.visualize and sample_id_vis > -1:
self.visualize = False # to prevent visualization for the following batches
x = torch.max(x, dim=1)[0].squeeze() # max pooling over nodes
x = self.fc(x)
return x
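# The pooling step above, restated as equations (same math as the code):
# y = X @ p / ||p||                    (scalar score per node)
# keep the N - floor((1 - ratio) * N) highest-scoring real nodes (mask update)
# X' = X * tanh(y) * mask,  A' = mask * A * mask^T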
class MGCN(nn.Module):
'''
Multigraph Convolutional Network based on (<NAME>., "Spectral Multigraph Networks for Discovering and Fusing Relationships in Molecules")
'''
def __init__(self,
in_features,
out_features,
n_relations,
filters=[64, 64, 64],
K=1,
bnorm=False,
n_hidden=0,
n_hidden_edge=32,
dropout=0.2,
adj_sq=False,
scale_identity=False):
super(MGCN, self).__init__()
# Graph convolution layers
self.gconv = nn.Sequential(*([GraphConv(in_features=in_features if layer == 0 else filters[layer - 1],
out_features=f,
n_relations=n_relations,
K=K,
activation=nn.ReLU(inplace=True),
bnorm=bnorm,
adj_sq=adj_sq,
scale_identity=scale_identity) for layer, f in enumerate(filters)]))
# Edge prediction NN
self.edge_pred = nn.Sequential(nn.Linear(in_features * 2, n_hidden_edge),
nn.ReLU(inplace=True),
nn.Linear(n_hidden_edge, 1))
# Fully connected layers
fc = []
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
if n_hidden > 0:
fc.append(nn.Linear(filters[-1], n_hidden))
if dropout > 0:
fc.append(nn.Dropout(p=dropout))
n_last = n_hidden
else:
n_last = filters[-1]
fc.append(nn.Linear(n_last, out_features))
self.fc = nn.Sequential(*fc)
def forward(self, data):
# data: [node_features, A, graph_support, N_nodes, label]
# Predict edges based on features
x = data[0]
B, N, C = x.shape
mask = data[2]
# find indices of nodes
x_cat, idx = [], []
for b in range(B):
n = int(mask[b].sum())
node_i = torch.nonzero(mask[b]).repeat(1, n).view(-1, 1)
node_j = torch.nonzero(mask[b]).repeat(n, 1).view(-1, 1)
triu = (node_i < node_j).squeeze() # skip loops and symmetric connections
x_cat.append(torch.cat((x[b, node_i[triu]], x[b, node_j[triu]]), 2).view(int(torch.sum(triu)), C * 2))
idx.append((node_i * N + node_j)[triu].squeeze())
x_cat = torch.cat(x_cat)
idx_flip = np.concatenate((np.arange(C, 2 * C), np.arange(C)))
# predict values and encourage invariance to nodes order
y = torch.exp(0.5 * (self.edge_pred(x_cat) + self.edge_pred(x_cat[:, idx_flip])).squeeze())
A_pred = torch.zeros(B, N * N, device=args.device)
c = 0
for b in range(B):
A_pred[b, idx[b]] = y[c:c + idx[b].nelement()]
c += idx[b].nelement()
A_pred = A_pred.view(B, N, N)
A_pred = (A_pred + A_pred.permute(0, 2, 1)) # assume undirected edges
# Use both annotated and predicted adjacency matrices to learn a GCN
data = (x, torch.stack((data[1], A_pred), 3), mask)
x = self.gconv(data)[0]
x = torch.max(x, dim=1)[0].squeeze() # max pooling over nodes
x = self.fc(x)
return x
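# Edge prediction above in one line (restating the code): for each node pair
# i < j of real nodes, y_ij = exp(0.5 * (f([x_i; x_j]) + f([x_j; x_i]))),
# which makes the predicted edge weight invariant to the order of i and j;
# the predicted A is then stacked with the annotated A as two relations.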
def collate_batch(batch):
'''
Creates a batch of same size graphs by zero-padding node features and adjacency matrices up to
the maximum number of nodes in the CURRENT batch rather than in the entire dataset.
Graphs in the batches are usually much smaller than the largest graph in the dataset, so this method is fast.
:param batch: batch in the PyTorch Geometric format or [node_features*batch_size, A*batch_size, label*batch_size]
:return: [node_features, A, graph_support, N_nodes, label]
'''
B = len(batch)
if args.torch_geom:
N_nodes = [len(batch[b].x) for b in range(B)]
C = batch[0].x.shape[1]
else:
N_nodes = [len(batch[b][1]) for b in range(B)]
C = batch[0][0].shape[1]
N_nodes_max = int(np.max(N_nodes))
graph_support = torch.zeros(B, N_nodes_max)
A = torch.zeros(B, N_nodes_max, N_nodes_max)
x = torch.zeros(B, N_nodes_max, C)
for b in range(B):
if args.torch_geom:
x[b, :N_nodes[b]] = batch[b].x
A[b].index_put_((batch[b].edge_index[0], batch[b].edge_index[1]), torch.Tensor([1]))
else:
x[b, :N_nodes[b]] = batch[b][0]
A[b, :N_nodes[b], :N_nodes[b]] = batch[b][1]
graph_support[b][:N_nodes[b]] = 1 # mask with values of 0 for dummy (zero padded) nodes, otherwise 1
N_nodes = torch.from_numpy(np.array(N_nodes)).long()
labels = torch.from_numpy(np.array([batch[b].y if args.torch_geom else batch[b][2] for b in range(B)])).long()
return [x, A, graph_support, N_nodes, labels]
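# Padding sketch (illustrative): a batch with graphs of 3 and 5 nodes is
# zero-padded to N_nodes_max = 5, and graph_support marks the real nodes:
# graph_support == [[1, 1, 1, 0, 0],
#                   [1, 1, 1, 1, 1]]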
is_regression = args.dataset in ['COLORS-3', 'TRIANGLES']  # these two are regression tasks; other datasets may also be regression (see their README.txt)
transforms = [] # for PyTorch Geometric
if args.dataset in ['COLORS-3', 'TRIANGLES']:
assert n_folds == 1, 'use train, val and test splits for these datasets'
assert args.use_cont_node_attr, 'node attributes should be used for these datasets'
if args.torch_geom:
# Class to read node attention from DS_node_attributes.txt
class HandleNodeAttention(object):
def __call__(self, data):
if args.dataset == 'COLORS-3':
data.attn = torch.softmax(data.x[:, 0], dim=0)
data.x = data.x[:, 1:]
else:
data.attn = torch.softmax(data.x, dim=0)
data.x = None
return data
transforms.append(HandleNodeAttention())
else:
assert n_folds == 10, '10-fold cross-validation should be used for other datasets'
print('Regression={}'.format(is_regression))
print('Loading data')
if is_regression:
def loss_fn(output, target, reduction='mean'):
loss = (target.float().squeeze() - output.squeeze()) ** 2
return loss.sum() if reduction == 'sum' else loss.mean()
predict_fn = lambda output: output.round().long().detach().cpu()
else:
loss_fn = F.cross_entropy
predict_fn = lambda output: output.max(1, keepdim=True)[1].detach().cpu()
if args.torch_geom:
if args.degree:
if args.dataset == 'TRIANGLES':
max_degree = 14
else:
raise NotImplementedError('max_degree value should be specified in advance. '
'Try running without --torch_geom (-g) and look at dataset statistics printed out by our code.')
if args.degree:
transforms.append(T.OneHotDegree(max_degree=max_degree, cat=False))
dataset = TUDataset('./data/%s/' % args.dataset, name=args.dataset,
use_node_attr=args.use_cont_node_attr,
transform=T.Compose(transforms))
train_ids, test_ids = split_ids(rnd_state.permutation(len(dataset)), folds=n_folds)
else:
datareader = DataReader(data_dir='./data/%s/' % args.dataset,
rnd_state=rnd_state,
folds=n_folds,
use_cont_node_attr=args.use_cont_node_attr)
acc_folds = []
for fold_id in range(n_folds):
loaders = []
for split in ['train', 'test']:
if args.torch_geom:
gdata = dataset[torch.from_numpy((train_ids if split.find('train') >= 0 else test_ids)[fold_id])]
else:
gdata = GraphData(fold_id=fold_id,
datareader=datareader,
split=split)
loader = DataLoader(gdata,
batch_size=args.batch_size,
shuffle=split.find('train') >= 0,
num_workers=args.threads,
collate_fn=collate_batch)
loaders.append(loader)
print('\nFOLD {}/{}, train {}, test {}'.format(fold_id + 1, n_folds, len(loaders[0].dataset), len(loaders[1].dataset)))
if args.model == 'gcn':
model = GCN(in_features=loaders[0].dataset.num_features,
out_features=1 if is_regression else loaders[0].dataset.num_classes,
n_hidden=args.n_hidden,
filters=args.filters,
K=args.filter_scale,
bnorm=args.bn,
dropout=args.dropout,
adj_sq=args.adj_sq,
scale_identity=args.scale_identity).to(args.device)
elif args.model == 'unet':
model = GraphUnet(in_features=loaders[0].dataset.num_features,
out_features=1 if is_regression else loaders[0].dataset.num_classes,
n_hidden=args.n_hidden,
filters=args.filters,
K=args.filter_scale,
bnorm=args.bn,
dropout=args.dropout,
adj_sq=args.adj_sq,
scale_identity=args.scale_identity,
shuffle_nodes=args.shuffle_nodes,
visualize=args.visualize).to(args.device)
elif args.model == 'mgcn':
model = MGCN(in_features=loaders[0].dataset.num_features,
out_features=1 if is_regression else loaders[0].dataset.num_classes,
n_relations=2,
n_hidden=args.n_hidden,
n_hidden_edge=args.n_hidden_edge,
filters=args.filters,
K=args.filter_scale,
bnorm=args.bn,
dropout=args.dropout,
adj_sq=args.adj_sq,
scale_identity=args.scale_identity).to(args.device)
else:
raise NotImplementedError(args.model)
print('\nInitialize model')
print(model)
train_params = list(filter(lambda p: p.requires_grad, model.parameters()))
print('N trainable parameters:', np.sum([p.numel() for p in train_params]))
optimizer = optim.Adam(train_params, lr=args.lr, weight_decay=args.wd, betas=(0.5, 0.999))
scheduler = lr_scheduler.MultiStepLR(optimizer, args.lr_decay_steps, gamma=0.1)
# Normalization of continuous node features
# if args.use_cont_node_attr:
# x = []
# for batch_idx, data in enumerate(loaders[0]):
# if args.torch_geom:
# node_attr_dim = loaders[0].dataset.props['node_attr_dim']
# x.append(data[0][:, :, :node_attr_dim].view(-1, node_attr_dim).data)
# x = torch.cat(x)
# mn, sd = torch.mean(x, dim=0).to(args.device), torch.std(x, dim=0).to(args.device) + 1e-5
# print(mn, sd)
# else:
# mn, sd = 0, 1
# def norm_features(x):
# x[:, :, :node_attr_dim] = (x[:, :, :node_attr_dim] - mn) / sd
def train(train_loader):
scheduler.step()  # note: newer PyTorch expects scheduler.step() after optimizer.step(); kept first here to preserve the original schedule
model.train()
start = time.time()
train_loss, n_samples = 0, 0
for batch_idx, data in enumerate(train_loader):
for i in range(len(data)):
data[i] = data[i].to(args.device)
# if args.use_cont_node_attr:
# data[0] = norm_features(data[0])
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, data[4])
loss.backward()
optimizer.step()
time_iter = time.time() - start
train_loss += loss.item() * len(output)
n_samples += len(output)
if batch_idx % args.log_interval == 0 or batch_idx == len(train_loader) - 1:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} (avg: {:.6f}) \tsec/iter: {:.4f}'.format(
epoch + 1, n_samples, len(train_loader.dataset),
100. * (batch_idx + 1) / len(train_loader), loss.item(), train_loss / n_samples,
time_iter / (batch_idx + 1)))
def test(test_loader):
model.eval()
start = time.time()
test_loss, correct, n_samples = 0, 0, 0
for batch_idx, data in enumerate(test_loader):
for i in range(len(data)):
data[i] = data[i].to(args.device)
# if args.use_cont_node_attr:
# data[0] = norm_features(data[0])
output = model(data)
loss = loss_fn(output, data[4], reduction='sum')
test_loss += loss.item()
n_samples += len(output)
pred = predict_fn(output)
correct += pred.eq(data[4].detach().cpu().view_as(pred)).sum().item()
acc = 100. * correct / n_samples
print('Test set (epoch {}): Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%) \tsec/iter: {:.4f}\n'.format(
epoch + 1,
test_loss / n_samples,
correct,
n_samples,
acc, (time.time() - start) / len(test_loader)))
return acc
for epoch in range(args.epochs):
train(loaders[0])  # evaluation after every epoch is optional; done here to track test accuracy
acc = test(loaders[1])
acc_folds.append(acc)
print(acc_folds)
print('{}-fold cross validation avg acc (+- std): {} ({})'.format(n_folds, np.mean(acc_folds), np.std(acc_folds)))
| 22,791 |
335 | <filename>P/Parallel.json
{
"word": "Parallel",
"definitions": [
"(of lines, planes, surfaces, or objects) side by side and having the same distance continuously between them."
],
"parts-of-speech": "Adjective"
}
| 87 |
1,531 | /*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.hc.client5.http.cookie;
import java.util.List;
import org.apache.hc.core5.http.Header;
/**
* Defines the cookie management specification.
* <p>Cookie management specification must define
* <ul>
* <li> rules of parsing "Set-Cookie" header
* <li> rules of validation of parsed cookies
* <li> formatting of "Cookie" header
* </ul>
* for a given host, port and path of origin
*
* @since 4.0
*/
public interface CookieSpec {
/**
* Parse the {@code "Set-Cookie"} header into a list of Cookies.
*
* <p>This method will not perform the validation of the resultant
* {@link Cookie}s</p>
*
* @see #validate
*
* @param header the {@code Set-Cookie} received from the server
* @param origin details of the cookie origin
* @return a list of {@code Cookie}s parsed from the header
* @throws MalformedCookieException if an exception occurs during parsing
*/
List<Cookie> parse(Header header, CookieOrigin origin) throws MalformedCookieException;
/**
* Validate the cookie according to validation rules defined by the
* cookie specification.
*
* @param cookie the Cookie to validate
* @param origin details of the cookie origin
* @throws MalformedCookieException if the cookie is invalid
*/
void validate(Cookie cookie, CookieOrigin origin) throws MalformedCookieException;
/**
* Determines if a Cookie matches the target location.
*
* @param cookie the Cookie to be matched
* @param origin the target to test against
*
* @return {@code true} if the cookie should be submitted with a request
* with given attributes, {@code false} otherwise.
*/
boolean match(Cookie cookie, CookieOrigin origin);
/**
* Create {@code "Cookie"} headers for an array of Cookies.
*
* @param cookies the Cookies to format into a {@code Cookie} header
* @return a Header for the given Cookies.
* @throws IllegalArgumentException if an input parameter is illegal
*/
List<Header> formatCookies(List<Cookie> cookies);
}
| 971 |
369 | // Copyright (c) 2017-2021, Mudita <NAME>.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
static void syntax(char **argv)
{
fprintf(stderr, "%s filename blkdev\n", argv[0]);
}
static struct fiemap *read_fiemap(int fd)
{
struct fiemap *fiemap;
int extents_size;
if ((fiemap = (struct fiemap *)malloc(sizeof(struct fiemap))) == NULL) {
fprintf(stderr, "Out of memory allocating fiemap\n");
return NULL;
}
memset(fiemap, 0, sizeof(struct fiemap));
fiemap->fm_start = 0;
fiemap->fm_length = ~0; /* Lazy */
fiemap->fm_flags = 0;
fiemap->fm_extent_count = 0;
fiemap->fm_mapped_extents = 0;
/* Find out how many extents there are */
if (ioctl(fd, FS_IOC_FIEMAP, fiemap) < 0) {
fprintf(stderr, "fiemap ioctl() failed\n");
return NULL;
}
/* Read in the extents */
extents_size = sizeof(struct fiemap_extent) * (fiemap->fm_mapped_extents);
/* Resize fiemap to allow us to read in the extents */
if ((fiemap = (struct fiemap *)realloc(fiemap, sizeof(struct fiemap) + extents_size)) == NULL) {
fprintf(stderr, "Out of memory allocating fiemap\n");
return NULL;
}
memset(fiemap->fm_extents, 0, extents_size);
fiemap->fm_extent_count = fiemap->fm_mapped_extents;
fiemap->fm_mapped_extents = 0;
if (ioctl(fd, FS_IOC_FIEMAP, fiemap) < 0) {
fprintf(stderr, "fiemap ioctl() failed\n");
return NULL;
}
return fiemap;
}
static off_t compare_extent(int fd1, int fd2, off_t start_offs, size_t len)
{
char buf1[32768], buf2[32768];
off_t offs;
ssize_t res1, res2;
size_t segpos = 0;
offs = lseek(fd1, start_offs, SEEK_SET);
if (offs < 0)
return offs;
offs = lseek(fd2, start_offs, SEEK_SET);
if (offs < 0)
return offs;
while (len > 0) {
const size_t rdsiz = (len > sizeof(buf1)) ? (sizeof buf1) : (len);
res1 = read(fd1, buf1, rdsiz);
res2 = read(fd2, buf2, rdsiz);
if (res1 == res2 && res1 > 0) {
if (memcmp(buf1, buf2, res1) == 0) {
len -= res1;
segpos += res1;
}
else {
/* mismatch offset + 1, so that 0 unambiguously means "extents equal" */
return start_offs + segpos + 1;
}
}
else {
return start_offs + segpos + 1;
}
}
return 0;
}
static ssize_t write_all(int fd, const char *buf, ssize_t size)
{
ssize_t res;
while (size > 0 && (res = write(fd, buf, size)) != size) {
if (res < 0 && errno == EINTR)
continue;
else if (res < 0) {
return res;
}
size -= res;
buf += res;
}
return 0;
}
static off_t copy_extent(int fd_dest, int fd_src, off_t start_offs, size_t len)
{
char copy_buf[32768];
off_t offs = lseek(fd_dest, start_offs, SEEK_SET);
if (offs < 0)
return offs;
offs = lseek(fd_src, start_offs, SEEK_SET);
if (offs < 0)
return offs;
while (len > 0) {
const size_t nread = len > sizeof(copy_buf) ? sizeof(copy_buf) : len;
ssize_t rlen = read(fd_src, copy_buf, nread);
if (rlen < 0 && errno == EINTR)
continue;
else if (rlen > 0) {
ssize_t res = write_all(fd_dest, copy_buf, rlen);
if (res == 0) {
len -= rlen;
}
else {
return -1;
}
}
else {
return -1;
}
}
return 0;
}
static int verify_image(const char *image_file, const char *block_device)
{
int fd_sparse, fd_block;
if ((fd_sparse = open(image_file, O_RDONLY)) < 0) {
fprintf(stderr, "Cannot open sparse file %s\n", image_file);
return EXIT_FAILURE;
}
if ((fd_block = open(block_device, O_RDONLY)) < 0) {
fprintf(stderr, "Cannot open block device %s\n", block_device);
close(fd_sparse);
return EXIT_FAILURE;
}
struct fiemap *fiemap;
if (!(fiemap = read_fiemap(fd_sparse))) {
fprintf(stderr, "Unable to read fiemap %s\n", image_file);
close(fd_sparse);
close(fd_block);
return EXIT_FAILURE;
}
printf("File %s verify %d extents:\n", image_file, fiemap->fm_mapped_extents);
printf("#\tOffset Length Verify\n");
off_t result = -1;
for (unsigned i = 0; i < fiemap->fm_mapped_extents; i++) {
result = compare_extent(fd_sparse, fd_block, fiemap->fm_extents[i].fe_logical, fiemap->fm_extents[i].fe_length);
printf("%d:\t%-16.16llx %-16.16llx ", i, fiemap->fm_extents[i].fe_logical, fiemap->fm_extents[i].fe_length);
if (result) {
printf("ERR (%lx)\n", result);
}
else {
printf("OK\n");
}
if (result) {
if (result > 0) {
/* compare_extent reports mismatch offset + 1 so that 0 can mean success */
fprintf(stderr, "Error: Data mismatch at offset %ld\n", (long)(result - 1));
}
else {
perror("System error (Verify)");
}
break;
}
}
close(fd_sparse);
close(fd_block);
free(fiemap);
return (result ? EXIT_FAILURE : EXIT_SUCCESS);
}
static int write_image(const char *image_file, const char *block_device)
{
struct stat sbuf;
if (stat(image_file, &sbuf)) {
perror("System error (stat image_file):");
return EXIT_FAILURE;
}
if (!S_ISREG(sbuf.st_mode)) {
fprintf(stderr, "Error: %s is not a regular file\n", image_file);
return EXIT_FAILURE;
}
if (stat(block_device, &sbuf)) {
perror("System error (stat block_device):");
return EXIT_FAILURE;
}
if (!S_ISBLK(sbuf.st_mode)) {
fprintf(stderr, "Error: %s is not a block device\n", block_device);
return EXIT_FAILURE;
}
int fd_sparse, fd_block;
if ((fd_sparse = open(image_file, O_RDONLY)) < 0) {
fprintf(stderr, "Error: Cannot open sparse file %s\n", image_file);
return EXIT_FAILURE;
}
if ((fd_block = open(block_device, O_WRONLY)) < 0) {
fprintf(stderr, "Error: Cannot open block device %s\n", block_device);
close(fd_sparse);
return EXIT_FAILURE;
}
struct fiemap *fiemap;
if (!(fiemap = read_fiemap(fd_sparse))) {
fprintf(stderr, "Error: Unable to read fiemap %s\n", image_file);
close(fd_block);
close(fd_sparse);
return EXIT_FAILURE;
}
printf("File %s copy %d extents:\n", image_file, fiemap->fm_mapped_extents);
printf("#\tOffset Length Status\n");
off_t result = -1;
for (unsigned i = 0; i < fiemap->fm_mapped_extents; i++) {
result = copy_extent(fd_block, fd_sparse, fiemap->fm_extents[i].fe_logical, fiemap->fm_extents[i].fe_length);
printf("%d:\t%-16.16llx %-16.16llx %s\n",
i,
fiemap->fm_extents[i].fe_logical,
fiemap->fm_extents[i].fe_length,
result ? "FAIL" : "OK");
if (result) {
if (errno)
perror("System error (Write copy_extent):");
break;
}
}
free(fiemap);
// Sync block filesystem
syncfs(fd_block);
// Re-read partition table on the device
if (ioctl(fd_block, BLKRRPART, NULL)) {
fprintf(stderr, "Warning: Unable to re-read kernel partition table\n");
}
close(fd_block);
close(fd_sparse);
return result ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
const char *img_file, *blk_dev;
if (argc == 3) {
img_file = argv[1];
blk_dev = argv[2];
}
else {
syntax(argv);
return EXIT_FAILURE;
}
if (write_image(img_file, blk_dev)) {
return EXIT_FAILURE;
}
int result = verify_image(img_file, blk_dev);
fprintf(stderr, "Write image %s to %s %s\n", img_file, blk_dev, result ? "FAILED" : "SUCCESS");
return result;
}
| 4,235 |
1,056 | <reponame>timfel/netbeans<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Util.java
*
* Created on October 4, 2005, 7:48 PM
*
* To change this template, choose Tools | Template Manager
* and open the template in the editor.
*/
package org.netbeans.modules.xml.xam;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.URI;
import javax.swing.text.Document;
import org.netbeans.modules.xml.xam.dom.DocumentModel;
import org.netbeans.modules.xml.xam.dom.ReadOnlyAccess;
import org.openide.util.Lookup;
import org.openide.util.lookup.Lookups;
/**
*
* @author nn136682
*/
public class Util {
public static final String EMPTY_XSD = "resources/Empty.xml";
public static javax.swing.text.Document getResourceAsDocument(String path) throws Exception {
InputStream in = Util.class.getResourceAsStream(path);
return loadDocument(in);
}
public static javax.swing.text.Document loadDocument(InputStream in) throws Exception {
javax.swing.text.Document sd = ReadOnlyAccess.Provider.getInstance().loadSwingDocument(in);
return sd;
}
public static TestModel2 loadModel(String path) throws Exception {
TestModel2 model = new TestModel2(getResourceAsDocument(path));
return model;
}
public static void dumpToStream(Document doc, OutputStream out) throws Exception{
PrintWriter w = new PrintWriter(out);
w.print(doc.getText(0, doc.getLength()));
w.close();
out.close();
}
public static void dumpToFile(Document doc, File f) throws Exception {
OutputStream out = new BufferedOutputStream(new FileOutputStream(f));
PrintWriter w = new PrintWriter(out);
w.print(doc.getText(0, doc.getLength()));
w.close();
out.close();
}
public static File dumpToTempFile(Document doc) throws Exception {
File f = File.createTempFile("xsm", "xsd");
dumpToFile(doc, f);
return f;
}
public static Document loadDocument(File f) throws Exception {
InputStream in = new BufferedInputStream(new FileInputStream(f));
return loadDocument(in);
}
public static Document setDocumentContentTo(Document doc, InputStream in) throws Exception {
BufferedReader br = new BufferedReader(new InputStreamReader(in));
StringBuffer sbuf = new StringBuffer();
try {
String line = null;
while ((line = br.readLine()) != null) {
sbuf.append(line);
sbuf.append(System.getProperty("line.separator"));
}
} finally {
br.close();
}
doc.remove(0, doc.getLength());
doc.insertString(0,sbuf.toString(),null);
return doc;
}
public static Document setDocumentContentTo(Document doc, String resourcePath) throws Exception {
return setDocumentContentTo(doc, Util.class.getResourceAsStream(resourcePath));
}
public static TestModel2 dumpAndReloadModel(DocumentModel sm) throws Exception {
Document doc = (Document) sm.getModelSource().getLookup().lookup(Document.class);
File f = dumpToTempFile(doc);
return new TestModel2(loadDocument(f));
}
public static URI getResourceURI(String path) throws Exception {
return Util.class.getResource(path).toURI();
}
public static File getResourceFile(String path) throws Exception {
return new File(getResourceURI(path));
}
public static ModelSource createModelSource(Document doc) {
Lookup lookup = Lookups.fixed(new Object[] { doc } );
return new ModelSource(lookup, true);
}
public static ModelSource createModelSource(String path) throws Exception {
Document doc = Util.getResourceAsDocument(path);
File file = Util.getResourceFile(path);
Lookup lookup = Lookups.fixed(new Object[] { doc, file } );
return new ModelSource(lookup, true);
}
}
| 1,791 |
1,998 | <reponame>keeper121/onnx-tensorrt
/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <NvInfer.h>
#include <string>
#include <vector>
#include "TensorOrWeights.hpp"
#include "ImporterContext.hpp"
namespace onnx2trt
{
nvinfer1::ITensor* addRNNInput(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ILoop* loop, std::vector<TensorOrWeights>& inputs, const std::string& direction);
// Zeros out invalid timesteps in toMask. maxLen must be provided if reverse is true
nvinfer1::ITensor* clearMissingSequenceElements(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* toMask, nvinfer1::ITensor* maxLen, bool reverse = false, nvinfer1::ITensor* counter = nullptr);
// Returns a bool tensor which is true during valid timesteps
nvinfer1::ITensor* getRaggedMask(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);
// Selects between prevH and Ht to forward previous hidden state through invalid timesteps
nvinfer1::ITensor* maskRNNHidden(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* prevH, nvinfer1::ITensor* Ht, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);
// Splits a bidirectional hidden state into forward and reverse passes, masks each using maskRNNHidden, then concatenates
nvinfer1::ITensor* maskBidirRNNHidden(IImporterContext* ctx, const ::ONNX_NAMESPACE::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen, nvinfer1::ITensor* Ht1, nvinfer1::ITensor* Ht, nvinfer1::ITensor* singlePassShape);
} // namespace onnx2trt
| 690 |
1,319 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Helper utilities for line-based file I/O and MD5 hashing (written for Python 2).
"""
import hashlib
def read_by_lines(path, encoding="utf-8"):
"""read the data by line"""
result = list()
with open(path, "r") as infile:
for line in infile:
result.append(line.strip().decode(encoding))
return result
def write_by_lines(path, data, t_code="utf-8"):
"""write the data"""
with open(path, "w") as outfile:
for d in data:
outfile.write(d.encode(t_code) + "\n")
def cal_md5(text):
"""calculate string md5 (parameter renamed to avoid shadowing the builtin str)"""
text = text.decode("utf-8", "ignore").encode("utf-8", "ignore")
return hashlib.md5(text).hexdigest()
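# Usage sketch (illustrative; file names are assumptions, Python 2):
# lines = read_by_lines('input.txt')   # list of unicode lines
# write_by_lines('output.txt', lines)
# digest = cal_md5('hello world')      # 32-character hex string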
| 284 |
514 | <filename>litedram/gen-src/sdram_init/libc/src/strstr.c<gh_stars>100-1000
/******************************************************************************
* Copyright (c) 2004, 2008 IBM Corporation
* All rights reserved.
* This program and the accompanying materials
* are made available under the terms of the BSD License
* which accompanies this distribution, and is available at
* http://www.opensource.org/licenses/bsd-license.php
*
* Contributors:
* IBM Corporation - initial implementation
*****************************************************************************/
#include <stddef.h>
size_t strlen(const char *s);
int strncmp(const char *s1, const char *s2, size_t n);
char *strstr(const char *hay, const char *needle);
char *strstr(const char *hay, const char *needle)
{
char *pos;
size_t hlen, nlen;
if (hay == NULL || needle == NULL)
return NULL;
hlen = strlen(hay);
nlen = strlen(needle);
if (nlen < 1)
return (char *)hay;
for (pos = (char *)hay; pos < hay + hlen; pos++) {
if (strncmp(pos, needle, nlen) == 0) {
return pos;
}
}
return NULL;
}
| 359 |
2,500 | /*
* Copyright 2016 DiffPlug
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.diffplug.spotless;
import java.io.FileFilter;
import java.io.Serializable;
/** A file filter with full support for serialization. */
public interface SerializableFileFilter extends FileFilter, Serializable, NoLambda {
/** Creates a FileFilter which will accept all files except files with the given name(s). */
public static SerializableFileFilter skipFilesNamed(String... names) {
return new SerializableFileFilterImpl.SkipFilesNamed(names);
}
}
| 278 |
5,238 | <gh_stars>1000+
# Create an array for the points of the line
line_points = [ {"x":5, "y":5},
{"x":70, "y":70},
{"x":120, "y":10},
{"x":180, "y":60},
{"x":240, "y":10}]
# Create style
style_line = lv.style_t()
style_line.init()
style_line.set_line_width(8)
style_line.set_line_color(lv.palette_main(lv.PALETTE.BLUE))
style_line.set_line_rounded(True)
# Create a line and apply the new style
line1 = lv.line(lv.scr_act())
line1.set_points(line_points, 5) # Set the points
line1.add_style(style_line, 0)
line1.center()
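# Note (hedged): the second argument of set_points must equal
# len(line_points); the style above draws the 4 connecting segments
# with rounded joints in 8px blue.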
| 291 |
848 | <filename>tensorflow/lite/delegates/gpu/cl/texture2d.h
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_TEXTURE2D_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_CL_TEXTURE2D_H_
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_context.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor_type.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
// Texture2D represent formatted GPU data storage.
// Texture2D is moveable but not copyable.
class Texture2D {
public:
Texture2D() {}  // just for using Texture2D as a class member
Texture2D(cl_mem texture, int width, int height, cl_channel_type type);
// Move only
Texture2D(Texture2D&& texture);
Texture2D& operator=(Texture2D&& texture);
Texture2D(const Texture2D&) = delete;
Texture2D& operator=(const Texture2D&) = delete;
~Texture2D();
cl_mem GetMemoryPtr() const { return texture_; }
// Writes data to a texture. Data should point to a region that
// has exact width * height * sizeof(pixel) bytes.
template <typename T>
Status WriteData(CLCommandQueue* queue, const absl::Span<T> data);
// Reads data from Texture2D into CPU memory.
template <typename T>
Status ReadData(CLCommandQueue* queue, std::vector<T>* result) const;
private:
void Release();
cl_mem texture_ = nullptr;
int width_;
int height_;
cl_channel_type channel_type_;
};
using Texture2DPtr = std::shared_ptr<Texture2D>;
// Creates new 4-channel 2D texture with f32 elements
Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
Texture2D* result);
// Creates new 4-channel 2D texture with f16 elements
Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
Texture2D* result);
Status CreateTexture2DRGBA(DataType type, int width, int height,
CLContext* context, Texture2D* result);
Status CreateTexture2DRGBA(DataType type, int width, int height, void* data,
CLContext* context, Texture2D* result);
template <typename T>
Status Texture2D::WriteData(CLCommandQueue* queue, const absl::Span<T> data) {
const int element_size = ChannelTypeToSizeInBytes(channel_type_);
if (sizeof(T) % element_size != 0) {
return InvalidArgumentError(
"Template type T has not suitable element type for created texture.");
}
if (4 * width_ * height_ * element_size != data.size() * sizeof(T)) {
return InvalidArgumentError(
"absl::Span<T> data size is different from texture allocated size.");
}
RETURN_IF_ERROR(queue->EnqueueWriteImage(texture_, int3(width_, height_, 1),
data.data()));
return OkStatus();
}
template <typename T>
Status Texture2D::ReadData(CLCommandQueue* queue,
std::vector<T>* result) const {
const int element_size = ChannelTypeToSizeInBytes(channel_type_);
if (sizeof(T) != element_size) {
return InvalidArgumentError("Pixel format is different.");
}
const int elements_count = width_ * height_ * 4;
result->resize(elements_count);
return queue->EnqueueReadImage(texture_, int3(width_, height_, 1),
result->data());
}
} // namespace cl
} // namespace gpu
} // namespace tflite
#endif // TENSORFLOW_LITE_DELEGATES_GPU_CL_TEXTURE2D_H_
| 1,564 |
8,027 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.sourcepath.resolver.impl;
import com.facebook.buck.core.exceptions.HumanReadableException;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.sourcepath.ArchiveMemberSourcePath;
import com.facebook.buck.core.sourcepath.BuildTargetSourcePath;
import com.facebook.buck.core.sourcepath.DefaultBuildTargetSourcePath;
import com.facebook.buck.core.sourcepath.ExplicitBuildTargetSourcePath;
import com.facebook.buck.core.sourcepath.ForwardingBuildTargetSourcePath;
import com.facebook.buck.core.sourcepath.PathSourcePath;
import com.facebook.buck.core.sourcepath.SourcePath;
import com.facebook.buck.core.sourcepath.resolver.SourcePathResolver;
import com.facebook.buck.core.sourcepath.resolver.SourcePathResolverAdapter;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Ordering;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* Abstract implementation of SourcePathResolver.
*
* <p>Most of the SourcePathResolverAdapter interface can be implemented in terms of just a few
* functions ( the main requirement is resolving BuildTargetSourcePaths).
*
* <p>Existing code may expect to resolve each {@link SourcePath} to only one {@link Path}. In such
* cases, {@link SourcePathResolverAdapter} is used to convert the resolver to return only one
* {@link Path} per {@link SourcePath}.
*/
public abstract class AbstractSourcePathResolver implements SourcePathResolver {
protected abstract ImmutableSortedSet<SourcePath> resolveDefaultBuildTargetSourcePath(
DefaultBuildTargetSourcePath targetSourcePath);
@Override
public abstract String getSourcePathName(BuildTarget target, SourcePath sourcePath);
protected abstract ProjectFilesystem getBuildTargetSourcePathFilesystem(
BuildTargetSourcePath sourcePath);
@Override
public <T> ImmutableMap<T, ImmutableSortedSet<Path>> getMappedPaths(
Map<T, SourcePath> sourcePathMap) {
ImmutableMap.Builder<T, ImmutableSortedSet<Path>> paths = ImmutableMap.builder();
for (ImmutableMap.Entry<T, SourcePath> entry : sourcePathMap.entrySet()) {
paths.put(entry.getKey(), getAbsolutePath(entry.getValue()));
}
return paths.build();
}
/** @return the {@link ProjectFilesystem} associated with {@code sourcePath}. */
@Override
public ProjectFilesystem getFilesystem(SourcePath sourcePath) {
if (sourcePath instanceof PathSourcePath) {
return ((PathSourcePath) sourcePath).getFilesystem();
}
if (sourcePath instanceof BuildTargetSourcePath) {
return getBuildTargetSourcePathFilesystem((BuildTargetSourcePath) sourcePath);
}
if (sourcePath instanceof ArchiveMemberSourcePath) {
return getFilesystem(((ArchiveMemberSourcePath) sourcePath).getArchiveSourcePath());
}
throw new IllegalStateException();
}
/**
* @return the {@link Path} instances for this {@code sourcePath}, resolved using its associated
* {@link ProjectFilesystem}.
*/
@Override
public ImmutableSortedSet<Path> getAbsolutePath(SourcePath sourcePath) {
ImmutableSortedSet<Path> paths = getPathPrivateImpl(sourcePath);
ImmutableSortedSet.Builder<Path> builder = ImmutableSortedSet.naturalOrder();
for (Path path : paths) {
if (path.isAbsolute()) {
builder.add(path);
} else if (sourcePath instanceof BuildTargetSourcePath) {
builder.add(
getBuildTargetSourcePathFilesystem((BuildTargetSourcePath) sourcePath).resolve(path));
} else if (sourcePath instanceof PathSourcePath) {
builder.add(((PathSourcePath) sourcePath).getFilesystem().resolve(path));
} else {
throw new UnsupportedOperationException(sourcePath.getClass() + " is not supported here!");
}
}
return builder.build();
}
@Override
public ImmutableSortedSet<Path> getAllAbsolutePaths(
Collection<? extends SourcePath> sourcePaths) {
return sourcePaths.stream()
.flatMap(sourcePath -> getAbsolutePath(sourcePath).stream())
.collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));
}
/**
* @return The {@link Path} instances the {@code sourcePath} refers to, relative to its owning
* {@link ProjectFilesystem}.
*/
@Override
public ImmutableSortedSet<Path> getRelativePath(SourcePath sourcePath) {
ImmutableSortedSet<Path> toReturns = getPathPrivateImpl(sourcePath);
toReturns.forEach(
toReturn ->
Preconditions.checkState(
!toReturn.isAbsolute(),
"Expected path to be relative, not absolute: %s (from %s)",
toReturn,
sourcePath));
return toReturns;
}
/**
* @return The {@link Path} instances the {@code sourcePath} refers to, ideally relative to its
* owning {@link ProjectFilesystem}. Absolute path may get returned however!
* <p>We should make sure that {@link #getPathPrivateImpl} always returns a relative path
* after which we should simply call {@link #getRelativePath}. Until then we still need this
* nonsense.
*/
@Override
public ImmutableSortedSet<Path> getIdeallyRelativePath(SourcePath sourcePath) {
return getPathPrivateImpl(sourcePath);
}
private ImmutableSortedSet<Path> getPathsPrivateImpl(ImmutableSortedSet<SourcePath> sourcePaths) {
ImmutableSortedSet.Builder<Path> pathsBuilder = ImmutableSortedSet.naturalOrder();
sourcePaths.forEach(sourcePath -> pathsBuilder.addAll(getPathPrivateImpl(sourcePath)));
return pathsBuilder.build();
}
/**
* @return the {@link SourcePath} as a list of {@link Path} instances, with no guarantee whether
* the return value is absolute or relative. This should never be exposed to users. A {@link
* SourcePath} may resolve into multiple {@link Path} instances if the associated build target
* has multiple outputs.
*/
private ImmutableSortedSet<Path> getPathPrivateImpl(SourcePath sourcePath) {
if (sourcePath instanceof PathSourcePath) {
return ImmutableSortedSet.of(((PathSourcePath) sourcePath).getRelativePath());
} else if (sourcePath instanceof ExplicitBuildTargetSourcePath) {
return ImmutableSortedSet.of(((ExplicitBuildTargetSourcePath) sourcePath).getResolvedPath());
} else if (sourcePath instanceof ForwardingBuildTargetSourcePath) {
return getPathPrivateImpl(((ForwardingBuildTargetSourcePath) sourcePath).getDelegate());
} else if (sourcePath instanceof DefaultBuildTargetSourcePath) {
DefaultBuildTargetSourcePath targetSourcePath = (DefaultBuildTargetSourcePath) sourcePath;
ImmutableSortedSet<SourcePath> paths = resolveDefaultBuildTargetSourcePath(targetSourcePath);
return getPathsPrivateImpl(paths);
} else {
throw new UnsupportedOperationException(sourcePath.getClass() + " is not supported here!");
}
}
/**
* Resolved the logical names for a group of SourcePath objects into a map, throwing an error on
* duplicates.
*/
@Override
public ImmutableMap<String, SourcePath> getSourcePathNames(
BuildTarget target, String parameter, Iterable<SourcePath> sourcePaths) {
return getSourcePathNames(target, parameter, sourcePaths, x -> true, x -> x);
}
/**
* Resolves the logical names for a group of objects that have a SourcePath into a map, throwing
* an error on duplicates.
*/
@Override
public <T> ImmutableMap<String, T> getSourcePathNames(
BuildTarget target,
String parameter,
Iterable<T> objects,
Predicate<T> filter,
Function<T, SourcePath> objectSourcePathFunction) {
Map<String, T> resolved = new LinkedHashMap<>();
for (T object : objects) {
if (filter.test(object)) {
SourcePath path = objectSourcePathFunction.apply(object);
String name = getSourcePathName(target, path);
T old = resolved.put(name, object);
if (old != null) {
throw new HumanReadableException(
String.format(
"%s: parameter '%s': duplicate entries for '%s'", target, parameter, name));
}
}
}
return ImmutableMap.copyOf(resolved);
}
/**
* Takes an {@link Iterable} of {@link SourcePath} objects and filters those that represent {@link
* Path}s.
*/
@Override
public ImmutableCollection<Path> filterInputsToCompareToOutput(
Iterable<? extends SourcePath> sources) {
// Currently, the only implementation of SourcePath that should be included in the Iterable
// returned by getInputsToCompareToOutput() is PathSourcePath, so it is safe to filter by that
// and then use getRelativePath() to get its path.
//
// BuildTargetSourcePath should not be included in the output because it refers to a generated
// file, and generated files are not hashed as part of a RuleKey.
return FluentIterable.from(sources)
.filter(PathSourcePath.class)
.transform(PathSourcePath::getRelativePath)
.toList();
}
@Override
public ImmutableSortedSet<Path> getRelativePath(
ProjectFilesystem projectFilesystem, SourcePath sourcePath) {
return getAbsolutePath(sourcePath).stream()
.map(path -> projectFilesystem.relativize(path).getPath())
.collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));
}
@Override
public ImmutableMap<Path, Path> createRelativeMap(
Path basePath, Iterable<SourcePath> sourcePaths) {
// The goal here is pretty simple.
// 1. For a PathSourcePath (an explicit file reference in a BUCK file) that is a
// a. file, add it as a single entry at a path relative to this target's base path
// b. directory, add all its contents as paths relative to this target's base path
// 2. For a BuildTargetSourcePath (an output of another rule) that is a
// a. file, add it as a single entry with just the filename
// b. directory, add all its contents as paths relative to that directory, preceded by the
// directory name
//
// Simplified: 1a and 1b add the item relative to the target's directory, 2a and 2b add the item
// relative to its own parent.
// TODO(cjhopman): We should remove 1a because we shouldn't allow specifying directories in
// srcs.
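// A worked sketch of the resulting map (hypothetical paths, purely illustrative):
//   basePath = /repo/foo
//   PathSourcePath /repo/foo/res/a.txt             -> { res/a.txt : /repo/foo/res/a.txt }   (1a)
//   BuildTargetSourcePath .../buck-out/gen/lib.jar -> { lib.jar : .../buck-out/gen/lib.jar } (2a)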
Map<Path, Path> relativePathMap = new LinkedHashMap<>();
for (SourcePath sourcePath : sourcePaths) {
ProjectFilesystem filesystem = getFilesystem(sourcePath);
ImmutableSortedSet<Path> absolutePaths =
getAbsolutePath(sourcePath).stream()
.map(Path::normalize)
.collect(ImmutableSortedSet.toImmutableSortedSet(Comparator.naturalOrder()));
for (Path absolutePath : absolutePaths) {
try {
if (sourcePath instanceof PathSourcePath) {
// If the path doesn't start with the base path, then it's a reference to a file in a
// different package and violates package boundaries. We could just add it by the
// filename, but better to discourage violating package boundaries.
Verify.verify(
absolutePath.startsWith(basePath),
"Expected %s to start with %s.",
absolutePath,
basePath);
addPathToRelativePathMap(
filesystem,
relativePathMap,
basePath,
absolutePath,
basePath.relativize(absolutePath));
} else {
addPathToRelativePathMap(
filesystem,
relativePathMap,
absolutePath.getParent(),
absolutePath,
absolutePath.getFileName());
}
} catch (IOException e) {
throw new RuntimeException(
String.format("Couldn't read directory [%s].", absolutePath.toString()), e);
}
}
}
return ImmutableMap.copyOf(relativePathMap);
}
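/**
 * Adds a single file to the map, or, when {@code absolutePath} is a directory, every file
 * beneath it, keyed by its path relative to {@code basePath}.
 */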
private static void addPathToRelativePathMap(
ProjectFilesystem filesystem,
Map<Path, Path> relativePathMap,
Path basePath,
Path absolutePath,
Path relativePath)
throws IOException {
if (Files.isDirectory(absolutePath)) {
ImmutableSet<Path> files = filesystem.getFilesUnderPath(absolutePath);
for (Path file : files) {
Path absoluteFilePath = filesystem.resolve(file).normalize();
addToRelativePathMap(
relativePathMap, basePath.relativize(absoluteFilePath), absoluteFilePath);
}
} else {
addToRelativePathMap(relativePathMap, relativePath, absolutePath);
}
}
private static void addToRelativePathMap(
Map<Path, Path> relativePathMap, Path pathRelativeToBaseDir, Path absoluteFilePath) {
relativePathMap.compute(
pathRelativeToBaseDir,
(ignored, current) -> {
if (current != null) {
throw new HumanReadableException(
"The file '%s' appears twice in the hierarchy",
pathRelativeToBaseDir.getFileName());
}
return absoluteFilePath;
});
}
}
| 4,881 |
348 | {"nom":"Macheren","circ":"7ème circonscription","dpt":"Moselle","inscrits":2435,"abs":1530,"votants":905,"blancs":56,"nuls":18,"exp":831,"res":[{"nuance":"REM","nom":"<NAME>","voix":477},{"nuance":"FN","nom":"<NAME>","voix":354}]} | 93 |
1,884 | package com.dinuscxj.example.demo;
import android.content.Context;
import android.support.annotation.NonNull;
import android.support.v4.app.Fragment;
import com.dinuscxj.example.R;
public class OpenProjectTabPagerFragment extends TabPagerFragment {
public static OpenProjectTabPagerFragment newInstance() {
return new OpenProjectTabPagerFragment();
}
@Override
public void onBuildTabPager(@NonNull Builder builder) {
FragmentEntry.buildTabPager(builder, getActivity());
}
private enum FragmentEntry {
NORMAL(
R.string.tab_normal,
OpenProjectNormalFragment.class),
FLOAT(
R.string.tab_float,
OpenProjectFloatFragment.class),
PINNED(
R.string.tab_pinned,
OpenProjectPinnedFragment.class);
final int titleResource;
final Class<? extends Fragment> fragmentClass;
FragmentEntry(int indicatorResource, Class<? extends Fragment> fragmentClass) {
this.titleResource = indicatorResource;
this.fragmentClass = fragmentClass;
}
static void buildTabPager(Builder builder, Context context) {
for (FragmentEntry e : FragmentEntry.values()) {
builder.addTab(context.getString(e.titleResource), e.fragmentClass, null);
}
}
}
}
| 590 |
529 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) Microsoft Corporation. All rights reserved.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "spot_net_native.cpp"
#include "spot_net_native_Microsoft_SPOT_Net_NetworkInformation_NetworkInterface.cpp"
#include "spot_net_native_Microsoft_SPOT_Net_NetworkInformation_Wireless80211.cpp"
#include "spot_net_native_Microsoft_SPOT_Net_SocketNative.cpp"
| 125 |
342 | <reponame>gspu/bitkeeper<gh_stars>100-1000
/*
* tkTreeMarquee.c --
*
* This module implements the selection rectangle for treectrl widgets.
*
* Copyright (c) 2002-2011 <NAME>
*/
#include "tkTreeCtrl.h"
typedef struct TreeMarquee_ TreeMarquee_;
/*
* The following structure holds info about the selection rectangle.
* There is one of these per TreeCtrl.
*/
struct TreeMarquee_
{
TreeCtrl *tree;
Tk_OptionTable optionTable;
int visible; /* -visible option. */
int x1, y1, x2, y2; /* Opposing corners. */
int onScreen; /* TRUE if it was drawn. */
int sx, sy; /* Offset of canvas from top-left
* corner of the window when we
* were drawn. */
int sw, sh; /* Width & height when drawn. */
TreeColor *fillColorPtr; /* -fill */
Tcl_Obj *fillObj; /* -fill */
TreeColor *outlineColorPtr; /* -outline */
Tcl_Obj *outlineObj; /* -outline */
int outlineWidth; /* -outlinewidth */
Tcl_Obj *outlineWidthObj; /* -outlinewidth */
};
#define MARQ_CONF_VISIBLE 0x0001
#define MARQ_CONF_COLORS 0x0002
static Tk_OptionSpec optionSpecs[] = {
{TK_OPTION_CUSTOM, "-fill", (char *) NULL, (char *) NULL,
(char *) NULL, Tk_Offset(TreeMarquee_, fillObj),
Tk_Offset(TreeMarquee_, fillColorPtr), TK_OPTION_NULL_OK,
(ClientData) &TreeCtrlCO_treecolor, MARQ_CONF_COLORS},
{TK_OPTION_CUSTOM, "-outline", (char *) NULL, (char *) NULL,
(char *) NULL, Tk_Offset(TreeMarquee_, outlineObj),
Tk_Offset(TreeMarquee_, outlineColorPtr), TK_OPTION_NULL_OK,
(ClientData) &TreeCtrlCO_treecolor, MARQ_CONF_COLORS},
{TK_OPTION_PIXELS, "-outlinewidth", (char *) NULL, (char *) NULL,
"1", Tk_Offset(TreeMarquee_, outlineWidthObj),
Tk_Offset(TreeMarquee_, outlineWidth), 0,
(ClientData) NULL, MARQ_CONF_COLORS},
{TK_OPTION_BOOLEAN, "-visible", (char *) NULL, (char *) NULL,
"0", -1, Tk_Offset(TreeMarquee_, visible),
0, (ClientData) NULL, MARQ_CONF_VISIBLE},
{TK_OPTION_END, (char *) NULL, (char *) NULL, (char *) NULL,
(char *) NULL, 0, -1, 0, 0, 0}
};
/*
*----------------------------------------------------------------------
*
* TreeMarquee_InitWidget --
*
* Perform marquee-related initialization when a new TreeCtrl is
* created.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* Memory is allocated.
*
*----------------------------------------------------------------------
*/
int
TreeMarquee_InitWidget(
TreeCtrl *tree /* Widget info. */
)
{
TreeMarquee marquee;
marquee = (TreeMarquee) ckalloc(sizeof(TreeMarquee_));
memset(marquee, '\0', sizeof(TreeMarquee_));
marquee->tree = tree;
marquee->optionTable = Tk_CreateOptionTable(tree->interp, optionSpecs);
if (Tk_InitOptions(tree->interp, (char *) marquee, marquee->optionTable,
tree->tkwin) != TCL_OK) {
WFREE(marquee, TreeMarquee_);
return TCL_ERROR;
}
tree->marquee = marquee;
return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_FreeWidget --
*
* Free marquee-related resources when a TreeCtrl is deleted.
*
* Results:
* None.
*
* Side effects:
* Memory is deallocated.
*
*----------------------------------------------------------------------
*/
void
TreeMarquee_FreeWidget(
TreeCtrl *tree /* Widget info. */
)
{
TreeMarquee marquee = tree->marquee;
Tk_FreeConfigOptions((char *) marquee, marquee->optionTable,
marquee->tree->tkwin);
WFREE(marquee, TreeMarquee_);
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_IsXOR --
*
* Return true if the marquee is being drawn with XOR.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int TreeMarquee_IsXOR(TreeMarquee marquee)
{
if (marquee->fillColorPtr || marquee->outlineColorPtr)
return FALSE;
#if defined(WIN32)
return FALSE; /* TRUE on XP, FALSE on Win7 (lots of flickering) */
#elif defined(MAC_OSX_TK)
return FALSE;
#else
return TRUE; /* X11 */
#endif
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_IsVisible --
*
* Return true if the marquee is being drawn.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
int TreeMarquee_IsVisible(TreeMarquee marquee)
{
return marquee->visible;
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_Display --
*
* Draw the selection rectangle if it is not already displayed and if
* it's -visible option is TRUE.
*
* Results:
* None.
*
* Side effects:
* Stuff is drawn.
*
*----------------------------------------------------------------------
*/
void
TreeMarquee_Display(
TreeMarquee marquee /* Marquee token. */
)
{
TreeCtrl *tree = marquee->tree;
if (!marquee->onScreen && marquee->visible) {
if (TreeMarquee_IsXOR(marquee)) {
marquee->sx = 0 - tree->xOrigin;
marquee->sy = 0 - tree->yOrigin;
TreeMarquee_DrawXOR(marquee, Tk_WindowId(tree->tkwin),
marquee->sx, marquee->sy);
} else {
marquee->sx = MIN(marquee->x1, marquee->x2) - tree->xOrigin;
marquee->sy = MIN(marquee->y1, marquee->y2) - tree->yOrigin;
marquee->sw = abs(marquee->x2 - marquee->x1) + 1;
marquee->sh = abs(marquee->y2 - marquee->y1) + 1;
/* Tree_InvalidateItemArea(tree, marquee->sx, marquee->sy,
marquee->sx + marquee->sw, marquee->sy + marquee->sh);*/
Tree_EventuallyRedraw(tree);
}
marquee->onScreen = TRUE;
}
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_Undisplay --
*
* Erase the selection rectangle if it is displayed.
*
* Results:
* None.
*
* Side effects:
* Stuff is drawn.
*
*----------------------------------------------------------------------
*/
void
TreeMarquee_Undisplay(
TreeMarquee marquee /* Marquee token. */
)
{
TreeCtrl *tree = marquee->tree;
if (marquee->onScreen) {
if (TreeMarquee_IsXOR(marquee)) {
TreeMarquee_DrawXOR(marquee, Tk_WindowId(tree->tkwin), marquee->sx, marquee->sy);
} else {
/* Tree_InvalidateItemArea(tree, marquee->sx, marquee->sy,
marquee->sx + marquee->sw, marquee->sy + marquee->sh);*/
Tree_EventuallyRedraw(tree);
}
marquee->onScreen = FALSE;
}
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_DrawXOR --
*
* Draw (or erase) the selection rectangle.
*
* Results:
* None.
*
* Side effects:
* Stuff is drawn (or erased, since this is XOR drawing).
*
*----------------------------------------------------------------------
*/
void
TreeMarquee_DrawXOR(
TreeMarquee marquee, /* Marquee token. */
Drawable drawable, /* Where to draw. */
int x1, int y1 /* Offset of canvas from top-left corner
* of the window. */
)
{
TreeCtrl *tree = marquee->tree;
int x, y, w, h;
DotState dotState;
x = MIN(marquee->x1, marquee->x2);
w = abs(marquee->x1 - marquee->x2) + 1;
y = MIN(marquee->y1, marquee->y2);
h = abs(marquee->y1 - marquee->y2) + 1;
TreeDotRect_Setup(tree, drawable, &dotState);
TreeDotRect_Draw(&dotState, x1 + x, y1 + y, w, h);
TreeDotRect_Restore(&dotState);
}
/*
*----------------------------------------------------------------------
*
* TreeMarquee_Draw --
*
* Draw the selection rectangle if it is visible.
*
* Results:
* None.
*
* Side effects:
* Stuff is drawn.
*
*----------------------------------------------------------------------
*/
void
TreeMarquee_Draw(
TreeMarquee marquee, /* Marquee token. */
TreeDrawable td) /* Where to draw. */
{
#if 1 /* Use XOR dotted rectangles where possible. */
TreeCtrl *tree = marquee->tree;
if (!marquee->visible)
return;
if (marquee->fillColorPtr || marquee->outlineColorPtr) {
TreeRectangle tr;
TreeClip clip;
tr.x = 0 - tree->xOrigin + MIN(marquee->x1, marquee->x2);
tr.width = abs(marquee->x1 - marquee->x2) + 1;
tr.y = 0 - tree->yOrigin + MIN(marquee->y1, marquee->y2);
tr.height = abs(marquee->y1 - marquee->y2) + 1;
clip.type = TREE_CLIP_AREA, clip.area = TREE_AREA_CONTENT;
if (marquee->fillColorPtr) {
TreeRectangle trBrush;
TreeColor_GetBrushBounds(tree, marquee->fillColorPtr, tr,
tree->xOrigin, tree->yOrigin,
(TreeColumn) NULL, (TreeItem) NULL, &trBrush);
TreeColor_FillRect(tree, td, &clip, marquee->fillColorPtr, trBrush, tr);
}
if (marquee->outlineColorPtr && marquee->outlineWidth > 0) {
TreeRectangle trBrush;
TreeColor_GetBrushBounds(tree, marquee->outlineColorPtr, tr,
tree->xOrigin, tree->yOrigin,
(TreeColumn) NULL, (TreeItem) NULL, &trBrush);
TreeColor_DrawRect(tree, td, &clip, marquee->outlineColorPtr,
trBrush, tr, marquee->outlineWidth, 0);
}
return;
}
/* Yes this is XOR drawing but we aren't erasing the previous
* marquee as when TreeMarquee_IsXOR() returns TRUE. */
TreeMarquee_DrawXOR(marquee, td.drawable,
0 - tree->xOrigin, 0 - tree->yOrigin);
#else /* */
TreeCtrl *tree = marquee->tree;
int x, y, w, h;
GC gc;
XGCValues gcValues;
unsigned long mask;
#ifdef WIN32
XPoint points[5];
XRectangle rect;
#endif
#if 0
XColor *colorPtr;
#endif
if (!marquee->visible)
return;
x = MIN(marquee->x1, marquee->x2);
w = abs(marquee->x1 - marquee->x2) + 1;
y = MIN(marquee->y1, marquee->y2);
h = abs(marquee->y1 - marquee->y2) + 1;
#if 0
colorPtr = Tk_GetColor(tree->interp, tree->tkwin, "gray50");
gc = Tk_GCForColor(colorPtr, Tk_WindowId(tree->tkwin));
XFillRectangle(tree->display, td.drawable, gc,
x - tree->drawableXOrigin, y - tree->drawableYOrigin,
w - 1, h - 1);
#else /* Stippled rectangles: BUG not clipped to contentbox. */
gcValues.stipple = Tk_GetBitmap(tree->interp, tree->tkwin, "gray50");
gcValues.fill_style = FillStippled;
mask = GCStipple|GCFillStyle;
gc = Tk_GetGC(tree->tkwin, mask, &gcValues);
#ifdef WIN32
/* XDrawRectangle ignores the stipple pattern. */
rect.x = x - tree->drawableXOrigin;
rect.y = y - tree->drawableYOrigin;
rect.width = w;
rect.height = h;
points[0].x = rect.x, points[0].y = rect.y;
points[1].x = rect.x + rect.width - 1, points[1].y = rect.y;
points[2].x = rect.x + rect.width - 1, points[2].y = rect.y + rect.height - 1;
points[3].x = rect.x, points[3].y = rect.y + rect.height - 1;
points[4] = points[0];
XDrawLines(tree->display, td.drawable, gc, points, 5, CoordModeOrigin);
#else
XDrawRectangle(tree->display, td.drawable, gc,
x - tree->drawableXOrigin, y - tree->drawableYOrigin,
w - 1, h - 1);
#endif
Tk_FreeGC(tree->display, gc);
#endif
#endif /* */
}
/*
*----------------------------------------------------------------------
*
* Marquee_Config --
*
* This procedure is called to process an objc/objv list to set
* configuration options for a Marquee.
*
* Results:
* The return value is a standard Tcl result. If TCL_ERROR is
* returned, then an error message is left in interp's result.
*
* Side effects:
* Configuration information, such as text string, colors, font,
* etc. get set for marquee; old resources get freed, if there
* were any. Display changes may occur.
*
*----------------------------------------------------------------------
*/
static int
Marquee_Config(
TreeMarquee marquee, /* Marquee record. */
int objc, /* Number of arguments. */
Tcl_Obj *CONST objv[] /* Argument values. */
)
{
TreeCtrl *tree = marquee->tree;
Tk_SavedOptions savedOptions;
int error;
Tcl_Obj *errorResult = NULL;
int mask;
for (error = 0; error <= 1; error++) {
if (error == 0) {
if (Tk_SetOptions(tree->interp, (char *) marquee, marquee->optionTable,
objc, objv, tree->tkwin, &savedOptions, &mask) != TCL_OK) {
mask = 0;
continue;
}
/* xxx */
Tk_FreeSavedOptions(&savedOptions);
break;
} else {
errorResult = Tcl_GetObjResult(tree->interp);
Tcl_IncrRefCount(errorResult);
Tk_RestoreSavedOptions(&savedOptions);
/* xxx */
Tcl_SetObjResult(tree->interp, errorResult);
Tcl_DecrRefCount(errorResult);
return TCL_ERROR;
}
}
if (mask & MARQ_CONF_VISIBLE) {
TreeMarquee_Undisplay(marquee);
TreeMarquee_Display(marquee);
}
return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* TreeMarqueeCmd --
*
* This procedure is invoked to process the [marquee] widget
* command. See the user documentation for details on what it
* does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
TreeMarqueeCmd(
ClientData clientData, /* Widget info. */
Tcl_Interp *interp, /* Current interpreter. */
int objc, /* Number of arguments. */
Tcl_Obj *CONST objv[] /* Argument values. */
)
{
TreeCtrl *tree = clientData;
TreeMarquee marquee = tree->marquee;
static CONST char *commandNames[] = { "anchor", "cget", "configure",
"coords", "corner", "identify", (char *) NULL };
enum { COMMAND_ANCHOR, COMMAND_CGET, COMMAND_CONFIGURE, COMMAND_COORDS,
COMMAND_CORNER, COMMAND_IDENTIFY };
int index;
if (objc < 3) {
Tcl_WrongNumArgs(interp, 2, objv, "command ?arg arg ...?");
return TCL_ERROR;
}
if (Tcl_GetIndexFromObj(interp, objv[2], commandNames, "command", 0,
&index) != TCL_OK) {
return TCL_ERROR;
}
switch (index) {
/* T marquee anchor ?x y?*/
case COMMAND_ANCHOR: {
int x, y;
if (objc != 3 && objc != 5) {
Tcl_WrongNumArgs(interp, 3, objv, "?x y?");
return TCL_ERROR;
}
if (objc == 3) {
FormatResult(interp, "%d %d", marquee->x1, marquee->y1);
break;
}
if (Tcl_GetIntFromObj(interp, objv[3], &x) != TCL_OK)
return TCL_ERROR;
if (Tcl_GetIntFromObj(interp, objv[4], &y) != TCL_OK)
return TCL_ERROR;
if ((x == marquee->x1) && (y == marquee->y1))
break;
TreeMarquee_Undisplay(tree->marquee);
marquee->x1 = x;
marquee->y1 = y;
TreeMarquee_Display(tree->marquee);
break;
}
/* T marquee cget option */
case COMMAND_CGET: {
Tcl_Obj *resultObjPtr;
if (objc != 4) {
Tcl_WrongNumArgs(interp, 3, objv, "option");
return TCL_ERROR;
}
resultObjPtr = Tk_GetOptionValue(interp, (char *) marquee,
marquee->optionTable, objv[3], tree->tkwin);
if (resultObjPtr == NULL)
return TCL_ERROR;
Tcl_SetObjResult(interp, resultObjPtr);
break;
}
/* T marquee configure ?option? ?value? ?option value ...? */
case COMMAND_CONFIGURE: {
Tcl_Obj *resultObjPtr;
if (objc < 3) {
Tcl_WrongNumArgs(interp, 3, objv, "?option? ?value?");
return TCL_ERROR;
}
if (objc <= 4) {
resultObjPtr = Tk_GetOptionInfo(interp, (char *) marquee,
marquee->optionTable,
(objc == 3) ? (Tcl_Obj *) NULL : objv[3],
tree->tkwin);
if (resultObjPtr == NULL)
return TCL_ERROR;
Tcl_SetObjResult(interp, resultObjPtr);
break;
}
return Marquee_Config(marquee, objc - 3, objv + 3);
}
/* T marquee coords ?x y x y? */
case COMMAND_COORDS: {
int x1, y1, x2, y2;
if (objc != 3 && objc != 7) {
Tcl_WrongNumArgs(interp, 3, objv, "?x y x y?");
return TCL_ERROR;
}
if (objc == 3) {
FormatResult(interp, "%d %d %d %d", marquee->x1, marquee->y1,
marquee->x2, marquee->y2);
break;
}
if (Tcl_GetIntFromObj(interp, objv[3], &x1) != TCL_OK)
return TCL_ERROR;
if (Tcl_GetIntFromObj(interp, objv[4], &y1) != TCL_OK)
return TCL_ERROR;
if (Tcl_GetIntFromObj(interp, objv[5], &x2) != TCL_OK)
return TCL_ERROR;
if (Tcl_GetIntFromObj(interp, objv[6], &y2) != TCL_OK)
return TCL_ERROR;
if (x1 == marquee->x1 && y1 == marquee->y1 &&
x2 == marquee->x2 && y2 == marquee->y2)
break;
TreeMarquee_Undisplay(tree->marquee);
marquee->x1 = x1;
marquee->y1 = y1;
marquee->x2 = x2;
marquee->y2 = y2;
TreeMarquee_Display(tree->marquee);
break;
}
/* T marquee corner ?x y?*/
case COMMAND_CORNER: {
int x, y;
if (objc != 3 && objc != 5) {
Tcl_WrongNumArgs(interp, 3, objv, "?x y?");
return TCL_ERROR;
}
if (objc == 3) {
FormatResult(interp, "%d %d", marquee->x2, marquee->y2);
break;
}
if (Tcl_GetIntFromObj(interp, objv[3], &x) != TCL_OK)
return TCL_ERROR;
if (Tcl_GetIntFromObj(interp, objv[4], &y) != TCL_OK)
return TCL_ERROR;
if (x == marquee->x2 && y == marquee->y2)
break;
TreeMarquee_Undisplay(tree->marquee);
marquee->x2 = x;
marquee->y2 = y;
TreeMarquee_Display(tree->marquee);
break;
}
/* T marquee identify */
case COMMAND_IDENTIFY: {
int x1, y1, x2, y2, n = 0;
int totalWidth = Tree_CanvasWidth(tree);
int totalHeight = Tree_CanvasHeight(tree);
TreeItemList items;
Tcl_Obj *listObj;
if (objc != 3) {
Tcl_WrongNumArgs(interp, 3, objv, (char *) NULL);
return TCL_ERROR;
}
x1 = MIN(marquee->x1, marquee->x2);
x2 = MAX(marquee->x1, marquee->x2);
y1 = MIN(marquee->y1, marquee->y2);
y2 = MAX(marquee->y1, marquee->y2);
if (x2 <= 0)
break;
if (x1 >= totalWidth)
break;
if (y2 <= 0)
break;
if (y1 >= totalHeight)
break;
if (x1 < 0)
x1 = 0;
if (x2 > totalWidth)
x2 = totalWidth;
if (y1 < 0)
y1 = 0;
if (y2 > totalHeight)
y2 = totalHeight;
Tree_ItemsInArea(tree, &items, x1, y1, x2, y2);
if (TreeItemList_Count(&items) == 0) {
TreeItemList_Free(&items);
break;
}
listObj = Tcl_NewListObj(0, NULL);
for (n = 0; n < TreeItemList_Count(&items); n++) {
Tcl_Obj *subListObj = Tcl_NewListObj(0, NULL);
TreeItem item = TreeItemList_Nth(&items, n);
Tcl_ListObjAppendElement(interp, subListObj,
TreeItem_ToObj(tree, item));
TreeItem_Identify2(tree, item, x1, y1, x2, y2, subListObj);
Tcl_ListObjAppendElement(interp, listObj, subListObj);
}
TreeItemList_Free(&items);
Tcl_SetObjResult(interp, listObj);
break;
}
}
return TCL_OK;
}
| 7,361 |
3,799 | <filename>car/app/app/src/main/java/androidx/car/app/annotations/CarProtocol.java<gh_stars>1000+
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.car.app.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Any class annotated with this marker is part of the protocol layer for remote host rendering.
* Changes to these classes must take forward and backward compatibility into account.
*
* <p>Newer apps should be able to work with older hosts, if the functionality they use can be
* emulated using older APIs or if they don't use newer features. The {@link RequiresCarApi}
* annotation provides details on the versioning required for compatibility of classes and methods.
*/
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.TYPE, ElementType.PARAMETER})
public @interface CarProtocol {
}
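// Hypothetical usage sketch (the class below is illustrative, not part of this file):
//
//   @CarProtocol
//   public final class MyTemplate {
//       // Fields here cross the app/host boundary, so any change must stay
//       // forward- and backward-compatible.
//   }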
| 400 |
1,442 | #include <quiz.h>
#include "execution_environment.h"
QUIZ_CASE(python_math) {
TestExecutionEnvironment env = init_environement();
assert_command_execution_succeeds(env, "from math import *");
assert_command_execution_succeeds(env, "e", "2.718281828459045\n");
assert_command_execution_succeeds(env, "gamma(3)", "2.0\n");
deinit_environment();
}
QUIZ_CASE(python_cmath) {
TestExecutionEnvironment env = init_environement();
assert_command_execution_succeeds(env, "from cmath import *");
assert_command_execution_succeeds(env, "cos(0)", "(1+-0j)\n");
deinit_environment();
}
| 230 |
6,270 | <reponame>speakerbug/voterbot<filename>node_modules/claudia-bot-builder/node_modules/aws-sdk/.changes/2.187.0.json
[
{
"type": "feature",
"category": "AlexaForBusiness",
"description": "Supports new field for DeviceStatusInfo which provides details about the DeviceStatus following a DeviceSync operation."
},
{
"type": "feature",
"category": "CodeBuild",
"description": "Adding support for Shallow Clone and GitHub Enterprise in AWS CodeBuild."
},
{
"type": "feature",
"category": "GuardDuty",
"description": "Added the missing AccessKeyDetails object to the resource shape."
},
{
"type": "feature",
"category": "Lambda",
"description": "AWS Lambda now supports Revision ID on your function versions and aliases, to track and apply conditional updates when you are updating your function version or alias resources."
},
{
"type": "feature",
"category": "MetadataService",
"description": "Allow environmental disabling of the AWS.MetadataService client by setting the AWS_EC2_METADATA_DISABLED environment variable to a truthy value."
}
] | 422 |
1,144 | <reponame>dram/metasfresh<gh_stars>1000+
package org.eevolution.process;
import java.util.concurrent.atomic.AtomicInteger;
import de.metas.product.IProductBL;
import org.adempiere.ad.dao.IQueryBL;
import org.adempiere.ad.dao.IQueryBuilder;
import org.adempiere.ad.trx.api.ITrxManager;
import org.adempiere.exceptions.AdempiereException;
import org.adempiere.model.InterfaceWrapperHelper;
import org.compiere.model.I_M_Product;
import org.eevolution.api.IProductBOMBL;
import org.eevolution.api.IProductBOMDAO;
import org.eevolution.api.ProductBOMId;
import org.eevolution.model.I_PP_Product_BOM;
import org.eevolution.model.I_PP_Product_BOMLine;
import de.metas.process.IProcessPrecondition;
import de.metas.process.IProcessPreconditionsContext;
import de.metas.process.JavaProcess;
import de.metas.process.Param;
import de.metas.process.ProcessPreconditionsResolution;
import de.metas.process.RunOutOfTrx;
import de.metas.product.ProductId;
import de.metas.util.Services;
import de.metas.util.StringUtils;
import lombok.NonNull;
/**
* Title: Check BOM Structure (free of cycles) Description: Tree cannot contain BOMs which are already referenced
*
* @author <NAME> (tspc)
* @author <NAME>, SC ARHIPAC SERVICE SRL
*/
public class PP_Product_BOM_Check extends JavaProcess implements IProcessPrecondition
{
private final transient IProductBOMBL productBOMBL = Services.get(IProductBOMBL.class);
private final transient IProductBOMDAO productBOMDAO = Services.get(IProductBOMDAO.class);
private final transient IProductBL productBL = Services.get(IProductBL.class);
private final transient ITrxManager trxManager = Services.get(ITrxManager.class);
@Param(parameterName = I_M_Product.COLUMNNAME_M_Product_Category_ID, mandatory = false)
private int p_M_Product_Category_ID;
@Override
public ProcessPreconditionsResolution checkPreconditionsApplicable(final IProcessPreconditionsContext context)
{
if (!(I_M_Product.Table_Name.equals(context.getTableName()) || I_PP_Product_BOM.Table_Name.equals(context.getTableName())))
{
return ProcessPreconditionsResolution.reject();
}
if (context.isMoreThanOneSelected())
{
return ProcessPreconditionsResolution.reject();
}
return ProcessPreconditionsResolution.accept();
}
@Override
@RunOutOfTrx
protected String doIt()
{
if (p_M_Product_Category_ID > 0)
{
final IQueryBuilder<I_M_Product> queryBuilder = Services.get(IQueryBL.class)
.createQueryBuilder(I_M_Product.class)
.addEqualsFilter(I_M_Product.COLUMNNAME_IsBOM, true)
.addEqualsFilter(I_M_Product.COLUMNNAME_M_Product_Category_ID, p_M_Product_Category_ID)
.orderBy()
.addColumn(I_M_Product.COLUMNNAME_Name)
.endOrderBy();
final AtomicInteger counter = new AtomicInteger(0);
queryBuilder.create()
.stream()
.forEach(product -> {
try
{
validateProduct(product);
counter.incrementAndGet();
}
catch (final Exception ex)
{
log.warn("Product is not valid: {}", product, ex);
}
});
return "#" + counter.get();
}
else
{
final I_M_Product product = InterfaceWrapperHelper.load(getM_Product_ID(), I_M_Product.class);
validateProduct(product);
return MSG_OK;
}
}
private int getM_Product_ID()
{
final String tableName = getTableName();
if (I_M_Product.Table_Name.equals(tableName))
{
return getRecord_ID();
}
else if (I_PP_Product_BOM.Table_Name.equals(tableName))
{
final ProductBOMId bomId = ProductBOMId.ofRepoId(getRecord_ID());
final I_PP_Product_BOM bom = productBOMDAO.getById(bomId);
return bom.getM_Product_ID();
}
else
{
throw new AdempiereException(StringUtils.formatMessage("Table {} has not yet been implemented to support BOM validation.", tableName));
}
}
private void validateProduct(@NonNull final I_M_Product product)
{
try
{
trxManager.runInNewTrx(() -> checkProductById(product));
}
catch (final Exception ex)
{
product.setIsVerified(false);
InterfaceWrapperHelper.save(product);
throw AdempiereException.wrapIfNeeded(ex);
}
}
private void checkProductById(@NonNull final I_M_Product product)
{
if (!product.isBOM())
{
log.info("Product is not a BOM");
// No BOM - should not happen, but no problem
return;
}
// Check this level
updateProductLLCAndMarkAsVerified(product);
// Get Default BOM from this product
final I_PP_Product_BOM bom = productBOMDAO.getDefaultBOM(product).orElse(null);
if (bom == null)
{
throw new AdempiereException("No Default BOM found for " + product.getValue() + "_" + product.getName());
}
// Check All BOM Lines
for (final I_PP_Product_BOMLine tbomline : productBOMDAO.retrieveLines(bom))
{
final ProductId productId = ProductId.ofRepoId(tbomline.getM_Product_ID());
final I_M_Product bomLineProduct = productBL.getById(productId);
updateProductLLCAndMarkAsVerified(bomLineProduct);
}
}
private void updateProductLLCAndMarkAsVerified(final I_M_Product product)
{
// NOTE: when LLC is calculated, the BOM cycles are also checked
final int lowLevelCode = productBOMBL.calculateProductLowestLevel(ProductId.ofRepoId(product.getM_Product_ID()));
product.setLowLevel(lowLevelCode);
product.setIsVerified(true);
InterfaceWrapperHelper.save(product);
}
}
| 1,993 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_l10ntools.hxx"
#include <stdio.h>
#include <tools/string.hxx>
#include <tools/fsys.hxx>
// local includes
#include "export.hxx"
#include "cfgmerge.hxx"
#include "tokens.h"
#include "utf8conv.hxx"
extern "C" { int yyerror( char * ); }
extern "C" { int YYWarning( char * ); }
// defines to parse command line
#define STATE_NON 0x0001
#define STATE_INPUT 0x0002
#define STATE_OUTPUT 0x0003
#define STATE_PRJ 0x0004
#define STATE_ROOT 0x0005
#define STATE_MERGESRC 0x0006
#define STATE_ERRORLOG 0x0007
#define STATE_UTF8 0x0008
#define STATE_LANGUAGES 0X0009
#define STATE_ISOCODE99 0x000A
#define STATE_FORCE 0x000B
// set of global variables
sal_Bool bEnableExport;
sal_Bool bMergeMode;
sal_Bool bErrorLog;
sal_Bool bForce;
sal_Bool bUTF8;
ByteString sPrj;
ByteString sPrjRoot;
ByteString sInputFileName;
ByteString sActFileName;
ByteString sFullEntry;
ByteString sOutputFile;
ByteString sMergeSrc;
String sUsedTempFile;
CfgParser *pParser;
extern "C" {
// the whole interface to lexer is in this extern "C" section
/*****************************************************************************/
extern char *GetOutputFile( int argc, char* argv[])
/*****************************************************************************/
{
bEnableExport = sal_False;
bMergeMode = sal_False;
bErrorLog = sal_True;
bForce = sal_False;
bUTF8 = sal_True;
sPrj = "";
sPrjRoot = "";
sInputFileName = "";
sActFileName = "";
sal_uInt16 nState = STATE_NON;
sal_Bool bInput = sal_False;
// parse command line
for( int i = 1; i < argc; i++ ) {
ByteString sSwitch( argv[ i ] );
sSwitch.ToUpperAscii();
if ( sSwitch == "-I" ) {
nState = STATE_INPUT; // next token specifies source file
}
else if ( sSwitch == "-O" ) {
nState = STATE_OUTPUT; // next token specifies the dest file
}
else if ( sSwitch == "-P" ) {
nState = STATE_PRJ; // next token specifies the cur. project
}
else if ( sSwitch == "-R" ) {
nState = STATE_ROOT; // next token specifies path to project root
}
else if ( sSwitch == "-M" ) {
nState = STATE_MERGESRC; // next token specifies the merge database
}
else if ( sSwitch == "-E" ) {
nState = STATE_ERRORLOG;
bErrorLog = sal_False;
}
else if ( sSwitch == "-UTF8" ) {
nState = STATE_UTF8;
bUTF8 = sal_True;
}
else if ( sSwitch == "-NOUTF8" ) {
nState = STATE_UTF8;
bUTF8 = sal_False;
}
else if ( sSwitch == "-F" ) {
nState = STATE_FORCE;
bForce = sal_True;
}
else if ( sSwitch == "-L" ) {
nState = STATE_LANGUAGES;
}
else if ( sSwitch.ToUpperAscii() == "-ISO99" ) {
nState = STATE_ISOCODE99;
}
else {
switch ( nState ) {
case STATE_NON: {
return NULL; // no valid command line
}
case STATE_INPUT: {
sInputFileName = argv[ i ];
bInput = sal_True; // source file found
}
break;
case STATE_OUTPUT: {
sOutputFile = argv[ i ]; // the dest. file
}
break;
case STATE_PRJ: {
sPrj = ByteString( argv[ i ]);
// sPrj.ToLowerAscii(); // the project
}
break;
case STATE_ROOT: {
sPrjRoot = ByteString( argv[ i ]); // path to project root
}
break;
case STATE_MERGESRC: {
sMergeSrc = ByteString( argv[ i ]);
bMergeMode = sal_True; // activate merge mode because a merge database was found
}
break;
case STATE_LANGUAGES: {
Export::sLanguages = ByteString( argv[ i ]);
}
break;
}
}
}
if ( bInput ) {
// command line is valid
bEnableExport = sal_True;
char *pReturn = new char[ sOutputFile.Len() + 1 ];
strcpy( pReturn, sOutputFile.GetBuffer()); // #100211# - checked
return pReturn;
}
// command line is not valid
return NULL;
}
/*****************************************************************************/
int InitCfgExport( char *pOutput , char* pFilename )
/*****************************************************************************/
{
// instantiate Export
ByteString sOutput( pOutput );
ByteString sFilename( pFilename );
Export::InitLanguages();
if ( bMergeMode )
pParser = new CfgMerge( sMergeSrc, sOutputFile, sFilename );
else if ( sOutputFile.Len())
pParser = new CfgExport( sOutputFile, sPrj, sActFileName );
return 1;
}
/*****************************************************************************/
int EndCfgExport()
/*****************************************************************************/
{
delete pParser;
return 1;
}
void removeTempFile(){
if( !sUsedTempFile.EqualsIgnoreCaseAscii( "" ) ){
DirEntry aTempFile( sUsedTempFile );
aTempFile.Kill();
}
}
extern const char* getFilename()
{
return sInputFileName.GetBuffer();
}
/*****************************************************************************/
extern FILE *GetCfgFile()
/*****************************************************************************/
{
FILE *pFile = 0;
// look for valid filename
if ( sInputFileName.Len()) {
if( Export::fileHasUTF8ByteOrderMarker( sInputFileName ) ){
DirEntry aTempFile = Export::GetTempFile();
DirEntry aSourceFile( String( sInputFileName , RTL_TEXTENCODING_ASCII_US ) );
aSourceFile.CopyTo( aTempFile , FSYS_ACTION_COPYFILE );
String sTempFile = aTempFile.GetFull();
Export::RemoveUTF8ByteOrderMarkerFromFile( ByteString( sTempFile , RTL_TEXTENCODING_ASCII_US ) );
pFile = fopen( ByteString( sTempFile , RTL_TEXTENCODING_ASCII_US ).GetBuffer(), "r" );
sUsedTempFile = sTempFile;
}else{
// able to open file?
pFile = fopen( sInputFileName.GetBuffer(), "r" );
sUsedTempFile = String::CreateFromAscii("");
}
if ( !pFile ){
fprintf( stderr, "Error: Could not open file %s\n",
sInputFileName.GetBuffer());
exit( -13 );
}
else {
// this is a valid file which can be opened, so
// create path to project root
DirEntry aEntry( String( sInputFileName, RTL_TEXTENCODING_ASCII_US ));
aEntry.ToAbs();
sFullEntry= ByteString( aEntry.GetFull(), RTL_TEXTENCODING_ASCII_US );
aEntry += DirEntry( String( "..", RTL_TEXTENCODING_ASCII_US ));
aEntry += DirEntry( sPrjRoot );
ByteString sPrjEntry( aEntry.GetFull(), RTL_TEXTENCODING_ASCII_US );
// create file name, beginning with project root
// (e.g.: source\ui\src\menue.src)
// printf("sFullEntry = %s\n",sFullEntry.GetBuffer());
sActFileName = sFullEntry.Copy( sPrjEntry.Len() + 1 );
// printf("sActFileName = %s\n",sActFileName.GetBuffer());
sActFileName.SearchAndReplaceAll( "/", "\\" );
return pFile;
}
}
// this means the file could not be opened
return NULL;
}
/*****************************************************************************/
int WorkOnTokenSet( int nTyp, char *pTokenText )
/*****************************************************************************/
{
pParser->Execute( nTyp, pTokenText );
return 1;
}
/*****************************************************************************/
int SetError()
/*****************************************************************************/
{
return 1;
}
/*****************************************************************************/
int GetError()
/*****************************************************************************/
{
return 0;
}
}
//
// class CfgStackData
//
CfgStackData* CfgStack::Push( const ByteString &rTag, const ByteString &rId )
{
CfgStackData *pD = new CfgStackData( rTag, rId );
Insert( pD, LIST_APPEND );
return pD;
}
//
// class CfgStack
//
/*****************************************************************************/
CfgStack::~CfgStack()
/*****************************************************************************/
{
for ( sal_uLong i = 0; i < Count(); i++ )
delete GetObject( i );
}
/*****************************************************************************/
ByteString CfgStack::GetAccessPath( sal_uLong nPos )
/*****************************************************************************/
{
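    /* Illustrative example: a stack holding the identifiers "Office", "Common",
       "Menu" yields the access path "Office.Common.Menu" (joined by '.'). */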
if ( nPos == LIST_APPEND )
nPos = Count() - 1;
ByteString sReturn;
for ( sal_uLong i = 0; i <= nPos; i++ ) {
if ( i )
sReturn += ".";
sReturn += GetStackData( i )->GetIdentifier();
}
return sReturn;
}
/*****************************************************************************/
CfgStackData *CfgStack::GetStackData( sal_uLong nPos )
/*****************************************************************************/
{
if ( nPos == LIST_APPEND )
nPos = Count() - 1;
return GetObject( nPos );
}
//
// class CfgParser
//
/*****************************************************************************/
CfgParser::CfgParser()
/*****************************************************************************/
: pStackData( NULL ),
bLocalize( sal_False )
{
}
/*****************************************************************************/
CfgParser::~CfgParser()
/*****************************************************************************/
{
}
/*****************************************************************************/
sal_Bool CfgParser::IsTokenClosed( const ByteString &rToken )
/*****************************************************************************/
{
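    /* e.g. "<value/>" is closed (the '/' precedes the final '>'), "<value>" is not. */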
return rToken.GetChar( rToken.Len() - 2 ) == '/';
}
/*****************************************************************************/
void CfgParser::AddText(
ByteString &rText,
const ByteString &rIsoLang,
const ByteString &rResTyp
)
/*****************************************************************************/
{
sal_uInt16 nTextLen = 0;
while ( rText.Len() != nTextLen ) {
nTextLen = rText.Len();
rText.SearchAndReplaceAll( "\n", " " );
rText.SearchAndReplaceAll( "\r", " " );
rText.SearchAndReplaceAll( "\t", " " );
rText.SearchAndReplaceAll( " ", " " );
}
pStackData->sResTyp = rResTyp;
WorkOnText( rText, rIsoLang );
pStackData->sText[ rIsoLang ] = rText;
}
/*****************************************************************************/
void CfgParser::WorkOnRessourceEnd()
/*****************************************************************************/
{
}
/*****************************************************************************/
int CfgParser::ExecuteAnalyzedToken( int nToken, char *pToken )
/*****************************************************************************/
{
ByteString sToken( pToken );
if ( sToken == " " || sToken == "\t" )
sLastWhitespace += sToken;
ByteString sTokenName;
ByteString sTokenId;
sal_Bool bOutput = sal_True;
switch ( nToken ) {
case CFG_TOKEN_PACKAGE:
case CFG_TOKEN_COMPONENT:
case CFG_TOKEN_TEMPLATE:
case CFG_TOKEN_CONFIGNAME:
case CFG_TOKEN_OORNAME:
case CFG_TOKEN_OORVALUE:
case CFG_TAG:
case ANYTOKEN:
case CFG_TEXT_START:
{
sTokenName = sToken.GetToken( 1, '<' ).GetToken( 0, '>' ).GetToken( 0, ' ' );
if ( !IsTokenClosed( sToken )) {
ByteString sSearch;
switch ( nToken ) {
case CFG_TOKEN_PACKAGE:
sSearch = "package-id=";
break;
case CFG_TOKEN_COMPONENT:
sSearch = "component-id=";
break;
case CFG_TOKEN_TEMPLATE:
sSearch = "template-id=";
break;
case CFG_TOKEN_CONFIGNAME:
sSearch = "cfg:name=";
break;
case CFG_TOKEN_OORNAME:
sSearch = "oor:name=";
bLocalize = sal_True;
break;
case CFG_TOKEN_OORVALUE:
sSearch = "oor:value=";
break;
case CFG_TEXT_START: {
if ( sCurrentResTyp != sTokenName ) {
WorkOnRessourceEnd();
ByteString sCur;
for( unsigned int n = 0; n < aLanguages.size(); n++ ){
sCur = aLanguages[ n ];
pStackData->sText[ sCur ] = ByteString("");
}
}
sCurrentResTyp = sTokenName;
ByteString sTemp = sToken.Copy( sToken.Search( "xml:lang=" ));
sCurrentIsoLang = sTemp.GetToken( 1, '\"' ).GetToken( 0, '\"' );
if ( sCurrentIsoLang == NO_TRANSLATE_ISO )
bLocalize = sal_False;
pStackData->sTextTag = sToken;
sCurrentText = "";
}
break;
}
if ( sSearch.Len()) {
ByteString sTemp = sToken.Copy( sToken.Search( sSearch ));
sTokenId = sTemp.GetToken( 1, '\"' ).GetToken( 0, '\"' );
}
pStackData = aStack.Push( sTokenName, sTokenId );
if ( sSearch == "cfg:name=" ) {
ByteString sTemp( sToken );
sTemp.ToUpperAscii();
bLocalize = (( sTemp.Search( "CFG:TYPE=\"STRING\"" ) != STRING_NOTFOUND ) &&
( sTemp.Search( "CFG:LOCALIZED=\"sal_True\"" ) != STRING_NOTFOUND ));
}
}
else if ( sTokenName == "label" ) {
if ( sCurrentResTyp != sTokenName ) {
WorkOnRessourceEnd();
ByteString sCur;
for( unsigned int n = 0; n < aLanguages.size(); n++ ){
sCur = aLanguages[ n ];
pStackData->sText[ sCur ] = ByteString("");
}
}
sCurrentResTyp = sTokenName;
}
}
break;
case CFG_CLOSETAG:
sTokenName = sToken.GetToken( 1, '/' ).GetToken( 0, '>' ).GetToken( 0, ' ' );
if ( aStack.GetStackData() && ( aStack.GetStackData()->GetTagType() == sTokenName )) {
if ( ! sCurrentText.Len())
WorkOnRessourceEnd();
aStack.Pop();
pStackData = aStack.GetStackData();
}
else {
ByteString sError( "Missplaced close tag: " );
ByteString sInFile(" in file ");
sError += sToken;
sError += sInFile;
sError += sFullEntry;
Error( sError );
exit ( 13 );
}
break;
case CFG_TEXTCHAR:
sCurrentText += sToken;
bOutput = sal_False;
break;
case CFG_TOKEN_NO_TRANSLATE:
bLocalize = sal_False;
break;
}
if ( sCurrentText.Len() && nToken != CFG_TEXTCHAR ) {
AddText( sCurrentText, sCurrentIsoLang, sCurrentResTyp );
Output( sCurrentText );
sCurrentText = "";
pStackData->sEndTextTag = sToken;
}
if ( bOutput )
Output( sToken );
if ( sToken != " " && sToken != "\t" )
sLastWhitespace = "";
return 1;
}
/*****************************************************************************/
void CfgExport::Output( const ByteString& rOutput )
/*****************************************************************************/
{
// Dummy operation to suppress warnings caused by poor class design
ByteString a( rOutput );
}
/*****************************************************************************/
int CfgParser::Execute( int nToken, char * pToken )
/*****************************************************************************/
{
ByteString sToken( pToken );
switch ( nToken ) {
case CFG_TAG:
if ( sToken.Search( "package-id=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_PACKAGE, pToken );
else if ( sToken.Search( "component-id=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_COMPONENT, pToken );
else if ( sToken.Search( "template-id=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_TEMPLATE, pToken );
else if ( sToken.Search( "cfg:name=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_OORNAME, pToken );
else if ( sToken.Search( "oor:name=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_OORNAME, pToken );
else if ( sToken.Search( "oor:value=" ) != STRING_NOTFOUND )
return ExecuteAnalyzedToken( CFG_TOKEN_OORVALUE, pToken );
break;
}
return ExecuteAnalyzedToken( nToken, pToken );
}
/*****************************************************************************/
void CfgParser::Error( const ByteString &rError )
/*****************************************************************************/
{
// ByteString sError( rError );
// sError.Append("Error: In file ");
// sError.Append( sActFileName );
yyerror(( char * ) rError.GetBuffer());
}
//
// class CfgOutputParser
//
/*****************************************************************************/
CfgOutputParser::CfgOutputParser( const ByteString &rOutputFile )
/*****************************************************************************/
{
pOutputStream =
new SvFileStream(
String( rOutputFile, RTL_TEXTENCODING_ASCII_US ),
STREAM_STD_WRITE | STREAM_TRUNC
);
pOutputStream->SetStreamCharSet( RTL_TEXTENCODING_UTF8 );
if ( !pOutputStream->IsOpen()) {
ByteString sError( "ERROR: Unable to open output file: " );
sError += rOutputFile;
Error( sError );
delete pOutputStream;
pOutputStream = NULL;
exit( -13 );
}
}
/*****************************************************************************/
CfgOutputParser::~CfgOutputParser()
/*****************************************************************************/
{
if ( pOutputStream ) {
pOutputStream->Close();
delete pOutputStream;
}
}
//
// class CfgExport
//
/*****************************************************************************/
CfgExport::CfgExport(
const ByteString &rOutputFile,
const ByteString &rProject,
const ByteString &rFilePath
)
/*****************************************************************************/
: CfgOutputParser( rOutputFile ),
sPrj( rProject ),
sPath( rFilePath )
{
Export::InitLanguages( false );
aLanguages = Export::GetLanguages();
}
/*****************************************************************************/
CfgExport::~CfgExport()
/*****************************************************************************/
{
}
/*****************************************************************************/
void CfgExport::WorkOnRessourceEnd()
/*****************************************************************************/
{
if ( pOutputStream && bLocalize ) {
if (( pStackData->sText[ ByteString("en-US") ].Len()
) ||
( bForce &&
( pStackData->sText[ ByteString("de") ].Len() ||
pStackData->sText[ ByteString("en-US") ].Len() )))
{
ByteString sFallback = pStackData->sText[ ByteString("en-US") ];
//if ( pStackData->sText[ ByteString("en-US") ].Len())
// sFallback = pStackData->sText[ ByteString("en-US") ];
ByteString sLocalId = pStackData->sIdentifier;
ByteString sGroupId;
if ( aStack.Count() == 1 ) {
sGroupId = sLocalId;
sLocalId = "";
}
else {
sGroupId = aStack.GetAccessPath( aStack.Count() - 2 );
}
ByteString sTimeStamp( Export::GetTimeStamp());
ByteString sCur;
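        /* The line assembled below follows the tab-separated SDF-style layout, roughly:
           prj \t path \t 0 \t restyp \t gid \t lid \t\t\t 0 \t lang \t text \t\t\t\t timestamp
           (an informal summary of the fields appended here, not a formal spec). */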
for( unsigned int n = 0; n < aLanguages.size(); n++ ){
sCur = aLanguages[ n ];
ByteString sText = pStackData->sText[ sCur ];
if ( !sText.Len())
sText = sFallback;
Export::UnquotHTML( sText );
ByteString sOutput( sPrj ); sOutput += "\t";
sOutput += sPath;
sOutput += "\t0\t";
sOutput += pStackData->sResTyp; sOutput += "\t";
sOutput += sGroupId; sOutput += "\t";
sOutput += sLocalId; sOutput += "\t\t\t0\t";
sOutput += sCur;
sOutput += "\t";
sOutput += sText; sOutput += "\t\t\t\t";
sOutput += sTimeStamp;
//if( !sCur.EqualsIgnoreCaseAscii("de") ||( sCur.EqualsIgnoreCaseAscii("de") && !Export::isMergingGermanAllowed( sPrj ) ) )
pOutputStream->WriteLine( sOutput );
}
}
}
}
/*****************************************************************************/
void CfgExport::WorkOnText(
ByteString &rText,
const ByteString &rIsoLang
)
/*****************************************************************************/
{
if( rIsoLang.Len() ) Export::UnquotHTML( rText );
}
//
// class CfgMerge
//
/*****************************************************************************/
CfgMerge::CfgMerge(
const ByteString &rMergeSource, const ByteString &rOutputFile,
ByteString &rFilename )
/*****************************************************************************/
: CfgOutputParser( rOutputFile ),
pMergeDataFile( NULL ),
pResData( NULL ),
bGerman( sal_False ),
sFilename( rFilename ),
bEnglish( sal_False )
{
if ( rMergeSource.Len()){
pMergeDataFile = new MergeDataFile(
rMergeSource, sInputFileName , bErrorLog, RTL_TEXTENCODING_MS_1252, true );
if( Export::sLanguages.EqualsIgnoreCaseAscii("ALL") ){
Export::SetLanguages( pMergeDataFile->GetLanguages() );
aLanguages = pMergeDataFile->GetLanguages();
}
else aLanguages = Export::GetLanguages();
}else
aLanguages = Export::GetLanguages();
}
/*****************************************************************************/
CfgMerge::~CfgMerge()
/*****************************************************************************/
{
delete pMergeDataFile;
delete pResData;
}
/*****************************************************************************/
void CfgMerge::WorkOnText(
ByteString &rText,
const ByteString& nLangIndex
)
/*****************************************************************************/
{
if ( pMergeDataFile && bLocalize ) {
if ( !pResData ) {
ByteString sLocalId = pStackData->sIdentifier;
ByteString sGroupId;
if ( aStack.Count() == 1 ) {
sGroupId = sLocalId;
sLocalId = "";
}
else {
sGroupId = aStack.GetAccessPath( aStack.Count() - 2 );
}
ByteString sPlatform( "" );
pResData = new ResData( sPlatform, sGroupId , sFilename );
pResData->sId = sLocalId;
pResData->sResTyp = pStackData->sResTyp;
}
//if ( nLangIndex.EqualsIgnoreCaseAscii("de") )
// bGerman = sal_True;
if (( nLangIndex.EqualsIgnoreCaseAscii("en-US") ))
bEnglish = sal_True;
PFormEntrys *pEntrys = pMergeDataFile->GetPFormEntrysCaseSensitive( pResData );
if ( pEntrys ) {
ByteString sContent;
pEntrys->GetText( sContent, STRING_TYP_TEXT, nLangIndex );
if ( Export::isAllowed( nLangIndex ) &&
( sContent != "-" ) && ( sContent.Len()))
{
#ifdef MERGE_SOURCE_LANGUAGES
if( nLangIndex.EqualsIgnoreCaseAscii("de") || nLangIndex.EqualsIgnoreCaseAscii("en-US") )
rText = sContent;
#endif
Export::QuotHTML( rText );
}
}
}
}
/*****************************************************************************/
void CfgMerge::Output( const ByteString& rOutput )
/*****************************************************************************/
{
if ( pOutputStream )
pOutputStream->Write( rOutput.GetBuffer(), rOutput.Len());
}
sal_uLong CfgStack::Push( CfgStackData *pStackData )
{
Insert( pStackData, LIST_APPEND );
return Count() - 1;
}
/*****************************************************************************/
void CfgMerge::WorkOnRessourceEnd()
/*****************************************************************************/
{
if ( pMergeDataFile && pResData && bLocalize && (( bEnglish ) || bForce )) {
PFormEntrys *pEntrys = pMergeDataFile->GetPFormEntrysCaseSensitive( pResData );
if ( pEntrys ) {
ByteString sCur;
for( unsigned int n = 0; n < aLanguages.size(); n++ ){
sCur = aLanguages[ n ];
ByteString sContent;
pEntrys->GetText( sContent, STRING_TYP_TEXT, sCur , sal_True );
if (
// (!sCur.EqualsIgnoreCaseAscii("de") ) &&
( !sCur.EqualsIgnoreCaseAscii("en-US") ) &&
( sContent != "-" ) && ( sContent.Len()))
{
ByteString sText = sContent;
Export::QuotHTML( sText );
ByteString sAdditionalLine( "\t" );
ByteString sTextTag = pStackData->sTextTag;
ByteString sTemp = sTextTag.Copy( sTextTag.Search( "xml:lang=" ));
ByteString sSearch = sTemp.GetToken( 0, '\"' );
sSearch += "\"";
sSearch += sTemp.GetToken( 1, '\"' );
sSearch += "\"";
ByteString sReplace = sTemp.GetToken( 0, '\"' );
sReplace += "\"";
sReplace += sCur;
sReplace += "\"";
sTextTag.SearchAndReplace( sSearch, sReplace );
sAdditionalLine += sTextTag;
sAdditionalLine += sText;
sAdditionalLine += pStackData->sEndTextTag;
sAdditionalLine += "\n";
sAdditionalLine += sLastWhitespace;
Output( sAdditionalLine );
}
}
}
}
delete pResData;
pResData = NULL;
bGerman = sal_False;
bEnglish = sal_False;
}
| 9,621 |
1,224 | # Gravitational Attraction (3D)
# <NAME> <http://www.shiffman.net>
# A class for an orbiting Planet
class Planet(object):
# Basic physics model (location, velocity, acceleration, mass)
def __init__(self, m, x, y, z):
self.mass = m
self.location = PVector(x, y, z)
self.velocity = PVector(1, 0) # Arbitrary starting velocity
self.acceleration = PVector(0, 0)
# Newton's 2nd Law (F = M*A) applied
def applyForce(self, force):
f = PVector.div(force, self.mass)
self.acceleration.add(f)
# Our motion algorithm (aka Euler Integration)
def update(self):
# Velocity changes according to acceleration.
self.velocity.add(self.acceleration)
self.location.add(self.velocity) # Location changes according to velocity.
self.acceleration.mult(0)
# Draw the Planet.
def display(self):
noStroke()
fill(255)
with pushMatrix():
translate(self.location.x, self.location.y, self.location.z)
sphere(self.mass * 8)
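# Minimal usage sketch (hedged: the original sketch computes the attraction force
# from an attractor object; the force and window size below are stand-in assumptions).
planet = Planet(10, 320, 180, 0)

def setup():
    size(640, 360, P3D)

def draw():
    background(0)
    lights()
    attraction = PVector(0, 0.1, 0)  # stand-in force; replace with computed gravity
    planet.applyForce(PVector.mult(attraction, planet.mass))
    planet.update()
    planet.display()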
| 441 |
362 | <gh_stars>100-1000
package net.ripe.db.whois.update.autokey;
import net.ripe.db.whois.common.domain.CIString;
import net.ripe.db.whois.common.rpsl.AttributeType;
import net.ripe.db.whois.common.rpsl.RpslObject;
import net.ripe.db.whois.common.rpsl.ValidationMessages;
import net.ripe.db.whois.update.autokey.dao.NicHandleRepository;
import net.ripe.db.whois.update.dao.CountryCodeRepository;
import net.ripe.db.whois.update.domain.NicHandle;
import net.ripe.db.whois.update.domain.UpdateMessages;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Collections;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
public class NicHandleFactoryTest {
private static final String SOURCE = "RIPE";
@Mock NicHandleRepository nicHandleRepository;
@Mock CountryCodeRepository countryCodeRepository;
@InjectMocks NicHandleFactory subject;
@BeforeEach
public void setUp() throws Exception {
subject.setSource(SOURCE);
}
@Test
public void isKeyPlaceHolder_AUTO() {
assertThat(subject.isKeyPlaceHolder("AUTO"), is(false));
}
@Test
public void isKeyPlaceHolder_AUTO_() {
assertThat(subject.isKeyPlaceHolder("AUTO-"), is(false));
}
@Test
public void isKeyPlaceHolder_AUTO_1() {
assertThat(subject.isKeyPlaceHolder("AUTO-1"), is(true));
}
@Test
public void isKeyPlaceHolder_AUTO_100() {
assertThat(subject.isKeyPlaceHolder("AUTO-100"), is(true));
}
@Test
public void isKeyPlaceHolder_AUTO_100NL() {
assertThat(subject.isKeyPlaceHolder("AUTO-100NL"), is(true));
}
@Test
public void isKeyPlaceHolder_AUTO_100_NL() {
assertThat(subject.isKeyPlaceHolder("AUTO-100-NL"), is(false));
}
@Test
public void generate_invalid_placeHolder() {
Assertions.assertThrows(IllegalArgumentException.class, () -> {
subject.generate("AUTO", RpslObject.parse("person: name"));
});
}
@Test
public void generate_specified_space() {
when(nicHandleRepository.claimNextAvailableIndex("DW", SOURCE)).thenReturn(new NicHandle("DW", 10, SOURCE));
final NicHandle nicHandle = subject.generate("AUTO-1234567DW", RpslObject.parse("person: name\nnic-hdl: AUTO-1234567DW"));
assertThat(nicHandle.toString(), is("DW10-RIPE"));
}
@Test
public void generate_unspecified_space() {
when(nicHandleRepository.claimNextAvailableIndex("JAS", SOURCE)).thenReturn(new NicHandle("JAS", 10, SOURCE));
final NicHandle nicHandle = subject.generate("AUTO-111", RpslObject.parse("person: <NAME> Smith\nnic-hdl: AUTO-111"));
assertThat(nicHandle.toString(), is("JAS10-RIPE"));
}
@Test
public void generate_unspecified_lower() {
when(nicHandleRepository.claimNextAvailableIndex("SN", SOURCE)).thenReturn(new NicHandle("SN", 10, SOURCE));
final NicHandle nicHandle = subject.generate("AUTO-111", RpslObject.parse("person: some name\nnic-hdl: AUTO-111"));
assertThat(nicHandle.toString(), is("SN10-RIPE"));
}
@Test
public void generate_unspecified_long() {
when(nicHandleRepository.claimNextAvailableIndex("SATG", SOURCE)).thenReturn(new NicHandle("SATG", 10, SOURCE));
final NicHandle nicHandle = subject.generate("AUTO-1234567", RpslObject.parse("person: Satellite advisory Technologies Group Ltd\nnic-hdl: AUTO-1234567"));
assertThat(nicHandle.toString(), is("SATG10-RIPE"));
}
@Test
public void getAttributeType() {
assertThat(subject.getAttributeType(), is(AttributeType.NIC_HDL));
}
@Test
public void claim() throws Exception {
when(countryCodeRepository.getCountryCodes()).thenReturn(Collections.<CIString>emptySet());
final NicHandle nicHandle = new NicHandle("DW", 10, "RIPE");
when(nicHandleRepository.claimSpecified(nicHandle)).thenReturn(true);
assertThat(subject.claim("DW10-RIPE"), is(nicHandle));
}
@Test
public void claim_not_available() {
when(countryCodeRepository.getCountryCodes()).thenReturn(Collections.<CIString>emptySet());
when(nicHandleRepository.claimSpecified(new NicHandle("DW", 10, "RIPE"))).thenReturn(false);
try {
subject.claim("DW10-RIPE");
fail("Claim succeeded?");
} catch (ClaimException e) {
assertThat(e.getErrorMessage(), is(UpdateMessages.nicHandleNotAvailable("DW10-RIPE")));
}
}
@Test
public void claim_invalid_handle() {
when(countryCodeRepository.getCountryCodes()).thenReturn(Collections.<CIString>emptySet());
try {
subject.claim("INVALID_HANDLE");
fail("Claim succeeded?");
} catch (ClaimException e) {
assertThat(e.getErrorMessage(), is(ValidationMessages.syntaxError("INVALID_HANDLE")));
}
}
}
| 2,089 |
1,351 | <gh_stars>1000+
/** @file
SSL SNI test plugin.
Somewhat nonsensically exercise some scenarios of proxying
and blind tunneling from the SNI callback plugin
@section license License
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdio>
#include <memory.h>
#include <cinttypes>
#include <ts/ts.h>
#include <openssl/ssl.h>
#define PLUGIN_NAME "ssl_sni"
#define PCP "[" PLUGIN_NAME "] "
namespace
{
/**
Somewhat nonsensically exercise some scenarios of proxying
and blind tunneling from the SNI callback plugin
Case 1: If the servername ends in facebook.com, blind tunnel
Case 2: If the servername is www.yahoo.com and there is a context
entry for "safelyfiled.com", use the "safelyfiled.com" context for
this connection.
*/
int
CB_servername(TSCont /* contp */, TSEvent /* event */, void *edata)
{
TSVConn ssl_vc = reinterpret_cast<TSVConn>(edata);
TSSslConnection sslobj = TSVConnSslConnectionGet(ssl_vc);
SSL *ssl = reinterpret_cast<SSL *>(sslobj);
const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (servername != nullptr) {
int servername_len = strlen(servername);
int facebook_name_len = strlen("facebook.com");
if (servername_len >= facebook_name_len) {
const char *server_ptr = servername + (servername_len - facebook_name_len);
if (strcmp(server_ptr, "facebook.com") == 0) {
TSDebug(PLUGIN_NAME, "Blind tunnel from SNI callback");
TSVConnTunnel(ssl_vc);
        // Don't re-enable, to ensure that we break out of the
        // SSL handshake processing
        return TS_SUCCESS;
}
}
// If the name is yahoo, look for a context for safelyfiled and use that here
if (strcmp("www.yahoo.com", servername) == 0) {
TSDebug(PLUGIN_NAME, "SNI name is yahoo ssl obj is %p", sslobj);
if (sslobj) {
TSSslContext ctxobj = TSSslContextFindByName("safelyfiled.com");
if (ctxobj != nullptr) {
TSDebug(PLUGIN_NAME, "Found cert for safelyfiled");
SSL_CTX *ctx = reinterpret_cast<SSL_CTX *>(ctxobj);
SSL_set_SSL_CTX(ssl, ctx);
TSDebug(PLUGIN_NAME, "SNI plugin cb: replace SSL CTX");
}
}
}
}
// All done, reactivate things
TSVConnReenable(ssl_vc);
return TS_SUCCESS;
}
} // namespace
// Called by ATS as our initialization point
void
TSPluginInit(int argc, const char *argv[])
{
bool success = false;
TSPluginRegistrationInfo info;
TSCont cb_cert = nullptr; // Certificate callback continuation
info.plugin_name = PLUGIN_NAME;
info.vendor_name = "Apache Software Foundation";
info.support_email = "<EMAIL>";
if (TS_SUCCESS != TSPluginRegister(&info)) {
TSError(PCP "registration failed");
} else if (TSTrafficServerVersionGetMajor() < 2) {
TSError(PCP "requires Traffic Server 2.0 or later");
} else if (nullptr == (cb_cert = TSContCreate(&CB_servername, TSMutexCreate()))) {
TSError(PCP "Failed to create cert callback");
} else {
TSHttpHookAdd(TS_SSL_CERT_HOOK, cb_cert);
success = true;
}
if (!success) {
TSError(PCP "not initialized");
}
TSDebug(PLUGIN_NAME, "Plugin %s", success ? "online" : "offline");
return;
}
| 1,467 |
429 | <reponame>SteveJobs91/ToaruOS
#pragma once
#include <_cheader.h>
_Begin_C_Header
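/* Wait for activity on a set of file descriptors: fswait blocks until one of
 * the `count` descriptors in `fds` is ready and returns its index; fswait2
 * adds a timeout (assumed milliseconds, per the ToaruOS libc); fswait3 also
 * reports per-descriptor readiness through `out`. */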
extern int fswait(int count, int * fds);
extern int fswait2(int count, int * fds, int timeout);
extern int fswait3(int count, int * fds, int timeout, int * out);
_End_C_Header
| 106 |
2,151 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/download/download_url_loader_factory_getter_impl.h"
namespace content {
DownloadURLLoaderFactoryGetterImpl::DownloadURLLoaderFactoryGetterImpl(
std::unique_ptr<network::SharedURLLoaderFactoryInfo> url_loader_factory)
: url_loader_factory_info_(std::move(url_loader_factory)) {}
DownloadURLLoaderFactoryGetterImpl::~DownloadURLLoaderFactoryGetterImpl() =
default;
scoped_refptr<network::SharedURLLoaderFactory>
DownloadURLLoaderFactoryGetterImpl::GetURLLoaderFactory() {
if (!url_loader_factory_) {
url_loader_factory_ = network::SharedURLLoaderFactory::Create(
std::move(url_loader_factory_info_));
}
return url_loader_factory_;
}
} // namespace content
| 299 |
302 | /**
* Copyright 2013-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.services.dynamodbv2.transactions;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.UpdateItemRequest;
import com.amazonaws.services.dynamodbv2.model.UpdateItemResult;
/**
* A very primitive fault-injection client.
*
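 * <p>Illustrative usage sketch (request setup elided; the table name is an
 * example only, not part of this class):
 * <pre>{@code
 * FailingAmazonDynamoDBClient client = new FailingAmazonDynamoDBClient(credentials);
 * GetItemRequest request = new GetItemRequest().withTableName("my-table");
 * client.requestsToFail.add(request);
 * client.getItem(request); // throws FailedYourRequestException
 * }</pre>
 *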
* @author dyanacek
*/
public class FailingAmazonDynamoDBClient extends AmazonDynamoDBClient {
public static class FailedYourRequestException extends RuntimeException {
private static final long serialVersionUID = -7191808024168281212L;
}
// Any requests added to this set will throw a FailedYourRequestException when called.
public final Set<AmazonWebServiceRequest> requestsToFail = new HashSet<AmazonWebServiceRequest>();
// Any requests added to this set will return a null item when called
public final Set<GetItemRequest> getRequestsToTreatAsDeleted = new HashSet<GetItemRequest>();
// Any requests with keys in this set will return the queue of responses in order. When the end of the queue is reached
// further requests will be passed to the DynamoDB client.
public final Map<GetItemRequest, Queue<GetItemResult>> getRequestsToStub = new HashMap<GetItemRequest, Queue<GetItemResult>>();
/**
* Resets the client to the stock DynamoDB client (all requests will call DynamoDB)
*/
public void reset() {
requestsToFail.clear();
getRequestsToTreatAsDeleted.clear();
getRequestsToStub.clear();
}
public FailingAmazonDynamoDBClient(AWSCredentials credentials) {
super(credentials);
}
@Override
public GetItemResult getItem(GetItemRequest getItemRequest) throws AmazonServiceException, AmazonClientException {
if(requestsToFail.contains(getItemRequest)) {
throw new FailedYourRequestException();
}
if (getRequestsToTreatAsDeleted.contains(getItemRequest)) {
return new GetItemResult();
}
Queue<GetItemResult> stubbedResults = getRequestsToStub.get(getItemRequest);
if (stubbedResults != null && !stubbedResults.isEmpty()) {
return stubbedResults.remove();
}
return super.getItem(getItemRequest);
}
@Override
public UpdateItemResult updateItem(UpdateItemRequest updateItemRequest) throws AmazonServiceException,
AmazonClientException {
if(requestsToFail.contains(updateItemRequest)) {
throw new FailedYourRequestException();
}
return super.updateItem(updateItemRequest);
}
}
| 1,080 |
1,405 | package com.lenovo.lps.sus.control;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.widget.Button;
import android.widget.TextView;
import com.lenovo.lps.sus.c.a;
import com.lenovo.lps.sus.c.b;
/* loaded from: classes.dex */
public class SUSCustdefNotificationActivity extends Activity {
private static final int h = 3;
private boolean f = true;
private int g = 0;
private final int j = 1500;
private Handler k = new Handler();
private Button l = null;
private Button m = null;
private Runnable n = new q(this);
private static String a = "SUS_NOTIFICATION_TITLE";
private static String b = "layout";
private static String c = "sus_customdef_notification_dialog";
private static String d = "id";
private static Context e = null;
private static Integer i = 3;
public static void a(Context context) {
if (e != null) {
i = 3;
((Activity) e).finish();
e = null;
}
}
public void a() {
if (this.k != null) {
this.k.removeCallbacks(this.n);
this.k = null;
}
if (e != null) {
e = null;
}
}
@Override // android.app.Activity
public void finish() {
b.a(a.b, "SUSNotificationActivity finish isAbortUpdateFlag=" + this.f);
a();
a.d(false);
super.finish();
}
@Override // android.app.Activity
protected void onCreate(Bundle bundle) {
TextView textView;
TextView textView2;
TextView textView3;
TextView textView4;
super.onCreate(bundle);
b.a(a.b, "SUSNotificationActivity .onCreate() begin");
a.d(true);
setContentView(a.a(this, "layout", c));
Intent intent = getIntent();
e = this;
String b2 = a.b(e, "SUS_NOTIFICATION_TITLE");
if (b2 != null && b2.length() > 0) {
setTitle(b2);
}
boolean booleanExtra = intent.getBooleanExtra("FailFlag", false);
this.l = (Button) findViewById(a.a(e, d, "SUS_NOTIFICATION_CONTINUEBUTTON"));
String b3 = a.b(e, "SUS_NOTIFICATION_HIDE_BUTTONTEXT");
if (!(this.l == null || b3 == null || b3.length() <= 0)) {
this.l.setOnClickListener(new n(this));
this.l.setText(b3);
}
this.m = (Button) findViewById(a.a(e, d, "SUS_NOTIFICATION_ABORTUPDATEBUTTON"));
String b4 = a.b(e, "SUS_NOTIFICATION_ABORTUPDATE_BUTTONTEXT");
if (this.m != null) {
this.m.setOnClickListener(new x(this));
this.m.setText(b4);
}
String b5 = booleanExtra ? a.b(e, "SUS_NOTIFICATION_TIMEOUTPROMPT") : a.b(e, "SUS_NOTIFICATION_UPDATINGPROMPT");
if (!(b5 == null || b5.length() <= 0 || (textView4 = (TextView) findViewById(a.a(e, d, "SUS_NOTIFICATION_PROMPTINFO"))) == null)) {
textView4.setText(b5);
}
String b6 = a.b(e, "SUS_NOTIFICATION_AUTOHIDPROMPTINFO_1");
String b7 = a.b(e, "SUS_NOTIFICATION_AUTOHIDPROMPTINFO_2");
String num = i.toString();
if (!(b6 == null || b6.length() <= 0 || (textView3 = (TextView) findViewById(a.a(e, d, "SUS_NOTIFICATION_AUTOHIDPROMPTINFOTEXTVIEW_1"))) == null)) {
textView3.setText(b6);
}
if (!(b7 == null || b7.length() <= 0 || (textView2 = (TextView) findViewById(a.a(e, d, "SUS_NOTIFICATION_AUTOHIDPROMPTINFOTEXTVIEW_2"))) == null)) {
textView2.setText(b7);
}
if (!(num == null || num.length() <= 0 || (textView = (TextView) findViewById(a.a(e, d, "SUS_NOTIFICATION_AUTOHIDPROMPTINFOTEXTVIEW_TIME"))) == null)) {
textView.setText(num);
}
if (this.k != null && this.n != null) {
this.k.removeCallbacks(this.n);
this.k.postDelayed(this.n, 1500);
}
}
@Override // android.app.Activity
protected void onDestroy() {
b.a(a.b, "SUSNotificationActivity onDestroy myContext=" + e);
a();
a.d(false);
super.onDestroy();
}
@Override // android.app.Activity
protected void onRestart() {
super.onRestart();
}
}
| 2,003 |
1,571 | # Copyright 2020 Magic Leap, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: <NAME> (<EMAIL>)
import itertools
import os
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from atlas.config import CfgNode
from atlas.data import ScenesDataset, collate_fn, parse_splits_list
from atlas.heads2d import PixelHeads
from atlas.heads3d import VoxelHeads
from atlas.backbone2d import build_backbone2d
from atlas.backbone3d import build_backbone3d
import atlas.transforms as transforms
from atlas.tsdf import coordinates, TSDF
def backproject(voxel_dim, voxel_size, origin, projection, features):
""" Takes 2d features and fills them along rays in a 3d volume
This function implements eqs. 1,2 in https://arxiv.org/pdf/2003.10432.pdf
Each pixel in a feature image corresponds to a ray in 3d.
We fill all the voxels along the ray with that pixel's features.
Args:
voxel_dim: size of voxel volume to construct (nx,ny,nz)
voxel_size: metric size of each voxel (ex: .04m)
origin: origin of the voxel volume (xyz position of voxel (0,0,0))
        projection: bx3x4 projection matrices (intrinsics@extrinsics)
features: bxcxhxw 2d feature tensor to be backprojected into 3d
Returns:
volume: b x c x nx x ny x nz 3d feature volume
valid: b x 1 x nx x ny x nz volume.
Each voxel contains a 1 if it projects to a pixel
            and 0 otherwise (not in the view frustum of the camera)
"""
batch = features.size(0)
channels = features.size(1)
device = features.device
nx, ny, nz = voxel_dim
coords = coordinates(voxel_dim, device).unsqueeze(0).expand(batch,-1,-1) # bx3xhwd
world = coords.type_as(projection) * voxel_size + origin.to(device).unsqueeze(2)
world = torch.cat((world, torch.ones_like(world[:,:1]) ), dim=1)
camera = torch.bmm(projection, world)
px = (camera[:,0,:]/camera[:,2,:]).round().type(torch.long)
py = (camera[:,1,:]/camera[:,2,:]).round().type(torch.long)
pz = camera[:,2,:]
    # voxels in the view frustum
height, width = features.size()[2:]
valid = (px >= 0) & (py >= 0) & (px < width) & (py < height) & (pz>0) # bxhwd
# put features in volume
volume = torch.zeros(batch, channels, nx*ny*nz, dtype=features.dtype,
device=device)
for b in range(batch):
volume[b,:,valid[b]] = features[b,:,py[b,valid[b]], px[b,valid[b]]]
volume = volume.view(batch, channels, nx, ny, nz)
valid = valid.view(batch, 1, nx, ny, nz)
return volume, valid
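# Shape sketch for backproject (illustrative; the tensors here are assumptions,
# not part of the original file): with batch=2, 32-channel features of size
# 60x80, and voxel_dim=(64, 64, 32),
#   volume, valid = backproject((64, 64, 32), 0.04, torch.zeros(2, 3),
#                               projection, features)  # projection: (2, 3, 4)
# returns volume of shape (2, 32, 64, 64, 32) and valid of shape (2, 1, 64, 64, 32).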
class VoxelNet(pl.LightningModule):
""" Network architecture implementing ATLAS (https://arxiv.org/pdf/2003.10432.pdf)"""
def __init__(self, hparams):
super().__init__()
# see config.py for details
self.hparams = hparams
        # pytorch lightning does not support saving a YACS CfgNode
self.cfg = CfgNode(self.hparams)
cfg = self.cfg
# networks
self.backbone2d, self.backbone2d_stride = build_backbone2d(cfg)
self.backbone3d = build_backbone3d(cfg)
self.heads2d = PixelHeads(cfg, self.backbone2d_stride)
self.heads3d = VoxelHeads(cfg)
# other hparams
self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)
self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)
self.voxel_size = cfg.VOXEL_SIZE
self.voxel_dim_train = cfg.VOXEL_DIM_TRAIN
self.voxel_dim_val = cfg.VOXEL_DIM_VAL
self.voxel_dim_test = cfg.VOXEL_DIM_TEST
self.origin = torch.tensor([0,0,0]).view(1,3)
self.batch_size_train = cfg.DATA.BATCH_SIZE_TRAIN
self.num_frames_train = cfg.DATA.NUM_FRAMES_TRAIN
self.num_frames_val = cfg.DATA.NUM_FRAMES_VAL
self.frame_types = cfg.MODEL.HEADS2D.HEADS
self.frame_selection = cfg.DATA.FRAME_SELECTION
self.batch_backbone2d_time = cfg.TRAINER.BATCH_BACKBONE2D_TIME
self.finetune3d = cfg.TRAINER.FINETUNE3D
self.voxel_types = cfg.MODEL.HEADS3D.HEADS
self.voxel_sizes = [int(cfg.VOXEL_SIZE*100)*2**i for i in
range(len(cfg.MODEL.BACKBONE3D.LAYERS_DOWN)-1)]
self.initialize_volume()
def initialize_volume(self):
""" Reset the accumulators.
        self.volume is a voxel volume containing the accumulated features
        self.valid is a voxel volume containing the number of times a voxel has
        been seen by a camera view frustum
"""
self.volume = 0
self.valid = 0
def normalizer(self, x):
""" Normalizes the RGB images to the input range"""
return (x - self.pixel_mean.type_as(x)) / self.pixel_std.type_as(x)
def inference1(self, projection, image=None, feature=None):
""" Backprojects image features into 3D and accumulates them.
This is the first half of the network which is run on every frame.
Only pass one of image or feature. If image is passed 2D features
are extracted from the image using self.backbone2d. When features
        are extracted externally to this function, pass feature (used when
        passing multiple frames through the backbone2d simultaneously
to share BatchNorm stats).
Args:
projection: bx3x4 projection matrix
image: bx3xhxw RGB image
feature: bxcxh'xw' feature map (h'=h/stride, w'=w/stride)
Feature volume is accumulated into self.volume and self.valid
"""
assert ((image is not None and feature is None) or
(image is None and feature is not None))
if feature is None:
image = self.normalizer(image)
feature = self.backbone2d(image)
# backbone2d reduces the size of the images so we
# change intrinsics to reflect this
projection = projection.clone()
projection[:,:2,:] = projection[:,:2,:] / self.backbone2d_stride
if self.training:
voxel_dim = self.voxel_dim_train
else:
voxel_dim = self.voxel_dim_val
volume, valid = backproject(voxel_dim, self.voxel_size, self.origin,
projection, feature)
if self.finetune3d:
volume.detach_()
valid.detach_()
self.volume = self.volume + volume
self.valid = self.valid + valid
def inference2(self, targets=None):
""" Refines accumulated features and regresses output TSDF.
This is the second half of the network. It should be run once after
        all frames have been accumulated. It may also be run more frequently
to visualize incremental progress.
Args:
targets: used to compare network output to ground truth
Returns:
tuple of dicts ({outputs}, {losses})
if targets is None, losses is empty
"""
volume = self.volume/self.valid
# remove nans (where self.valid==0)
volume = volume.transpose(0,1)
volume[:,self.valid.squeeze(1)==0]=0
volume = volume.transpose(0,1)
x = self.backbone3d(volume)
return self.heads3d(x, targets)
def forward(self, batch):
""" Wraps inference1() and inference2() into a single call.
Args:
batch: a dict from the dataloader
Returns:
see self.inference2
"""
self.initialize_volume()
image = batch['image']
projection = batch['projection']
# get targets if they are in the batch
targets3d = {key:value for key, value in batch.items() if key[:3]=='vol'}
targets3d = targets3d if targets3d else None
# TODO other 2d targets
targets2d = {'semseg':batch['semseg']} if 'semseg' in batch else None
#TODO: run heads2d in inference1
outputs2d, losses2d = {}, {}
# transpose batch and time so we can accumulate sequentially
images = image.transpose(0,1)
projections = projection.transpose(0,1)
if (not self.batch_backbone2d_time) or (not self.training) or self.finetune3d:
# run images through 2d cnn sequentially and backproject and accumulate
for image, projection in zip(images, projections):
self.inference1(projection, image=image)
else:
# run all images through 2d cnn together to share batchnorm stats
image = images.reshape(images.shape[0]*images.shape[1], *images.shape[2:])
image = self.normalizer(image)
features = self.backbone2d(image)
# run 2d heads
if targets2d is not None:
targets2d = {
key: value.transpose(0,1).view(
images.shape[0]*images.shape[1], *value.shape[2:])
for key, value in targets2d.items()}
outputs2d, losses2d = self.heads2d(features, targets2d)
# reshape back
features = features.view(images.shape[0],
images.shape[1],
*features.shape[1:])
outputs2d = {
key:value.transpose(0,1).reshape(
images.shape[0], images.shape[1], *value.shape[1:])
for key, value in outputs2d.items()}
for projection, feature in zip(projections, features):
self.inference1(projection, feature=feature)
# run 3d cnn
outputs3d, losses3d = self.inference2(targets3d)
return {**outputs2d, **outputs3d}, {**losses2d, **losses3d}
def postprocess(self, batch):
""" Wraps the network output into a TSDF data structure
Args:
            batch: dict containing network outputs
Returns:
list of TSDFs (one TSDF per scene in the batch)
"""
key = 'vol_%02d'%self.voxel_sizes[0] # only get vol of final resolution
out = []
batch_size = len(batch[key+'_tsdf'])
for i in range(batch_size):
tsdf = TSDF(self.voxel_size,
self.origin,
batch[key+'_tsdf'][i].squeeze(0))
# add semseg vol
if ('semseg' in self.voxel_types) and (key+'_semseg' in batch):
semseg = batch[key+'_semseg'][i]
if semseg.ndim==4:
semseg = semseg.argmax(0)
tsdf.attribute_vols['semseg'] = semseg
# add color vol
if 'color' in self.voxel_types:
color = batch[key+'_color'][i]
tsdf.attribute_vols['color'] = color
out.append(tsdf)
return out
def get_transform(self, is_train):
""" Gets a transform to preprocess the input data"""
if is_train:
voxel_dim = self.voxel_dim_train
random_rotation = self.cfg.DATA.RANDOM_ROTATION_3D
random_translation = self.cfg.DATA.RANDOM_TRANSLATION_3D
paddingXY = self.cfg.DATA.PAD_XY_3D
paddingZ = self.cfg.DATA.PAD_Z_3D
else:
# center volume
voxel_dim = self.voxel_dim_val
random_rotation = False
random_translation = False
paddingXY = 0
paddingZ = 0
transform = []
transform += [transforms.ResizeImage((640,480)),
transforms.ToTensor(),
transforms.InstanceToSemseg('nyu40'),
transforms.RandomTransformSpace(
voxel_dim, random_rotation, random_translation,
paddingXY, paddingZ),
transforms.FlattenTSDF(),
transforms.IntrinsicsPoseToProjection(),
]
return transforms.Compose(transform)
def train_dataloader(self):
transform = self.get_transform(True)
info_files = parse_splits_list(self.cfg.DATASETS_TRAIN)
dataset = ScenesDataset(
info_files, self.num_frames_train, transform,
self.frame_types, self.frame_selection, self.voxel_types,
self.voxel_sizes)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=self.batch_size_train, num_workers=2,
collate_fn=collate_fn, shuffle=True, drop_last=True)
return dataloader
def val_dataloader(self):
transform = self.get_transform(False)
info_files = parse_splits_list(self.cfg.DATASETS_VAL)
dataset = ScenesDataset(
info_files, self.num_frames_val, transform,
self.frame_types, self.frame_selection, self.voxel_types,
self.voxel_sizes)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1, num_workers=1, collate_fn=collate_fn,
shuffle=False, drop_last=False)
return dataloader
def training_step(self, batch, batch_idx):
outputs, losses = self.forward(batch)
        # visualize training outputs at the beginning of each epoch
if batch_idx==0:
pred_tsdfs = self.postprocess(outputs)
trgt_tsdfs = self.postprocess(batch)
self.logger.experiment1.save_mesh(pred_tsdfs[0], 'train_pred.ply')
self.logger.experiment1.save_mesh(trgt_tsdfs[0], 'train_trgt.ply')
# # visualize outputs from heads2d
# if 'semseg' in self.frame_types:
# visualizer = transforms.VizSemseg()
# viz = [batch['image'].detach().cpu()[0,0].byte()]
# if 'semseg' in outputs:
# viz.append( visualizer(outputs['semseg'].detach().argmax(2).cpu()[0,0]) )
# if 'semseg' in batch:
# viz.append( visualizer(batch['semseg'].detach().cpu()[0,0]) )
# viz = torch.cat(viz,-1)
# self.logger.experiment.add_image('semseg2d', viz)
loss = sum(losses.values())
return {'loss': loss, 'log': losses}
def validation_step(self, batch, batch_idx):
outputs, losses = self.forward(batch)
# save validation meshes
pred_tsdfs = self.postprocess(outputs)
trgt_tsdfs = self.postprocess(batch)
self.logger.experiment1.save_mesh(pred_tsdfs[0],
batch['scene'][0]+'_pred.ply')
self.logger.experiment1.save_mesh(trgt_tsdfs[0],
batch['scene'][0]+'_trgt.ply')
return losses
def validation_epoch_end(self, outputs):
avg_losses = {'val_'+key:torch.stack([x[key] for x in outputs]).mean()
for key in outputs[0].keys()}
avg_loss = sum(avg_losses.values())
return {'val_loss': avg_loss, 'log': avg_losses}
def configure_optimizers(self):
optimizers = []
schedulers = []
# allow for different learning rates between pretrained layers
# (resnet backbone) and new layers (everything else).
params_backbone2d = self.backbone2d[0].parameters()
modules_rest = [self.backbone2d[1], self.backbone3d,
self.heads2d, self.heads3d]
params_rest = itertools.chain(*(params.parameters()
for params in modules_rest))
        # optimizer
if self.cfg.OPTIMIZER.TYPE == 'Adam':
lr = self.cfg.OPTIMIZER.ADAM.LR
lr_backbone2d = lr * self.cfg.OPTIMIZER.BACKBONE2D_LR_FACTOR
optimizer = torch.optim.Adam([
{'params': params_backbone2d, 'lr': lr_backbone2d},
{'params': params_rest, 'lr': lr}])
optimizers.append(optimizer)
else:
raise NotImplementedError(
'optimizer %s not supported'%self.cfg.OPTIMIZER.TYPE)
# scheduler
if self.cfg.SCHEDULER.TYPE == 'StepLR':
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, self.cfg.SCHEDULER.STEP_LR.STEP_SIZE,
gamma=self.cfg.SCHEDULER.STEP_LR.GAMMA)
schedulers.append(scheduler)
        elif self.cfg.SCHEDULER.TYPE != 'None':
            raise NotImplementedError(
                'scheduler %s not supported'%self.cfg.SCHEDULER.TYPE)
return optimizers, schedulers
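# Minimal usage sketch (illustrative; the trainer flags are assumptions and
# hparams is the dict form of the YACS config described in config.py):
#   model = VoxelNet(hparams)
#   trainer = pl.Trainer(gpus=1)
#   trainer.fit(model)  # drives train_dataloader()/val_dataloader() above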
| 8,039 |
1,199 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from language.mentionmemory.modules import transformer
class TransformerTest(absltest.TestCase):
"""Transformer layer test."""
num_layers = 4
model_dim = 16
intermediate_dim = 64
num_heads = 4
dtype = jnp.float32
dropout_rate = 0.1
bsz = 4
seq_len = 20
def test_transformer_layer_shape(self):
"""Testing transformer layer shape."""
encoding = jnp.ones(
shape=(self.bsz, self.seq_len, self.model_dim), dtype=self.dtype)
attention_mask = jnp.ones(shape=(self.bsz, self.seq_len), dtype=self.dtype)
model = transformer.TransformerLayer(
model_dim=self.model_dim,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
)
rng = jax.random.PRNGKey(0)
output, _ = model.init_with_output(
rng,
encoding=encoding,
attention_mask=attention_mask,
deterministic=True,
)
self.assertSequenceEqual(output.shape, encoding.shape)
def test_transformer_block_shape(self):
"""Testing transformer block shape."""
encoding = jnp.ones(
shape=(self.bsz, self.seq_len, self.model_dim), dtype=self.dtype)
attention_mask = jnp.ones(shape=(self.bsz, self.seq_len), dtype=self.dtype)
model = transformer.TransformerBlock(
num_layers=self.num_layers,
model_dim=self.model_dim,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
)
rng = jax.random.PRNGKey(0)
output, _ = model.init_with_output(
rng,
encoding=encoding,
attention_mask=attention_mask,
deterministic=True,
)
self.assertSequenceEqual(output.shape, encoding.shape)
if __name__ == '__main__':
absltest.main()
| 985 |
5,169 | {
"name": "DTPickerPresenter",
"version": "0.1.0",
"license": "MIT",
"summary": "Modern wrapper for UIDatePicker and UIPickerView presentation.",
"homepage": "https://github.com/DenHeadless/DTPickerPresenter",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/DenHeadless/DTPickerPresenter.git",
"tag": "0.1.0"
},
"requires_arc": true,
"platforms": {
"ios": "6.0"
},
"ios": {
"frameworks": [
"UIKit",
"Foundation"
]
},
"source_files": "DTPickerPresenter",
"social_media_url": "https://twitter.com/DTCoder"
}
| 257 |
1,541 | <gh_stars>1000+
package com.bwssystems.HABridge;
import java.util.Map;
import com.bwssystems.HABridge.api.hue.WhitelistEntry;
public class BridgeSecurityDescriptor {
private Map<String, User> users;
private boolean useLinkButton;
private String execGarden;
private boolean secureHueApi;
private Map<String, WhitelistEntry> whitelist;
private boolean useHttps;
private String keyfilePassword;
private String keyfilePath;
public BridgeSecurityDescriptor() {
super();
this.setUseLinkButton(false);
}
public Map<String, User> getUsers() {
return users;
}
public void setUsers(Map<String, User> users) {
this.users = users;
}
public boolean isUseLinkButton() {
return useLinkButton;
}
public void setUseLinkButton(boolean useLinkButton) {
this.useLinkButton = useLinkButton;
}
public String getExecGarden() {
return execGarden;
}
public void setExecGarden(String execGarden) {
this.execGarden = execGarden;
}
public boolean isSecureHueApi() {
return secureHueApi;
}
public void setSecureHueApi(boolean secureHueApi) {
this.secureHueApi = secureHueApi;
}
public Map<String, WhitelistEntry> getWhitelist() {
return whitelist;
}
public void setWhitelist(Map<String, WhitelistEntry> whitelist) {
this.whitelist = whitelist;
}
public boolean isSecure() {
boolean secureFlag = false;
if(users != null && !users.isEmpty()) {
for (Map.Entry<String, User> entry : users.entrySet())
{
if(entry.getValue().getPassword() != null && !entry.getValue().getPassword().isEmpty()) {
secureFlag = true;
break;
}
}
}
return secureFlag;
}
public boolean isUseHttps() {
return useHttps;
}
public void setUseHttps(boolean useHttps) {
this.useHttps = useHttps;
}
public String getKeyfilePassword() {
return keyfilePassword;
}
public void setKeyfilePassword(String keyfilePassword) {
this.keyfilePassword = keyfilePassword;
}
public String getKeyfilePath() {
return keyfilePath;
}
public void setKeyfilePath(String keyfilePath) {
this.keyfilePath = keyfilePath;
}
}
| 868 |
5,300 | package hellowicket.plaintext;
import java.nio.charset.Charset;
import org.apache.wicket.request.http.WebResponse;
import org.apache.wicket.request.resource.IResource;
/**
* A resource that implements the requirements for
* <a href="http://www.techempower.com/benchmarks/#section=code">Test type 6: Plaintext</a>
*/
public class HelloTextResource implements IResource
{
private static final long serialVersionUID = 1L;
private static final String CONTENT_TYPE = "text/plain";
private static final byte[] DATA = "Hello, World!".getBytes(Charset.forName("UTF-8"));
@Override
public void respond(Attributes attributes)
{
final WebResponse webResponse = (WebResponse) attributes.getResponse();
webResponse.setContentType(CONTENT_TYPE);
webResponse.setContentLength(DATA.length);
webResponse.write(DATA);
}
}
| 257 |
2,542 | <gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace std;
using namespace Common;
using namespace ServiceModel;
namespace Api
{
ComGetSystemServiceListResult::ComGetSystemServiceListResult(
vector<ServiceQueryResult> && systemServiceList)
: systemServiceList_(),
heap_()
{
systemServiceList_ = heap_.AddItem<FABRIC_SERVICE_QUERY_RESULT_LIST>();
ComConversionUtility::ToPublicList<
ServiceQueryResult,
FABRIC_SERVICE_QUERY_RESULT_ITEM,
FABRIC_SERVICE_QUERY_RESULT_LIST>(
heap_,
move(systemServiceList),
*systemServiceList_);
}
const FABRIC_SERVICE_QUERY_RESULT_LIST *STDMETHODCALLTYPE ComGetSystemServiceListResult::get_SystemServiceList(void)
{
return systemServiceList_.GetRawPointer();
}
}
| 421 |
9,711 | /*
* Copyright 2014 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.ksoichiro.android.observablescrollview.samples;
import android.os.Bundle;
import android.support.v7.app.ActionBar;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.widget.TextView;
import com.github.ksoichiro.android.observablescrollview.ObservableScrollView;
import com.github.ksoichiro.android.observablescrollview.ObservableScrollViewCallbacks;
import com.github.ksoichiro.android.observablescrollview.ScrollState;
import com.github.ksoichiro.android.observablescrollview.ScrollUtils;
import com.nineoldandroids.view.ViewHelper;
public class FlexibleSpaceToolbarScrollViewActivity extends BaseActivity implements ObservableScrollViewCallbacks {
private View mFlexibleSpaceView;
private View mToolbarView;
private TextView mTitleView;
private int mFlexibleSpaceHeight;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_flexiblespacetoolbarscrollview);
setSupportActionBar((Toolbar) findViewById(R.id.toolbar));
ActionBar ab = getSupportActionBar();
if (ab != null) {
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
}
mFlexibleSpaceView = findViewById(R.id.flexible_space);
mTitleView = (TextView) findViewById(R.id.title);
mTitleView.setText(getTitle());
setTitle(null);
mToolbarView = findViewById(R.id.toolbar);
final ObservableScrollView scrollView = (ObservableScrollView) findViewById(R.id.scroll);
scrollView.setScrollViewCallbacks(this);
mFlexibleSpaceHeight = getResources().getDimensionPixelSize(R.dimen.flexible_space_height);
int flexibleSpaceAndToolbarHeight = mFlexibleSpaceHeight + getActionBarSize();
findViewById(R.id.body).setPadding(0, flexibleSpaceAndToolbarHeight, 0, 0);
mFlexibleSpaceView.getLayoutParams().height = flexibleSpaceAndToolbarHeight;
ScrollUtils.addOnGlobalLayoutListener(mTitleView, new Runnable() {
@Override
public void run() {
updateFlexibleSpaceText(scrollView.getCurrentScrollY());
}
});
}
@Override
public void onScrollChanged(int scrollY, boolean firstScroll, boolean dragging) {
updateFlexibleSpaceText(scrollY);
}
@Override
public void onDownMotionEvent() {
}
@Override
public void onUpOrCancelMotionEvent(ScrollState scrollState) {
}
private void updateFlexibleSpaceText(final int scrollY) {
        // Let the flexible space scroll away with the content.
        ViewHelper.setTranslationY(mFlexibleSpaceView, -scrollY);
        int adjustedScrollY = (int) ScrollUtils.getFloat(scrollY, 0, mFlexibleSpaceHeight);
        // Shrink the title from its expanded scale back to 1 as the space collapses.
        float maxScale = (float) (mFlexibleSpaceHeight - mToolbarView.getHeight()) / mToolbarView.getHeight();
        float scale = maxScale * ((float) mFlexibleSpaceHeight - adjustedScrollY) / mFlexibleSpaceHeight;
        ViewHelper.setPivotX(mTitleView, 0);
        ViewHelper.setPivotY(mTitleView, 0);
        ViewHelper.setScaleX(mTitleView, 1 + scale);
        ViewHelper.setScaleY(mTitleView, 1 + scale);
        // Slide the title between the bottom of the flexible space and the toolbar.
        int maxTitleTranslationY = mToolbarView.getHeight() + mFlexibleSpaceHeight - (int) (mTitleView.getHeight() * (1 + scale));
        int titleTranslationY = (int) (maxTitleTranslationY * ((float) mFlexibleSpaceHeight - adjustedScrollY) / mFlexibleSpaceHeight);
        ViewHelper.setTranslationY(mTitleView, titleTranslationY);
}
}
| 1,459 |
523 |
// Copyright <NAME> 2015 - 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file ../../LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "sample_functions.h"
#include <cnl/cmath.h>
#include <benchmark/benchmark.h>
using cnl::numeric_limits;
using cnl::scaled_integer;
////////////////////////////////////////////////////////////////////////////////
// entry point
BENCHMARK_MAIN(); // NOLINT
////////////////////////////////////////////////////////////////////////////////
// benchmarking functions
template<class T>
static void add(benchmark::State& state)
{
auto addend1 = static_cast<T>(numeric_limits<T>::max() / 5);
auto addend2 = static_cast<T>(numeric_limits<T>::max() / 3);
while (state.KeepRunning()) {
benchmark::DoNotOptimize(addend1);
benchmark::DoNotOptimize(addend2);
auto value = addend1 + addend2;
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void sub(benchmark::State& state)
{
auto minuend = static_cast<T>(numeric_limits<T>::max() / 5);
auto subtrahend = static_cast<T>(numeric_limits<T>::max() / 3);
while (state.KeepRunning()) {
benchmark::DoNotOptimize(minuend);
benchmark::DoNotOptimize(subtrahend);
        auto value = minuend - subtrahend;
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void mul(benchmark::State& state)
{
auto factor1 = static_cast<T>(numeric_limits<T>::max() / int8_t{5});
auto factor2 = static_cast<T>(numeric_limits<T>::max() / int8_t{3});
while (state.KeepRunning()) {
benchmark::DoNotOptimize(factor1);
benchmark::DoNotOptimize(factor2);
auto value = factor1 * factor2;
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void div(benchmark::State& state)
{
auto nume = static_cast<T>(numeric_limits<T>::max() / int8_t{5});
auto denom = static_cast<T>(numeric_limits<T>::max() / int8_t{3});
while (state.KeepRunning()) {
benchmark::DoNotOptimize(nume);
benchmark::DoNotOptimize(denom);
auto value = nume / denom;
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void bm_sqrt(benchmark::State& state)
{
auto input = static_cast<T>(numeric_limits<T>::max() / int8_t{5});
while (state.KeepRunning()) {
benchmark::DoNotOptimize(input);
auto output = cnl::sqrt(input);
benchmark::DoNotOptimize(output);
}
}
template<class T>
static void bm_magnitude_squared(benchmark::State& state)
{
auto x = T{1LL};
auto y = T{4LL};
auto z = T{9LL};
while (state.KeepRunning()) {
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(y);
benchmark::DoNotOptimize(z);
auto value = magnitude_squared(x, y, z);
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void bm_circle_intersect_generic(benchmark::State& state)
{
auto x1 = T{0LL};
auto y1 = T{10LL};
auto r1 = T{14LL};
auto x2 = T{4LL};
auto y2 = T{13LL};
auto r2 = T{9LL};
while (state.KeepRunning()) {
benchmark::DoNotOptimize(x1);
benchmark::DoNotOptimize(y1);
benchmark::DoNotOptimize(r1);
benchmark::DoNotOptimize(x2);
benchmark::DoNotOptimize(y2);
benchmark::DoNotOptimize(r2);
auto value = circle_intersect_generic(x1, y1, r1, x2, y2, r2);
benchmark::DoNotOptimize(value);
}
}
template<class T>
static void circle_intersect_generic(benchmark::State& state)
{
auto x1 = T{0};
auto y1 = T{10};
auto r1 = T{14};
auto x2 = T{4};
auto y2 = T{13};
auto r2 = T{9};
while (state.KeepRunning()) {
benchmark::DoNotOptimize(x1);
benchmark::DoNotOptimize(y1);
benchmark::DoNotOptimize(r1);
benchmark::DoNotOptimize(x2);
benchmark::DoNotOptimize(y2);
benchmark::DoNotOptimize(r2);
auto value = circle_intersect_generic(x1, y1, r1, x2, y2, r2);
benchmark::DoNotOptimize(value);
}
}
////////////////////////////////////////////////////////////////////////////////
// scaled_integer types
using u4_4 = scaled_integer<uint8_t, cnl::power<-4>>;
using s3_4 = scaled_integer<int8_t, cnl::power<-4>>;
using u8_8 = scaled_integer<uint16_t, cnl::power<-8>>;
using s7_8 = scaled_integer<int16_t, cnl::power<-8>>;
using u16_16 = scaled_integer<uint32_t, cnl::power<-16>>;
using s15_16 = scaled_integer<int32_t, cnl::power<-16>>;
using u32_32 = scaled_integer<uint64_t, cnl::power<-32>>;
using s31_32 = scaled_integer<int64_t, cnl::power<-32>>;
////////////////////////////////////////////////////////////////////////////////
// multi-type benchmark macros
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_FLOAT(fn) \
BENCHMARK_TEMPLATE1(fn, float); \
BENCHMARK_TEMPLATE1(fn, double); \
BENCHMARK_TEMPLATE1(fn, long double);
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_INT(fn) \
BENCHMARK_TEMPLATE1(fn, int8_t); \
BENCHMARK_TEMPLATE1(fn, uint8_t); \
BENCHMARK_TEMPLATE1(fn, int16_t); \
BENCHMARK_TEMPLATE1(fn, uint16_t); \
BENCHMARK_TEMPLATE1(fn, int32_t); \
BENCHMARK_TEMPLATE1(fn, uint32_t); \
BENCHMARK_TEMPLATE1(fn, int64_t); \
BENCHMARK_TEMPLATE1(fn, uint64_t);
// types that can store values >= 1
#if defined(CNL_INT128_ENABLED)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_FIXED(fn) \
BENCHMARK_TEMPLATE1(fn, u4_4); \
BENCHMARK_TEMPLATE1(fn, s3_4); \
BENCHMARK_TEMPLATE1(fn, u8_8); \
BENCHMARK_TEMPLATE1(fn, s7_8); \
BENCHMARK_TEMPLATE1(fn, u16_16); \
BENCHMARK_TEMPLATE1(fn, s15_16); \
BENCHMARK_TEMPLATE1(fn, u32_32); \
BENCHMARK_TEMPLATE1(fn, s31_32);
#else
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_FIXED(fn) \
BENCHMARK_TEMPLATE1(fn, u4_4); \
BENCHMARK_TEMPLATE1(fn, s3_4); \
BENCHMARK_TEMPLATE1(fn, u8_8); \
BENCHMARK_TEMPLATE1(fn, s7_8); \
BENCHMARK_TEMPLATE1(fn, u16_16); \
BENCHMARK_TEMPLATE1(fn, s15_16);
#endif
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_REAL(fn) \
FIXED_POINT_BENCHMARK_FLOAT(fn) \
FIXED_POINT_BENCHMARK_FIXED(fn)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FIXED_POINT_BENCHMARK_COMPLETE(fn) \
FIXED_POINT_BENCHMARK_REAL(fn) \
FIXED_POINT_BENCHMARK_INT(fn)
////////////////////////////////////////////////////////////////////////////////
// benchmark invocations
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_COMPLETE(add)
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_COMPLETE(sub)
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_COMPLETE(mul)
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_COMPLETE(div)
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_REAL(bm_magnitude_squared)
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_REAL(bm_circle_intersect_generic)
// tests involving unoptimized math function, cnl::sqrt
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory,cppcoreguidelines-avoid-non-const-global-variables)
FIXED_POINT_BENCHMARK_REAL(bm_sqrt)
| 3,304 |
326 | // Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2014-2020, Oracle and/or its affiliates.
// Contributed and/or modified by <NAME>, on behalf of Oracle
// Contributed and/or modified by <NAME>, on behalf of Oracle
// Licensed under the Boost Software License version 1.0.
// http://www.boost.org/users/license.html
#ifndef BOOST_GEOMETRY_GEOMETRIES_POINTING_SEGMENT_HPP
#define BOOST_GEOMETRY_GEOMETRIES_POINTING_SEGMENT_HPP
#include <cstddef>
#include <type_traits>
#include <boost/concept/assert.hpp>
#include <boost/core/addressof.hpp>
#include <boost/geometry/core/access.hpp>
#include <boost/geometry/core/assert.hpp>
#include <boost/geometry/core/coordinate_type.hpp>
#include <boost/geometry/geometries/concepts/point_concept.hpp>
namespace boost { namespace geometry
{
namespace model
{
// const or non-const segment type that is meant to be
// * default constructible
// * copy constructible
// * assignable
// referring_segment does not fit these requirements, hence the
// pointing_segment class
//
// this class is used by the segment_iterator as its value type
template <typename ConstOrNonConstPoint>
class pointing_segment
{
BOOST_CONCEPT_ASSERT( (
typename std::conditional
<
std::is_const<ConstOrNonConstPoint>::value,
concepts::Point<ConstOrNonConstPoint>,
concepts::ConstPoint<ConstOrNonConstPoint>
            >::type
) );
typedef ConstOrNonConstPoint point_type;
public:
point_type* first;
point_type* second;
inline pointing_segment()
: first(NULL)
, second(NULL)
{}
inline pointing_segment(point_type const& p1, point_type const& p2)
: first(boost::addressof(p1))
, second(boost::addressof(p2))
{}
};
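// Illustrative usage (a sketch, not from the original header): this is the
// value type produced when iterating the segments of a linestring, e.g. via
// geometry::segment_iterator. Given two point_xy<double> endpoints p1, p2:
//   model::pointing_segment<model::d2::point_xy<double> const> s(p1, p2);
//   double x0 = geometry::get<0, 0>(s); // x coordinate of the first endpoint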
} // namespace model
// Traits specializations for segment above
#ifndef DOXYGEN_NO_TRAITS_SPECIALIZATIONS
namespace traits
{
template <typename Point>
struct tag<model::pointing_segment<Point> >
{
typedef segment_tag type;
};
template <typename Point>
struct point_type<model::pointing_segment<Point> >
{
typedef Point type;
};
template <typename Point, std::size_t Dimension>
struct indexed_access<model::pointing_segment<Point>, 0, Dimension>
{
typedef model::pointing_segment<Point> segment_type;
typedef typename geometry::coordinate_type
<
segment_type
>::type coordinate_type;
static inline coordinate_type get(segment_type const& s)
{
BOOST_GEOMETRY_ASSERT( s.first != NULL );
return geometry::get<Dimension>(*s.first);
}
static inline void set(segment_type& s, coordinate_type const& value)
{
BOOST_GEOMETRY_ASSERT( s.first != NULL );
geometry::set<Dimension>(*s.first, value);
}
};
template <typename Point, std::size_t Dimension>
struct indexed_access<model::pointing_segment<Point>, 1, Dimension>
{
typedef model::pointing_segment<Point> segment_type;
typedef typename geometry::coordinate_type
<
segment_type
>::type coordinate_type;
static inline coordinate_type get(segment_type const& s)
{
BOOST_GEOMETRY_ASSERT( s.second != NULL );
return geometry::get<Dimension>(*s.second);
}
static inline void set(segment_type& s, coordinate_type const& value)
{
BOOST_GEOMETRY_ASSERT( s.second != NULL );
geometry::set<Dimension>(*s.second, value);
}
};
} // namespace traits
#endif // DOXYGEN_NO_TRAITS_SPECIALIZATIONS
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_GEOMETRIES_POINTING_SEGMENT_HPP
| 1,407 |