max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,724 | #include <libmath/Vec2.h>
#include "tests/Driver.h"
TEST(math_vec2_add)
{
Math::Vec2f a{2.0f, 1.0f};
Math::Vec2f b{3.0f, 4.0f};
auto c = a + b;
Assert::equal(c.x(), 5.0f);
Assert::equal(c.y(), 5.0f);
}
TEST(math_vec2_sub)
{
Math::Vec2f a{2.0f, 1.0f};
Math::Vec2f b{3.0f, 4.0f};
auto c = a - b;
Assert::equal(c.x(), -1.0f);
Assert::equal(c.y(), -3.0f);
}
TEST(math_vec2_dot)
{
Math::Vec2f a{1.0f, 0.0f};
Math::Vec2f b{-1.0f, 1.0f};
auto c = a.dot(b);
Assert::equal(c, -1.0f);
} | 330 |
416 | <reponame>bakedpotato191/sdks<gh_stars>100-1000
//
// INSpeakableString.h
// Intents
//
// Copyright (c) 2016-2017 Apple Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <Intents/INSpeakable.h>
NS_ASSUME_NONNULL_BEGIN
API_AVAILABLE(macosx(10.12), ios(10.0), watchos(3.2))
@interface INSpeakableString : NSObject <INSpeakable, NSSecureCoding>
- (instancetype)init NS_UNAVAILABLE;
- (instancetype)initWithVocabularyIdentifier:(NSString *)vocabularyIdentifier
spokenPhrase:(NSString *)spokenPhrase
pronunciationHint:(nullable NSString *)pronunciationHint NS_DESIGNATED_INITIALIZER;
- (instancetype)initWithIdentifier:(NSString *)identifier
spokenPhrase:(NSString *)spokenPhrase
pronunciationHint:(nullable NSString *)pronunciationHint API_DEPRECATED("Please use -initWithVocabularyIdentifier:spokenPhrase:pronunciationHint:", ios(10.0, 11.0), watchos(3.2, 4.0), macosx(10.12, 10.13));
- (instancetype)initWithSpokenPhrase:(NSString *)spokenPhrase API_AVAILABLE(ios(10.2), macosx(10.12.2));
@end
NS_ASSUME_NONNULL_END
| 491 |
1,848 | <reponame>j-channings/rust-bindgen
// bindgen-flags: --rustified-enum ".*"
enum Foo {
Bar = 1,
Dupe = 1
};
| 54 |
561 | <gh_stars>100-1000
//
// Test for read_conf_file
//
#include <string>
#include "gtest/gtest.h"
#include "common/kafka/kafka_config.h"
namespace kafka {
TEST(KafkaConfigTest, ConfigFileTest) {
ConfigMap configMap;
std::string configFile = std::string("client_config.properties");
EXPECT_TRUE(KafkaConfig::read_conf_file(configFile, &configMap));
EXPECT_TRUE(configMap.find("enable.sparse.connections") != configMap.end());
EXPECT_TRUE(configMap.find("enable.sparse.connections")->second == "true");
EXPECT_TRUE(configMap.find("timeout_millis") != configMap.end());
EXPECT_TRUE(configMap.find("timeout_millis")->second == "1200");
EXPECT_TRUE(configMap.find("socket_timeout_ms") != configMap.end());
EXPECT_TRUE(configMap.find("socket_timeout_ms")->first == "socket_timeout_ms");
EXPECT_TRUE(configMap.find("socket_timeout_ms")->second == "300");
EXPECT_TRUE(configMap.find("not_enabled_config_file") == configMap.end());
EXPECT_TRUE(configMap.find("comment.not.visible") == configMap.end());
}
} // namespace kafka
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 420 |
8,468 |
#ifndef __NTDDPACKET
#define __NTDDPACKET 1
#include "devioctl.h"
/*#include <packon.h> */
struct _PACKET_OID_DATA {
ULONG Oid;
ULONG Length;
UCHAR Data[1];
};
typedef struct _PACKET_OID_DATA PACKET_OID_DATA, *PPACKET_OID_DATA;
/*#include <packoff.h> */
#define FILE_DEVICE_PROTOCOL 0x8000
#define IOCTL_PROTOCOL_QUERY_OID CTL_CODE(FILE_DEVICE_PROTOCOL, 0 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_SET_OID CTL_CODE(FILE_DEVICE_PROTOCOL, 1 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_STATISTICS CTL_CODE(FILE_DEVICE_PROTOCOL, 2 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_RESET CTL_CODE(FILE_DEVICE_PROTOCOL, 3 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_READ CTL_CODE(FILE_DEVICE_PROTOCOL, 4 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_WRITE CTL_CODE(FILE_DEVICE_PROTOCOL, 5 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PROTOCOL_MACNAME CTL_CODE(FILE_DEVICE_PROTOCOL, 6 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_OPEN CTL_CODE(FILE_DEVICE_PROTOCOL, 7 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_CLOSE CTL_CODE(FILE_DEVICE_PROTOCOL, 8 , METHOD_BUFFERED, FILE_ANY_ACCESS)
#endif
| 660 |
1,241 | /*
* Copyright (c) 2017.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.itfsw.mybatis.generator.plugins;
import com.itfsw.mybatis.generator.plugins.utils.BasePlugin;
import com.itfsw.mybatis.generator.plugins.utils.FormatTools;
import com.itfsw.mybatis.generator.plugins.utils.JavaElementGeneratorTools;
import com.itfsw.mybatis.generator.plugins.utils.PluginTools;
import com.itfsw.mybatis.generator.plugins.utils.enhanced.InnerTypeFullyQualifiedJavaType;
import com.itfsw.mybatis.generator.plugins.utils.hook.ILombokPluginHook;
import com.itfsw.mybatis.generator.plugins.utils.hook.IModelBuilderPluginHook;
import org.mybatis.generator.api.IntrospectedColumn;
import org.mybatis.generator.api.IntrospectedTable;
import org.mybatis.generator.api.dom.java.*;
import org.mybatis.generator.internal.util.JavaBeansUtil;
import java.util.List;
/**
* ---------------------------------------------------------------------------
* 增加Model Builder方法
* ---------------------------------------------------------------------------
* @author: hewei
* @time:2016/12/28 14:56
* ---------------------------------------------------------------------------
*/
public class ModelBuilderPlugin extends BasePlugin implements ILombokPluginHook {
public static final String BUILDER_CLASS_NAME = "Builder"; // Builder 类名
/**
* Model Methods 生成
* 具体执行顺序 http://www.mybatis.org/generator/reference/pluggingIn.html
* @param topLevelClass
* @param introspectedTable
* @return
*/
@Override
public boolean modelBaseRecordClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
// 判断是否有生成Model的WithBLOBs类
List<IntrospectedColumn> columns = introspectedTable.getRules().generateRecordWithBLOBsClass() ? introspectedTable.getNonBLOBColumns() : introspectedTable.getAllColumns();
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, columns);
topLevelClass.addInnerClass(innerClass);
return super.modelBaseRecordClassGenerated(topLevelClass, introspectedTable);
}
/**
* Model Methods 生成
* 具体执行顺序 http://www.mybatis.org/generator/reference/pluggingIn.html
* @param topLevelClass
* @param introspectedTable
* @return
*/
@Override
public boolean modelRecordWithBLOBsClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, introspectedTable.getAllColumns());
topLevelClass.addInnerClass(innerClass);
return super.modelRecordWithBLOBsClassGenerated(topLevelClass, introspectedTable);
}
/**
* 具体执行顺序 http://www.mybatis.org/generator/reference/pluggingIn.html
* @param topLevelClass
* @param introspectedTable
* @return
*/
@Override
public boolean modelPrimaryKeyClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, introspectedTable.getPrimaryKeyColumns());
topLevelClass.addInnerClass(innerClass);
return super.modelPrimaryKeyClassGenerated(topLevelClass, introspectedTable);
}
// ------------------------------------------------------- LombokPluginHook -------------------------------------------------------
@Override
public boolean modelBaseRecordBuilderClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
// 判断是否有生成Model的WithBLOBs类
List<IntrospectedColumn> columns = introspectedTable.getRules().generateRecordWithBLOBsClass() ? introspectedTable.getNonBLOBColumns() : introspectedTable.getAllColumns();
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, columns);
topLevelClass.addInnerClass(innerClass);
return true;
}
@Override
public boolean modelPrimaryKeyBuilderClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, introspectedTable.getPrimaryKeyColumns());
topLevelClass.addInnerClass(innerClass);
return true;
}
@Override
public boolean modelRecordWithBLOBsBuilderClassGenerated(TopLevelClass topLevelClass, IntrospectedTable introspectedTable) {
InnerClass innerClass = this.generateModelBuilder(topLevelClass, introspectedTable, introspectedTable.getAllColumns());
topLevelClass.addInnerClass(innerClass);
return true;
}
/**
* 生成ModelBuilder
* @param topLevelClass
* @param introspectedTable
* @param columns
* @return
*/
private InnerClass generateModelBuilder(TopLevelClass topLevelClass, IntrospectedTable introspectedTable, List<IntrospectedColumn> columns) {
// 生成内部Builder类
InnerClass innerClass = new InnerClass(BUILDER_CLASS_NAME);
innerClass.setVisibility(JavaVisibility.PUBLIC);
innerClass.setStatic(true);
if (topLevelClass.getSuperClass() != null) {
innerClass.setSuperClass(topLevelClass.getSuperClass().getShortName() + "." + BUILDER_CLASS_NAME);
}
// 具体执行顺序 http://www.mybatis.org/generator/reference/pluggingIn.html
// 顺序为 key base withBLOBs
InnerTypeFullyQualifiedJavaType builderType = new InnerTypeFullyQualifiedJavaType(topLevelClass.getType().getFullyQualifiedName() + "." + BUILDER_CLASS_NAME);
// 增加静态builder方法实现和lombok一样
Method builder = JavaElementGeneratorTools.generateMethod(
"builder",
JavaVisibility.PUBLIC,
builderType
);
commentGenerator.addGeneralMethodComment(builder, introspectedTable);
builder.setStatic(true);
builder.addBodyLine("return new " + builderType.getShortName() + "();");
FormatTools.addMethodWithBestPosition(topLevelClass, builder);
commentGenerator.addClassComment(innerClass, introspectedTable);
logger.debug("itfsw(数据Model链式构建插件):" + topLevelClass.getType().getShortName() + "增加内部Builder类。");
// 构建内部obj变量
Field f = JavaElementGeneratorTools.generateField("obj", JavaVisibility.PRIVATE, topLevelClass.getType(), null);
commentGenerator.addFieldComment(f, introspectedTable);
innerClass.addField(f);
// 构造构造方法
Method constructor = new Method(BUILDER_CLASS_NAME);
constructor.setVisibility(JavaVisibility.PUBLIC);
constructor.setConstructor(true);
constructor.addBodyLine(new StringBuilder("this.obj = new ").append(topLevelClass.getType().getShortName()).append("();").toString());
commentGenerator.addGeneralMethodComment(constructor, introspectedTable);
FormatTools.addMethodWithBestPosition(innerClass, constructor);
logger.debug("itfsw(数据Model链式构建插件):" + topLevelClass.getType().getShortName() + ".Builder增加的构造方法。");
for (IntrospectedColumn introspectedColumn : columns) {
Field field = JavaBeansUtil.getJavaBeansField(introspectedColumn, context, introspectedTable);
Method setterMethod = JavaBeansUtil.getJavaBeansSetter(introspectedColumn, context, introspectedTable);
Method method = JavaElementGeneratorTools.generateMethod(
field.getName(),
JavaVisibility.PUBLIC,
innerClass.getType(),
new Parameter(field.getType(), field.getName())
);
commentGenerator.addSetterComment(method, introspectedTable, introspectedColumn);
method = JavaElementGeneratorTools.generateMethodBody(
method,
"obj." + setterMethod.getName() + "(" + field.getName() + ");",
"return this;"
);
// hook
if (PluginTools.getHook(IModelBuilderPluginHook.class).modelBuilderSetterMethodGenerated(method, topLevelClass, innerClass, introspectedColumn, introspectedTable)) {
FormatTools.addMethodWithBestPosition(innerClass, method);
logger.debug("itfsw(数据Model链式构建插件):" + topLevelClass.getType().getShortName() + ".Builder增加" + method.getName() + "方法(复合主键)。");
}
}
Method build = JavaElementGeneratorTools.generateMethod(
"build",
JavaVisibility.PUBLIC,
topLevelClass.getType()
);
build.addBodyLine("return this.obj;");
commentGenerator.addGeneralMethodComment(build, introspectedTable);
FormatTools.addMethodWithBestPosition(innerClass, build);
logger.debug("itfsw(数据Model链式构建插件):" + topLevelClass.getType().getShortName() + ".Builder增加build方法。");
// hook
PluginTools.getHook(IModelBuilderPluginHook.class).modelBuilderClassGenerated(topLevelClass, innerClass, columns, introspectedTable);
return innerClass;
}
}
| 3,692 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Roquecor","circ":"2ème circonscription","dpt":"Tarn-et-Garonne","inscrits":281,"abs":122,"votants":159,"blancs":25,"nuls":3,"exp":131,"res":[{"nuance":"RDG","nom":"Mme <NAME>","voix":97},{"nuance":"FN","nom":"<NAME>","voix":34}]} | 117 |
575 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import multiprocessing
import os
import platform
import subprocess
import sys
import common
def is_linux():
return sys.platform.startswith('linux')
def get_free_disk_space(failures):
"""Returns the amount of free space on the current disk, in GiB.
Returns:
The amount of free space on the current disk, measured in GiB.
"""
if os.name == 'posix':
# Stat the current path for info on the current disk.
stat_result = os.statvfs('.')
# Multiply block size by number of free blocks, express in GiB.
return stat_result.f_frsize * stat_result.f_bavail / (1024.0 ** 3)
failures.append('get_free_disk_space: OS %s not supported.' % os.name)
return 0
def get_num_cpus(failures):
"""Returns the number of logical CPUs on this machine.
Returns:
The number of logical CPUs on this machine, or 'unknown' if indeterminate.
"""
try:
return multiprocessing.cpu_count()
except NotImplementedError:
failures.append('get_num_cpus')
return 'unknown'
def get_device_info(args, failures):
"""Parses the device info for each attached device, and returns a summary
of the device info and any mismatches.
Returns:
A dict indicating the result.
"""
if not is_linux():
return {}
with common.temporary_file() as tempfile_path:
test_cmd = [
sys.executable,
os.path.join(args.paths['checkout'],
'third_party',
'catapult',
'devil',
'devil',
'android',
'tools',
'device_status.py'),
'--json-output', tempfile_path,
'--denylist-file', os.path.join(
args.paths['checkout'], 'out', 'bad_devices.json')
]
if args.args:
test_cmd.extend(args.args)
rc = common.run_command(test_cmd)
if rc:
failures.append('device_status')
return {}
with open(tempfile_path, 'r') as src:
device_info = json.load(src)
results = {}
results['devices'] = sorted(v['serial'] for v in device_info)
details = [
v['ro.build.fingerprint'] for v in device_info if not v['denylisted']]
def unique_build_details(index):
return sorted(list(set([v.split(':')[index] for v in details])))
parsed_details = {
'device_names': unique_build_details(0),
'build_versions': unique_build_details(1),
'build_types': unique_build_details(2),
}
for k, v in parsed_details.iteritems():
if len(v) == 1:
results[k] = v[0]
else:
results[k] = 'MISMATCH'
results['%s_list' % k] = v
failures.append(k)
for v in device_info:
if v['denylisted']:
failures.append('Device %s denylisted' % v['serial'])
return results
def main_run(args):
failures = []
host_info = {}
host_info['os_system'] = platform.system()
host_info['os_release'] = platform.release()
host_info['processor'] = platform.processor()
host_info['num_cpus'] = get_num_cpus(failures)
host_info['free_disk_space'] = get_free_disk_space(failures)
host_info['python_version'] = platform.python_version()
host_info['python_path'] = sys.executable
host_info['devices'] = get_device_info(args, failures)
json.dump({
'valid': True,
'failures': failures,
'_host_info': host_info,
}, args.output)
if len(failures) != 0:
return common.INFRA_FAILURE_EXIT_CODE
return 0
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
| 1,528 |
7,886 | <filename>litho-intellij-plugin/src/main/java/com/facebook/litho/intellij/completion/RequiredPropLookupElement.java
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.litho.intellij.completion;
import com.facebook.litho.intellij.extensions.EventLogger;
import com.facebook.litho.intellij.logging.LithoLoggerProvider;
import com.intellij.codeInsight.completion.InsertionContext;
import com.intellij.codeInsight.completion.PrioritizedLookupElement;
import com.intellij.codeInsight.lookup.LookupElement;
import com.intellij.codeInsight.lookup.LookupElementDecorator;
import com.intellij.codeInsight.lookup.LookupElementPresentation;
import java.util.HashMap;
import java.util.Map;
/** Emphasizes the lookup element passed as a delegate by adding "required Prop" tail text. */
class RequiredPropLookupElement extends LookupElementDecorator<LookupElement> {
static RequiredPropLookupElement create(LookupElement delegate, boolean shouldPrioritize) {
if (shouldPrioritize) {
delegate = PrioritizedLookupElement.withPriority(delegate, Integer.MAX_VALUE);
}
return new RequiredPropLookupElement(delegate);
}
private RequiredPropLookupElement(LookupElement delegate) {
super(delegate);
}
@Override
public void renderElement(LookupElementPresentation presentation) {
super.renderElement(presentation);
presentation.appendTailText(" - required Prop", false);
}
@Override
public void handleInsert(InsertionContext context) {
super.handleInsert(context);
final Map<String, String> data = new HashMap<>();
data.put(EventLogger.KEY_TYPE, "required_prop");
data.put(EventLogger.KEY_TARGET, EventLogger.VALUE_COMPLETION_TARGET_CALL);
LithoLoggerProvider.getEventLogger().log(EventLogger.EVENT_COMPLETION, data);
}
}
| 725 |
3,602 | from api.models import *
from workflow import general, direct, direct_list
from core import utils
def parse_special_line(line):
parts = line.split(';;')
jsonl = {}
for part in parts:
key = part.split('|')[0].lower()
value = ''.join(part.split('|')[1:])
jsonl[key] = value
return jsonl
def update_field(default, value, update_type='partial'):
final_value = ''
if update_type.lower() == 'full':
final_value = value
if default == 'N/A' or default == '' or default == 'None':
final_value = value.strip(',')
else:
final_value = default.strip(',') + ',' + value.strip(',')
if ',' in final_value:
results = [x.strip() for x in final_value.split(',')]
final_value = ",".join(list(set(results)))
return final_value
def clean_up(record):
model_fields = ['domain', 'ip_address', 'technologies', 'ports', 'workspace', 'paths', 'screenshot', 'note', 'checksum']
keys = list(record.keys())
for key in keys:
if key not in model_fields:
del record[key]
return record
def parse_summary_field(instance, jsonl, update_type):
# just something we don't want to update
blacklist = ['domain']
record = instance.as_json()
checksum = record.get('checksum')
for key, value in jsonl.items():
if key not in blacklist:
record[key] = update_field(record[key], str(value), update_type)
record = clean_up(record)
updated = Summaries.objects.filter(checksum=checksum).update(**record)
def import_domain_summary(jsonl, workspace, update_type):
# print(jsonl)
domain = jsonl.get('domain', None)
if domain is None:
domain = jsonl.get('ip_address')
instance, created = Summaries.objects.get_or_create(domain=domain, workspace=workspace)
parse_summary_field(instance, jsonl, update_type)
# Summaries part
def parse_domains(line):
if utils.is_json(line.strip()):
jsonl = utils.get_json(line)
elif ';;' in line.strip():
jsonl = parse_special_line(line)
else:
jsonl = {'domain': line.strip()}
return jsonl
# remove report part
def removeReport(speed):
if speed.lower() in 'report':
return True
else:
return False
def clean_input(raw_input, module='general'):
if 'general' in module.lower():
return utils.get_domain(raw_input)
elif 'dir' in module.lower():
return raw_input
def gen_default_config(config_path):
config_path = utils.absolute_path(config_path)
utils.file_copy(utils.TEMPLATE_SERVER_CONFIG, config_path)
configs = utils.just_read_config(config_path, raw=True)
workspaces = utils.join_path(utils.get_parent(
utils.DEAFULT_CONFIG_PATH), 'workspaces')
plugins_path = utils.join_path(utils.ROOT_PATH, 'plugins')
go_path = utils.join_path(utils.ROOT_PATH, 'plugins/go')
data_path = utils.join_path(utils.ROOT_PATH, 'data')
alias_path = utils.join_path(utils.ROOT_PATH, 'lib/alias')
# set some path
configs.set('Enviroments', 'workspaces', workspaces)
configs.set('Enviroments', 'plugins_path', plugins_path)
configs.set('Enviroments', 'data_path', data_path)
configs.set('Enviroments', 'alias_path', alias_path)
configs.set('Enviroments', 'go_path', go_path)
# set some tokens
github_api_key = utils.get_enviroment("GITHUB_API_KEY")
slack_bot_token = utils.get_enviroment("SLACK_BOT_TOKEN")
log_channel = utils.get_enviroment("LOG_CHANNEL")
status_channel = utils.get_enviroment("STATUS_CHANNEL")
report_channel = utils.get_enviroment("REPORT_CHANNEL")
stds_channel = utils.get_enviroment("STDS_CHANNEL")
verbose_report_channel = utils.get_enviroment("VERBOSE_REPORT_CHANNEL")
configs.set('Enviroments', 'github_api_key', github_api_key)
configs.set('Slack', 'slack_bot_token', slack_bot_token)
configs.set('Slack', 'log_channel', log_channel)
configs.set('Slack', 'status_channel', status_channel)
configs.set('Slack', 'report_channel', report_channel)
configs.set('Slack', 'stds_channel', stds_channel)
configs.set('Slack', 'verbose_report_channel', verbose_report_channel)
# monitor mode
backups = utils.join_path(utils.get_parent(
utils.DEAFULT_CONFIG_PATH), 'backups')
utils.make_directory(backups)
monitors = utils.join_path(utils.get_parent(
utils.DEAFULT_CONFIG_PATH), 'monitors')
utils.make_directory(monitors)
configs.set('Monitor', 'monitors', monitors)
configs.set('Monitor', 'backups', backups)
monitor_level = utils.get_enviroment("monitor_level", 'final')
configs.set('Monitor', 'monitor_level', monitor_level)
# monitor bot
slack_monitor_token = utils.get_enviroment("SLACK_MONITOR_TOKEN")
new_channel = utils.get_enviroment("NEW_CHANNEL")
new_name = utils.get_enviroment("NEW_NAME")
missing_channel = utils.get_enviroment("MISSING_CHANNEL")
missing_name = utils.get_enviroment("MISSING_NAME")
configs.set('Monitor', 'slack_monitor_token', slack_monitor_token)
configs.set('Monitor', 'new_channel', new_channel)
configs.set('Monitor', 'new_name', new_name)
configs.set('Monitor', 'missing_channel', missing_channel)
configs.set('Monitor', 'missing_name', missing_name)
# write it again
with open(config_path, 'w+') as configfile:
configs.write(configfile)
# read it again and return
options = utils.just_read_config(config_path)
return options
def load_default_config(config_file=None, forced_reload=False):
if not config_file:
config_file = '~/.osmedeus/server.conf'
options = utils.just_read_config(config_file)
# no config found generate one from default config
if not options:
options = gen_default_config(config_file)
if forced_reload:
options = gen_default_config(config_file)
# looping and adding field to db
for key, value in options.items():
item = {
'name': key,
'value': value,
'alias': key,
'desc': key,
}
instance, created = Configurations.objects.get_or_create(
name=key)
Configurations.objects.filter(name=key).update(**item)
return options
def get_stateless_options(config_file=None):
if config_file:
options = utils.just_read_config(config_file)
else:
raw_options = list(Configurations.objects.values_list('name', 'value'))
options = {}
for item in raw_options:
options[item[0]] = item[1]
return options
# get variable to replace in the command
def get_stateful_options(workspace):
# finding workspace in db
record = Workspaces.objects.filter(workspace=workspace)
if not record.first():
record = Workspaces.objects.filter(target=workspace)
if not record.first():
record = Workspaces.objects.filter(raw_target=workspace)
if not record.first():
return False
stateless_options = get_stateless_options()
# options = record.as_json()
options = {**record.first().as_json(), **stateless_options}
argument_options = {}
# just upper all key
for key in options.keys():
argument_options[key.upper()] = options.get(key)
return argument_options
# @TODO should be done dynamic later
def get_modules(mode='general'):
general = [
'SubdomainScanning',
'Recon',
'ScreenShot',
'TakeOverScanning',
'AssestFinding',
'IPSpace',
'CorsScan',
'PortScan',
'VulnScan'
]
if 'general' in mode.lower():
return ','.join(general)
# really parse command from classes
def really_commands(mode):
modules = utils.get_classes('workflow.{0}'.format(mode))
for module in modules:
# get RCE if you can edit general file in workflow folder :)
module_name = module[0].strip()
module_object = eval('{0}.{1}'.format(mode, module_name))
# parsing commands
try:
routines = module_object.commands
except:
continue
for routine, commands in routines.items():
for command in commands:
item = command
item['mode'] = mode
item['speed'] = routine
item['module'] = module_name
item['alias'] = module_name + "__" + routine.lower() + "__" + \
str(item.get('banner')).lower()
Commands.objects.create(**item)
reports = module_object.reports
parse_report(reports, module_name, mode)
def internal_parse_commands(override=True):
if override:
Commands.objects.all().delete()
ReportsSkeleton.objects.all().delete()
really_commands('general')
really_commands('direct')
really_commands('direct_list')
def parse_report(reports, module, mode):
if type(reports) == str:
item = {
'report_path': reports,
'report_type': 'bash',
'module': module,
'mode': mode,
}
ReportsSkeleton.objects.create(**item)
elif type(reports) == list:
for report in reports:
item = {
'report_path': report.get('path'),
'report_type': report.get('type', 'bash'),
'note': report.get('note', ''),
'module': module,
'mode': mode,
}
ReportsSkeleton.objects.create(**item)
# parsing skeleton commands
def parse_commands(command_path):
if not utils.not_empty_file(command_path):
return False
content = utils.just_read(command_path, get_json=True)
if not content:
return False
modules = content.keys()
for module in modules:
for speed, values in content.get(module).items():
if speed.lower() == 'report':
parse_report(values, module)
else:
for value in values:
if not value.get('cmd'):
continue
item = {
'cmd': value.get('cmd'),
'output_path': value.get('output_path'),
'std_path': value.get('std_path'),
'banner': str(value.get('banner')),
'module': module,
'cmd_type': value.get('cmd_type') if value.get('cmd_type') else 'single',
'speed': speed.lower(),
'alias': module + "__" + speed.lower() + "__" + str(value.get('banner')).lower(),
'chunk': value.get('chunk') if value.get('chunk') else 0,
}
Commands.objects.create(**item)
# print(modules)
return True
| 4,667 |
6,557 | {
"dragDescriptionKeyboard": "Sürüklemeyi başlatmak için Enter'a basın.",
"dragDescriptionTouch": "Sürüklemeyi başlatmak için çift tıklayın.",
"dragDescriptionVirtual": "Sürüklemeyi başlatmak için tıklayın.",
"dragItem": "{itemText}’i sürükle",
"dragSelectedItems": "Sürükle {count, plural, one {# seçili öge} other {# seçili öge}}",
"dragStartedKeyboard": "Sürükleme başlatıldı. Bir bırakma hedefine gitmek için Tab’a basın, ardından bırakmak için Enter’a basın veya iptal etmek için Escape’e basın.",
"dragStartedTouch": "Sürükleme başlatıldı. Bir bırakma hedefine gidin, ardından bırakmak için çift tıklayın.",
"dragStartedVirtual": "Sürükleme başlatıldı. Bir bırakma hedefine gidin, ardından bırakmak için Enter’a tıklayın veya basın.",
"dropCanceled": "Bırakma iptal edildi.",
"dropComplete": "Bırakma tamamlandı.",
"dropDescriptionKeyboard": "Bırakmak için Enter'a basın. Sürüklemeyi iptal etmek için Escape'e basın.",
"dropDescriptionTouch": "Bırakmak için çift tıklayın.",
"dropDescriptionVirtual": "Bırakmak için tıklayın.",
"dropIndicator": "bırakma göstergesi",
"dropOnItem": "{itemText} üzerine bırak",
"dropOnRoot": "Bırakın",
"endDragKeyboard": "Sürükleme. Sürüklemeyi iptal etmek için Enter'a basın.",
"endDragTouch": "Sürükleme. Sürüklemeyi iptal etmek için çift tıklayın.",
"endDragVirtual": "Sürükleme. Sürüklemeyi iptal etmek için tıklayın.",
"insertAfter": "{itemText}’den sonra gir",
"insertBefore": "{itemText}’den önce gir",
"insertBetween": "{beforeItemText} ve {afterItemText} arasına gir"
}
| 722 |
852 | <reponame>ckamtsikis/cmssw
///
/// \class l1t::Stage1Layer2HFRingSumAlgorithm
///
/// Description: interface for MP firmware
///
/// Implementation:
///
/// \author: <NAME>
///
//
#ifndef Stage1Layer2HFRingSumAlgorithm_h
#define Stage1Layer2HFRingSumAlgorithm_h
#include "DataFormats/L1TCalorimeter/interface/CaloRegion.h"
#include "DataFormats/L1TCalorimeter/interface/CaloEmCand.h"
#include "DataFormats/L1Trigger/interface/CaloSpare.h"
#include "DataFormats/L1Trigger/interface/Tau.h"
#include <vector>
namespace l1t {
class Stage1Layer2HFRingSumAlgorithm {
public:
virtual void processEvent(const std::vector<l1t::CaloRegion>& regions,
const std::vector<l1t::CaloEmCand>& EMCands,
const std::vector<l1t::Tau>* taus,
l1t::CaloSpare* spare) = 0;
virtual ~Stage1Layer2HFRingSumAlgorithm(){};
};
} // namespace l1t
#endif
| 418 |
2,111 | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/basic/types.h>
#include <nvbio/basic/simd.h>
namespace nvbio {
namespace aln {
///
///@addtogroup Alignment
///@{
///
///
///@addtogroup AlignmentSink Alignment Sinks
/// An alignment sink is an object passed to alignment functions to handle
/// the terminal (cell,score)- pairs of all valid alignments.
/// A particular sink might decide to discard or store all such alignments, while
/// another might decide to store only the best, or the best N, and so on.
///@{
///
///
/// A no-op sink for valid alignments
///
struct NullSink
{
    /// accept and discard a terminal (cell,score)-pair
    ///
    /// \param score    score of the reported alignment (ignored)
    /// \param cell     terminal cell of the reported alignment (ignored)
    ///
    template <typename ScoreType>
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void report(const ScoreType score, const uint2 cell) {}
};
///
/// A sink for valid alignments, maintaining only a single best alignment
///
template <typename ScoreType>
struct BestSink
{
    /// constructor — initializes the sink to an invalid state
    /// (definition in sink_inl.h)
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    BestSink();

    /// invalidate the currently stored alignment
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void invalidate();

    /// store a valid alignment, keeping only the single best one seen so far
    ///
    /// \param _score alignment's score
    /// \param _sink alignment's end
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void report(const ScoreType _score, const uint2 _sink);

    ScoreType score;    // best score reported so far
    uint2     sink;     // terminal cell of the best alignment
};
///
/// A sink for valid alignments, maintaining only a single best alignment
///
template <>
struct BestSink<simd4u8>
{
    /// constructor — starts from an all-zero packed score
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    BestSink() : score( uint32(0) ) {}

    /// fold a new packed score into the running maximum
    /// (as defined by nvbio::max for simd4u8)
    ///
    /// \param new_score score of the reported alignment
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void report(const simd4u8 new_score) { score = nvbio::max( score, new_score ); }

    simd4u8 score;  // running maximum of all reported scores
};
///
/// A sink for valid alignments, maintaining the best two alignments
///
template <typename ScoreType>
struct Best2Sink
{
    /// constructor
    ///
    /// \param distinct_dist the minimum text distance to consider two alignments distinct
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    Best2Sink(const uint32 distinct_dist = 0);

    /// invalidate both stored alignments
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void invalidate();

    /// store a valid alignment, tracking the best two
    /// (definition in sink_inl.h)
    ///
    /// \param score alignment's score
    /// \param sink alignment's end
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void report(const ScoreType score, const uint2 sink);

    ScoreType score1;   // best score seen so far
    ScoreType score2;   // second-best score seen so far
    uint2     sink1;    // terminal cell of the best alignment
    uint2     sink2;    // terminal cell of the second-best alignment

private:
    uint32 m_distinct_dist;  // minimum text distance between distinct alignments
};
///
/// A sink for valid alignments, mantaining the best alignments by "column",
/// where columns have a specified width
///
template <typename ScoreType, uint32 N = 16>
struct BestColumnSink
{
    /// constructor
    ///
    /// \param column_width   the width of each text column
    /// \param min_score      minimum score required for an alignment to be stored
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    BestColumnSink(const uint32 column_width = 50, const ScoreType min_score = Field_traits<ScoreType>::min());

    /// reset column width
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void set_column_width(const uint32 column_width) { m_column_width = column_width; }

    /// reset minimum score
    ///
    /// NOTE: the parameter was previously declared as uint32, which silently
    /// converted negative or non-integral ScoreType thresholds; it now matches
    /// the type of m_min_score. Integral callers are unaffected (implicit
    /// conversion still applies).
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void set_min_score(const ScoreType min_score) { m_min_score = min_score; }

    /// invalidate all stored alignments
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void invalidate();

    /// store a valid alignment
    ///
    /// \param score alignment's score
    /// \param sink alignment's end
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void report(const ScoreType score, const uint2 sink);

    /// return the index of the best and second-best alignments
    ///
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void best2(uint32& i1, uint32& i2, const uint32 min_dist) const;

    ScoreType scores[N+1];  // per-column best scores (last slot: out-of-range)
    uint2     sinks[N+1];   // per-column terminal cells

private:
    uint32    m_column_width;  // width of each text column
    ScoreType m_min_score;     // minimum score for an alignment to be stored
};
///@} // end of the AlignmentSink group
///@} // end Alignment group
} // namespace aln
} // namespace nvbio
#include <nvbio/alignment/sink_inl.h>
| 2,033 |
1,225 | package com.d.ui.view.advertswitcher;
import android.annotation.TargetApi;
import android.app.Activity;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.view.View;
import com.d.lib.common.util.ViewHelper;
import com.d.lib.ui.view.advertswitcher.AdvertSwitcher;
import com.d.ui.view.R;
import java.util.ArrayList;
import java.util.List;
@TargetApi(Build.VERSION_CODES.HONEYCOMB)
public class AdvertSwitcherActivity extends Activity implements View.OnClickListener {

    /** Switcher cycling through text adverts. */
    private AdvertSwitcher textSwitcher;
    /** Switcher cycling through image adverts. */
    private AdvertSwitcher imageSwitcher;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_advert);
        bindView();
        init();
    }

    /** Resolves view references and wires up the title-bar back button. */
    private void bindView() {
        textSwitcher = ViewHelper.findViewById(this, R.id.as_advert_text);
        imageSwitcher = ViewHelper.findViewById(this, R.id.as_advert_img);
        ViewHelper.setOnClickListener(this, this, R.id.iv_title_left);
    }

    /** Creates one adapter per switcher and attaches them. */
    private void init() {
        final AdvertSwitcherTextAdapter textAdapter =
                new AdvertSwitcherTextAdapter(this, getTextDatas(), R.layout.adapter_advert_text);
        textSwitcher.setAdapter(textAdapter);
        textAdapter.notifyDataSetChanged();

        final AdvertSwitcherImgAdapter imgAdapter =
                new AdvertSwitcherImgAdapter(this, getImgDatas(), R.layout.adapter_advert_img);
        imageSwitcher.setAdapter(imgAdapter);
        imgAdapter.notifyDataSetChanged();
    }

    @Override
    public void onClick(View v) {
        if (v.getId() == R.id.iv_title_left) {
            finish();
        }
    }

    @Override
    protected void onResume() {
        super.onResume();
        // Only animate while the activity is in the foreground.
        textSwitcher.start();
        imageSwitcher.start();
    }

    @Override
    protected void onPause() {
        super.onPause();
        textSwitcher.stop();
        imageSwitcher.stop();
    }

    /** @return demo data for the text switcher. */
    private List<AdvertSwitcherBean> getTextDatas() {
        final List<AdvertSwitcherBean> items = new ArrayList<>();
        for (int i = 0; i < 6; i++) {
            items.add(new AdvertSwitcherBean("Hot " + i, "Popular events!!!", 0));
        }
        return items;
    }

    /** @return demo data for the image switcher. */
    private List<AdvertSwitcherBean> getImgDatas() {
        final List<AdvertSwitcherBean> items = new ArrayList<>();
        for (int i = 0; i < 16; i++) {
            items.add(new AdvertSwitcherBean("" + i, i + ". Promotions", R.drawable.lib_pub_ic_btb_icon));
        }
        return items;
    }
}
| 1,102 |
4,283 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.spi.impl;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.serialization.InternalSerializationService;
import com.hazelcast.internal.serialization.impl.DefaultSerializationServiceBuilder;
import com.hazelcast.test.HazelcastParallelClassRunner;
import com.hazelcast.test.annotation.ParallelJVMTest;
import com.hazelcast.test.annotation.QuickTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static com.hazelcast.spi.impl.InternalCompletableFuture.completedExceptionally;
import static com.hazelcast.spi.impl.InternalCompletableFuture.newCompletedFuture;
import static com.hazelcast.test.HazelcastTestSupport.assertOpenEventually;
import static com.hazelcast.test.HazelcastTestSupport.assertTrueEventually;
import static com.hazelcast.test.HazelcastTestSupport.sleepSeconds;
import static java.util.concurrent.CompletableFuture.completedFuture;
import static java.util.concurrent.CompletableFuture.supplyAsync;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
@RunWith(HazelcastParallelClassRunner.class)
@Category({QuickTest.class, ParallelJVMTest.class})
public class DelegatingCompletableFutureTest {

    // Serialization service used to produce Data payloads; the future under
    // test is expected to deserialize Data completion values on access.
    private final InternalSerializationService serializationService
            = new DefaultSerializationServiceBuilder().build();

    @Test
    public void test_get_Object() throws Exception {
        // A plain (non-Data) completion value is returned as-is.
        Object value = "value";
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(value));
        assertEquals(value, future.get());
    }

    @Test
    public void test_get_withDefault() throws Exception {
        // When the delegate completes with null, the configured default is returned.
        String defaultValue = "defaultValue";
        Future<String> future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(null), defaultValue);
        assertSame(defaultValue, future.get());
    }

    @Test
    public void test_get_Data() throws Exception {
        // A Data completion value is deserialized back to the original object.
        Object value = "value";
        Data data = serializationService.toData(value);
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(data));
        assertEquals(value, future.get());
    }

    @Test
    public void test_get_whenData_andMultipleTimesInvoked_thenSameInstanceReturned() throws Exception {
        // Deserialization happens once; repeated get() returns the cached instance.
        Object value = "value";
        Data data = serializationService.toData(value);
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(data));
        Object result1 = future.get();
        Object result2 = future.get();
        assertSame(result1, result2);
    }

    @Test
    public void test_get_Object_withTimeout() throws Exception {
        Object value = "value";
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(value));
        assertEquals(value, future.get(1, TimeUnit.MILLISECONDS));
    }

    @Test
    public void test_get_Data_withTimeout() throws Exception {
        Object value = "value";
        Data data = serializationService.toData(value);
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(data));
        assertEquals(value, future.get(1, TimeUnit.MILLISECONDS));
    }

    @Test(expected = ExecutionException.class)
    public void test_get_Exception() throws Exception {
        // An exceptionally-completed delegate surfaces as ExecutionException on get().
        Throwable error = new Throwable();
        Future future = new DelegatingCompletableFuture(serializationService, completedExceptionally(error));
        future.get();
    }

    @Test
    public void test_cancel() {
        // An already-completed future cannot be cancelled.
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture(null));
        assertFalse(future.cancel(true));
        assertFalse(future.isCancelled());
    }

    @Test
    public void test_isDone() {
        Future future = new DelegatingCompletableFuture(serializationService, newCompletedFuture("value"));
        assertTrue(future.isDone());
    }

    @Test
    public void test_actionsTrigger_whenAlreadyCompletedFuture() {
        // Dependent actions registered after completion must still run.
        CountDownLatch latch = new CountDownLatch(1);
        CompletableFuture<String> future =
                new DelegatingCompletableFuture(serializationService, newCompletedFuture("value"));
        future.thenRun(latch::countDown);
        assertOpenEventually(latch);
    }

    @Test
    public void testInteroperability1() {
        // given f1 (JDK CompletableFuture), is completed asynchronously after 2 seconds
        //       f2 (DelegatingCompletableFuture wrapping f1) returned from thenCompose
        // then  f2 is completed eventually
        CompletableFuture f1 = CompletableFuture.runAsync(() -> sleepSeconds(2));
        CompletableFuture f2 = CompletableFuture.completedFuture(null).thenCompose(v -> {
            return new DelegatingCompletableFuture<>(serializationService, f1);
        });
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
    }

    @Test
    public void testInteroperability2() {
        // given f1 (JDK CompletableFuture), is already completed
        //       f2 (DelegatingCompletableFuture wrapping f1) returned from thenCompose
        // then  f2 is completed eventually
        CompletableFuture f1 = CompletableFuture.completedFuture(null);
        CompletableFuture f2 = CompletableFuture.completedFuture(null).thenCompose(v ->
                new DelegatingCompletableFuture<>(serializationService, f1)
        );
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
    }

    @Test
    public void testInteroperability3() {
        // given f1 (DelegatingCompletableFuture wrapping completed future)
        //       f2 (JDK CompletableFuture) returned from thenCompose
        // then  f2 is completed eventually
        CompletableFuture f1 = new DelegatingCompletableFuture<>(serializationService, completedFuture(null));
        CompletableFuture f2 = f1.thenCompose(v -> CompletableFuture.runAsync(() -> sleepSeconds(2)));
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
    }

    @Test
    public void testInteroperability_withValueDeserializationInFunction() {
        // given f1 (DelegatingCompletableFuture wrapping completed future with Data as value)
        //       f2 (JDK CompletableFuture) returned from thenCompose
        // then  argument in compose function is deserialized
        //       f2 is completed eventually
        String value = "test";
        Data valueData = serializationService.toData(value);
        CompletableFuture f1 = new DelegatingCompletableFuture<>(serializationService, completedFuture(valueData));
        CompletableFuture f2 = f1.thenCompose(v -> {
            assertEquals(value, v);
            return CompletableFuture.runAsync(() -> sleepSeconds(2));
        });
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
        assertEquals(value, f1.join());
    }

    @Test
    public void testInteroperability_withValueDeserializationInCompletedReturnedFuture() {
        // given f1 (DelegatingCompletableFuture wrapping completed future)
        //       f2 (JDK CompletableFuture) returned from thenCompose
        // then  f2 is completed eventually
        //       completion value of f2 is deserialized
        String value = "test";
        Data valueData = serializationService.toData(value);
        CompletableFuture f1 = completedFuture(null);
        CompletableFuture<String> f2 = f1.thenCompose(v ->
                new DelegatingCompletableFuture<String>(serializationService, completedFuture(valueData))
        );
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
        assertEquals(value, f2.join());
    }

    @Test
    public void testInteroperability_withValueDeserializationInReturnedFuture() {
        // given f1 (DelegatingCompletableFuture wrapping completed future)
        //       f2 (JDK CompletableFuture) returned from thenCompose
        // then  f2 is completed eventually
        //       completion value of f2 is deserialized
        String value = "test";
        Data valueData = serializationService.toData(value);
        CompletableFuture f1 = completedFuture(null);
        CompletableFuture<String> f2 = f1.thenCompose(v ->
                new DelegatingCompletableFuture<String>(serializationService, supplyAsync(() -> {
                    sleepSeconds(2);
                    return valueData;
                }))
        );
        assertTrueEventually(() -> assertTrue(f2.isDone() && !f2.isCompletedExceptionally()));
        assertEquals(value, f2.join());
    }
}
| 3,405 |
570 | #import <UIKit/UIKit.h>
FOUNDATION_EXPORT double Pods_TabDrawer_TestsVersionNumber;
FOUNDATION_EXPORT const unsigned char Pods_TabDrawer_TestsVersionString[];
| 56 |
2,151 | /*
* This file is part of Wireless Display Software for Linux OS
*
* Copyright (C) 2014 Intel Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include "libwds/source/cap_negotiation_state.h"
#include "libwds/rtsp/audiocodecs.h"
#include "libwds/rtsp/clientrtpports.h"
#include "libwds/rtsp/getparameter.h"
#include "libwds/rtsp/payload.h"
#include "libwds/rtsp/presentationurl.h"
#include "libwds/rtsp/reply.h"
#include "libwds/rtsp/setparameter.h"
#include "libwds/rtsp/videoformats.h"
#include "libwds/public/media_manager.h"
namespace wds {
using rtsp::AudioCodecs;
using rtsp::ClientRtpPorts;
using rtsp::GetParameter;
using rtsp::Message;
using rtsp::Payload;
using rtsp::Property;
using rtsp::Request;
using rtsp::Reply;
using rtsp::SetParameter;
using rtsp::VideoFormats;
namespace source {
// Handles the M3 step of capability negotiation: sends a GET_PARAMETER
// request querying the sink's capabilities and records the answer.
class M3Handler final : public SequencedMessageSender {
 public:
    using SequencedMessageSender::SequencedMessageSender;
 private:
    std::unique_ptr<Message> CreateMessage() override;
    bool HandleReply(Reply* reply) override;
};
// Handles the M4 step of capability negotiation: sends a SET_PARAMETER
// request announcing the parameters chosen for the session.
class M4Handler final : public SequencedMessageSender {
 public:
    using SequencedMessageSender::SequencedMessageSender;
 private:
    std::unique_ptr<Message> CreateMessage() override;
    bool HandleReply(Reply* reply) override;
};
std::unique_ptr<Message> M3Handler::CreateMessage() {
GetParameter* get_param = new GetParameter("rtsp://localhost/wfd1.0");
get_param->header().set_cseq(sender_->GetNextCSeq());
std::vector<std::string> props;
SessionType media_type = ToSourceMediaManager(manager_)->GetSessionType();
if (media_type & VideoSession)
props.push_back("wfd_video_formats");
if (media_type & AudioSession)
props.push_back("wfd_audio_codecs");
props.push_back("wfd_client_rtp_ports");
get_param->set_payload(
std::unique_ptr<Payload>(new rtsp::GetParameterPayload(props)));
return std::unique_ptr<Message>(get_param);
}
bool M3Handler::HandleReply(Reply* reply) {
  // Parse the sink's M3 response and record its capabilities on the source
  // media manager. Any missing or invalid mandatory property aborts the
  // capability-negotiation sequence by returning false.
  if (reply->response_code() != rtsp::STATUS_OK)
    return false;

  SourceMediaManager* source_manager = ToSourceMediaManager(manager_);
  auto payload = ToPropertyMapPayload(reply->payload());
  if (!payload){
    WDS_ERROR("Failed to obtain payload from reply.");
    return false;
  }

  // NOTE(review): GetProperty() appears to return a shared_ptr copy of the
  // property stored inside |payload|; the raw pointers extracted below are
  // assumed to stay valid while |payload| is alive — confirm in payload.h.
  auto property = payload->GetProperty(rtsp::ClientRTPPortsPropertyType);
  auto ports = static_cast<ClientRtpPorts*>(property.get());
  if (!ports){
    WDS_ERROR("Failed to obtain RTP ports from source.");
    return false;
  }
  source_manager->SetSinkRtpPorts(ports->rtp_port_0(), ports->rtp_port_1());

  auto video_formats = static_cast<VideoFormats*>(
      payload->GetProperty(rtsp::VideoFormatsPropertyType).get());
  auto audio_codecs = static_cast<AudioCodecs*>(
      payload->GetProperty(rtsp::AudioCodecsPropertyType).get());

  // Each capability list is mandatory only for the matching session type.
  if (!video_formats && (source_manager->GetSessionType() & VideoSession)) {
    WDS_ERROR("Failed to obtain WFD_VIDEO_FORMATS property");
    return false;
  }
  if (!audio_codecs && (source_manager->GetSessionType() & AudioSession)) {
    WDS_ERROR("Failed to obtain WFD_AUDIO_CODECS property");
    return false;
  }

  if (video_formats && !source_manager->InitOptimalVideoFormat(
      video_formats->GetNativeFormat(),
      video_formats->GetH264VideoCodecs())) {
    WDS_ERROR("Cannot initalize optimal video format from the supported by sink.");
    return false;
  }
  if (audio_codecs && !source_manager->InitOptimalAudioFormat(
      audio_codecs->audio_codecs())) {
    WDS_ERROR("Cannot initalize optimal audio format from the supported by sink.");
    return false;
  }

  return true;
}
std::unique_ptr<Message> M4Handler::CreateMessage() {
SetParameter* set_param = new SetParameter("rtsp://localhost/wfd1.0");
set_param->header().set_cseq(sender_->GetNextCSeq());
SourceMediaManager* source_manager = ToSourceMediaManager(manager_);
const auto& ports = source_manager->GetSinkRtpPorts();
auto payload = new rtsp::PropertyMapPayload();
payload->AddProperty(
std::shared_ptr<Property>(new ClientRtpPorts(ports.first, ports.second)));
std::string presentation_Url_1 = "rtsp://" + sender_->GetLocalIPAddress() + "/wfd1.0/streamid=0";
payload->AddProperty(
std::shared_ptr<Property>(new rtsp::PresentationUrl(presentation_Url_1, "")));
if (source_manager->GetSessionType() & VideoSession) {
payload->AddProperty(
std::shared_ptr<VideoFormats>(new VideoFormats(
NativeVideoFormat(), // Should be all zeros.
false,
{source_manager->GetOptimalVideoFormat()})));
}
if (source_manager->GetSessionType() & AudioSession) {
payload->AddProperty(
std::shared_ptr<AudioCodecs>(new AudioCodecs({source_manager->GetOptimalAudioFormat()})));
}
set_param->set_payload(std::unique_ptr<Payload>(payload));
return std::unique_ptr<Message>(set_param);
}
bool M4Handler::HandleReply(Reply* reply) {
  // M4 carries no payload of interest; success is simply an RTSP 200 OK.
  return (reply->response_code() == rtsp::STATUS_OK);
}
CapNegotiationState::CapNegotiationState(const InitParams &init_params)
  : MessageSequenceHandler(init_params) {
  // Capability negotiation is the fixed M3 -> M4 message sequence.
  AddSequencedHandler(make_ptr(new M3Handler(init_params)));
  AddSequencedHandler(make_ptr(new M4Handler(init_params)));
}

CapNegotiationState::~CapNegotiationState() {
}
} // namespace source
} // namespace wds
| 2,055 |
3,372 | /*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.healthlake;
import javax.annotation.Generated;
import com.amazonaws.services.healthlake.model.*;
import com.amazonaws.client.AwsAsyncClientParams;
import com.amazonaws.annotation.ThreadSafe;
import java.util.concurrent.ExecutorService;
/**
* Client for accessing HealthLake asynchronously. Each asynchronous method will return a Java Future object
* representing the asynchronous operation; overloads which accept an {@code AsyncHandler} can be used to receive
* notification when an asynchronous operation completes.
* <p>
* <p>
* Amazon HealthLake is a HIPAA eligibile service that allows customers to store, transform, query, and analyze their
* FHIR-formatted data in a consistent fashion in the cloud.
* </p>
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonHealthLakeAsyncClient extends AmazonHealthLakeClient implements AmazonHealthLakeAsync {
    // NOTE(review): not referenced in this part of the file; presumably used
    // by the builder when no custom executor is supplied — confirm.
    private static final int DEFAULT_THREAD_POOL_SIZE = 50;

    // Executor on which every asynchronous request of this client is run.
    private final java.util.concurrent.ExecutorService executorService;
    /** @return a new fluent builder for constructing {@code AmazonHealthLakeAsyncClient} instances. */
    public static AmazonHealthLakeAsyncClientBuilder asyncBuilder() {
        return AmazonHealthLakeAsyncClientBuilder.standard();
    }
    /**
     * Constructs a new asynchronous client to invoke service methods on HealthLake using the specified parameters.
     * Endpoint discovery is disabled.
     *
     * @param asyncClientParams
     *        Object providing client parameters.
     */
    AmazonHealthLakeAsyncClient(AwsAsyncClientParams asyncClientParams) {
        this(asyncClientParams, false);
    }
    /**
     * Constructs a new asynchronous client to invoke service methods on HealthLake using the specified parameters.
     *
     * @param asyncClientParams
     *        Object providing client parameters.
     * @param endpointDiscoveryEnabled
     *        true will enable endpoint discovery if the service supports it.
     */
    AmazonHealthLakeAsyncClient(AwsAsyncClientParams asyncClientParams, boolean endpointDiscoveryEnabled) {
        super(asyncClientParams, endpointDiscoveryEnabled);
        // The executor is taken from the builder-configured client params.
        this.executorService = asyncClientParams.getExecutor();
    }
    /**
     * Returns the executor service used by this client to execute async requests.
     *
     * @return The executor service used by this client to execute async requests.
     */
    public ExecutorService getExecutorService() {
        return executorService;
    }
    /** Submits a CreateFHIRDatastore call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<CreateFHIRDatastoreResult> createFHIRDatastoreAsync(CreateFHIRDatastoreRequest request) {
        return createFHIRDatastoreAsync(request, null);
    }

    /** Submits a CreateFHIRDatastore call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<CreateFHIRDatastoreResult> createFHIRDatastoreAsync(final CreateFHIRDatastoreRequest request,
            final com.amazonaws.handlers.AsyncHandler<CreateFHIRDatastoreRequest, CreateFHIRDatastoreResult> asyncHandler) {
        final CreateFHIRDatastoreRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<CreateFHIRDatastoreResult>() {
            @Override
            public CreateFHIRDatastoreResult call() throws Exception {
                CreateFHIRDatastoreResult result = null;

                try {
                    result = executeCreateFHIRDatastore(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a DeleteFHIRDatastore call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<DeleteFHIRDatastoreResult> deleteFHIRDatastoreAsync(DeleteFHIRDatastoreRequest request) {
        return deleteFHIRDatastoreAsync(request, null);
    }

    /** Submits a DeleteFHIRDatastore call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<DeleteFHIRDatastoreResult> deleteFHIRDatastoreAsync(final DeleteFHIRDatastoreRequest request,
            final com.amazonaws.handlers.AsyncHandler<DeleteFHIRDatastoreRequest, DeleteFHIRDatastoreResult> asyncHandler) {
        final DeleteFHIRDatastoreRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<DeleteFHIRDatastoreResult>() {
            @Override
            public DeleteFHIRDatastoreResult call() throws Exception {
                DeleteFHIRDatastoreResult result = null;

                try {
                    result = executeDeleteFHIRDatastore(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a DescribeFHIRDatastore call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRDatastoreResult> describeFHIRDatastoreAsync(DescribeFHIRDatastoreRequest request) {
        return describeFHIRDatastoreAsync(request, null);
    }

    /** Submits a DescribeFHIRDatastore call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRDatastoreResult> describeFHIRDatastoreAsync(final DescribeFHIRDatastoreRequest request,
            final com.amazonaws.handlers.AsyncHandler<DescribeFHIRDatastoreRequest, DescribeFHIRDatastoreResult> asyncHandler) {
        final DescribeFHIRDatastoreRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<DescribeFHIRDatastoreResult>() {
            @Override
            public DescribeFHIRDatastoreResult call() throws Exception {
                DescribeFHIRDatastoreResult result = null;

                try {
                    result = executeDescribeFHIRDatastore(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a DescribeFHIRExportJob call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRExportJobResult> describeFHIRExportJobAsync(DescribeFHIRExportJobRequest request) {
        return describeFHIRExportJobAsync(request, null);
    }

    /** Submits a DescribeFHIRExportJob call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRExportJobResult> describeFHIRExportJobAsync(final DescribeFHIRExportJobRequest request,
            final com.amazonaws.handlers.AsyncHandler<DescribeFHIRExportJobRequest, DescribeFHIRExportJobResult> asyncHandler) {
        final DescribeFHIRExportJobRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<DescribeFHIRExportJobResult>() {
            @Override
            public DescribeFHIRExportJobResult call() throws Exception {
                DescribeFHIRExportJobResult result = null;

                try {
                    result = executeDescribeFHIRExportJob(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a DescribeFHIRImportJob call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRImportJobResult> describeFHIRImportJobAsync(DescribeFHIRImportJobRequest request) {
        return describeFHIRImportJobAsync(request, null);
    }

    /** Submits a DescribeFHIRImportJob call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<DescribeFHIRImportJobResult> describeFHIRImportJobAsync(final DescribeFHIRImportJobRequest request,
            final com.amazonaws.handlers.AsyncHandler<DescribeFHIRImportJobRequest, DescribeFHIRImportJobResult> asyncHandler) {
        final DescribeFHIRImportJobRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<DescribeFHIRImportJobResult>() {
            @Override
            public DescribeFHIRImportJobResult call() throws Exception {
                DescribeFHIRImportJobResult result = null;

                try {
                    result = executeDescribeFHIRImportJob(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a ListFHIRDatastores call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<ListFHIRDatastoresResult> listFHIRDatastoresAsync(ListFHIRDatastoresRequest request) {
        return listFHIRDatastoresAsync(request, null);
    }

    /** Submits a ListFHIRDatastores call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<ListFHIRDatastoresResult> listFHIRDatastoresAsync(final ListFHIRDatastoresRequest request,
            final com.amazonaws.handlers.AsyncHandler<ListFHIRDatastoresRequest, ListFHIRDatastoresResult> asyncHandler) {
        final ListFHIRDatastoresRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<ListFHIRDatastoresResult>() {
            @Override
            public ListFHIRDatastoresResult call() throws Exception {
                ListFHIRDatastoresResult result = null;

                try {
                    result = executeListFHIRDatastores(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a ListFHIRExportJobs call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<ListFHIRExportJobsResult> listFHIRExportJobsAsync(ListFHIRExportJobsRequest request) {
        return listFHIRExportJobsAsync(request, null);
    }

    /** Submits a ListFHIRExportJobs call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<ListFHIRExportJobsResult> listFHIRExportJobsAsync(final ListFHIRExportJobsRequest request,
            final com.amazonaws.handlers.AsyncHandler<ListFHIRExportJobsRequest, ListFHIRExportJobsResult> asyncHandler) {
        final ListFHIRExportJobsRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<ListFHIRExportJobsResult>() {
            @Override
            public ListFHIRExportJobsResult call() throws Exception {
                ListFHIRExportJobsResult result = null;

                try {
                    result = executeListFHIRExportJobs(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a ListFHIRImportJobs call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<ListFHIRImportJobsResult> listFHIRImportJobsAsync(ListFHIRImportJobsRequest request) {
        return listFHIRImportJobsAsync(request, null);
    }

    /** Submits a ListFHIRImportJobs call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<ListFHIRImportJobsResult> listFHIRImportJobsAsync(final ListFHIRImportJobsRequest request,
            final com.amazonaws.handlers.AsyncHandler<ListFHIRImportJobsRequest, ListFHIRImportJobsResult> asyncHandler) {
        final ListFHIRImportJobsRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<ListFHIRImportJobsResult>() {
            @Override
            public ListFHIRImportJobsResult call() throws Exception {
                ListFHIRImportJobsResult result = null;

                try {
                    result = executeListFHIRImportJobs(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a ListTagsForResource call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<ListTagsForResourceResult> listTagsForResourceAsync(ListTagsForResourceRequest request) {
        return listTagsForResourceAsync(request, null);
    }

    /** Submits a ListTagsForResource call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<ListTagsForResourceResult> listTagsForResourceAsync(final ListTagsForResourceRequest request,
            final com.amazonaws.handlers.AsyncHandler<ListTagsForResourceRequest, ListTagsForResourceResult> asyncHandler) {
        final ListTagsForResourceRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<ListTagsForResourceResult>() {
            @Override
            public ListTagsForResourceResult call() throws Exception {
                ListTagsForResourceResult result = null;

                try {
                    result = executeListTagsForResource(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
    /** Submits a StartFHIRExportJob call on the client executor; no completion callback. */
    @Override
    public java.util.concurrent.Future<StartFHIRExportJobResult> startFHIRExportJobAsync(StartFHIRExportJobRequest request) {
        return startFHIRExportJobAsync(request, null);
    }

    /** Submits a StartFHIRExportJob call on the client executor, invoking {@code asyncHandler} (if non-null) with the outcome. */
    @Override
    public java.util.concurrent.Future<StartFHIRExportJobResult> startFHIRExportJobAsync(final StartFHIRExportJobRequest request,
            final com.amazonaws.handlers.AsyncHandler<StartFHIRExportJobRequest, StartFHIRExportJobResult> asyncHandler) {
        final StartFHIRExportJobRequest finalRequest = beforeClientExecution(request);

        return executorService.submit(new java.util.concurrent.Callable<StartFHIRExportJobResult>() {
            @Override
            public StartFHIRExportJobResult call() throws Exception {
                StartFHIRExportJobResult result = null;

                try {
                    result = executeStartFHIRExportJob(finalRequest);
                } catch (Exception ex) {
                    // Report to the handler first, then rethrow so the Future fails too.
                    if (asyncHandler != null) {
                        asyncHandler.onError(ex);
                    }
                    throw ex;
                }

                if (asyncHandler != null) {
                    asyncHandler.onSuccess(finalRequest, result);
                }
                return result;
            }
        });
    }
/**
 * Fire-and-forget variant: delegates to the two-argument overload with no
 * {@code AsyncHandler}.
 */
@Override
public java.util.concurrent.Future<StartFHIRImportJobResult> startFHIRImportJobAsync(StartFHIRImportJobRequest request) {
    return startFHIRImportJobAsync(request, null);
}

/**
 * Submits the blocking {@code executeStartFHIRImportJob} call to this
 * client's executor service; the optional handler sees the error before the
 * Future fails, or the result before the Future completes.
 */
@Override
public java.util.concurrent.Future<StartFHIRImportJobResult> startFHIRImportJobAsync(final StartFHIRImportJobRequest request,
        final com.amazonaws.handlers.AsyncHandler<StartFHIRImportJobRequest, StartFHIRImportJobResult> asyncHandler) {
    final StartFHIRImportJobRequest finalRequest = beforeClientExecution(request);

    return executorService.submit(new java.util.concurrent.Callable<StartFHIRImportJobResult>() {
        @Override
        public StartFHIRImportJobResult call() throws Exception {
            StartFHIRImportJobResult result = null;

            try {
                result = executeStartFHIRImportJob(finalRequest);
            } catch (Exception ex) {
                if (asyncHandler != null) {
                    asyncHandler.onError(ex);
                }
                throw ex;
            }

            if (asyncHandler != null) {
                asyncHandler.onSuccess(finalRequest, result);
            }
            return result;
        }
    });
}
/**
 * Fire-and-forget variant: delegates to the two-argument overload with no
 * {@code AsyncHandler}.
 */
@Override
public java.util.concurrent.Future<TagResourceResult> tagResourceAsync(TagResourceRequest request) {
    return tagResourceAsync(request, null);
}

/**
 * Submits the blocking {@code executeTagResource} call to this client's
 * executor service; the optional handler sees the error before the Future
 * fails, or the result before the Future completes.
 */
@Override
public java.util.concurrent.Future<TagResourceResult> tagResourceAsync(final TagResourceRequest request,
        final com.amazonaws.handlers.AsyncHandler<TagResourceRequest, TagResourceResult> asyncHandler) {
    final TagResourceRequest finalRequest = beforeClientExecution(request);

    return executorService.submit(new java.util.concurrent.Callable<TagResourceResult>() {
        @Override
        public TagResourceResult call() throws Exception {
            TagResourceResult result = null;

            try {
                result = executeTagResource(finalRequest);
            } catch (Exception ex) {
                if (asyncHandler != null) {
                    asyncHandler.onError(ex);
                }
                throw ex;
            }

            if (asyncHandler != null) {
                asyncHandler.onSuccess(finalRequest, result);
            }
            return result;
        }
    });
}
/**
 * Fire-and-forget variant: delegates to the two-argument overload with no
 * {@code AsyncHandler}.
 */
@Override
public java.util.concurrent.Future<UntagResourceResult> untagResourceAsync(UntagResourceRequest request) {
    return untagResourceAsync(request, null);
}

/**
 * Submits the blocking {@code executeUntagResource} call to this client's
 * executor service; the optional handler sees the error before the Future
 * fails, or the result before the Future completes.
 */
@Override
public java.util.concurrent.Future<UntagResourceResult> untagResourceAsync(final UntagResourceRequest request,
        final com.amazonaws.handlers.AsyncHandler<UntagResourceRequest, UntagResourceResult> asyncHandler) {
    final UntagResourceRequest finalRequest = beforeClientExecution(request);

    return executorService.submit(new java.util.concurrent.Callable<UntagResourceResult>() {
        @Override
        public UntagResourceResult call() throws Exception {
            UntagResourceResult result = null;

            try {
                result = executeUntagResource(finalRequest);
            } catch (Exception ex) {
                if (asyncHandler != null) {
                    asyncHandler.onError(ex);
                }
                throw ex;
            }

            if (asyncHandler != null) {
                asyncHandler.onSuccess(finalRequest, result);
            }
            return result;
        }
    });
}
/**
 * Shuts down the client, releasing all managed resources. This includes forcibly terminating all pending
 * asynchronous service calls. Clients who wish to give pending asynchronous service calls time to complete should
 * call {@code getExecutorService().shutdown()} followed by {@code getExecutorService().awaitTermination()} prior to
 * calling this method.
 */
@Override
public void shutdown() {
    // Shut down the synchronous client state first, then abort any tasks
    // still queued on this client's executor.
    super.shutdown();
    executorService.shutdownNow();
}
}
| 8,821 |
16,989 | <reponame>kjlubick/bazel
// Copyright 2016 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <CoreServices/CoreServices.h>
#include <jni.h>
#include <pthread.h>
#include <stdlib.h>
#include <list>
#include <string>
namespace {
// A structure to pass around the FSEvents info and the list of paths.
// One instance is shared between the JNI entry points (create/run/poll/
// doClose) and the FSEvents callback; `mutex` guards the fields mutated
// from the callback thread.
struct JNIEventsDiffAwareness {
  // FSEvents run loop (thread)
  CFRunLoopRef runLoop;

  // FSEvents stream reference (reference to the listened stream)
  FSEventStreamRef stream;

  // If true, fsevents dropped events so we don't know what changed exactly.
  bool everything_changed;

  // List of paths that have been changed since last polling.
  std::list<std::string> paths;

  // Mutex to protect concurrent accesses to paths and everything_changed.
  pthread_mutex_t mutex;

  JNIEventsDiffAwareness() : everything_changed(false) {
    pthread_mutex_init(&mutex, nullptr);
  }

  ~JNIEventsDiffAwareness() { pthread_mutex_destroy(&mutex); }
};
// Callback called when an event is reported by the FSEvents API.
// Runs on the FSEvents run-loop thread; appends changed paths to
// info->paths, or sets everything_changed when individual paths can no
// longer be trusted (both under info->mutex).
void FsEventsDiffAwarenessCallback(ConstFSEventStreamRef streamRef,
                                   void *clientCallBackInfo, size_t numEvents,
                                   void *eventPaths,
                                   const FSEventStreamEventFlags eventFlags[],
                                   const FSEventStreamEventId eventIds[]) {
  char **paths = static_cast<char **>(eventPaths);

  JNIEventsDiffAwareness *info =
      static_cast<JNIEventsDiffAwareness *>(clientCallBackInfo);
  pthread_mutex_lock(&(info->mutex));
  for (int i = 0; i < numEvents; i++) {
    if ((eventFlags[i] & kFSEventStreamEventFlagMustScanSubDirs) != 0) {
      // Either we lost events or they were coalesced. Assume everything changed
      // and give up, which matches the fsevents documentation in that the
      // caller is expected to rescan the directory contents on its own.
      info->everything_changed = true;
      break;
    } else if ((eventFlags[i] & kFSEventStreamEventFlagItemIsDir) != 0 &&
               (eventFlags[i] & kFSEventStreamEventFlagItemRenamed) != 0) {
      // A directory was renamed. When this happens, fsevents may or may not
      // give us individual events about which files changed underneath, which
      // means we have to rescan the directories in order to know what changed.
      //
      // The problem is that we cannot rescan the source of the move to discover
      // which files "disappeared"... so we have no choice but to rescan
      // everything. Well, in theory, we could try to track directory inodes and
      // using those to guess which files within them moved... but that'd be way
      // too much complexity for this rather-uncommon use case.
      info->everything_changed = true;
      break;
    } else {
      info->paths.push_back(std::string(paths[i]));
    }
  }
  pthread_mutex_unlock(&(info->mutex));
}
// JNI entry point: allocates the native watcher state, builds the FSEvents
// stream for the given paths, and stores the native pointer in the Java
// object's `nativePointer` field. The stream is not scheduled/started here;
// that happens in `run`.
extern "C" JNIEXPORT void JNICALL
Java_com_google_devtools_build_lib_skyframe_MacOSXFsEventsDiffAwareness_create(
    JNIEnv *env, jobject fsEventsDiffAwareness, jobjectArray paths,
    jdouble latency) {
  // Create a FSEventStreamContext to pass the native state to the callback.
  JNIEventsDiffAwareness *info = new JNIEventsDiffAwareness();
  FSEventStreamContext context;
  context.version = 0;
  context.info = static_cast<void *>(info);
  context.retain = nullptr;
  context.release = nullptr;
  context.copyDescription = nullptr;

  // Create a CFArrayRef of CFStringRef from the Java array of String.
  jsize length = env->GetArrayLength(paths);
  CFStringRef *pathsArray = new CFStringRef[length];
  for (jsize i = 0; i < length; i++) {
    jstring path = (jstring)env->GetObjectArrayElement(paths, i);
    const char *pathCStr = env->GetStringUTFChars(path, nullptr);
    pathsArray[i] =
        CFStringCreateWithCString(nullptr, pathCStr, kCFStringEncodingUTF8);
    env->ReleaseStringUTFChars(path, pathCStr);
  }
  // Bug fix: the CFArray must contain all `length` converted paths. The
  // previous hard-coded element count of 1 silently watched only the first
  // path.
  CFArrayRef pathsToWatch =
      CFArrayCreate(nullptr, (const void **)pathsArray, length, nullptr);
  delete[] pathsArray;
  // NOTE(review): pathsToWatch and the CFStringRefs are never CFRelease'd;
  // presumably acceptable for a watcher that lives as long as the process —
  // confirm before tightening.
  info->stream = FSEventStreamCreate(
      nullptr, &FsEventsDiffAwarenessCallback, &context, pathsToWatch,
      kFSEventStreamEventIdSinceNow, static_cast<CFAbsoluteTime>(latency),
      kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents);

  // Save the native pointer to MacOSXFsEventsDiffAwareness#nativePointer.
  // (The previous jbyteArray round-trip was dead code: the array was filled
  // but never used; SetLongField below is what actually publishes `info`.)
  jclass clazz = env->GetObjectClass(fsEventsDiffAwareness);
  jfieldID fid = env->GetFieldID(clazz, "nativePointer", "J");
  env->SetLongField(fsEventsDiffAwareness, fid, reinterpret_cast<jlong>(info));
}
// Reads the native watcher state previously stored by `create` out of the
// Java object's `nativePointer` long field.
JNIEventsDiffAwareness *GetInfo(JNIEnv *env, jobject fsEventsDiffAwareness) {
  jclass object_class = env->GetObjectClass(fsEventsDiffAwareness);
  jfieldID pointer_field = env->GetFieldID(object_class, "nativePointer", "J");
  return reinterpret_cast<JNIEventsDiffAwareness *>(
      env->GetLongField(fsEventsDiffAwareness, pointer_field));
}
} // namespace
// JNI entry point: schedules and starts the FSEvents stream on the calling
// thread's run loop, signals the Java-side CountDownLatch so the caller
// knows the watcher is live, then blocks in CFRunLoopRun until doClose
// stops the loop.
extern "C" JNIEXPORT void JNICALL
Java_com_google_devtools_build_lib_skyframe_MacOSXFsEventsDiffAwareness_run(
    JNIEnv *env, jobject fsEventsDiffAwareness, jobject listening) {
  JNIEventsDiffAwareness *info = GetInfo(env, fsEventsDiffAwareness);
  // Remember this thread's run loop so doClose can stop it later.
  info->runLoop = CFRunLoopGetCurrent();
  FSEventStreamScheduleWithRunLoop(info->stream, info->runLoop,
                                   kCFRunLoopDefaultMode);
  FSEventStreamStart(info->stream);
  // Count down the latch (listening.countDown()) to unblock the Java caller.
  jclass countDownLatchClass = env->GetObjectClass(listening);
  jmethodID countDownMethod =
      env->GetMethodID(countDownLatchClass, "countDown", "()V");
  env->CallVoidMethod(listening, countDownMethod);
  // Blocks until CFRunLoopStop is called from doClose.
  CFRunLoopRun();
}
// JNI entry point: drains the accumulated change set. Returns null when
// events were dropped/coalesced (everything_changed), meaning the caller
// must rescan everything; otherwise returns the changed paths as a
// String[]. Both the flag and the path list are reset before returning.
extern "C" JNIEXPORT jobjectArray JNICALL
Java_com_google_devtools_build_lib_skyframe_MacOSXFsEventsDiffAwareness_poll(
    JNIEnv *env, jobject fsEventsDiffAwareness) {
  JNIEventsDiffAwareness *info = GetInfo(env, fsEventsDiffAwareness);
  pthread_mutex_lock(&(info->mutex));

  jobjectArray result;
  if (info->everything_changed) {
    // Null signals "rescan everything" to the Java side.
    result = nullptr;
  } else {
    jclass classString = env->FindClass("java/lang/String");
    result = env->NewObjectArray(info->paths.size(), classString, nullptr);
    int i = 0;
    for (auto it = info->paths.begin(); it != info->paths.end(); it++, i++) {
      env->SetObjectArrayElement(result, i, env->NewStringUTF(it->c_str()));
    }
  }

  // Reset state so the next poll only reports new changes.
  info->everything_changed = false;
  info->paths.clear();

  pthread_mutex_unlock(&(info->mutex));
  return result;
}
// JNI entry point: stops the run loop started by `run`, tears down the
// FSEvents stream in the order required by the API (stop, unschedule,
// invalidate, release), and frees the native state.
extern "C" JNIEXPORT void JNICALL
Java_com_google_devtools_build_lib_skyframe_MacOSXFsEventsDiffAwareness_doClose(
    JNIEnv *env, jobject fsEventsDiffAwareness) {
  JNIEventsDiffAwareness *info = GetInfo(env, fsEventsDiffAwareness);
  // Unblocks CFRunLoopRun in the `run` entry point.
  CFRunLoopStop(info->runLoop);
  FSEventStreamStop(info->stream);
  FSEventStreamUnscheduleFromRunLoop(info->stream, info->runLoop,
                                     kCFRunLoopDefaultMode);
  FSEventStreamInvalidate(info->stream);
  FSEventStreamRelease(info->stream);
  delete info;
}
| 2,777 |
691 | // 2019/06/13 - modified by <NAME>
// - added TaskView interface
//
// 2019/04/17 - created by <NAME>
#pragma once
#include <iostream>
#include <sstream>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <atomic>
#include <memory>
#include <deque>
#include <optional>
#include <thread>
#include <algorithm>
#include <set>
#include <numeric>
#include <cassert>
#include "task.hpp"
namespace tf {
/**
@class: ExecutorObserverInterface
@brief The interface class for creating an executor observer.
The tf::ExecutorObserver class let users define methods to monitor the behaviors
of an executor.
This is particularly useful when you want to inspect the performance of an executor.
*/
class ExecutorObserverInterface {

  public:

  /**
  @brief virtual destructor
  */
  virtual ~ExecutorObserverInterface() = default;

  /**
  @brief constructor-like method to call when the executor observer is fully created
  @param num_workers the number of the worker threads in the executor
  */
  virtual void set_up(unsigned num_workers) {};

  /**
  @brief method to call before a worker thread executes a closure
  @param worker_id the id of this worker thread
  @param task_view a constant wrapper object to the task
  */
  virtual void on_entry(unsigned worker_id, TaskView task_view) {};

  /**
  @brief method to call after a worker thread executed a closure
  @param worker_id the id of this worker thread
  @param task_view a constant wrapper object to the task
  */
  virtual void on_exit(unsigned worker_id, TaskView task_view) {};

  // All hooks default to no-ops so subclasses only override what they need.
};
// ------------------------------------------------------------------
/**
@class: ExecutorObserver
@brief Default executor observer to dump the execution timelines
*/
class ExecutorObserver : public ExecutorObserverInterface {

  // Executor invokes the private set_up/on_entry/on_exit hooks directly.
  friend class Executor;

  // data structure to record each task execution
  struct Execution {

    TaskView task_view;

    // begin/end timestamps of one task execution on one worker
    std::chrono::time_point<std::chrono::steady_clock> beg;
    std::chrono::time_point<std::chrono::steady_clock> end;

    // Constructor used by on_entry: `end` is filled in later by on_exit.
    Execution(
      TaskView tv,
      std::chrono::time_point<std::chrono::steady_clock> b
    ) :
      task_view {tv}, beg {b} {
    }

    Execution(
      TaskView tv,
      std::chrono::time_point<std::chrono::steady_clock> b,
      std::chrono::time_point<std::chrono::steady_clock> e
    ) :
      task_view {tv}, beg {b}, end {e} {
    }
  };

  // data structure to store the entire execution timeline
  // (one Execution vector per worker, timestamps relative to `origin`)
  struct Timeline {
    std::chrono::time_point<std::chrono::steady_clock> origin;
    std::vector<std::vector<Execution>> executions;
  };

  public:

    /**
    @brief dump the timelines in JSON format to an ostream
    @param ostream the target std::ostream to dump
    */
    inline void dump(std::ostream& ostream) const;

    /**
    @brief dump the timelines in JSON to a std::string
    @return a JSON string
    */
    inline std::string dump() const;

    /**
    @brief clear the timeline data
    */
    inline void clear();

    /**
    @brief get the number of total tasks in the observer
    @return number of total tasks
    */
    inline size_t num_tasks() const;

  private:

    inline void set_up(unsigned num_workers) override final;
    inline void on_entry(unsigned worker_id, TaskView task_view) override final;
    inline void on_exit(unsigned worker_id, TaskView task_view) override final;

    Timeline _timeline;
};
// Procedure: set_up
// Allocates one execution log per worker (pre-reserving 1024 slots each)
// and records the observation start time.
inline void ExecutorObserver::set_up(unsigned num_workers) {

  _timeline.executions.resize(num_workers);

  for(auto& worker_log : _timeline.executions) {
    worker_log.reserve(1024);
  }

  _timeline.origin = std::chrono::steady_clock::now();
}
// Procedure: on_entry
// Opens a new Execution record on worker w's log with the current time;
// the matching on_exit fills in the end timestamp.
inline void ExecutorObserver::on_entry(unsigned w, TaskView tv) {
  _timeline.executions[w].emplace_back(tv, std::chrono::steady_clock::now());
}
// Procedure: on_exit
// Closes the most recent Execution record on worker w's log. Relies on
// on_entry/on_exit pairing per worker, hence the non-empty assertion.
inline void ExecutorObserver::on_exit(unsigned w, TaskView tv) {
  assert(_timeline.executions[w].size() > 0);
  _timeline.executions[w].back().end = std::chrono::steady_clock::now();
}
// Function: clear
// Empties every worker's execution log (capacity is retained by vector::clear).
inline void ExecutorObserver::clear() {
  for(auto& worker_log : _timeline.executions) {
    worker_log.clear();
  }
}
// Procedure: dump
// Emits the timeline as a Chrome trace-event JSON array: one complete
// ("ph":"X") event per recorded execution, with worker id as tid and
// microsecond timestamps relative to _timeline.origin.
//
// Bug fix: the previous comma logic only separated worker w's events from
// worker w-1's, so a non-empty worker following an *empty* worker produced
// adjacent objects ("}{") — invalid JSON. A single first-element flag
// handles any pattern of empty/non-empty workers and produces byte-identical
// output in the cases the old code already handled correctly.
inline void ExecutorObserver::dump(std::ostream& os) const {

  os << '[';

  bool first_event = true;

  for(size_t w=0; w<_timeline.executions.size(); w++) {
    for(size_t i=0; i<_timeline.executions[w].size(); i++) {

      if(!first_event) {
        os << ',';
      }
      first_event = false;

      os << '{'
         << "\"cat\":\"ExecutorObserver\","
         << "\"name\":\"" << _timeline.executions[w][i].task_view.name() << "\","
         << "\"ph\":\"X\","
         << "\"pid\":1,"
         << "\"tid\":" << w << ','
         << "\"ts\":" << std::chrono::duration_cast<std::chrono::microseconds>(
                           _timeline.executions[w][i].beg - _timeline.origin
                         ).count() << ','
         << "\"dur\":" << std::chrono::duration_cast<std::chrono::microseconds>(
                            _timeline.executions[w][i].end - _timeline.executions[w][i].beg
                          ).count()
         << '}';
    }
  }
  os << "]\n";
}
// Function: dump
// Convenience wrapper: renders the JSON timeline into a string by routing
// the stream overload through an in-memory buffer.
inline std::string ExecutorObserver::dump() const {
  std::ostringstream buffer;
  dump(buffer);
  return buffer.str();
}
// Function: num_tasks
// Total number of recorded executions, summed across every worker's log.
inline size_t ExecutorObserver::num_tasks() const {
  size_t total = 0;
  for(const auto& worker_log : _timeline.executions) {
    total += worker_log.size();
  }
  return total;
}
} // end of namespace tf -------------------------------------------
| 2,214 |
852 | #ifndef SimG4Core_BeginOfEvent_H
#define SimG4Core_BeginOfEvent_H
#include "G4Event.hh"
// Lightweight signal object dispatched when a G4Event begins; gives
// observers read-only access to the event via the call operator.
class BeginOfEvent {
public:
  // Wraps (does not own) the event pointer; the caller keeps it alive.
  BeginOfEvent(const G4Event* tEvent) : anEvent(tEvent) {}
  // Returns the wrapped event.
  const G4Event* operator()() const { return anEvent; }

private:
  const G4Event* anEvent;
};
#endif
| 108 |
742 | package org.support.project.knowledge.control.open;
import org.support.project.di.DI;
import org.support.project.di.Instance;
import org.support.project.knowledge.control.Control;
import org.support.project.web.boundary.Boundary;
import org.support.project.web.control.service.Get;
// Controller for the public notice pages; a fresh instance is created per
// request (Instance.Prototype).
@DI(instance = Instance.Prototype)
public class NoticeControl extends Control {

    /**
     * Display the notification list screen.
     *
     * @return Boundary
     */
    @Get
    public Boundary list() {
        return forward("list.jsp");
    }

}
| 200 |
1,275 | <reponame>ananthdurai/pinot
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.core.data.manager;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.helix.HelixManager;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.pinot.common.metadata.segment.SegmentZKMetadata;
import org.apache.pinot.common.metrics.PinotMetricUtils;
import org.apache.pinot.common.metrics.ServerMetrics;
import org.apache.pinot.common.utils.TarGzCompressionUtils;
import org.apache.pinot.common.utils.fetcher.SegmentFetcherFactory;
import org.apache.pinot.core.data.manager.offline.OfflineTableDataManager;
import org.apache.pinot.segment.local.data.manager.TableDataManagerConfig;
import org.apache.pinot.segment.local.segment.index.loader.IndexLoadingConfig;
import org.apache.pinot.segment.spi.SegmentMetadata;
import org.apache.pinot.segment.spi.creator.SegmentVersion;
import org.apache.pinot.spi.crypt.PinotCrypter;
import org.apache.pinot.spi.crypt.PinotCrypterFactory;
import org.apache.pinot.spi.env.PinotConfiguration;
import org.apache.pinot.spi.utils.CommonConstants;
import org.apache.pinot.spi.utils.ReadMode;
import org.apache.pinot.spi.utils.retry.AttemptsExceededException;
import org.apache.pinot.util.TestUtils;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.apache.pinot.common.utils.fetcher.BaseSegmentFetcher.RETRY_COUNT_CONFIG_KEY;
import static org.apache.pinot.common.utils.fetcher.BaseSegmentFetcher.RETRY_DELAY_SCALE_FACTOR_CONFIG_KEY;
import static org.apache.pinot.common.utils.fetcher.BaseSegmentFetcher.RETRY_WAIT_MS_CONFIG_KEY;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
public class BaseTableDataManagerTest {
private static final File TEMP_DIR = new File(FileUtils.getTempDirectory(), "OfflineTableDataManagerTest");
private static final String TABLE_NAME = "__table01__";
@BeforeMethod
public void setUp()
throws Exception {
TestUtils.ensureDirectoriesExistAndEmpty(TEMP_DIR);
initSegmentFetcher();
}
@AfterMethod
public void tearDown()
throws Exception {
FileUtils.deleteDirectory(TEMP_DIR);
}
private BaseTableDataManager makeTestableManager() {
TableDataManagerConfig config = mock(TableDataManagerConfig.class);
when(config.getTableName()).thenReturn(TABLE_NAME);
when(config.getDataDir()).thenReturn(new File(TEMP_DIR, TABLE_NAME).getAbsolutePath());
OfflineTableDataManager tableDataManager = new OfflineTableDataManager();
tableDataManager.init(config, "dummyInstance", mock(ZkHelixPropertyStore.class),
new ServerMetrics(PinotMetricUtils.getPinotMetricsRegistry()), mock(HelixManager.class), null);
tableDataManager.start();
return tableDataManager;
}
@Test
public void testReloadSegmentNewData()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-new-data");
// Create an empty segment and compress it to tar.gz as the one in deep store.
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils
.write(new File(tempInputDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=remove");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempTar.getAbsolutePath());
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
File indexDir = tmgr.getSegmentDataDir("seg01");
FileUtils.write(new File(indexDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=local");
// Different CRCs leading to segment download.
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("10240");
when(llmd.getIndexDir()).thenReturn(indexDir);
tmgr.reloadSegment("seg01", newDummyIndexLoadingConfig(), zkmd, llmd, null, false);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("k=remove"));
}
@Test
public void testReloadSegmentLocalCopy()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-local-copy");
// Create an empty segment and compress it to tar.gz as the one in deep store.
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils
.write(new File(tempInputDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=remote");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempTar.getAbsolutePath());
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
File indexDir = tmgr.getSegmentDataDir("seg01");
FileUtils.write(new File(indexDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=local");
// Same CRCs so load the local copy.
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("1024");
when(llmd.getIndexDir()).thenReturn(indexDir);
tmgr.reloadSegment("seg01", newDummyIndexLoadingConfig(), zkmd, llmd, null, false);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("k=local"));
}
@Test
public void testReloadSegmentForceDownload()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-force-download");
// Create an empty segment and compress it to tar.gz as the one in deep store.
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils
.write(new File(tempInputDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=remote");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempTar.getAbsolutePath());
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
File indexDir = tmgr.getSegmentDataDir("seg01");
FileUtils.write(new File(indexDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=local");
// Same CRC but force to download
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("1024");
when(llmd.getIndexDir()).thenReturn(indexDir);
tmgr.reloadSegment("seg01", newDummyIndexLoadingConfig(), zkmd, llmd, null, true);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("k=remote"));
}
@Test
public void testAddOrReplaceSegmentNewData()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-new-data");
// Create an empty segment and compress it to tar.gz as the one in deep store.
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils.write(new File(tempInputDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempTar.getAbsolutePath());
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
// Different CRCs leading to segment download.
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("10240");
assertFalse(tmgr.getSegmentDataDir("seg01").exists());
tmgr.addOrReplaceSegment("seg01", newDummyIndexLoadingConfig(), zkmd, llmd);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("docs=0"));
}
@Test
public void testAddOrReplaceSegmentNoop()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("1024");
assertFalse(tmgr.getSegmentDataDir("seg01").exists());
tmgr.addOrReplaceSegment("seg01", newDummyIndexLoadingConfig(), zkmd, llmd);
// As CRC is same, the index dir is left as is, so not get created by the test.
assertFalse(tmgr.getSegmentDataDir("seg01").exists());
}
@Test
public void testAddOrReplaceSegmentRecovered()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
// Make this equal to the default crc value, so no need to make a dummy creation.meta file.
when(zkmd.getCrc()).thenReturn(Long.MIN_VALUE);
File backup = tmgr.getSegmentDataDir("seg01" + CommonConstants.Segment.SEGMENT_BACKUP_DIR_SUFFIX);
FileUtils.write(new File(backup, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01");
assertFalse(tmgr.getSegmentDataDir("seg01").exists());
tmgr.addOrReplaceSegment("seg01", newDummyIndexLoadingConfig(), zkmd, null);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("docs=0"));
}
@Test
public void testAddOrReplaceSegmentNotRecovered()
throws Exception {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-force-download");
// Create an empty segment and compress it to tar.gz as the one in deep store.
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils
.write(new File(tempInputDir, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=remote");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempTar.getAbsolutePath());
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
// Though can recover from backup, but CRC is different. Local CRC is Long.MIN_VALUE.
File backup = tmgr.getSegmentDataDir("seg01" + CommonConstants.Segment.SEGMENT_BACKUP_DIR_SUFFIX);
FileUtils.write(new File(backup, "metadata.properties"), "segment.total.docs=0\nsegment.name=seg01\nk=local");
assertFalse(tmgr.getSegmentDataDir("seg01").exists());
tmgr.addOrReplaceSegment("seg01", newDummyIndexLoadingConfig(), zkmd, null);
assertTrue(tmgr.getSegmentDataDir("seg01").exists());
assertTrue(FileUtils.readFileToString(new File(tmgr.getSegmentDataDir("seg01"), "metadata.properties"))
.contains("k=remote"));
}
@Test
public void testDownloadAndDecrypt()
throws Exception {
File tempInput = new File(TEMP_DIR, "tmp.txt");
FileUtils.write(tempInput, "this is from somewhere remote");
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getDownloadUrl()).thenReturn("file://" + tempInput.getAbsolutePath());
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-download-decrypt");
File tarFile = tmgr.downloadAndDecrypt("seg01", zkmd, tempRootDir);
assertEquals(FileUtils.readFileToString(tarFile), "this is from somewhere remote");
when(zkmd.getCrypterName()).thenReturn("fakePinotCrypter");
tarFile = tmgr.downloadAndDecrypt("seg01", zkmd, tempRootDir);
assertEquals(FileUtils.readFileToString(tarFile), "this is from somewhere remote");
FakePinotCrypter fakeCrypter = (FakePinotCrypter) PinotCrypterFactory.create("fakePinotCrypter");
assertTrue(fakeCrypter._origFile.getAbsolutePath().endsWith("__table01__/test-download-decrypt/seg01.tar.gz.enc"));
assertTrue(fakeCrypter._decFile.getAbsolutePath().endsWith("__table01__/test-download-decrypt/seg01.tar.gz"));
try {
// Set maxRetry to 0 to cause retry failure immediately.
Map<String, Object> properties = new HashMap<>();
properties.put(RETRY_COUNT_CONFIG_KEY, 0);
SegmentFetcherFactory.init(new PinotConfiguration(properties));
tmgr.downloadAndDecrypt("seg01", zkmd, tempRootDir);
fail();
} catch (AttemptsExceededException e) {
assertEquals(e.getMessage(), "Operation failed after 0 attempts");
}
}
@Test
public void testUntarAndMoveSegment()
throws IOException {
BaseTableDataManager tmgr = makeTestableManager();
File tempRootDir = tmgr.getSegmentDataDir("test-untar-move");
// All input and intermediate files are put in the tempRootDir.
File tempTar = new File(tempRootDir, "seg01" + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
File tempInputDir = new File(tempRootDir, "seg01_input");
FileUtils.write(new File(tempInputDir, "tmp.txt"), "this is in segment dir");
TarGzCompressionUtils.createTarGzFile(tempInputDir, tempTar);
FileUtils.deleteQuietly(tempInputDir);
// The destination is the segment directory at the same level of tempRootDir.
File indexDir = tmgr.untarAndMoveSegment("seg01", tempTar, tempRootDir);
assertEquals(indexDir, tmgr.getSegmentDataDir("seg01"));
assertEquals(FileUtils.readFileToString(new File(indexDir, "tmp.txt")), "this is in segment dir");
try {
tmgr.untarAndMoveSegment("seg01", new File(tempRootDir, "unknown.txt"), TEMP_DIR);
fail();
} catch (Exception e) {
// expected.
}
}
@Test
public void testIsNewSegmentMetadata()
throws IOException {
SegmentZKMetadata zkmd = mock(SegmentZKMetadata.class);
when(zkmd.getCrc()).thenReturn(Long.valueOf(1024));
assertTrue(BaseTableDataManager.isNewSegment(zkmd, null));
SegmentMetadata llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("1024");
assertFalse(BaseTableDataManager.isNewSegment(zkmd, llmd));
llmd = mock(SegmentMetadata.class);
when(llmd.getCrc()).thenReturn("10245");
assertTrue(BaseTableDataManager.isNewSegment(zkmd, llmd));
}
  // Has to be public class for the class loader to work.
  // Test double: records the arguments of the last decrypt() call so tests can
  // assert which files the crypter was invoked with; encryption is a no-op.
  public static class FakePinotCrypter implements PinotCrypter {
    // Last source file passed to decrypt(); read by test assertions.
    private File _origFile;
    // Last destination file passed to decrypt(); read by test assertions.
    private File _decFile;

    @Override
    public void init(PinotConfiguration config) {
      // No configuration needed for the fake.
    }

    @Override
    public void encrypt(File origFile, File encFile) {
      // Intentionally a no-op: tests only exercise the decrypt path.
    }

    @Override
    public void decrypt(File origFile, File decFile) {
      // Only record the call; no actual decryption takes place.
      _origFile = origFile;
      _decFile = decFile;
    }
  }
private static void initSegmentFetcher()
throws Exception {
Map<String, Object> properties = new HashMap<>();
properties.put(RETRY_COUNT_CONFIG_KEY, 3);
properties.put(RETRY_WAIT_MS_CONFIG_KEY, 100);
properties.put(RETRY_DELAY_SCALE_FACTOR_CONFIG_KEY, 5);
SegmentFetcherFactory.init(new PinotConfiguration(properties));
// Setup crypter
properties.put("class.fakePinotCrypter", FakePinotCrypter.class.getName());
PinotCrypterFactory.init(new PinotConfiguration(properties));
}
private static IndexLoadingConfig newDummyIndexLoadingConfig() {
IndexLoadingConfig indexLoadingConfig = mock(IndexLoadingConfig.class);
when(indexLoadingConfig.getReadMode()).thenReturn(ReadMode.mmap);
when(indexLoadingConfig.getSegmentVersion()).thenReturn(SegmentVersion.v3);
return indexLoadingConfig;
}
}
| 6,437 |
16,461 | #import "EXEnvironment.h"
#import <Foundation/Foundation.h>
// Test-only category exposing EXEnvironment's private configuration loaders
// so unit tests can drive specific shell/detach configurations directly.
@interface EXEnvironment (Tests)

// Loads a shell-app configuration from the given config dictionaries and
// flags. NOTE(review): parameter semantics inferred from names only — confirm
// against the EXEnvironment implementation.
- (void)_loadShellConfig:(NSDictionary *)shellConfig
withInfoPlist:(NSDictionary *)infoPlist
withExpoKitDevUrl:(NSString *)expoKitDevelopmentUrl
withEmbeddedManifest:(NSDictionary *)embeddedManifest
isDetached:(BOOL)isDetached
isDebugXCodeScheme:(BOOL)isDebugScheme
isUserDetach:(BOOL)isUserDetach;

// Resets the environment back to its default (non-shell) configuration.
- (void)_loadDefaultConfig;

@end
| 195 |
648 | <reponame>swrobel/fhir
{"resourceType":"DataElement","id":"MeasureReport.group.stratifier.stratum.measureScore","meta":{"lastUpdated":"2017-04-19T07:44:43.294+10:00"},"url":"http://hl7.org/fhir/DataElement/MeasureReport.group.stratifier.stratum.measureScore","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"id":"MeasureReport.group.stratifier.stratum.measureScore","path":"MeasureReport.group.stratifier.stratum.measureScore","short":"What score this stratum achieved","definition":"The measure score for this stratum, calculated as appropriate for the measure type and scoring method, and based on only the members of this stratum.","min":0,"max":"1","type":[{"code":"decimal"}],"isSummary":true}]} | 209 |
72,551 | <reponame>jerodji/swift<filename>lib/Sema/TypeCheckAvailability.cpp
//===--- TypeCheckAvailability.cpp - Availability Diagnostics -------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements availability diagnostics.
//
//===----------------------------------------------------------------------===//
#include "TypeCheckAvailability.h"
#include "TypeCheckConcurrency.h"
#include "TypeChecker.h"
#include "TypeCheckObjC.h"
#include "MiscDiagnostics.h"
#include "swift/AST/ASTWalker.h"
#include "swift/AST/Initializer.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/Pattern.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/AST/SourceFile.h"
#include "swift/AST/TypeDeclFinder.h"
#include "swift/AST/TypeRefinementContext.h"
#include "swift/Basic/Defer.h"
#include "swift/Basic/SourceManager.h"
#include "swift/Basic/StringExtras.h"
#include "swift/Parse/Lexer.h"
#include "swift/Parse/Parser.h"
#include "swift/Sema/IDETypeChecking.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace swift;
/// Constructs an ExportContext, packing the boolean/enum state into the
/// narrow integer members declared on the class.
ExportContext::ExportContext(DeclContext *DC,
                             AvailabilityContext runningOSVersion,
                             FragileFunctionKind kind,
                             bool spi, bool exported, bool implicit, bool deprecated,
                             Optional<PlatformKind> unavailablePlatformKind)
  : DC(DC), RunningOSVersion(runningOSVersion), FragileKind(kind) {
  SPI = spi;
  Exported = exported;
  Implicit = implicit;
  Deprecated = deprecated;
  // Platform is only meaningful when Unavailable is set (see
  // getUnavailablePlatformKind()); encode "not unavailable" as 0/0.
  if (unavailablePlatformKind) {
    Unavailable = 1;
    Platform = unsigned(*unavailablePlatformKind);
  } else {
    Unavailable = 0;
    Platform = 0;
  }

  // Default reason; callers refine it via withReason().
  Reason = unsigned(ExportabilityReason::General);
}
bool swift::isExported(const ValueDecl *VD) {
if (VD->getAttrs().hasAttribute<ImplementationOnlyAttr>())
return false;
// Is this part of the module's API or ABI?
AccessScope accessScope =
VD->getFormalAccessScope(nullptr,
/*treatUsableFromInlineAsPublic*/true);
if (accessScope.isPublic())
return true;
// Is this a stored property in a @frozen struct or class?
if (auto *property = dyn_cast<VarDecl>(VD))
if (property->isLayoutExposedToClients())
return true;
return false;
}
/// Returns true if \p D is part of the module's public interface, dispatching
/// on the kind of declaration.
bool swift::isExported(const Decl *D) {
  // Value declarations defer to the ValueDecl overload.
  if (auto *value = dyn_cast<ValueDecl>(D))
    return isExported(value);

  // A pattern binding is judged by its first anchoring variable.
  if (auto *binding = dyn_cast<PatternBindingDecl>(D)) {
    const unsigned numEntries = binding->getNumPatternEntries();
    for (unsigned idx = 0; idx != numEntries; ++idx) {
      if (auto *var = binding->getAnchoringVarDecl(idx))
        return isExported(var);
    }
    return false;
  }

  // An extension is judged by the nominal type it extends.
  if (auto *ext = dyn_cast<ExtensionDecl>(D)) {
    auto *nominal = ext->getExtendedNominal();
    return nominal ? isExported(nominal) : false;
  }

  // Everything else is conservatively considered exported.
  return true;
}
/// Walks the DeclContext chain from \p DC up to (but not including) module
/// scope, invoking \p fn on the declaration associated with each context.
/// Accessors additionally report their underlying storage declaration.
template<typename Fn>
static void forEachOuterDecl(DeclContext *DC, Fn fn) {
  for (; !DC->isModuleScopeContext(); DC = DC->getParent()) {
    switch (DC->getContextKind()) {
    // These context kinds have no associated declaration to visit.
    case DeclContextKind::AbstractClosureExpr:
    case DeclContextKind::TopLevelCodeDecl:
    case DeclContextKind::SerializedLocal:
    case DeclContextKind::Module:
    case DeclContextKind::FileUnit:
      break;

    // An initializer context reports the pattern binding or wrapped variable
    // it initializes.
    case DeclContextKind::Initializer:
      if (auto *PBI = dyn_cast<PatternBindingInitializer>(DC))
        fn(PBI->getBinding());
      else if (auto *I = dyn_cast<PropertyWrapperInitializer>(DC))
        fn(I->getWrappedVar());
      break;

    case DeclContextKind::SubscriptDecl:
      fn(cast<SubscriptDecl>(DC));
      break;

    case DeclContextKind::EnumElementDecl:
      fn(cast<EnumElementDecl>(DC));
      break;

    case DeclContextKind::AbstractFunctionDecl:
      fn(cast<AbstractFunctionDecl>(DC));

      // Accessors also contribute the storage they belong to.
      if (auto *AD = dyn_cast<AccessorDecl>(DC))
        fn(AD->getStorage());
      break;

    case DeclContextKind::GenericTypeDecl:
      fn(cast<GenericTypeDecl>(DC));
      break;

    case DeclContextKind::ExtensionDecl:
      fn(cast<ExtensionDecl>(DC));
      break;
    }
  }
}
/// Accumulates the SPI/implicit/deprecated/unavailable state contributed by
/// \p D into the given out-parameters. Flags are only ever set, never
/// cleared, so this can be called repeatedly for a chain of declarations.
static void computeExportContextBits(ASTContext &Ctx, Decl *D,
                                     bool *spi, bool *implicit, bool *deprecated,
                                     Optional<PlatformKind> *unavailablePlatformKind) {
  if (D->isSPI())
    *spi = true;

  // Defer bodies are desugared to an implicit closure expression. We need to
  // dilute the meaning of "implicit" to make sure we're still checking
  // availability inside of defer statements.
  const auto isDeferBody = isa<FuncDecl>(D) && cast<FuncDecl>(D)->isDeferBody();
  if (D->isImplicit() && !isDeferBody)
    *implicit = true;

  if (D->getAttrs().getDeprecated(Ctx))
    *deprecated = true;

  if (auto *A = D->getAttrs().getUnavailable(Ctx)) {
    *unavailablePlatformKind = A->Platform;
  }

  // A pattern binding inherits the bits of each of its anchoring variables.
  if (auto *PBD = dyn_cast<PatternBindingDecl>(D)) {
    for (unsigned i = 0, e = PBD->getNumPatternEntries(); i < e; ++i) {
      if (auto *VD = PBD->getAnchoringVarDecl(i))
        computeExportContextBits(Ctx, VD, spi, implicit, deprecated,
                                 unavailablePlatformKind);
    }
  }
}
/// Builds the ExportContext for checking the signature of \p D: availability
/// at the declaration's end location, fragility of its context, and the
/// SPI/implicit/deprecated/unavailable bits accumulated from the declaration
/// itself plus every enclosing declaration.
ExportContext ExportContext::forDeclSignature(Decl *D) {
  auto &Ctx = D->getASTContext();

  auto *DC = D->getInnermostDeclContext();
  auto fragileKind = DC->getFragileFunctionKind();
  // With availability checking disabled, pretend everything is always
  // available instead of consulting the refinement context hierarchy.
  auto runningOSVersion =
    (Ctx.LangOpts.DisableAvailabilityChecking
     ? AvailabilityContext::alwaysAvailable()
     : TypeChecker::overApproximateAvailabilityAtLocation(D->getEndLoc(), DC));

  // A library built at SPI library level treats every declaration as SPI.
  bool spi = Ctx.LangOpts.LibraryLevel == LibraryLevel::SPI;
  bool implicit = false;
  bool deprecated = false;
  Optional<PlatformKind> unavailablePlatformKind;
  computeExportContextBits(Ctx, D, &spi, &implicit, &deprecated,
                           &unavailablePlatformKind);
  // Enclosing declarations contribute their bits as well.
  forEachOuterDecl(D->getDeclContext(),
                   [&](Decl *D) {
                     computeExportContextBits(Ctx, D,
                                              &spi, &implicit, &deprecated,
                                              &unavailablePlatformKind);
                   });

  bool exported = ::isExported(D);

  return ExportContext(DC, runningOSVersion, fragileKind,
                       spi, exported, implicit, deprecated,
                       unavailablePlatformKind);
}
/// Builds the ExportContext for checking code inside a function body at
/// source location \p loc. Unlike forDeclSignature(), the resulting context
/// is never exported — bodies are not part of the module interface.
ExportContext ExportContext::forFunctionBody(DeclContext *DC, SourceLoc loc) {
  auto &Ctx = DC->getASTContext();

  auto fragileKind = DC->getFragileFunctionKind();
  // With availability checking disabled, pretend everything is always
  // available instead of consulting the refinement context hierarchy.
  auto runningOSVersion =
    (Ctx.LangOpts.DisableAvailabilityChecking
     ? AvailabilityContext::alwaysAvailable()
     : TypeChecker::overApproximateAvailabilityAtLocation(loc, DC));

  // Accumulate bits from every declaration enclosing the body.
  bool spi = Ctx.LangOpts.LibraryLevel == LibraryLevel::SPI;
  bool implicit = false;
  bool deprecated = false;
  Optional<PlatformKind> unavailablePlatformKind;
  forEachOuterDecl(DC,
                   [&](Decl *D) {
                     computeExportContextBits(Ctx, D,
                                              &spi, &implicit, &deprecated,
                                              &unavailablePlatformKind);
                   });

  bool exported = false;

  return ExportContext(DC, runningOSVersion, fragileKind,
                       spi, exported, implicit, deprecated,
                       unavailablePlatformKind);
}
/// Builds the ExportContext for checking a conformance to \p proto declared
/// on the nominal type or extension \p DC. The conformance is exported only
/// if both the conforming declaration and the protocol are public.
ExportContext ExportContext::forConformance(DeclContext *DC,
                                            ProtocolDecl *proto) {
  assert(isa<ExtensionDecl>(DC) || isa<NominalTypeDecl>(DC));
  auto where = forDeclSignature(DC->getInnermostDeclarationDeclContext());

  where.Exported &= proto->getFormalAccessScope(
      DC, /*usableFromInlineAsPublic*/true).isPublic();

  return where;
}
/// Returns a copy of this context with the exportability reason replaced.
ExportContext ExportContext::withReason(ExportabilityReason reason) const {
  ExportContext result(*this);
  result.Reason = unsigned(reason);
  return result;
}
/// Returns a copy of this context whose exported flag is the conjunction of
/// the current flag and \p exported (exportedness can only be narrowed).
ExportContext ExportContext::withExported(bool exported) const {
  ExportContext result(*this);
  result.Exported = exported && isExported();
  return result;
}
/// Returns the platform the context is unavailable on, or None if the
/// context is not unavailable.
Optional<PlatformKind> ExportContext::getUnavailablePlatformKind() const {
  if (!Unavailable)
    return None;
  return PlatformKind(Platform);
}
/// Returns true if code in this context may only reference exported
/// declarations: either the context itself is exported, or it is inside a
/// fragile (e.g. @inlinable) function.
bool ExportContext::mustOnlyReferenceExportedDecls() const {
  if (Exported)
    return true;
  return FragileKind.kind != FragileFunctionKind::None;
}
/// Returns the reason this context is exported, or None when it is not
/// exported at all.
Optional<ExportabilityReason> ExportContext::getExportabilityReason() const {
  if (!Exported)
    return None;
  return ExportabilityReason(Reason);
}
/// Returns the first availability attribute on the declaration that is active
/// on the target platform.
static const AvailableAttr *getActiveAvailableAttribute(const Decl *D,
ASTContext &AC) {
for (auto Attr : D->getAttrs())
if (auto AvAttr = dyn_cast<AvailableAttr>(Attr)) {
if (!AvAttr->isInvalid() && AvAttr->isActivePlatform(AC)) {
return AvAttr;
}
}
return nullptr;
}
/// Returns true if there is any availability attribute on the declaration
/// that is active on the target platform.
static bool hasActiveAvailableAttribute(Decl *D,
                                        ASTContext &AC) {
  return getActiveAvailableAttribute(D, AC) != nullptr;
}
namespace {
/// A class to walk the AST to build the type refinement context hierarchy.
/// A class to walk the AST to build the type refinement context hierarchy.
class TypeRefinementContextBuilder : private ASTWalker {

  struct ContextInfo {
    TypeRefinementContext *TRC;

    /// The node whose end marks the end of the refinement context.
    /// If the builder sees this node in a post-visitor, it will pop
    /// the context from the stack. This node can be null (ParentTy()),
    /// indicating that custom logic elsewhere will handle removing
    /// the context when needed.
    ParentTy ScopeNode;
  };

  // Stack of contexts currently open during the walk; the back entry is the
  // innermost refinement context.
  std::vector<ContextInfo> ContextStack;
  ASTContext &Context;

  /// A mapping from abstract storage declarations with accessors to
  /// to the type refinement contexts for those declarations. We refer to
  /// this map to determine the appropriate parent TRC to use when
  /// walking the accessor function.
  llvm::DenseMap<AbstractStorageDecl *, TypeRefinementContext *>
      StorageContexts;

  // Returns the innermost refinement context being built.
  TypeRefinementContext *getCurrentTRC() {
    return ContextStack.back().TRC;
  }

  // Opens a new context that will be closed when PopAfterNode is seen in a
  // post-visitor (or never, for a null ParentTy()).
  void pushContext(TypeRefinementContext *TRC, ParentTy PopAfterNode) {
    ContextInfo Info;
    Info.TRC = TRC;
    Info.ScopeNode = PopAfterNode;
    ContextStack.push_back(Info);
  }

public:
  TypeRefinementContextBuilder(TypeRefinementContext *TRC, ASTContext &Context)
      : Context(Context) {
    assert(TRC);
    pushContext(TRC, ParentTy());
  }

  // Walks D, asserting that every context pushed during the walk was popped.
  void build(Decl *D) {
    unsigned StackHeight = ContextStack.size();
    D->walk(*this);
    assert(ContextStack.size() == StackHeight);
    (void)StackHeight;
  }

  // Walks S, asserting that every context pushed during the walk was popped.
  void build(Stmt *S) {
    unsigned StackHeight = ContextStack.size();
    S->walk(*this);
    assert(ContextStack.size() == StackHeight);
    (void)StackHeight;
  }

  // Walks E, asserting that every context pushed during the walk was popped.
  void build(Expr *E) {
    unsigned StackHeight = ContextStack.size();
    E->walk(*this);
    assert(ContextStack.size() == StackHeight);
    (void)StackHeight;
  }

private:
  // Opens a refinement context for D, if one is needed, before walking into
  // its children.
  bool walkToDeclPre(Decl *D) override {
    TypeRefinementContext *DeclTRC = getNewContextForWalkOfDecl(D);

    if (DeclTRC) {
      pushContext(DeclTRC, D);
    }

    return true;
  }

  // Closes the context opened for D, if any.
  bool walkToDeclPost(Decl *D) override {
    if (ContextStack.back().ScopeNode.getAsDecl() == D) {
      ContextStack.pop_back();
    }
    return true;
  }

  /// Returns a new context to be introduced for the declaration, or nullptr
  /// if no new context should be introduced.
  TypeRefinementContext *getNewContextForWalkOfDecl(Decl *D) {
    if (auto accessor = dyn_cast<AccessorDecl>(D)) {
      // Use TRC of the storage rather the current TRC when walking this
      // function.
      auto it = StorageContexts.find(accessor->getStorage());
      if (it != StorageContexts.end()) {
        return it->second;
      }
    }

    if (declarationIntroducesNewContext(D)) {
      return buildDeclarationRefinementContext(D);
    }

    return nullptr;
  }

  /// Builds the type refinement hierarchy for the body of the function.
  TypeRefinementContext *buildDeclarationRefinementContext(Decl *D) {
    // We require a valid range in order to be able to query for the TRC
    // corresponding to a given SourceLoc.
    // If this assert fires, it means we have probably synthesized an implicit
    // declaration without location information. The appropriate fix is
    // probably to gin up a source range for the declaration when synthesizing
    // it.
    assert(D->getSourceRange().isValid());

    // The potential versions in the declaration are constrained by both
    // the declared availability of the declaration and the potential versions
    // of its lexical context.
    AvailabilityContext ExplicitDeclInfo =
        swift::AvailabilityInference::availableRange(D, Context);
    AvailabilityContext DeclInfo = ExplicitDeclInfo;
    DeclInfo.intersectWith(getCurrentTRC()->getAvailabilityInfo());

    TypeRefinementContext *NewTRC =
        TypeRefinementContext::createForDecl(Context, D, getCurrentTRC(),
                                             DeclInfo,
                                             ExplicitDeclInfo,
                                             refinementSourceRangeForDecl(D));

    // Record the TRC for this storage declaration so that
    // when we process the accessor, we can use this TRC as the
    // parent.
    if (auto *StorageDecl = dyn_cast<AbstractStorageDecl>(D)) {
      if (StorageDecl->hasParsedAccessors()) {
        StorageContexts[StorageDecl] = NewTRC;
      }
    }

    return NewTRC;
  }

  /// Returns true if the declaration should introduce a new refinement context.
  bool declarationIntroducesNewContext(Decl *D) {
    if (!isa<ValueDecl>(D) && !isa<ExtensionDecl>(D)) {
      return false;
    }

    // No need to introduce a context if the declaration does not have an
    // availability attribute.
    if (!hasActiveAvailableAttribute(D, Context)) {
      return false;
    }

    // Only introduce for an AbstractStorageDecl if it is not local.
    // We introduce for the non-local case because these may
    // have getters and setters (and these may be synthesized, so they might
    // not even exist yet).
    if (auto *storageDecl = dyn_cast<AbstractStorageDecl>(D)) {
      if (storageDecl->getDeclContext()->isLocalContext()) {
        // No need to
        return false;
      }
    }

    return true;
  }

  /// Returns the source range which should be refined by declaration. This
  /// provides a convenient place to specify the refined range when it is
  /// different than the declaration's source range.
  SourceRange refinementSourceRangeForDecl(Decl *D) {
    if (auto *storageDecl = dyn_cast<AbstractStorageDecl>(D)) {
      // Use the declaration's availability for the context when checking
      // the bodies of its accessors.

      // HACK: For synthesized trivial accessors we may have not a valid
      // location for the end of the braces, so in that case we will fall back
      // to using the range for the storage declaration. The right fix here is
      // to update AbstractStorageDecl::addTrivialAccessors() to take brace
      // locations and have callers of that method provide appropriate source
      // locations.
      SourceLoc BracesEnd = storageDecl->getBracesRange().End;
      if (storageDecl->hasParsedAccessors() && BracesEnd.isValid()) {
        return SourceRange(storageDecl->getStartLoc(),
                           BracesEnd);
      }

      // For a variable declaration (without accessors) we use the range of the
      // containing pattern binding declaration to make sure that we include
      // any type annotation in the type refinement context range.
      if (auto varDecl = dyn_cast<VarDecl>(storageDecl)) {
        auto *PBD = varDecl->getParentPatternBinding();
        if (PBD)
          return PBD->getSourceRange();
      }
    }

    return D->getSourceRange();
  }

  // Statements with availability conditions get dedicated handling and their
  // children are walked manually (hence the `false` returns).
  std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
    if (auto *IS = dyn_cast<IfStmt>(S)) {
      buildIfStmtRefinementContext(IS);
      return std::make_pair(false, S);
    }

    if (auto *RS = dyn_cast<GuardStmt>(S)) {
      buildGuardStmtRefinementContext(RS);
      return std::make_pair(false, S);
    }

    if (auto *WS = dyn_cast<WhileStmt>(S)) {
      buildWhileStmtRefinementContext(WS);
      return std::make_pair(false, S);
    }

    return std::make_pair(true, S);
  }

  Stmt *walkToStmtPost(Stmt *S) override {
    // If we have multiple guard statements in the same block
    // then we may have multiple refinement contexts to pop
    // after walking that block.
    while (!ContextStack.empty() &&
           ContextStack.back().ScopeNode.getAsStmt() == S) {
      ContextStack.pop_back();
    }

    return S;
  }

  /// Builds the type refinement hierarchy for the IfStmt if the guard
  /// introduces a new refinement context for the Then branch.
  /// There is no need for the caller to explicitly traverse the children
  /// of this node.
  void buildIfStmtRefinementContext(IfStmt *IS) {
    Optional<AvailabilityContext> ThenRange;
    Optional<AvailabilityContext> ElseRange;
    std::tie(ThenRange, ElseRange) =
        buildStmtConditionRefinementContext(IS->getCond());

    if (ThenRange.hasValue()) {
      // Create a new context for the Then branch and traverse it in that new
      // context.
      auto *ThenTRC =
          TypeRefinementContext::createForIfStmtThen(Context, IS,
                                                     getCurrentTRC(),
                                                     ThenRange.getValue());
      TypeRefinementContextBuilder(ThenTRC, Context).build(IS->getThenStmt());
    } else {
      build(IS->getThenStmt());
    }

    Stmt *ElseStmt = IS->getElseStmt();
    if (!ElseStmt)
      return;

    // Refine the else branch if we're given a version range for that branch.
    // For now, if present, this will only be the empty range, indicating
    // that the branch is dead. We use it to suppress potential unavailability
    // and deprecation diagnostics on code that definitely will not run with
    // the current platform and minimum deployment target.
    // If we add a more precise version range lattice (i.e., one that can
    // support "<") we should create non-empty contexts for the Else branch.
    if (ElseRange.hasValue()) {
      // Create a new context for the Then branch and traverse it in that new
      // context.
      auto *ElseTRC =
          TypeRefinementContext::createForIfStmtElse(Context, IS,
                                                     getCurrentTRC(),
                                                     ElseRange.getValue());
      TypeRefinementContextBuilder(ElseTRC, Context).build(ElseStmt);
    } else {
      build(IS->getElseStmt());
    }
  }

  /// Builds the type refinement hierarchy for the WhileStmt if the guard
  /// introduces a new refinement context for the body branch.
  /// There is no need for the caller to explicitly traverse the children
  /// of this node.
  void buildWhileStmtRefinementContext(WhileStmt *WS) {
    Optional<AvailabilityContext> BodyRange =
        buildStmtConditionRefinementContext(WS->getCond()).first;

    if (BodyRange.hasValue()) {
      // Create a new context for the body and traverse it in the new
      // context.
      auto *BodyTRC = TypeRefinementContext::createForWhileStmtBody(
          Context, WS, getCurrentTRC(), BodyRange.getValue());
      TypeRefinementContextBuilder(BodyTRC, Context).build(WS->getBody());
    } else {
      build(WS->getBody());
    }
  }

  /// Builds the type refinement hierarchy for the GuardStmt and pushes
  /// the fallthrough context onto the context stack so that subsequent
  /// AST elements in the same scope are analyzed in the context of the
  /// fallthrough TRC.
  void buildGuardStmtRefinementContext(GuardStmt *GS) {
    // 'guard' statements fall through if all of the
    // guard conditions are true, so we refine the range after the require
    // until the end of the enclosing block.
    // if ... {
    //   guard available(...) else { return } <-- Refined range starts here
    //   ...
    // } <-- Refined range ends here
    //
    // This is slightly tricky because, unlike our other control constructs,
    // the refined region is not lexically contained inside the construct
    // introducing the refinement context.
    Optional<AvailabilityContext> FallthroughRange;
    Optional<AvailabilityContext> ElseRange;
    std::tie(FallthroughRange, ElseRange) =
        buildStmtConditionRefinementContext(GS->getCond());

    if (Stmt *ElseBody = GS->getBody()) {
      if (ElseRange.hasValue()) {
        auto *TrueTRC = TypeRefinementContext::createForGuardStmtElse(
            Context, GS, getCurrentTRC(), ElseRange.getValue());

        TypeRefinementContextBuilder(TrueTRC, Context).build(ElseBody);
      } else {
        build(ElseBody);
      }
    }

    auto *ParentBrace = dyn_cast<BraceStmt>(Parent.getAsStmt());
    assert(ParentBrace && "Expected parent of GuardStmt to be BraceStmt");
    if (!FallthroughRange.hasValue())
      return;

    // Create a new context for the fallthrough.
    // The fallthrough TRC is popped by walkToStmtPost when the enclosing
    // brace statement ends.
    auto *FallthroughTRC =
        TypeRefinementContext::createForGuardStmtFallthrough(Context, GS,
            ParentBrace, getCurrentTRC(), FallthroughRange.getValue());

    pushContext(FallthroughTRC, ParentBrace);
  }

  /// Build the type refinement context for a StmtCondition and return a pair
  /// of optional version ranges, the first for the true branch and the second
  /// for the false branch. A value of None for a given branch indicates that
  /// the branch does not introduce a new refinement.
  std::pair<Optional<AvailabilityContext>, Optional<AvailabilityContext>>
  buildStmtConditionRefinementContext(StmtCondition Cond) {

    // Any refinement contexts introduced in the statement condition
    // will end at the end of the last condition element.
    StmtConditionElement LastElement = Cond.back();

    // Keep track of how many nested refinement contexts we have pushed on
    // the context stack so we can pop them when we're done building the
    // context for the StmtCondition.
    unsigned NestedCount = 0;

    // Tracks the potential version range when the condition is false.
    auto FalseFlow = AvailabilityContext::neverAvailable();

    TypeRefinementContext *StartingTRC = getCurrentTRC();

    // Tracks if we're refining for availability or unavailability.
    Optional<bool> isUnavailability = None;

    for (StmtConditionElement Element : Cond) {
      TypeRefinementContext *CurrentTRC = getCurrentTRC();
      AvailabilityContext CurrentInfo = CurrentTRC->getAvailabilityInfo();
      AvailabilityContext CurrentExplicitInfo =
        CurrentTRC->getExplicitAvailabilityInfo();

      // If the element is not a condition, walk it in the current TRC.
      if (Element.getKind() != StmtConditionElement::CK_Availability) {

        // Assume any condition element that is not a #available() can
        // potentially be false, so conservatively combine the version
        // range of the current context with the accumulated false flow
        // of all other conjuncts.
        FalseFlow.unionWith(CurrentInfo);

        Element.walk(*this);
        continue;
      }

      // #available query: introduce a new refinement context for the statement
      // condition elements following it.
      auto *Query = Element.getAvailability();

      if (isUnavailability == None) {
        isUnavailability = Query->isUnavailability();
      } else if (isUnavailability != Query->isUnavailability()) {
        // Mixing availability with unavailability in the same statement will
        // cause the false flow's version range to be ambiguous. Report it.
        //
        // Technically we can support this by not refining ambiguous flows,
        // but there are currently no legitimate cases where one would have
        // to mix availability with unavailability.
        Context.Diags.diagnose(Query->getLoc(),
                               diag::availability_cannot_be_mixed);
        break;
      }

      // If this query expression has no queries, we will not introduce a new
      // refinement context. We do not diagnose here: a diagnostic will already
      // have been emitted by the parser.
      // For #unavailable, empty queries are valid as wildcards are implied.
      if (!Query->isUnavailability() && Query->getQueries().empty())
        continue;

      AvailabilitySpec *Spec = bestActiveSpecForQuery(Query);
      if (!Spec) {
        // We couldn't find an appropriate spec for the current platform,
        // so rather than refining, emit a diagnostic and just use the current
        // TRC.
        Context.Diags.diagnose(
            Query->getLoc(), diag::availability_query_required_for_platform,
            platformString(targetPlatform(Context.LangOpts)));

        continue;
      }

      AvailabilityContext NewConstraint = contextForSpec(Spec, false);
      Query->setAvailableRange(contextForSpec(Spec, true).getOSVersion());

      // When compiling zippered for macCatalyst, we need to collect both
      // a macOS version (the target version) and an iOS/macCatalyst version
      // (the target-variant). These versions will both be passed to a runtime
      // entrypoint that will check either the macOS version or the iOS
      // version depending on the kind of process this code is loaded into.
      if (Context.LangOpts.TargetVariant) {
        AvailabilitySpec *VariantSpec =
            bestActiveSpecForQuery(Query, /*ForTargetVariant*/ true);
        VersionRange VariantRange =
            contextForSpec(VariantSpec, true).getOSVersion();
        Query->setVariantAvailableRange(VariantRange);
      }

      if (Spec->getKind() == AvailabilitySpecKind::OtherPlatform) {
        // The wildcard spec '*' represents the minimum deployment target, so
        // there is no need to create a refinement context for this query.
        // Further, we won't diagnose for useless #available() conditions
        // where * matched on this platform -- presumably those conditions are
        // needed for some other platform.
        continue;
      }

      // If the explicitly-specified (via #availability) version range for the
      // current TRC is completely contained in the range for the spec, then
      // a version query can never be false, so the spec is useless.
      // If so, report this.
      if (CurrentExplicitInfo.isContainedIn(NewConstraint)) {
        // Unavailability refinements are always "useless" from a symbol
        // availability point of view, so only useless availability specs are
        // reported.
        if (isUnavailability.getValue()) {
          continue;
        }
        DiagnosticEngine &Diags = Context.Diags;
        if (CurrentTRC->getReason() != TypeRefinementContext::Reason::Root) {
          PlatformKind BestPlatform = targetPlatform(Context.LangOpts);
          auto *PlatformSpec =
              dyn_cast<PlatformVersionConstraintAvailabilitySpec>(Spec);
          // If possible, try to report the diagnostic in terms for the
          // platform the user uttered in the '#available()'. For a platform
          // that inherits availability from another platform it may be
          // different from the platform specified in the target triple.
          if (PlatformSpec)
            BestPlatform = PlatformSpec->getPlatform();
          Diags.diagnose(Query->getLoc(),
                         diag::availability_query_useless_enclosing_scope,
                         platformString(BestPlatform));
          Diags.diagnose(CurrentTRC->getIntroductionLoc(),
                         diag::availability_query_useless_enclosing_scope_here);
        }
      }

      if (CurrentInfo.isContainedIn(NewConstraint)) {
        // No need to actually create the refinement context if we know it is
        // useless.
        continue;
      }

      // If the #available() is not useless then there is potential false flow,
      // so join the false flow with the potential versions of the current
      // context.
      // We could be more precise here if we enriched the lattice to include
      // ranges of the form [x, y).
      FalseFlow.unionWith(CurrentInfo);

      auto *TRC = TypeRefinementContext::createForConditionFollowingQuery(
          Context, Query, LastElement, CurrentTRC, NewConstraint);

      pushContext(TRC, ParentTy());
      ++NestedCount;
    }

    Optional<AvailabilityContext> FalseRefinement = None;
    // The version range for the false branch should never have any versions
    // that weren't possible when the condition started evaluating.
    assert(FalseFlow.isContainedIn(StartingTRC->getAvailabilityInfo()));

    // If the starting version range is not completely contained in the
    // false flow version range then it must be the case that false flow range
    // is strictly smaller than the starting range (because the false flow
    // range *is* contained in the starting range), so we should introduce a
    // new refinement for the false flow.
    if (!StartingTRC->getAvailabilityInfo().isContainedIn(FalseFlow)) {
      FalseRefinement = FalseFlow;
    }

    auto makeResult =
        [isUnavailability](Optional<AvailabilityContext> TrueRefinement,
                           Optional<AvailabilityContext> FalseRefinement) {
      if (isUnavailability.hasValue() && isUnavailability.getValue()) {
        // If this is an unavailability check, invert the result.
        return std::make_pair(FalseRefinement, TrueRefinement);
      }
      return std::make_pair(TrueRefinement, FalseRefinement);
    };

    if (NestedCount == 0)
      return makeResult(None, FalseRefinement);

    TypeRefinementContext *NestedTRC = getCurrentTRC();
    while (NestedCount-- > 0)
      ContextStack.pop_back();

    assert(getCurrentTRC() == StartingTRC);

    return makeResult(NestedTRC->getAvailabilityInfo(), FalseRefinement);
  }

  /// Return the best active spec for the target platform or nullptr if no
  /// such spec exists.
  AvailabilitySpec *bestActiveSpecForQuery(PoundAvailableInfo *available,
                                           bool forTargetVariant = false) {
    OtherPlatformAvailabilitySpec *FoundOtherSpec = nullptr;
    PlatformVersionConstraintAvailabilitySpec *BestSpec = nullptr;

    for (auto *Spec : available->getQueries()) {
      if (auto *OtherSpec = dyn_cast<OtherPlatformAvailabilitySpec>(Spec)) {
        FoundOtherSpec = OtherSpec;
        continue;
      }

      auto *VersionSpec =
          dyn_cast<PlatformVersionConstraintAvailabilitySpec>(Spec);
      if (!VersionSpec)
        continue;

      // FIXME: This is not quite right: we want to handle AppExtensions
      // properly. For example, on the OSXApplicationExtension platform
      // we want to chose the OS X spec unless there is an explicit
      // OSXApplicationExtension spec.
      if (isPlatformActive(VersionSpec->getPlatform(), Context.LangOpts,
                           forTargetVariant)) {
        // Prefer the most specific spec: one whose platform inherits
        // availability from the previously-best platform wins.
        if (!BestSpec ||
            inheritsAvailabilityFromPlatform(VersionSpec->getPlatform(),
                                             BestSpec->getPlatform())) {
          BestSpec = VersionSpec;
        }
      }
    }

    if (BestSpec)
      return BestSpec;

    // If we have reached this point, we found no spec for our target, so
    // we return the other spec ('*'), if we found it, or nullptr, if not.
    if (FoundOtherSpec) {
      return FoundOtherSpec;
    } else if (available->isUnavailability()) {
      // For #unavailable, imply the presence of a wildcard.
      SourceLoc Loc = available->getRParenLoc();
      return new (Context) OtherPlatformAvailabilitySpec(Loc);
    } else {
      return nullptr;
    }
  }

  /// Return the availability context for the given spec.
  /// \p GetRuntimeContext selects the runtime version over the declared
  /// version for platform-version specs.
  AvailabilityContext contextForSpec(AvailabilitySpec *Spec,
                                     bool GetRuntimeContext) {
    if (isa<OtherPlatformAvailabilitySpec>(Spec)) {
      return AvailabilityContext::alwaysAvailable();
    }

    auto *VersionSpec = cast<PlatformVersionConstraintAvailabilitySpec>(Spec);

    llvm::VersionTuple Version = (GetRuntimeContext ?
                                  VersionSpec->getRuntimeVersion() :
                                  VersionSpec->getVersion());

    return AvailabilityContext(VersionRange::allGTE(Version));
  }

  // Closes the context opened for E, if any (e.g. a condition-following
  // context anchored to the last condition element).
  Expr *walkToExprPost(Expr *E) override {
    if (ContextStack.back().ScopeNode.getAsExpr() == E) {
      ContextStack.pop_back();
    }

    return E;
  }
};
} // end anonymous namespace
/// Builds (or extends) the type refinement context hierarchy for the given
/// source file, creating the root context on first use.
void TypeChecker::buildTypeRefinementContextHierarchy(SourceFile &SF) {
  TypeRefinementContext *RootTRC = SF.getTypeRefinementContext();
  ASTContext &Context = SF.getASTContext();

  if (!RootTRC) {
    // The root type refinement context reflects the fact that all parts of
    // the source file are guaranteed to be executing on at least the minimum
    // platform version.
    auto MinPlatformReq = AvailabilityContext::forDeploymentTarget(Context);
    RootTRC = TypeRefinementContext::createRoot(&SF, MinPlatformReq);
    SF.setTypeRefinementContext(RootTRC);
  }

  // Build refinement contexts, if necessary, for all top-level declarations
  // in the file.
  TypeRefinementContextBuilder Builder(RootTRC, Context);
  for (auto D : SF.getTopLevelDecls()) {
    Builder.build(D);
  }
}
/// Builds refinement contexts for the body of a function whose parsing was
/// delayed. The body must be parsed before the hierarchy can be extended, and
/// the new contexts are grafted under the most refined existing context
/// enclosing the function.
void TypeChecker::buildTypeRefinementContextHierarchyDelayed(SourceFile &SF, AbstractFunctionDecl *AFD) {
  // If there's no TRC for the file, we likely don't want this one either.
  // RootTRC is not set when availability checking is disabled.
  TypeRefinementContext *RootTRC = SF.getTypeRefinementContext();
  if(!RootTRC)
    return;

  // Only handle bodies that have not been parsed yet; anything else was
  // already covered by the eager hierarchy build.
  if (AFD->getBodyKind() != AbstractFunctionDecl::BodyKind::Unparsed)
    return;

  // Parse the function body.
  AFD->getBody(/*canSynthesize=*/true);

  // Build the refinement context for the function body.
  ASTContext &Context = SF.getASTContext();
  auto LocalTRC = RootTRC->findMostRefinedSubContext(AFD->getLoc(), Context.SourceMgr);
  TypeRefinementContextBuilder Builder(LocalTRC, Context);
  Builder.build(AFD);
}
/// Return the source file's refinement-context tree, constructing it lazily
/// on first use.
TypeRefinementContext *
TypeChecker::getOrBuildTypeRefinementContext(SourceFile *SF) {
  if (auto *Existing = SF->getTypeRefinementContext())
    return Existing;

  buildTypeRefinementContextHierarchy(*SF);
  return SF->getTypeRefinementContext();
}
/// Over-approximate the set of OS versions that may be running when the code
/// at \p loc inside \p DC executes. If \p MostRefined is non-null, it
/// receives the most-refined TypeRefinementContext found for \p loc (only
/// when \p loc is valid and a source file is available).
AvailabilityContext
TypeChecker::overApproximateAvailabilityAtLocation(SourceLoc loc,
const DeclContext *DC,
const TypeRefinementContext **MostRefined) {
SourceFile *SF = DC->getParentSourceFile();
auto &Context = DC->getASTContext();
// If our source location is invalid (this may be synthesized code), climb
// the decl context hierarchy until we find a location that is valid,
// collecting availability ranges on the way up.
// We will combine the version ranges from these annotations
// with the TRC for the valid location to overapproximate the running
// OS versions at the original source location.
// Because we are climbing DeclContexts we will miss refinement contexts in
// synthesized code that are introduced by AST elements that are themselves
// not DeclContexts, such as #available(..) and property declarations.
// That is, a reference with an invalid location that is contained
// inside a #available() and with no intermediate DeclContext will not be
// refined. For now, this is fine -- but if we ever synthesize #available(),
// this will be a real problem.
// We can assume we are running on at least the minimum deployment target.
auto OverApproximateContext =
AvailabilityContext::forDeploymentTarget(Context);
// With no parent source file there is no TRC tree to consult, so treat
// every location as invalid and rely solely on declaration annotations.
auto isInvalidLoc = [SF](SourceLoc loc) {
return SF ? loc.isInvalid() : true;
};
while (DC && isInvalidLoc(loc)) {
const Decl *D = DC->getInnermostDeclarationDeclContext();
if (!D)
break;
loc = D->getLoc();
Optional<AvailabilityContext> Info =
AvailabilityInference::annotatedAvailableRange(D, Context);
if (Info.hasValue()) {
OverApproximateContext.constrainWith(Info.getValue());
}
DC = D->getDeclContext();
}
if (SF && loc.isValid()) {
TypeRefinementContext *rootTRC = getOrBuildTypeRefinementContext(SF);
TypeRefinementContext *TRC =
rootTRC->findMostRefinedSubContext(loc, Context.SourceMgr);
OverApproximateContext.constrainWith(TRC->getAvailabilityInfo());
if (MostRefined) {
*MostRefined = TRC;
}
}
return OverApproximateContext;
}
/// Returns true if a reference to \p D from \p referenceDC may be
/// unavailable at runtime: the over-approximated set of running OS versions
/// (produced lazily by \p getAvailabilityContext) is not contained in the
/// under-approximated availability range of \p D.
bool TypeChecker::isDeclarationUnavailable(
const Decl *D, const DeclContext *referenceDC,
llvm::function_ref<AvailabilityContext()> getAvailabilityContext) {
ASTContext &Context = referenceDC->getASTContext();
if (Context.LangOpts.DisableAvailabilityChecking) {
return false;
}
if (!referenceDC->getParentSourceFile()) {
// We only check availability if this reference is in a source file; we do
// not check in other kinds of FileUnits.
return false;
}
AvailabilityContext safeRangeUnderApprox{
AvailabilityInference::availableRange(D, Context)};
// A declaration that is always available can never be a problem; skip the
// (potentially costly) availability-context computation entirely.
if (safeRangeUnderApprox.isAlwaysAvailable())
return false;
AvailabilityContext runningOSOverApprox = getAvailabilityContext();
// The reference is safe if an over-approximation of the running OS
// versions is fully contained within an under-approximation
// of the versions on which the declaration is available. If this
// containment cannot be guaranteed, we say the reference is
// not available.
return !runningOSOverApprox.isContainedIn(safeRangeUnderApprox);
}
/// Check whether a reference to \p D from \p Where is potentially
/// unavailable; on failure, return the reason carrying the OS version range
/// the declaration requires. Returns None when the reference is safe.
Optional<UnavailabilityReason>
TypeChecker::checkDeclarationAvailability(const Decl *D,
                                          const ExportContext &Where) {
  auto availabilityAtUse = [&Where] { return Where.getAvailabilityContext(); };
  if (!isDeclarationUnavailable(D, Where.getDeclContext(), availabilityAtUse))
    return None;

  auto &Context = Where.getDeclContext()->getASTContext();
  AvailabilityContext DeclRange{
      AvailabilityInference::availableRange(D, Context)};
  return UnavailabilityReason::requiresVersionRange(DeclRange.getOSVersion());
}
/// A conformance's availability is determined by the availability of the
/// extension that declares it, so delegate to the declaration check.
Optional<UnavailabilityReason>
TypeChecker::checkConformanceAvailability(const RootProtocolConformance *conf,
                                          const ExtensionDecl *ext,
                                          const ExportContext &where) {
  // 'conf' is unused here; the extension carries the availability.
  return checkDeclarationAvailability(ext, where);
}
/// A class that walks the AST to find the innermost (i.e., deepest) node that
/// contains a target SourceRange and matches a particular criterion.
/// This class finds the innermost nodes of interest by walking
/// down the root until it has found the target range (in a Pre-visitor)
/// and then recording the innermost node on the way back up in the
/// Post-visitors. It does its best to not search unnecessary subtrees,
/// although this is complicated by the fact that not all nodes have
/// source range information.
class InnermostAncestorFinder : private ASTWalker {
public:
/// The type of a match predicate, which takes as input a node and its
/// parent and returns a bool indicating whether the node matches.
using MatchPredicate = std::function<bool(ASTNode, ASTWalker::ParentTy)>;
private:
const SourceRange TargetRange;
const SourceManager &SM;
const MatchPredicate Predicate;
// Set once a visited node's range is fully contained in TargetRange;
// after that no deeper traversal is needed.
bool FoundTarget = false;
// The innermost matching node recorded on the way back up, if any.
Optional<ASTNode> InnermostMatchingNode;
public:
InnermostAncestorFinder(SourceRange TargetRange, const SourceManager &SM,
ASTNode SearchNode, const MatchPredicate &Predicate)
: TargetRange(TargetRange), SM(SM), Predicate(Predicate) {
assert(TargetRange.isValid());
// The walk runs eagerly in the constructor; the result is available
// immediately via getInnermostMatchingNode().
SearchNode.walk(*this);
}
/// Returns the innermost node containing the target range that matches
/// the predicate.
Optional<ASTNode> getInnermostMatchingNode() { return InnermostMatchingNode; }
std::pair<bool, Expr *> walkToExprPre(Expr *E) override {
return std::make_pair(walkToRangePre(E->getSourceRange()), E);
}
std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
return std::make_pair(walkToRangePre(S->getSourceRange()), S);
}
bool walkToDeclPre(Decl *D) override {
return walkToRangePre(D->getSourceRange());
}
std::pair<bool, Pattern *> walkToPatternPre(Pattern *P) override {
return std::make_pair(walkToRangePre(P->getSourceRange()), P);
}
bool walkToTypeReprPre(TypeRepr *T) override {
return walkToRangePre(T->getSourceRange());
}
/// Returns true if the walker should traverse an AST node with
/// source range Range.
bool walkToRangePre(SourceRange Range) {
// When walking down the tree, we traverse until we have found a node
// inside the target range. Once we have found such a node, there is no
// need to traverse any deeper.
if (FoundTarget)
return false;
// If we haven't found our target yet and the node we are pre-visiting
// doesn't have a valid range, we still have to traverse it because its
// subtrees may have valid ranges.
if (Range.isInvalid())
return true;
// We have found our target if the range of the node we are visiting
// is contained in the range we are looking for.
FoundTarget = SM.rangeContains(TargetRange, Range);
if (FoundTarget)
return false;
// Search the subtree if the target range is inside its range.
return SM.rangeContains(Range, TargetRange);
}
Expr *walkToExprPost(Expr *E) override {
// A null return (mirroring walkToNodePost's false) ends the walk once a
// match has been recorded.
if (walkToNodePost(E)) {
return E;
}
return nullptr;
}
Stmt *walkToStmtPost(Stmt *S) override {
if (walkToNodePost(S)) {
return S;
}
return nullptr;
}
bool walkToDeclPost(Decl *D) override {
return walkToNodePost(D);
}
/// Once we have found the target node, look for the innermost ancestor
/// matching our criteria on the way back up the spine of the tree.
bool walkToNodePost(ASTNode Node) {
if (!InnermostMatchingNode.hasValue() && Predicate(Node, Parent)) {
assert(Node.getSourceRange().isInvalid() ||
SM.rangeContains(Node.getSourceRange(), TargetRange));
InnermostMatchingNode = Node;
return false;
}
return true;
}
};
/// Starting from SearchRoot, find the innermost node containing ChildRange
/// for which Predicate returns true. Returns None if no such node is found.
static Optional<ASTNode> findInnermostAncestor(
    SourceRange ChildRange, const SourceManager &SM, ASTNode SearchRoot,
    const InnermostAncestorFinder::MatchPredicate &Predicate) {
  // The finder performs the whole walk in its constructor.
  return InnermostAncestorFinder(ChildRange, SM, SearchRoot, Predicate)
      .getInnermostMatchingNode();
}
/// Given a reference range and a declaration context containing the range,
/// attempt to find a declaration containing the reference. This may not
/// be the innermost declaration containing the range.
/// Returns null if no such declaration can be found.
static const Decl *findContainingDeclaration(SourceRange ReferenceRange,
const DeclContext *ReferenceDC,
const SourceManager &SM) {
// A candidate declaration must textually contain the reference range.
auto ContainsReferenceRange = [&](const Decl *D) -> bool {
if (ReferenceRange.isInvalid())
return false;
// Members of an active #if are represented both inside the
// IfConfigDecl and in the enclosing context. Skip over the IfConfigDecl
// so that the member declaration is found rather than the #if itself.
if (isa<IfConfigDecl>(D))
return false;
return SM.rangeContains(D->getSourceRange(), ReferenceRange);
};
if (const Decl *D = ReferenceDC->getInnermostDeclarationDeclContext()) {
// If we have an inner declaration context, see if we can narrow the search
// down to one of its members. This is important for properties, which don't
// count as DeclContexts of their own but which can still introduce
// availability.
if (auto *IDC = dyn_cast<IterableDeclContext>(D)) {
auto BestMember = llvm::find_if(IDC->getMembers(),
ContainsReferenceRange);
if (BestMember != IDC->getMembers().end())
return *BestMember;
}
return D;
}
// We couldn't find a suitable node by climbing the DeclContext hierarchy, so
// fall back to looking for a top-level declaration that contains the
// reference range. We will hit this case for top-level elements that do not
// themselves introduce DeclContexts, such as global variables. If we don't
// have a reference range, there is nothing we can do, so return null.
if (ReferenceRange.isInvalid())
return nullptr;
SourceFile *SF = ReferenceDC->getParentSourceFile();
if (!SF)
return nullptr;
auto BestTopLevelDecl = llvm::find_if(SF->getTopLevelDecls(),
ContainsReferenceRange);
if (BestTopLevelDecl != SF->getTopLevelDecls().end())
return *BestTopLevelDecl;
return nullptr;
}
/// Given a declaration that allows availability attributes in the abstract
/// syntax tree, return the declaration upon which the attribute would appear
/// in concrete syntax. The parser attaches availability attributes to
/// declarations other than those on which they concretely appear (enum
/// elements and variable declarations), so a Fix-It adding an attribute must
/// target the concrete declaration instead.
///
/// Keep in sync with abstractSyntaxDeclForAvailableAttribute().
static const Decl *
concreteSyntaxDeclForAvailableAttribute(const Decl *AbstractSyntaxDecl) {
  // A VarDecl's source range excludes 'var ' (and one 'var' may introduce
  // several variables), so suggest the attribute on the pattern binding.
  if (auto *Var = dyn_cast<VarDecl>(AbstractSyntaxDecl))
    return Var->getParentPatternBinding();

  // Likewise, enum elements are spelled inside a 'case'; target the case.
  if (auto *Element = dyn_cast<EnumElementDecl>(AbstractSyntaxDecl))
    return Element->getParentCase();

  return AbstractSyntaxDecl;
}
/// Given a declaration upon which an availability attribute would appear in
/// concrete syntax, return a declaration to which the parser actually
/// attaches the attribute in the abstract syntax tree. Used to determine
/// whether the concrete syntax already has an availability attribute.
///
/// Keep in sync with concreteSyntaxDeclForAvailableAttribute().
static const Decl *
abstractSyntaxDeclForAvailableAttribute(const Decl *ConcreteSyntaxDecl) {
  if (auto *PBD = dyn_cast<PatternBindingDecl>(ConcreteSyntaxDecl)) {
    // Parsed @available attributes on a pattern binding are propagated to
    // every VarDecl it anchors, so inspecting the first one is sufficient.
    if (PBD->getNumPatternEntries() != 0)
      return PBD->getAnchoringVarDecl(0);
  } else if (auto *CaseDecl = dyn_cast<EnumCaseDecl>(ConcreteSyntaxDecl)) {
    // Likewise, the first element stands in for the whole enum case.
    ArrayRef<EnumElementDecl *> Elements = CaseDecl->getElements();
    if (!Elements.empty())
      return Elements.front();
  }
  return ConcreteSyntaxDecl;
}
/// Given a declaration, return a better related declaration for which
/// to suggest an @available fixit, or the original declaration
/// if no such related declaration exists.
static const Decl *relatedDeclForAvailabilityFixit(const Decl *D) {
  // Redirect accessors to their storage so the Fix-It lands on the property
  // rather than an individual getter/setter.
  if (auto *Accessor = dyn_cast<AccessorDecl>(D))
    D = Accessor->getStorage();

  return abstractSyntaxDeclForAvailableAttribute(D);
}
/// Walk the DeclContext hierarchy starting from D to find a declaration
/// at the member level (i.e., declared in a type context) on which to provide
/// an @available() Fix-It. Returns null if no suitable declaration is found.
static const Decl *ancestorMemberLevelDeclForAvailabilityFixit(const Decl *D) {
while (D) {
D = relatedDeclForAvailabilityFixit(D);
// Stop at the first explicit member of a type that can legally carry an
// @available attribute.
if (!D->isImplicit() &&
D->getDeclContext()->isTypeContext() &&
DeclAttribute::canAttributeAppearOnDecl(DeclAttrKind::DAK_Available,
D)) {
break;
}
// Otherwise climb to the innermost enclosing method; cast_or_null yields
// null (terminating the loop) when there is no enclosing method context.
D = cast_or_null<AbstractFunctionDecl>(
D->getDeclContext()->getInnermostMethodContext());
}
return D;
}
/// Returns true if the declaration is at the type level (a nominal type, an
/// extension, or a module-scope function/variable) and can support an
/// @available attribute.
static bool isTypeLevelDeclForAvailabilityFixit(const Decl *D) {
  if (!DeclAttribute::canAttributeAppearOnDecl(DeclAttrKind::DAK_Available,
                                               D))
    return false;

  // Nominal types and extensions always qualify.
  if (isa<ExtensionDecl>(D) || isa<NominalTypeDecl>(D))
    return true;

  const bool AtModuleScope = D->getDeclContext()->isModuleScopeContext();

  // Global functions count as "type level".
  if (isa<FuncDecl>(D))
    return AtModuleScope;

  // A variable qualifies only when both it and its pattern binding are at
  // module scope.
  if (auto *Var = dyn_cast<VarDecl>(D)) {
    if (!AtModuleScope)
      return false;
    if (PatternBindingDecl *PBD = Var->getParentPatternBinding())
      return PBD->getDeclContext()->isModuleScopeContext();
  }

  return false;
}
/// Walk the DeclContext hierarchy starting from D to find a type-level
/// declaration (per isTypeLevelDeclForAvailabilityFixit) on which to provide
/// an @available() Fix-It. Returns null when the climb exhausts the
/// hierarchy without a match.
static const Decl *ancestorTypeLevelDeclForAvailabilityFixit(const Decl *D) {
  assert(D);
  for (D = relatedDeclForAvailabilityFixit(D);
       D && !isTypeLevelDeclForAvailabilityFixit(D);
       D = D->getDeclContext()->getInnermostDeclarationDeclContext()) {
    // Keep climbing until a suitable declaration (or null) is reached.
  }
  return D;
}
/// Given the range of a reference to an unavailable symbol and the
/// declaration context containing the reference, make a best effort find up to
/// three locations for potential fixits.
///
/// \param FoundVersionCheckNode Returns a node that can be wrapped in a
/// if #available(...) { ... } version check to fix the unavailable reference,
/// or None if such a node cannot be found.
///
/// \param FoundMemberLevelDecl Returns member-level declaration (i.e., the
/// child of a type DeclContext) for which an @available attribute would
/// fix the unavailable reference.
///
/// \param FoundTypeLevelDecl returns a type-level declaration (a
/// a nominal type, an extension, or a global function) for which an
/// @available attribute would fix the unavailable reference.
static void findAvailabilityFixItNodes(SourceRange ReferenceRange,
const DeclContext *ReferenceDC,
const SourceManager &SM,
Optional<ASTNode> &FoundVersionCheckNode,
const Decl *&FoundMemberLevelDecl,
const Decl *&FoundTypeLevelDecl) {
FoundVersionCheckNode = None;
FoundMemberLevelDecl = nullptr;
FoundTypeLevelDecl = nullptr;
// Limit tree to search based on the DeclContext of the reference.
const Decl *DeclarationToSearch =
findContainingDeclaration(ReferenceRange, ReferenceDC, SM);
if (!DeclarationToSearch)
return;
// Const-cast to inject into ASTNode. This search will not modify
// the declaration.
ASTNode SearchRoot = const_cast<Decl *>(DeclarationToSearch);
// The node to wrap in if #available(...) { ... } is the innermost node in
// SearchRoot that (1) can be guarded with an if statement and (2)
// contains the ReferenceRange.
// We make no guarantee that the Fix-It, when applied, will result in
// semantically valid code -- but, at a minimum, it should parse. So,
// for example, we may suggest wrapping a variable declaration in a guard,
// which would not be valid if the variable is later used. The goal
// is discoverability of #os() (via the diagnostic and Fix-It) rather than
// magically fixing the code in all cases.
// A node is guardable when its parent is a brace statement or a
// normally-type-checked closure; anything else cannot host an if statement.
InnermostAncestorFinder::MatchPredicate IsGuardable =
[](ASTNode Node, ASTWalker::ParentTy Parent) {
if (Expr *ParentExpr = Parent.getAsExpr()) {
auto *ParentClosure = dyn_cast<ClosureExpr>(ParentExpr);
if (!ParentClosure ||
ParentClosure->isSeparatelyTypeChecked()) {
return false;
}
} else if (auto *ParentStmt = Parent.getAsStmt()) {
if (!isa<BraceStmt>(ParentStmt)) {
return false;
}
} else {
return false;
}
return true;
};
FoundVersionCheckNode =
findInnermostAncestor(ReferenceRange, SM, SearchRoot, IsGuardable);
// Try to find declarations on which @available attributes can be added.
// The heuristics for finding these declarations are biased towards deeper
// nodes in the AST to limit the scope of suggested availability regions
// and provide a better IDE experience (it can get jumpy if Fix-It locations
// are far away from the error needing the Fix-It).
// NOTE(review): DeclarationToSearch is always non-null here because of the
// early return above; this guard is redundant but harmless.
if (DeclarationToSearch) {
FoundMemberLevelDecl =
ancestorMemberLevelDeclForAvailabilityFixit(DeclarationToSearch);
FoundTypeLevelDecl =
ancestorTypeLevelDeclForAvailabilityFixit(DeclarationToSearch);
}
}
/// Emit a diagnostic note and Fix-It to add an @available attribute
/// on the given declaration for the given version range.
static void fixAvailabilityForDecl(SourceRange ReferenceRange, const Decl *D,
const VersionRange &RequiredRange,
ASTContext &Context) {
assert(D);
// Don't suggest adding an @available() to a declaration where we would
// emit a diagnostic saying it is not allowed.
if (TypeChecker::diagnosticIfDeclCannotBePotentiallyUnavailable(D).hasValue())
return;
if (getActiveAvailableAttribute(D, Context)) {
// For QoI, in future should emit a fixit to update the existing attribute.
return;
}
// For some declarations (variables, enum elements), the location in concrete
// syntax to suggest the Fix-It may differ from the declaration to which
// we attach availability attributes in the abstract syntax tree during
// parsing.
const Decl *ConcDecl = concreteSyntaxDeclForAvailableAttribute(D);
// To avoid exposing the pattern binding declaration to the user, report the
// descriptive kind of the VarDecl itself.
DescriptiveDeclKind KindForDiagnostic = ConcDecl->getDescriptiveKind();
if (KindForDiagnostic == DescriptiveDeclKind::PatternBinding) {
KindForDiagnostic = D->getDescriptiveKind();
}
// Insert before D's attributes if it has any; otherwise fall back to the
// start of the concrete-syntax declaration. (An earlier revision also
// computed a per-branch location and an AttrsForLoc copy above, but both
// were dead stores -- the location was unconditionally overwritten here.)
SourceLoc InsertLoc = D->getAttrs().getStartLoc(/*forModifiers=*/false);
if (InsertLoc.isInvalid()) {
InsertLoc = ConcDecl->getStartLoc();
}
// Without any valid location we cannot attach a Fix-It at all.
if (InsertLoc.isInvalid())
return;
StringRef OriginalIndent =
Lexer::getIndentationForLine(Context.SourceMgr, InsertLoc);
PlatformKind Target = targetPlatform(Context.LangOpts);
D->diagnose(diag::availability_add_attribute, KindForDiagnostic)
.fixItInsert(InsertLoc, diag::insert_available_attr,
platformString(Target),
RequiredRange.getLowerEndpoint().getAsString(),
OriginalIndent);
}
/// In the special case of being in an existing, nontrivial type refinement
/// context that's close but not quite narrow enough to satisfy requirements
/// (i.e. requirements are contained-in the existing TRC but off by a subminor
/// version), emit a diagnostic and fixit that narrows the existing TRC
/// condition to the required range. Returns true if such a fixit was
/// attached to \p Err.
static bool fixAvailabilityByNarrowingNearbyVersionCheck(
SourceRange ReferenceRange,
const DeclContext *ReferenceDC,
const VersionRange &RequiredRange,
ASTContext &Context,
InFlightDiagnostic &Err) {
const TypeRefinementContext *TRC = nullptr;
AvailabilityContext RunningOSOverApprox =
TypeChecker::overApproximateAvailabilityAtLocation(ReferenceRange.Start,
ReferenceDC, &TRC);
VersionRange RunningRange = RunningOSOverApprox.getOSVersion();
if (RunningRange.hasLowerEndpoint() &&
RequiredRange.hasLowerEndpoint() &&
AvailabilityContext(RequiredRange).isContainedIn(RunningOSOverApprox) &&
TRC && TRC->getReason() != TypeRefinementContext::Reason::Root) {
// Only fix situations that are "nearby" versions, meaning
// disagreement on a minor-or-less version for non-macOS,
// or disagreement on a subminor-or-less version for macOS.
auto RunningVers = RunningRange.getLowerEndpoint();
auto RequiredVers = RequiredRange.getLowerEndpoint();
auto Platform = targetPlatform(Context.LangOpts);
if (RunningVers.getMajor() != RequiredVers.getMajor())
return false;
// On macOS, additionally require the minor versions to match so the fix
// only ever adjusts a subminor component.
if ((Platform == PlatformKind::macOS ||
Platform == PlatformKind::macOSApplicationExtension) &&
!(RunningVers.getMinor().hasValue() &&
RequiredVers.getMinor().hasValue() &&
RunningVers.getMinor().getValue() ==
RequiredVers.getMinor().getValue()))
return false;
// Locate the version literal inside the existing availability condition.
auto FixRange = TRC->getAvailabilityConditionVersionSourceRange(
Platform, RunningVers);
if (!FixRange.isValid())
return false;
// Have found a nontrivial type refinement context-introducer to narrow.
Err.fixItReplace(FixRange, RequiredVers.getAsString());
return true;
}
return false;
}
/// Emit a diagnostic note and Fix-It to add an if #available(...) { } guard
/// that checks for the given version range around the given node.
static void fixAvailabilityByAddingVersionCheck(
ASTNode NodeToWrap, const VersionRange &RequiredRange,
SourceRange ReferenceRange, ASTContext &Context) {
SourceRange RangeToWrap = NodeToWrap.getSourceRange();
if (RangeToWrap.isInvalid())
return;
SourceLoc ReplaceLocStart = RangeToWrap.Start;
StringRef ExtraIndent;
StringRef OriginalIndent = Lexer::getIndentationForLine(
Context.SourceMgr, ReplaceLocStart, &ExtraIndent);
std::string IfText;
{
llvm::raw_string_ostream Out(IfText);
// Extend to the end of the node's final token so the whole node is
// captured in the replacement text.
SourceLoc ReplaceLocEnd =
Lexer::getLocForEndOfToken(Context.SourceMgr, RangeToWrap.End);
std::string GuardedText =
Context.SourceMgr.extractText(CharSourceRange(Context.SourceMgr,
ReplaceLocStart,
ReplaceLocEnd)).str();
std::string NewLine = "\n";
std::string NewLineReplacement = (NewLine + ExtraIndent).str();
// Indent the body of the Fix-It if. Because the body may be a compound
// statement, we may have to indent multiple lines.
size_t StartAt = 0;
while ((StartAt = GuardedText.find(NewLine, StartAt)) !=
std::string::npos) {
GuardedText.replace(StartAt, NewLine.length(), NewLineReplacement);
StartAt += NewLine.length();
}
PlatformKind Target = targetPlatform(Context.LangOpts);
Out << "if #available(" << platformString(Target)
<< " " << RequiredRange.getLowerEndpoint().getAsString()
<< ", *) {\n";
Out << OriginalIndent << ExtraIndent << GuardedText << "\n";
// We emit an empty fallback case with a comment to encourage the developer
// to think explicitly about whether fallback on earlier versions is needed.
Out << OriginalIndent << "} else {\n";
Out << OriginalIndent << ExtraIndent << "// Fallback on earlier versions\n";
Out << OriginalIndent << "}";
}
Context.Diags.diagnose(
ReferenceRange.Start, diag::availability_guard_with_version_check)
.fixItReplace(RangeToWrap, IfText);
}
/// Emit suggested Fix-Its for a reference to an unavailable symbol
/// requiring the given OS version range: an `if #available` guard around the
/// reference and/or @available attributes on enclosing declarations.
static void fixAvailability(SourceRange ReferenceRange,
const DeclContext *ReferenceDC,
const VersionRange &RequiredRange,
ASTContext &Context) {
if (ReferenceRange.isInvalid())
return;
Optional<ASTNode> NodeToWrapInVersionCheck;
const Decl *FoundMemberDecl = nullptr;
const Decl *FoundTypeLevelDecl = nullptr;
findAvailabilityFixItNodes(ReferenceRange, ReferenceDC, Context.SourceMgr,
NodeToWrapInVersionCheck, FoundMemberDecl,
FoundTypeLevelDecl);
// Suggest wrapping in if #available(...) { ... } if possible.
if (NodeToWrapInVersionCheck.hasValue()) {
fixAvailabilityByAddingVersionCheck(NodeToWrapInVersionCheck.getValue(),
RequiredRange, ReferenceRange, Context);
}
// Suggest adding availability attributes.
if (FoundMemberDecl) {
fixAvailabilityForDecl(ReferenceRange, FoundMemberDecl, RequiredRange,
Context);
}
if (FoundTypeLevelDecl) {
fixAvailabilityForDecl(ReferenceRange, FoundTypeLevelDecl, RequiredRange,
Context);
}
}
/// Diagnose a use of opaque result types that requires a newer OS version
/// than the reference is guaranteed to run on, and suggest fixes.
void TypeChecker::diagnosePotentialOpaqueTypeUnavailability(
SourceRange ReferenceRange, const DeclContext *ReferenceDC,
const UnavailabilityReason &Reason) {
ASTContext &Context = ReferenceDC->getASTContext();
auto RequiredRange = Reason.getRequiredOSVersionRange();
// The braces scope the in-flight diagnostic so it is completed before the
// separate fixit diagnostics below are emitted.
{
auto Err =
Context.Diags.diagnose(
ReferenceRange.Start, diag::availability_opaque_types_only_version_newer,
prettyPlatformString(targetPlatform(Context.LangOpts)),
Reason.getRequiredOSVersionRange().getLowerEndpoint());
// Direct a fixit to the error if an existing guard is nearly-correct
if (fixAvailabilityByNarrowingNearbyVersionCheck(ReferenceRange,
ReferenceDC,
RequiredRange, Context, Err))
return;
}
fixAvailability(ReferenceRange, ReferenceDC, RequiredRange, Context);
}
/// Diagnose a use of concurrency features on an OS version that may lack
/// back-deployed concurrency runtime support, and suggest fixes.
static void diagnosePotentialConcurrencyUnavailability(
SourceRange ReferenceRange, const DeclContext *ReferenceDC,
const UnavailabilityReason &Reason) {
ASTContext &Context = ReferenceDC->getASTContext();
auto RequiredRange = Reason.getRequiredOSVersionRange();
// The braces scope the in-flight diagnostic so it is completed before the
// separate fixit diagnostics below are emitted.
{
auto Err =
Context.Diags.diagnose(
ReferenceRange.Start,
diag::availability_concurrency_only_version_newer,
prettyPlatformString(targetPlatform(Context.LangOpts)),
Reason.getRequiredOSVersionRange().getLowerEndpoint());
// Direct a fixit to the error if an existing guard is nearly-correct
if (fixAvailabilityByNarrowingNearbyVersionCheck(ReferenceRange,
ReferenceDC,
RequiredRange, Context, Err))
return;
}
fixAvailability(ReferenceRange, ReferenceDC, RequiredRange, Context);
}
/// Check that back-deployed concurrency runtime support is available at the
/// given reference location, diagnosing (with fixes) if it may not be.
void TypeChecker::checkConcurrencyAvailability(SourceRange ReferenceRange,
const DeclContext *ReferenceDC) {
// Check the availability of concurrency runtime support.
ASTContext &ctx = ReferenceDC->getASTContext();
if (ctx.LangOpts.DisableAvailabilityChecking)
return;
if (!shouldCheckAvailability(ReferenceDC->getAsDecl()))
return;
// Compare the OS versions the reference may run on against the versions
// that ship (or back-deploy) the concurrency runtime.
auto runningOS =
TypeChecker::overApproximateAvailabilityAtLocation(
ReferenceRange.Start, ReferenceDC);
auto availability = ctx.getBackDeployedConcurrencyAvailability();
if (!runningOS.isContainedIn(availability)) {
diagnosePotentialConcurrencyUnavailability(
ReferenceRange, ReferenceDC,
UnavailabilityReason::requiresVersionRange(availability.getOSVersion()));
}
}
/// Diagnose a reference to a declaration that is potentially unavailable at
/// the reference location, and suggest fixes.
void TypeChecker::diagnosePotentialUnavailability(
const ValueDecl *D, SourceRange ReferenceRange,
const DeclContext *ReferenceDC,
const UnavailabilityReason &Reason) {
ASTContext &Context = ReferenceDC->getASTContext();
auto RequiredRange = Reason.getRequiredOSVersionRange();
// The braces scope the in-flight diagnostic so it is completed before the
// separate fixit diagnostics below are emitted.
{
auto Err =
Context.Diags.diagnose(
ReferenceRange.Start, diag::availability_decl_only_version_newer,
D->getName(), prettyPlatformString(targetPlatform(Context.LangOpts)),
Reason.getRequiredOSVersionRange().getLowerEndpoint());
// Direct a fixit to the error if an existing guard is nearly-correct
if (fixAvailabilityByNarrowingNearbyVersionCheck(ReferenceRange,
ReferenceDC,
RequiredRange, Context, Err))
return;
}
fixAvailability(ReferenceRange, ReferenceDC, RequiredRange, Context);
}
/// Diagnose a reference to a getter or setter that is potentially
/// unavailable, and suggest fixes. \p ForInout selects the inout-access
/// variant of the diagnostic.
void TypeChecker::diagnosePotentialAccessorUnavailability(
const AccessorDecl *Accessor, SourceRange ReferenceRange,
const DeclContext *ReferenceDC, const UnavailabilityReason &Reason,
bool ForInout) {
ASTContext &Context = ReferenceDC->getASTContext();
assert(Accessor->isGetterOrSetter());
// The diagnostic names the underlying storage, not the accessor function.
const AbstractStorageDecl *ASD = Accessor->getStorage();
DeclName Name = ASD->getName();
auto &diag = ForInout ? diag::availability_inout_accessor_only_version_newer
: diag::availability_accessor_only_version_newer;
auto RequiredRange = Reason.getRequiredOSVersionRange();
// The braces scope the in-flight diagnostic so it is completed before the
// separate fixit diagnostics below are emitted.
{
auto Err =
Context.Diags.diagnose(
ReferenceRange.Start, diag,
static_cast<unsigned>(Accessor->getAccessorKind()), Name,
prettyPlatformString(targetPlatform(Context.LangOpts)),
Reason.getRequiredOSVersionRange().getLowerEndpoint());
// Direct a fixit to the error if an existing guard is nearly-correct
if (fixAvailabilityByNarrowingNearbyVersionCheck(ReferenceRange,
ReferenceDC,
RequiredRange, Context, Err))
return;
}
fixAvailability(ReferenceRange, ReferenceDC, RequiredRange, Context);
}
/// Compute how strongly to diagnose an explicitly unavailable conformance.
/// Unavailable `Sendable` conformances are softened according to the
/// concurrency checking mode of the use site; all other protocols keep the
/// default severity.
static DiagnosticBehavior
behaviorLimitForExplicitUnavailability(
    const RootProtocolConformance *rootConf,
    const DeclContext *fromDC) {
  auto *proto = rootConf->getProtocol();
  if (!proto->isSpecificProtocol(KnownProtocolKind::Sendable))
    return DiagnosticBehavior::Unspecified;

  SendableCheckContext checkContext(fromDC);
  if (auto *nominal = rootConf->getType()->getAnyNominal())
    return checkContext.diagnosticBehavior(nominal);
  return checkContext.defaultDiagnosticBehavior();
}
/// Diagnose a use of a protocol conformance that is potentially unavailable
/// at the use site, and suggest fixes.
void TypeChecker::diagnosePotentialUnavailability(
const RootProtocolConformance *rootConf,
const ExtensionDecl *ext,
SourceLoc loc,
const DeclContext *dc,
const UnavailabilityReason &reason) {
ASTContext &ctx = dc->getASTContext();
auto requiredRange = reason.getRequiredOSVersionRange();
// The braces scope the in-flight diagnostic so it is completed before the
// separate fixit diagnostics below are emitted.
{
auto type = rootConf->getType();
auto proto = rootConf->getProtocol()->getDeclaredInterfaceType();
// Conformance-availability violations are warnings unless the language
// option upgrades them to errors.
auto diagID = (ctx.LangOpts.EnableConformanceAvailabilityErrors
? diag::conformance_availability_only_version_newer
: diag::conformance_availability_only_version_newer_warn);
auto behavior = behaviorLimitForExplicitUnavailability(rootConf, dc);
auto err =
ctx.Diags.diagnose(
loc, diagID,
type, proto, prettyPlatformString(targetPlatform(ctx.LangOpts)),
reason.getRequiredOSVersionRange().getLowerEndpoint());
err.limitBehavior(behavior);
// Direct a fixit to the error if an existing guard is nearly-correct
if (fixAvailabilityByNarrowingNearbyVersionCheck(loc, dc,
requiredRange, ctx, err))
return;
}
fixAvailability(loc, dc, requiredRange, ctx);
}
/// Return the attribute marking \p D as deprecated, if any. A member of a
/// deprecated extension inherits that extension's deprecation.
const AvailableAttr *TypeChecker::getDeprecated(const Decl *D) {
  if (auto *Attr = D->getAttrs().getDeprecated(D->getASTContext()))
    return Attr;

  // Recurse into the enclosing extension, if there is one.
  if (auto *ED = dyn_cast<ExtensionDecl>(D->getDeclContext()))
    return getDeprecated(ED);

  return nullptr;
}
/// Returns true if the reference or any of its parents is an
/// unconditional unavailable declaration for the same platform.
static bool isInsideCompatibleUnavailableDeclaration(
const Decl *D, const ExportContext &where,
const AvailableAttr *attr) {
auto referencedPlatform = where.getUnavailablePlatformKind();
if (!referencedPlatform)
return false;
if (!attr->isUnconditionallyUnavailable()) {
return false;
}
// Refuse calling unavailable functions from unavailable code,
// but allow the use of types.
PlatformKind platform = attr->Platform;
if (platform == PlatformKind::none &&
!isa<TypeDecl>(D) &&
!isa<ExtensionDecl>(D)) {
return false;
}
return (*referencedPlatform == platform ||
inheritsAvailabilityFromPlatform(platform,
*referencedPlatform));
}
/// Attaches fix-its to \p diag that rewrite the reference at
/// \p referenceRange (and, when available, the enclosing call \p call) to
/// use the replacement named by \p attr's `renamed:` string.
///
/// Handles renames to instance members (hoisting one argument into the
/// call base), initializers, subscripts, and plain base-name replacements,
/// then finishes by correcting mismatched argument labels. Bails out
/// without emitting a fix-it whenever the rename cannot be applied
/// mechanically.
static void fixItAvailableAttrRename(InFlightDiagnostic &diag,
                                     SourceRange referenceRange,
                                     const ValueDecl *renamedDecl,
                                     const AvailableAttr *attr,
                                     const Expr *call) {
  // Accessors are renamed via their storage declaration; nothing to do here.
  if (isa<AccessorDecl>(renamedDecl))
    return;
  ParsedDeclName parsed = swift::parseDeclName(attr->Rename);
  if (!parsed)
    return;
  // Operator-ness of the rename must match operator-ness of the original
  // reference, otherwise the rewrite would produce invalid syntax.
  bool originallyWasKnownOperatorExpr = false;
  if (call) {
    originallyWasKnownOperatorExpr =
        isa<BinaryExpr>(call) ||
        isa<PrefixUnaryExpr>(call) ||
        isa<PostfixUnaryExpr>(call);
  }
  if (parsed.isOperator() != originallyWasKnownOperatorExpr)
    return;
  auto &ctx = renamedDecl->getASTContext();
  SourceManager &sourceMgr = ctx.SourceMgr;
  if (parsed.isInstanceMember()) {
    auto *CE = dyn_cast_or_null<CallExpr>(call);
    if (!CE)
      return;
    // Replace the base of the call with the "self argument".
    // We can only do a good job with the fix-it if we have the whole call
    // expression.
    // FIXME: Should we be validating the ContextName in some way?
    unsigned selfIndex = parsed.SelfIndex.getValue();
    const Expr *selfExpr = nullptr;
    SourceLoc removeRangeStart;
    SourceLoc removeRangeEnd;
    auto *originalArgs = CE->getArgs()->getOriginalArgs();
    size_t numElementsWithinParens = originalArgs->size();
    numElementsWithinParens -= originalArgs->getNumTrailingClosures();
    if (selfIndex >= numElementsWithinParens)
      return;
    // Sanity-check the argument count against the kind of rename.
    if (parsed.IsGetter) {
      if (numElementsWithinParens != 1)
        return;
    } else if (parsed.IsSetter) {
      if (numElementsWithinParens != 2)
        return;
    } else {
      if (parsed.ArgumentLabels.size() != originalArgs->size() - 1)
        return;
    }
    selfExpr = originalArgs->getExpr(selfIndex);
    // Compute the half-open character range of the argument being hoisted
    // into the base, so it can be deleted from the argument list.
    if (selfIndex + 1 == numElementsWithinParens) {
      if (selfIndex > 0) {
        // Remove from the previous comma to the close-paren (half-open).
        removeRangeStart = originalArgs->getExpr(selfIndex - 1)->getEndLoc();
        removeRangeStart = Lexer::getLocForEndOfToken(sourceMgr,
                                                      removeRangeStart);
      } else {
        // Remove from after the open paren to the close paren (half-open).
        removeRangeStart =
            Lexer::getLocForEndOfToken(sourceMgr, originalArgs->getStartLoc());
      }
      // Prefer the r-paren location, so that we get the right behavior when
      // there's a trailing closure, but handle some implicit cases too.
      removeRangeEnd = originalArgs->getRParenLoc();
      if (removeRangeEnd.isInvalid())
        removeRangeEnd = originalArgs->getEndLoc();
    } else {
      // Remove from the label to the start of the next argument (half-open).
      SourceLoc labelLoc = originalArgs->getLabelLoc(selfIndex);
      if (labelLoc.isValid())
        removeRangeStart = labelLoc;
      else
        removeRangeStart = selfExpr->getStartLoc();
      SourceLoc nextLabelLoc = originalArgs->getLabelLoc(selfIndex + 1);
      if (nextLabelLoc.isValid())
        removeRangeEnd = nextLabelLoc;
      else
        removeRangeEnd = originalArgs->getExpr(selfIndex + 1)->getStartLoc();
    }
    // Avoid later argument label fix-its for this argument.
    if (!parsed.isPropertyAccessor()) {
      Identifier oldLabel = originalArgs->getLabel(selfIndex);
      StringRef oldLabelStr;
      if (!oldLabel.empty())
        oldLabelStr = oldLabel.str();
      parsed.ArgumentLabels.insert(parsed.ArgumentLabels.begin() + selfIndex,
                                   oldLabelStr);
    }
    // Drop any '&' on the hoisted argument: the base of a call is never
    // written inout.
    if (auto *inoutSelf = dyn_cast<InOutExpr>(selfExpr))
      selfExpr = inoutSelf->getSubExpr();
    CharSourceRange selfExprRange =
        Lexer::getCharSourceRangeFromSourceRange(sourceMgr,
                                                 selfExpr->getSourceRange());
    bool needsParens = !selfExpr->canAppendPostfixExpression();
    SmallString<64> selfReplace;
    if (needsParens)
      selfReplace.push_back('(');
    // If the base is contextual member lookup and we know the type,
    // let's just prepend it, otherwise we'll end up with an incorrect fix-it.
    auto base = sourceMgr.extractText(selfExprRange);
    if (!base.empty() && base.front() == '.') {
      auto newName = attr->Rename;
      // If this is not a rename, let's not
      // even try to emit a fix-it because
      // it's going to be invalid.
      if (newName.empty())
        return;
      auto parts = newName.split('.');
      auto nominalName = parts.first;
      assert(!nominalName.empty());
      selfReplace += nominalName;
    }
    selfReplace += base;
    if (needsParens)
      selfReplace.push_back(')');
    selfReplace.push_back('.');
    selfReplace += parsed.BaseName;
    diag.fixItReplace(CE->getFn()->getSourceRange(), selfReplace);
    if (!parsed.isPropertyAccessor())
      diag.fixItRemoveChars(removeRangeStart, removeRangeEnd);
    // Continue on to diagnose any argument label renames.
  } else if (parsed.BaseName == "init" && isa_and_nonnull<CallExpr>(call)) {
    auto *CE = cast<CallExpr>(call);
    // For initializers, replace with a "call" of the context type...but only
    // if we know we're doing a call (rather than a first-class reference).
    if (parsed.isMember()) {
      diag.fixItReplace(CE->getFn()->getSourceRange(), parsed.ContextName);
    } else if (auto *dotCall = dyn_cast<DotSyntaxCallExpr>(CE->getFn())) {
      SourceLoc removeLoc = dotCall->getDotLoc();
      if (removeLoc.isInvalid())
        return;
      diag.fixItRemove(SourceRange(removeLoc, dotCall->getFn()->getEndLoc()));
    } else if (!isa<ConstructorRefCallExpr>(CE->getFn())) {
      return;
    }
    // Continue on to diagnose any constructor argument label renames.
  } else if (parsed.IsSubscript) {
    if (auto *CE = dyn_cast_or_null<CallExpr>(call)) {
      // Renaming from CallExpr to SubscriptExpr. Remove function name and
      // replace parens with square brackets.
      if (auto *DSCE = dyn_cast<DotSyntaxCallExpr>(CE->getFn())) {
        if (DSCE->getBase()->isImplicit()) {
          // If self is implicit, self must be inserted before subscript syntax.
          diag.fixItInsert(CE->getStartLoc(), "self");
        }
      }
      diag.fixItReplace(CE->getFn()->getEndLoc(), "[");
      diag.fixItReplace(CE->getEndLoc(), "]");
    }
  } else {
    // Just replace the base name.
    SmallString<64> baseReplace;
    if (!parsed.ContextName.empty()) {
      baseReplace += parsed.ContextName;
      baseReplace += '.';
    }
    baseReplace += parsed.BaseName;
    if (parsed.IsFunctionName && isa_and_nonnull<SubscriptExpr>(call)) {
      auto *SE = cast<SubscriptExpr>(call);
      // Renaming from SubscriptExpr to CallExpr. Insert function name and
      // replace square brackets with parens.
      diag.fixItReplace(SE->getArgs()->getStartLoc(),
                        ("." + baseReplace.str() + "(").str());
      diag.fixItReplace(SE->getEndLoc(), ")");
    } else {
      if (parsed.IsFunctionName && parsed.ArgumentLabels.empty() &&
          isa<VarDecl>(renamedDecl)) {
        // If we're going from a var to a function with no arguments, emit an
        // empty parameter list.
        baseReplace += "()";
      }
      diag.fixItReplace(referenceRange, baseReplace);
    }
  }
  // The remainder rewrites the argument list; without a call there is
  // nothing more to do.
  if (!call || !call->getArgs())
    return;
  auto *originalArgs = call->getArgs()->getOriginalArgs();
  if (parsed.IsGetter) {
    diag.fixItRemove(originalArgs->getSourceRange());
    return;
  }
  if (parsed.IsSetter) {
    const Expr *newValueExpr = nullptr;
    if (originalArgs->size() >= 1) {
      size_t newValueIndex = 0;
      if (parsed.isInstanceMember()) {
        assert(parsed.SelfIndex.getValue() == 0 ||
               parsed.SelfIndex.getValue() == 1);
        newValueIndex = !parsed.SelfIndex.getValue();
      }
      newValueExpr = originalArgs->getExpr(newValueIndex);
    } else {
      // NOTE(review): this branch only runs when the argument list is
      // empty, so getExpr(0) looks out-of-bounds — confirm it is
      // unreachable (the setter path checked for exactly 2 arguments
      // in the instance-member case above).
      newValueExpr = originalArgs->getExpr(0);
    }
    diag.fixItReplaceChars(originalArgs->getStartLoc(),
                           newValueExpr->getStartLoc(), " = ");
    diag.fixItRemoveChars(
        Lexer::getLocForEndOfToken(sourceMgr, newValueExpr->getEndLoc()),
        Lexer::getLocForEndOfToken(sourceMgr, originalArgs->getEndLoc()));
    return;
  }
  if (!parsed.IsFunctionName)
    return;
  // Build the new label list from the parsed rename.
  SmallVector<Identifier, 4> argumentLabelIDs;
  llvm::transform(parsed.ArgumentLabels, std::back_inserter(argumentLabelIDs),
                  [&ctx](StringRef labelStr) -> Identifier {
                    return labelStr.empty() ? Identifier()
                                            : ctx.getIdentifier(labelStr);
                  });
  // Coerce the `argumentLabelIDs` to the user supplied arguments.
  // e.g:
  //   @available(.., renamed: "new(w:x:y:z:)")
  //   func old(a: Int, b: Int..., c: String="", d: Int=0){}
  //   old(a: 1, b: 2, 3, 4, d: 5)
  // coerce
  //   argumentLabelIDs = {"w", "x", "y", "z"}
  // to
  //   argumentLabelIDs = {"w", "x", "", "", "z"}
  auto I = argumentLabelIDs.begin();
  // Returns true (meaning: give up) when the label list is shorter than
  // the supplied argument list.
  auto updateLabelsForArg = [&](Expr *expr) -> bool {
    if (isa<DefaultArgumentExpr>(expr)) {
      // Defaulted: remove param label of it.
      if (I == argumentLabelIDs.end())
        return true;
      I = argumentLabelIDs.erase(I);
      return false;
    }
    if (auto *varargExpr = dyn_cast<VarargExpansionExpr>(expr)) {
      if (auto *arrayExpr = dyn_cast<ArrayExpr>(varargExpr->getSubExpr())) {
        auto variadicArgsNum = arrayExpr->getNumElements();
        if (variadicArgsNum == 0) {
          // No arguments: Remove param label of it.
          I = argumentLabelIDs.erase(I);
        } else if (variadicArgsNum == 1) {
          // One argument: Just advance.
          ++I;
        } else {
          ++I;
          // Two or more arguments: Insert empty labels after the first one.
          --variadicArgsNum;
          I = argumentLabelIDs.insert(I, variadicArgsNum, Identifier());
          I += variadicArgsNum;
        }
        return false;
      }
    }
    // Normal: Just advance.
    if (I == argumentLabelIDs.end())
      return true;
    ++I;
    return false;
  };
  for (auto arg : *call->getArgs()) {
    if (updateLabelsForArg(arg.getExpr()))
      return;
  }
  if (argumentLabelIDs.size() != originalArgs->size()) {
    // Mismatched lengths; give up.
    return;
  }
  // If any of the argument labels are mismatched, perform label correction.
  for (auto i : indices(*originalArgs)) {
    // The argument label of an unlabeled trailing closure is ignored.
    if (originalArgs->isUnlabeledTrailingClosureIndex(i))
      continue;
    if (argumentLabelIDs[i] != originalArgs->getLabel(i)) {
      diagnoseArgumentLabelError(ctx, originalArgs, argumentLabelIDs,
                                 parsed.IsSubscript, &diag);
      return;
    }
  }
}
// Must be kept in sync with diag::availability_decl_unavailable_rename and
// others.
namespace {
/// Classifies what kind of declaration an @available rename points at,
/// used to pick the wording of rename diagnostics (see describeRename).
enum class ReplacementDeclKind : unsigned {
  None,           // A rename with no specialized description.
  InstanceMethod, // Renamed to an instance method.
  Property,       // Renamed to a property (or global binding).
};
} // end anonymous namespace
/// Describes the replacement suggested by \p attr's `renamed:` string for
/// diagnostic purposes, writing the replacement's display name into
/// \p nameBuf.
///
/// \param D The renamed declaration itself, or null when unknown.
/// \returns The kind of replacement, or None when the rename should use
/// the generic diagnostic wording instead of a specialized description.
static Optional<ReplacementDeclKind>
describeRename(ASTContext &ctx, const AvailableAttr *attr, const ValueDecl *D,
               SmallVectorImpl<char> &nameBuf) {
  ParsedDeclName parsed = swift::parseDeclName(attr->Rename);
  if (!parsed)
    return None;

  // Only produce special descriptions for renames to
  // - instance members
  // - properties (or global bindings)
  // - class/static methods
  // - initializers, unless the original was known to be an initializer
  // Leave non-member renames alone, as well as renames from top-level types
  // and bindings to member types and class/static properties.
  //
  // Note: use isa_and_nonnull rather than dyn_cast_or_null since only the
  // boolean result is needed (consistent with the other checks in this
  // file).
  if (!(parsed.isInstanceMember() || parsed.isPropertyAccessor() ||
        (parsed.isMember() && parsed.IsFunctionName) ||
        (parsed.BaseName == "init" &&
         !isa_and_nonnull<ConstructorDecl>(D)))) {
    return None;
  }

  llvm::raw_svector_ostream name(nameBuf);

  if (!parsed.ContextName.empty())
    name << parsed.ContextName << '.';

  if (parsed.IsFunctionName) {
    name << parsed.formDeclName(ctx, (D && isa<SubscriptDecl>(D)));
  } else {
    name << parsed.BaseName;
  }

  if (parsed.isMember() && parsed.isPropertyAccessor())
    return ReplacementDeclKind::Property;
  if (parsed.isInstanceMember() && parsed.IsFunctionName)
    return ReplacementDeclKind::InstanceMethod;

  // We don't have enough information.
  return ReplacementDeclKind::None;
}
/// Emits a deprecation warning for a reference to \p DeprecatedDecl at
/// \p ReferenceRange, plus a rename note with fix-its when the attribute
/// carries a `renamed:` string. Suppressed when the referencing context
/// is itself deprecated or can never execute on any deployment target.
void TypeChecker::diagnoseIfDeprecated(SourceRange ReferenceRange,
                                       const ExportContext &Where,
                                       const ValueDecl *DeprecatedDecl,
                                       const Expr *Call) {
  const AvailableAttr *Attr = TypeChecker::getDeprecated(DeprecatedDecl);
  if (!Attr)
    return;
  // We match the behavior of clang to not report deprecation warnings
  // inside declarations that are themselves deprecated on all deployment
  // targets.
  if (Where.isDeprecated()) {
    return;
  }
  auto *ReferenceDC = Where.getDeclContext();
  auto &Context = ReferenceDC->getASTContext();
  if (!Context.LangOpts.DisableAvailabilityChecking) {
    AvailabilityContext RunningOSVersions = Where.getAvailabilityContext();
    if (RunningOSVersions.isKnownUnreachable()) {
      // Suppress a deprecation warning if the availability checking machinery
      // thinks the reference program location will not execute on any
      // deployment target for the current platform.
      return;
    }
  }
  DeclName Name;
  unsigned RawAccessorKind;
  std::tie(RawAccessorKind, Name) =
      getAccessorKindAndNameForDiagnostics(DeprecatedDecl);
  StringRef Platform = Attr->prettyPlatformString();
  llvm::VersionTuple DeprecatedVersion;
  if (Attr->Deprecated)
    DeprecatedVersion = Attr->Deprecated.getValue();
  // Neither a message nor a rename: emit the plain deprecation warning.
  if (Attr->Message.empty() && Attr->Rename.empty()) {
    Context.Diags.diagnose(
             ReferenceRange.Start, diag::availability_deprecated,
             RawAccessorKind, Name, Attr->hasPlatform(), Platform,
             Attr->Deprecated.hasValue(), DeprecatedVersion,
             /*message*/ StringRef())
        .highlight(Attr->getRange());
    return;
  }
  // Otherwise, pick between the "deprecated with message" and
  // "deprecated, renamed" forms of the diagnostic.
  SmallString<32> newNameBuf;
  Optional<ReplacementDeclKind> replacementDeclKind =
      describeRename(Context, Attr, /*decl*/nullptr, newNameBuf);
  StringRef newName = replacementDeclKind ? newNameBuf.str() : Attr->Rename;
  if (!Attr->Message.empty()) {
    EncodedDiagnosticMessage EncodedMessage(Attr->Message);
    Context.Diags.diagnose(
             ReferenceRange.Start, diag::availability_deprecated,
             RawAccessorKind, Name, Attr->hasPlatform(), Platform,
             Attr->Deprecated.hasValue(), DeprecatedVersion,
             EncodedMessage.Message)
        .highlight(Attr->getRange());
  } else {
    unsigned rawReplaceKind = static_cast<unsigned>(
        replacementDeclKind.getValueOr(ReplacementDeclKind::None));
    Context.Diags.diagnose(
             ReferenceRange.Start, diag::availability_deprecated_rename,
             RawAccessorKind, Name, Attr->hasPlatform(), Platform,
             Attr->Deprecated.hasValue(), DeprecatedVersion,
             replacementDeclKind.hasValue(), rawReplaceKind, newName)
        .highlight(Attr->getRange());
  }
  // When a rename is available, attach a note with the mechanical fix-its.
  if (!Attr->Rename.empty() && !isa<AccessorDecl>(DeprecatedDecl)) {
    auto renameDiag = Context.Diags.diagnose(
                               ReferenceRange.Start,
                               diag::note_deprecated_rename,
                               newName);
    fixItAvailableAttrRename(renameDiag, ReferenceRange, DeprecatedDecl,
                             Attr, Call);
  }
}
/// Emits a deprecation warning at \p loc for use of the conformance
/// described by \p rootConf when its declaring extension \p ext is
/// deprecated. Returns true if a diagnostic was emitted.
bool TypeChecker::diagnoseIfDeprecated(SourceLoc loc,
                                       const RootProtocolConformance *rootConf,
                                       const ExtensionDecl *ext,
                                       const ExportContext &where) {
  const AvailableAttr *attr = TypeChecker::getDeprecated(ext);
  if (!attr)
    return false;
  // We match the behavior of clang to not report deprecation warnings
  // inside declarations that are themselves deprecated on all deployment
  // targets.
  if (where.isDeprecated()) {
    return false;
  }
  auto *dc = where.getDeclContext();
  auto &ctx = dc->getASTContext();
  if (!ctx.LangOpts.DisableAvailabilityChecking) {
    AvailabilityContext runningOSVersion = where.getAvailabilityContext();
    if (runningOSVersion.isKnownUnreachable()) {
      // Suppress a deprecation warning if the availability checking machinery
      // thinks the reference program location will not execute on any
      // deployment target for the current platform.
      return false;
    }
  }
  auto type = rootConf->getType();
  auto proto = rootConf->getProtocol()->getDeclaredInterfaceType();
  StringRef platform = attr->prettyPlatformString();
  llvm::VersionTuple deprecatedVersion;
  if (attr->Deprecated)
    deprecatedVersion = attr->Deprecated.getValue();
  // Without a message, emit the plain conformance-deprecation warning.
  if (attr->Message.empty()) {
    ctx.Diags.diagnose(
             loc, diag::conformance_availability_deprecated,
             type, proto, attr->hasPlatform(), platform,
             attr->Deprecated.hasValue(), deprecatedVersion,
             /*message*/ StringRef())
        .highlight(attr->getRange());
    return true;
  }
  // Include the attribute's message in the diagnostic.
  EncodedDiagnosticMessage encodedMessage(attr->Message);
  ctx.Diags.diagnose(
           loc, diag::conformance_availability_deprecated,
           type, proto, attr->hasPlatform(), platform,
           attr->Deprecated.hasValue(), deprecatedVersion,
           encodedMessage.Message)
      .highlight(attr->getRange());
  return true;
}
/// Diagnoses \p override overriding the unavailable declaration \p base,
/// attaching fix-its that apply the base's rename (if any) to the
/// override's declared name.
void swift::diagnoseUnavailableOverride(ValueDecl *override,
                                        const ValueDecl *base,
                                        const AvailableAttr *attr) {
  ASTContext &ctx = override->getASTContext();
  auto &diags = ctx.Diags;
  // Without a rename there is nothing mechanical to suggest; just report
  // the override plus a note at the unavailable base.
  if (attr->Rename.empty()) {
    EncodedDiagnosticMessage EncodedMessage(attr->Message);
    diags.diagnose(override, diag::override_unavailable,
                   override->getBaseName(), EncodedMessage.Message);
    DeclName name;
    unsigned rawAccessorKind;
    std::tie(rawAccessorKind, name) =
        getAccessorKindAndNameForDiagnostics(base);
    diags.diagnose(base, diag::availability_marked_unavailable,
                   rawAccessorKind, name);
    return;
  }
  ExportContext where = ExportContext::forDeclSignature(override);
  diagnoseExplicitUnavailability(base, override->getLoc(), where,
                                 /*Flags*/None,
                                 [&](InFlightDiagnostic &diag) {
    ParsedDeclName parsedName = parseDeclName(attr->Rename);
    // Only simple top-level function/value renames can be applied to the
    // override's name mechanically.
    if (!parsedName || parsedName.isPropertyAccessor() ||
        parsedName.isMember() || parsedName.isOperator()) {
      return;
    }
    // Only initializers should be named 'init'.
    if (isa<ConstructorDecl>(override) ^
        (parsedName.BaseName == "init")) {
      return;
    }
    if (!parsedName.IsFunctionName) {
      diag.fixItReplace(override->getNameLoc(), parsedName.BaseName);
      return;
    }
    DeclName newName = parsedName.formDeclName(ctx);
    size_t numArgs = override->getName().getArgumentNames().size();
    // The rename must keep the same arity to be applicable.
    if (!newName || newName.getArgumentNames().size() != numArgs)
      return;
    fixDeclarationName(diag, override, newName);
  });
}
/// Emit a diagnostic for references to declarations that have been
/// marked as unavailable, either through "unavailable" or "obsoleted:".
/// Convenience overload: diagnoses references to unavailable declarations,
/// attaching rename fix-its derived from the declaration's @available
/// attribute and the enclosing call (if any).
bool swift::diagnoseExplicitUnavailability(const ValueDecl *D, SourceRange R,
                                           const ExportContext &Where,
                                           const Expr *call,
                                           DeclAvailabilityFlags Flags) {
  auto attachFixIts = [=](InFlightDiagnostic &diag) {
    fixItAvailableAttrRename(diag, R, D, AvailableAttr::isUnavailable(D),
                             call);
  };
  return diagnoseExplicitUnavailability(D, R, Where, Flags, attachFixIts);
}
/// Emit a diagnostic for references to declarations that have been
/// marked as unavailable, either through "unavailable" or "obsoleted:".
/// Diagnoses use at \p loc of the conformance described by \p rootConf
/// when its declaring extension \p ext is explicitly unavailable (either
/// "unavailable" or "obsoleted:"). Returns true if a diagnostic was
/// emitted.
bool swift::diagnoseExplicitUnavailability(SourceLoc loc,
                                           const RootProtocolConformance *rootConf,
                                           const ExtensionDecl *ext,
                                           const ExportContext &where) {
  auto *attr = AvailableAttr::isUnavailable(ext);
  if (!attr)
    return false;
  // Calling unavailable code from within code with the same
  // unavailability is OK -- the eventual caller can't call the
  // enclosing code in the same situations it wouldn't be able to
  // call this code.
  if (isInsideCompatibleUnavailableDeclaration(ext, where, attr))
    return false;
  ASTContext &ctx = ext->getASTContext();
  auto &diags = ctx.Diags;
  auto type = rootConf->getType();
  auto proto = rootConf->getProtocol()->getDeclaredInterfaceType();
  // Determine how the platform (if any) should be described in the
  // diagnostic, and whether the diagnostic should be downgraded.
  StringRef platform;
  auto behavior = DiagnosticBehavior::Unspecified;
  switch (attr->getPlatformAgnosticAvailability()) {
  case PlatformAgnosticAvailabilityKind::Deprecated:
    llvm_unreachable("shouldn't see deprecations in explicit unavailability");
  case PlatformAgnosticAvailabilityKind::None:
  case PlatformAgnosticAvailabilityKind::Unavailable:
    if (attr->Platform != PlatformKind::none) {
      // This was platform-specific; indicate the platform.
      platform = attr->prettyPlatformString();
      break;
    }
    // Downgrade unavailable Sendable conformance diagnostics where
    // appropriate.
    behavior = behaviorLimitForExplicitUnavailability(
        rootConf, where.getDeclContext());
    LLVM_FALLTHROUGH;
  case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific:
  case PlatformAgnosticAvailabilityKind::PackageDescriptionVersionSpecific:
    // We don't want to give further detail about these.
    platform = "";
    break;
  case PlatformAgnosticAvailabilityKind::UnavailableInSwift:
    // This API is explicitly unavailable in Swift.
    platform = "Swift";
    break;
  }
  EncodedDiagnosticMessage EncodedMessage(attr->Message);
  diags.diagnose(loc, diag::conformance_availability_unavailable,
                 type, proto,
                 platform.empty(), platform, EncodedMessage.Message)
      .limitBehavior(behavior);
  // Attach a note at the extension explaining why it is unavailable.
  switch (attr->getVersionAvailability(ctx)) {
  case AvailableVersionComparison::Available:
  case AvailableVersionComparison::PotentiallyUnavailable:
    llvm_unreachable("These aren't considered unavailable");
  case AvailableVersionComparison::Unavailable:
    if ((attr->isLanguageVersionSpecific() ||
         attr->isPackageDescriptionVersionSpecific())
        && attr->Introduced.hasValue())
      diags.diagnose(ext, diag::conformance_availability_introduced_in_version,
                     type, proto,
                     (attr->isLanguageVersionSpecific() ?
                      "Swift" : "PackageDescription"),
                     *attr->Introduced)
          .highlight(attr->getRange());
    else
      diags.diagnose(ext, diag::conformance_availability_marked_unavailable,
                     type, proto)
          .highlight(attr->getRange());
    break;
  case AvailableVersionComparison::Obsoleted:
    // FIXME: Use of the platformString here is non-awesome for application
    // extensions.
    StringRef platformDisplayString;
    if (attr->isLanguageVersionSpecific()) {
      platformDisplayString = "Swift";
    } else if (attr->isPackageDescriptionVersionSpecific()) {
      platformDisplayString = "PackageDescription";
    } else {
      platformDisplayString = platform;
    }
    diags.diagnose(ext, diag::conformance_availability_obsoleted,
                   type, proto, platformDisplayString, *attr->Obsoleted)
        .highlight(attr->getRange());
    break;
  }
  return true;
}
/// Check if this is a subscript declaration inside String or
/// Substring that returns String, and if so return true.
bool isSubscriptReturningString(const ValueDecl *D, ASTContext &Context) {
// Is this a subscript?
if (!isa<SubscriptDecl>(D))
return false;
// Is the subscript declared in String or Substring?
auto *declContext = D->getDeclContext();
assert(declContext && "Expected decl context!");
auto *stringDecl = Context.getStringDecl();
auto *substringDecl = Context.getSubstringDecl();
auto *typeDecl = declContext->getSelfNominalTypeDecl();
if (!typeDecl)
return false;
if (typeDecl != stringDecl && typeDecl != substringDecl)
return false;
// Is the subscript index one we want to emit a special diagnostic
// for, and the return type String?
auto fnTy = D->getInterfaceType()->getAs<AnyFunctionType>();
assert(fnTy && "Expected function type for subscript decl!");
// We're only going to warn for BoundGenericStructType with a single
// type argument that is not Int!
auto params = fnTy->getParams();
if (params.size() != 1)
return false;
const auto ¶m = params.front();
if (param.hasLabel() || param.isVariadic() || param.isInOut())
return false;
auto inputTy = param.getPlainType()->getAs<BoundGenericStructType>();
if (!inputTy)
return false;
auto genericArgs = inputTy->getGenericArgs();
if (genericArgs.size() != 1)
return false;
// The subscripts taking T<Int> do not return Substring, and our
// special fixit does not help here.
auto nominalTypeParam = genericArgs[0]->getAs<NominalType>();
if (!nominalTypeParam)
return false;
if (nominalTypeParam->isInt())
return false;
auto resultTy = fnTy->getResult()->getAs<NominalType>();
if (!resultTy)
return false;
return resultTy->isString();
}
/// Diagnoses a reference to the explicitly-unavailable declaration \p D at
/// \p R, invoking \p attachRenameFixIts on the primary diagnostic when the
/// attribute carries a rename. Returns true if a diagnostic was emitted.
bool swift::diagnoseExplicitUnavailability(
    const ValueDecl *D,
    SourceRange R,
    const ExportContext &Where,
    DeclAvailabilityFlags Flags,
    llvm::function_ref<void(InFlightDiagnostic &)> attachRenameFixIts) {
  auto *Attr = AvailableAttr::isUnavailable(D);
  if (!Attr)
    return false;
  // Calling unavailable code from within code with the same
  // unavailability is OK -- the eventual caller can't call the
  // enclosing code in the same situations it wouldn't be able to
  // call this code.
  if (isInsideCompatibleUnavailableDeclaration(D, Where, Attr))
    return false;
  SourceLoc Loc = R.Start;
  DeclName Name;
  unsigned RawAccessorKind;
  std::tie(RawAccessorKind, Name) = getAccessorKindAndNameForDiagnostics(D);
  ASTContext &ctx = D->getASTContext();
  auto &diags = ctx.Diags;
  // Determine how (and whether) to describe the platform in the message.
  StringRef platform;
  switch (Attr->getPlatformAgnosticAvailability()) {
  case PlatformAgnosticAvailabilityKind::Deprecated:
    llvm_unreachable("shouldn't see deprecations in explicit unavailability");
  case PlatformAgnosticAvailabilityKind::None:
  case PlatformAgnosticAvailabilityKind::Unavailable:
    if (Attr->Platform != PlatformKind::none) {
      // This was platform-specific; indicate the platform.
      platform = Attr->prettyPlatformString();
      break;
    }
    LLVM_FALLTHROUGH;
  case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific:
  case PlatformAgnosticAvailabilityKind::PackageDescriptionVersionSpecific:
    // We don't want to give further detail about these.
    platform = "";
    break;
  case PlatformAgnosticAvailabilityKind::UnavailableInSwift:
    // This API is explicitly unavailable in Swift.
    platform = "Swift";
    break;
  }
  // TODO: Consider removing this.
  // ObjC keypaths components weren't checked previously, so errors are demoted
  // to warnings to avoid source breakage. In some cases unavailable or
  // obsolete decls still map to valid ObjC runtime names, so behave correctly
  // at runtime, even though their use would produce an error outside of a
  // #keyPath expression.
  bool warnInObjCKeyPath = Flags.contains(DeclAvailabilityFlag::ForObjCKeyPath);
  if (!Attr->Rename.empty()) {
    // Rename available: emit the "renamed" form and let the caller attach
    // the mechanical fix-its.
    SmallString<32> newNameBuf;
    Optional<ReplacementDeclKind> replaceKind =
        describeRename(ctx, Attr, D, newNameBuf);
    unsigned rawReplaceKind = static_cast<unsigned>(
        replaceKind.getValueOr(ReplacementDeclKind::None));
    StringRef newName = replaceKind ? newNameBuf.str() : Attr->Rename;
    EncodedDiagnosticMessage EncodedMessage(Attr->Message);
    auto diag =
        diags.diagnose(Loc, warnInObjCKeyPath
                       ? diag::availability_decl_unavailable_rename_warn
                       : diag::availability_decl_unavailable_rename,
                       RawAccessorKind, Name, replaceKind.hasValue(),
                       rawReplaceKind, newName, EncodedMessage.Message);
    attachRenameFixIts(diag);
  } else if (isSubscriptReturningString(D, ctx)) {
    // Special migration fix-it: wrap the slicing subscript in String(...).
    diags.diagnose(Loc, diag::availabilty_string_subscript_migration)
      .highlight(R)
      .fixItInsert(R.Start, "String(")
      .fixItInsertAfter(R.End, ")");
    // Skip the note emitted below.
    return true;
  } else {
    EncodedDiagnosticMessage EncodedMessage(Attr->Message);
    diags
        .diagnose(Loc, warnInObjCKeyPath
                  ? diag::availability_decl_unavailable_warn
                  : diag::availability_decl_unavailable, RawAccessorKind,
                  Name, platform.empty(), platform, EncodedMessage.Message)
        .highlight(R);
  }
  // Attach a note at the declaration explaining why it is unavailable.
  switch (Attr->getVersionAvailability(ctx)) {
  case AvailableVersionComparison::Available:
  case AvailableVersionComparison::PotentiallyUnavailable:
    llvm_unreachable("These aren't considered unavailable");
  case AvailableVersionComparison::Unavailable:
    if ((Attr->isLanguageVersionSpecific() ||
         Attr->isPackageDescriptionVersionSpecific())
        && Attr->Introduced.hasValue())
      diags.diagnose(D, diag::availability_introduced_in_version,
                     RawAccessorKind, Name,
                     (Attr->isLanguageVersionSpecific() ?
                      "Swift" : "PackageDescription"),
                     *Attr->Introduced)
        .highlight(Attr->getRange());
    else
      diags.diagnose(D, diag::availability_marked_unavailable, RawAccessorKind,
                     Name)
        .highlight(Attr->getRange());
    break;
  case AvailableVersionComparison::Obsoleted:
    // FIXME: Use of the platformString here is non-awesome for application
    // extensions.
    StringRef platformDisplayString;
    if (Attr->isLanguageVersionSpecific()) {
      platformDisplayString = "Swift";
    } else if (Attr->isPackageDescriptionVersionSpecific()) {
      platformDisplayString = "PackageDescription";
    } else {
      platformDisplayString = platform;
    }
    diags.diagnose(D, diag::availability_obsoleted,
                   RawAccessorKind, Name,
                   platformDisplayString,
                   *Attr->Obsoleted)
      .highlight(Attr->getRange());
    break;
  }
  return true;
}
namespace {
class ExprAvailabilityWalker : public ASTWalker {
/// Describes how the next member reference will be treated as we traverse
/// the AST.
enum class MemberAccessContext : unsigned {
/// The member reference is in a context where an access will call
/// the getter.
Getter,
/// The member reference is in a context where an access will call
/// the setter.
Setter,
/// The member reference is in a context where it will be turned into
/// an inout argument. (Once this happens, we have to conservatively assume
/// that both the getter and setter could be called.)
InOut
};
  /// The AST context of the code being walked.
  ASTContext &Context;
  /// How the next member reference encountered will be used (get/set/inout).
  MemberAccessContext AccessContext = MemberAccessContext::Getter;
  /// Stack of ancestor expressions of the node currently being visited,
  /// innermost last (maintained by walkToExprPre/walkToExprPost).
  SmallVector<const Expr *, 16> ExprStack;
  /// The availability context diagnostics are evaluated against.
  const ExportContext &Where;
public:
  /// Creates a walker that checks expressions against the availability
  /// information in \p Where.
  explicit ExprAvailabilityWalker(const ExportContext &Where)
    : Context(Where.getDeclContext()->getASTContext()), Where(Where) {}
  // Separately-checked closures are diagnosed on their own pass.
  bool shouldWalkIntoSeparatelyCheckedClosure(ClosureExpr *expr) override {
    return false;
  }
  // Tap expressions are not walked by this checker.
  bool shouldWalkIntoTapExpression() override { return false; }
  /// Pre-order visit: pushes E on the ancestor stack, dispatches
  /// availability checking for the various reference-like expression
  /// kinds, and decides whether to recurse into E's children.
  std::pair<bool, Expr *> walkToExprPre(Expr *E) override {
    auto *DC = Where.getDeclContext();
    ExprStack.push_back(E);
    // Helpers keeping the ancestor stack balanced: skipChildren pops E
    // because walkToExprPost will not run when children are skipped.
    auto visitChildren = [&]() { return std::make_pair(true, E); };
    auto skipChildren = [&]() {
      ExprStack.pop_back();
      return std::make_pair(false, E);
    };
    if (auto DR = dyn_cast<DeclRefExpr>(E)) {
      diagnoseDeclRefAvailability(DR->getDeclRef(), DR->getSourceRange(),
                                  getEnclosingApplyExpr(), None);
      maybeDiagStorageAccess(DR->getDecl(), DR->getSourceRange(), DC);
    }
    if (auto MR = dyn_cast<MemberRefExpr>(E)) {
      // Member refs manage their own recursion (base walked in getter
      // context).
      walkMemberRef(MR);
      return skipChildren();
    }
    if (auto OCDR = dyn_cast<OtherConstructorDeclRefExpr>(E))
      diagnoseDeclRefAvailability(OCDR->getDeclRef(),
                                  OCDR->getConstructorLoc().getSourceRange(),
                                  getEnclosingApplyExpr());
    if (auto DMR = dyn_cast<DynamicMemberRefExpr>(E))
      diagnoseDeclRefAvailability(DMR->getMember(),
                                  DMR->getNameLoc().getSourceRange(),
                                  getEnclosingApplyExpr());
    if (auto DS = dyn_cast<DynamicSubscriptExpr>(E))
      diagnoseDeclRefAvailability(DS->getMember(), DS->getSourceRange());
    if (auto S = dyn_cast<SubscriptExpr>(E)) {
      if (S->hasDecl()) {
        diagnoseDeclRefAvailability(S->getDecl(), S->getSourceRange(), S);
        maybeDiagStorageAccess(S->getDecl().getDecl(), S->getSourceRange(), DC);
      }
    }
    if (auto KP = dyn_cast<KeyPathExpr>(E)) {
      maybeDiagKeyPath(KP);
    }
    if (auto A = dyn_cast<AssignExpr>(E)) {
      // Assignments walk dest/source in different member-access contexts.
      walkAssignExpr(A);
      return skipChildren();
    }
    if (auto IO = dyn_cast<InOutExpr>(E)) {
      walkInOutExpr(IO);
      return skipChildren();
    }
    if (auto T = dyn_cast<TypeExpr>(E)) {
      if (!T->isImplicit()) {
        diagnoseTypeAvailability(T->getTypeRepr(), T->getType(), E->getLoc(),
                                 Where);
      }
    }
    if (auto CE = dyn_cast<ClosureExpr>(E)) {
      // Check the availability of explicitly-written parameter and result
      // types of closures.
      for (auto *param : *CE->getParameters()) {
        diagnoseTypeAvailability(param->getTypeRepr(), param->getInterfaceType(),
                                 E->getLoc(), Where);
      }
      diagnoseTypeAvailability(CE->hasExplicitResultType()
                               ? CE->getExplicitResultTypeRepr()
                               : nullptr,
                               CE->getResultType(), E->getLoc(), Where);
    }
    if (auto CE = dyn_cast<ExplicitCastExpr>(E)) {
      diagnoseTypeAvailability(CE->getCastTypeRepr(), CE->getCastType(),
                               E->getLoc(), Where);
    }
    if (AbstractClosureExpr *closure = dyn_cast<AbstractClosureExpr>(E)) {
      // Multi-statement closures are collected by ExprWalker::rewriteFunction
      // and checked by ExprWalker::processDelayed in CSApply.cpp.
      // Single-statement closures only have the attributes checked
      // by TypeChecker::checkClosureAttributes in that rewriteFunction.
      // Multi-statement closures will be checked explicitly later (as the decl
      // context in the Where). Single-expression closures will not be
      // revisited, and are not automatically set as the context of the 'where'.
      // Don't double-check multi-statement closures, but do check
      // single-statement closures, setting the closure as the decl context.
      if (closure->hasSingleExpressionBody()) {
        walkAbstractClosure(closure);
        return skipChildren();
      }
    }
    return visitChildren();
  }
  /// Post-order visit: pops E from the ancestor stack, keeping it balanced
  /// with walkToExprPre.
  Expr *walkToExprPost(Expr *E) override {
    assert(ExprStack.back() == E);
    ExprStack.pop_back();
    return E;
  }
  std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
    // We end up here when checking the output of the result builder transform,
    // which includes closures that are not "separately typechecked" and yet
    // contain statements and declarations. We need to walk them recursively,
    // since these availability for these statements is not diagnosed from
    // typeCheckStmt() as usual.
    diagnoseStmtAvailability(S, Where.getDeclContext(), /*walkRecursively=*/true);
    // Returning false: the recursive call above already covered S's children.
    return std::make_pair(false, S);
  }
  /// Diagnoses unavailability/deprecation for a resolved declaration
  /// reference at \p R; \p call is the enclosing call (if any), used for
  /// rename fix-its. (Defined later in this file.)
  bool diagnoseDeclRefAvailability(ConcreteDeclRef declRef, SourceRange R,
                                   const Expr *call = nullptr,
                                   DeclAvailabilityFlags flags = None) const;
private:
  // Special-case diagnostic helpers; presumably for the removed ++/--
  // operators and the MemoryLayout migration respectively — bodies appear
  // later in this file, outside this view.
  bool diagnoseIncDecRemoval(const ValueDecl *D, SourceRange R,
                             const AvailableAttr *Attr) const;
  bool diagnoseMemoryLayoutMigration(const ValueDecl *D, SourceRange R,
                                     const AvailableAttr *Attr,
                                     const ApplyExpr *call) const;
/// Walks up from a potential callee to the enclosing ApplyExpr.
  /// Walks up from a potential callee (the node currently being visited,
  /// at the top of ExprStack) through syntactic sugar to the enclosing
  /// ApplyExpr, or returns null when the node is not being called.
  const ApplyExpr *getEnclosingApplyExpr() const {
    ArrayRef<const Expr *> parents = ExprStack;
    assert(!parents.empty() && "must be called while visiting an expression");
    size_t idx = parents.size() - 1;
    // Skip over sugar nodes that sit between a callee and its apply.
    do {
      if (idx == 0)
        return nullptr;
      --idx;
    } while (isa<DotSyntaxBaseIgnoredExpr>(parents[idx]) ||  // Mod.f(a)
             isa<SelfApplyExpr>(parents[idx]) ||             // obj.f(a)
             isa<IdentityExpr>(parents[idx]) ||              // (f)(a)
             isa<ForceValueExpr>(parents[idx]) ||            // f!(a)
             isa<BindOptionalExpr>(parents[idx]));           // f?(a)
    auto *call = dyn_cast<ApplyExpr>(parents[idx]);
    // Only count it as a call when the visited node is the function
    // position, not an argument.
    if (!call || call->getFn() != parents[idx+1])
      return nullptr;
    return call;
  }
/// Walk an assignment expression, checking for availability.
  /// Walk an assignment expression, checking for availability.
  void walkAssignExpr(AssignExpr *E) {
    // We take over recursive walking of assignment expressions in order to
    // walk the destination and source expressions in different member
    // access contexts.
    Expr *Dest = E->getDest();
    if (!Dest) {
      return;
    }
    // Check the Dest expression in a setter context.
    // We have an implicit assumption here that the first MemberRefExpr
    // encountered walking (pre-order) is the Dest is the destination of the
    // write. For the moment this is fine -- but future syntax might violate
    // this assumption.
    walkInContext(E, Dest, MemberAccessContext::Setter);
    // Check RHS in getter context
    Expr *Source = E->getSrc();
    if (!Source) {
      return;
    }
    walkInContext(E, Source, MemberAccessContext::Getter);
  }
/// Walk a member reference expression, checking for availability.
///
/// Checks the base expression, the referenced member declaration itself,
/// and finally the accessors implied by the current access context.
void walkMemberRef(MemberRefExpr *E) {
  // Walk the base in a getter context.
  // FIXME: We may need to look at the setter too, if we're going to do
  // writeback. The AST should have this information.
  walkInContext(E, E->getBase(), MemberAccessContext::Getter);
  ValueDecl *D = E->getMember().getDecl();
  // Diagnose for the member declaration itself; if that already produced a
  // diagnostic, skip the accessor-level checks to avoid redundant output.
  if (diagnoseDeclAvailability(D, E->getNameLoc().getSourceRange(),
                               nullptr, Where))
    return;
  // Diagnose for appropriate accessors, given the access context.
  auto *DC = Where.getDeclContext();
  maybeDiagStorageAccess(D, E->getSourceRange(), DC);
}
/// Walk a keypath expression, checking all of its components for
/// availability.
void maybeDiagKeyPath(KeyPathExpr *KP) {
  // ObjC key paths allow a more limited component set; the flag lets the
  // availability checker account for that.
  auto flags = DeclAvailabilityFlags();
  if (KP->isObjC())
    flags = DeclAvailabilityFlag::ForObjCKeyPath;
  for (auto &component : KP->getComponents()) {
    switch (component.getKind()) {
    // Resolved components reference a concrete property or subscript
    // declaration whose availability we can check directly.
    case KeyPathExpr::Component::Kind::Property:
    case KeyPathExpr::Component::Kind::Subscript: {
      auto decl = component.getDeclRef();
      auto loc = component.getLoc();
      diagnoseDeclRefAvailability(decl, loc, nullptr, flags);
      break;
    }
    case KeyPathExpr::Component::Kind::TupleElement:
      break;
    // Unresolved and structural components carry no declaration to check.
    case KeyPathExpr::Component::Kind::Invalid:
    case KeyPathExpr::Component::Kind::UnresolvedProperty:
    case KeyPathExpr::Component::Kind::UnresolvedSubscript:
    case KeyPathExpr::Component::Kind::OptionalChain:
    case KeyPathExpr::Component::Kind::OptionalWrap:
    case KeyPathExpr::Component::Kind::OptionalForce:
    case KeyPathExpr::Component::Kind::Identity:
    case KeyPathExpr::Component::Kind::DictionaryKey:
    case KeyPathExpr::Component::Kind::CodeCompletion:
      break;
    }
  }
}
/// Check availability for an `&x` inout expression.
///
/// The wrapped subexpression is visited in the InOut member-access
/// context so that both getter and setter availability are diagnosed.
void walkInOutExpr(InOutExpr *E) {
  Expr *wrapped = E->getSubExpr();
  walkInContext(E, wrapped, MemberAccessContext::InOut);
}
/// Check availability inside an abstract closure expression.
///
/// The closure itself becomes the declaration context for the nested walk
/// so that diagnostics are attributed to the closure body.
void walkAbstractClosure(AbstractClosureExpr *closure) {
  // Build an export context rooted at the closure.
  auto bodyWhere =
      ExportContext::forFunctionBody(closure, closure->getStartLoc());
  if (bodyWhere.isImplicit())
    return;
  // Dive into the body manually with a walker configured for that context.
  ExprAvailabilityWalker bodyWalker(bodyWhere);
  closure->getBody()->walk(bodyWalker);
}
/// Walk the given expression while temporarily switching the member
/// access context (getter / setter / inout) for the duration of the walk.
void walkInContext(Expr *baseExpr, Expr *E,
                   MemberAccessContext AccessContext) {
  // The previous context is restored automatically when the walk ends.
  llvm::SaveAndRestore<MemberAccessContext> restoreCtx(this->AccessContext,
                                                       AccessContext);
  E->walk(*this);
}
/// Emit diagnostics, if necessary, for accesses to storage where
/// the accessor for the AccessContext is not available.
///
/// \param VD the declaration being accessed; ignored unless it is an
///   AbstractStorageDecl with opaque accessors.
/// \param ReferenceRange source range of the access.
/// \param ReferenceDC the context the access occurs in.
void maybeDiagStorageAccess(const ValueDecl *VD,
                            SourceRange ReferenceRange,
                            const DeclContext *ReferenceDC) const {
  if (Context.LangOpts.DisableAvailabilityChecking)
    return;
  // Only storage (vars/subscripts) has accessors to diagnose.
  auto *D = dyn_cast<AbstractStorageDecl>(VD);
  if (!D)
    return;
  if (!D->requiresOpaqueAccessors()) {
    return;
  }
  // Check availability of accessor functions.
  // TODO: if we're talking about an inlineable storage declaration,
  // this probably needs to be refined to not assume that the accesses are
  // specifically using the getter/setter.
  switch (AccessContext) {
  case MemberAccessContext::Getter:
    diagAccessorAvailability(D->getOpaqueAccessor(AccessorKind::Get),
                             ReferenceRange, ReferenceDC, None);
    break;
  case MemberAccessContext::Setter:
    diagAccessorAvailability(D->getOpaqueAccessor(AccessorKind::Set),
                             ReferenceRange, ReferenceDC, None);
    break;
  case MemberAccessContext::InOut:
    // An inout access reads and writes, so both accessors must be
    // available.
    diagAccessorAvailability(D->getOpaqueAccessor(AccessorKind::Get),
                             ReferenceRange, ReferenceDC,
                             DeclAvailabilityFlag::ForInout);
    diagAccessorAvailability(D->getOpaqueAccessor(AccessorKind::Set),
                             ReferenceRange, ReferenceDC,
                             DeclAvailabilityFlag::ForInout);
    break;
  }
}
/// Emit a diagnostic, if necessary, for a potentially unavailable accessor.
///
/// Only the ForInout bit is carried over from the caller's flags, and
/// potential unavailability never aborts the surrounding walk.
void diagAccessorAvailability(AccessorDecl *D, SourceRange ReferenceRange,
                              const DeclContext *ReferenceDC,
                              DeclAvailabilityFlags Flags) const {
  if (D == nullptr)
    return;
  // Mask everything except ForInout, then ask the checker to continue on
  // potential (as opposed to definite) unavailability.
  Flags &= DeclAvailabilityFlag::ForInout;
  Flags |= DeclAvailabilityFlag::ContinueOnPotentialUnavailability;
  (void)diagnoseDeclAvailability(D, ReferenceRange, /*call*/ nullptr, Where,
                                 Flags);
}
};
} // end anonymous namespace
/// Diagnose uses of unavailable declarations. Returns true if a diagnostic
/// was emitted.
bool ExprAvailabilityWalker::diagnoseDeclRefAvailability(
    ConcreteDeclRef declRef, SourceRange R, const Expr *call,
    DeclAvailabilityFlags Flags) const {
  if (!declRef)
    return false;
  const ValueDecl *D = declRef.getDecl();
  // For unconditionally unavailable declarations, first try the
  // special-cased migration fix-its (removed ++/-- operators and the old
  // sizeof/alignof/strideof free functions).
  if (auto *attr = AvailableAttr::isUnavailable(D)) {
    if (diagnoseIncDecRemoval(D, R, attr))
      return true;
    if (isa_and_nonnull<ApplyExpr>(call) &&
        diagnoseMemoryLayoutMigration(D, R, attr, cast<ApplyExpr>(call)))
      return true;
  }
  // General availability check for the declaration itself. Note that its
  // result deliberately does not feed the return value: only
  // substitution-map problems below report true to the caller.
  diagnoseDeclAvailability(D, R, call, Where, Flags);
  if (R.isValid()) {
    if (diagnoseSubstitutionMapAvailability(R.Start, declRef.getSubstitutions(),
                                            Where)) {
      return true;
    }
  }
  return false;
}
/// Diagnose uses of API annotated '@unavailableFromAsync' when used from
/// asynchronous contexts.
/// Returns true if a diagnostic was emitted, false otherwise.
static bool
diagnoseDeclUnavailableFromAsync(const ValueDecl *D, SourceRange R,
                                 const Expr *call, const ExportContext &Where) {
  // FIXME: I don't think this is right, but I don't understand the issue well
  //        enough to fix it properly. If the decl context is an abstract
  //        closure, we need it to have a type assigned to it before we can
  //        determine whether it is an asynchronous context. It will crash
  //        when we go to check without one. In TypeChecker::typeCheckExpression
  //        (TypeCheckConstraints.cpp:403), we apply a solution before calling
  //        `performSyntacticDiagnosticsForTarget`, which eventually calls
  //        down to this function. Under most circumstances, the context that
  //        we're in is typechecked at that point and has a type assigned.
  //        When working with specific result builders, the solution applied
  //        results in an expression with an unset type. In these cases, the
  //        application makes its way into `ConstraintSystem::applySolution` for
  //        closures (CSClosure.cpp:1356). The type is computed, but is
  //        squirreled away in the constraint system to be applied once the
  //        checks (including this one) approve of the decls within the decl
  //        context before applying the type to the expression. It might be
  //        possible to drive the constraint solver through the availability
  //        checker and into us so that we can ask for it, but that feels wrong
  //        too.
  //        This behavior is demonstrated by the first use of the `tuplify`
  //        function in `testExistingPatternsInCaseStatements` in
  //        `test/Constraints/result_builder.swift`.
  const AbstractClosureExpr *declCtxAsExpr =
      dyn_cast<AbstractClosureExpr>(Where.getDeclContext());
  if (declCtxAsExpr && !declCtxAsExpr->getType()) {
    return false;
  }
  // If we are in a synchronous context, don't check it
  if (!Where.getDeclContext()->isAsyncContext())
    return false;
  if (!D->getAttrs().hasAttribute<UnavailableFromAsyncAttr>())
    return false;
  // Emit the error (warning until Swift 6) plus a note at the declaration.
  ASTContext &ctx = Where.getDeclContext()->getASTContext();
  const UnavailableFromAsyncAttr *attr =
      D->getAttrs().getAttribute<UnavailableFromAsyncAttr>();
  SourceLoc diagLoc = call ? call->getLoc() : R.Start;
  ctx.Diags
      .diagnose(diagLoc, diag::async_unavailable_decl, D->getDescriptiveKind(),
                D->getBaseName(), attr->hasMessage(), attr->Message)
      .warnUntilSwiftVersion(6);
  D->diagnose(diag::decl_declared_here, D->getName());
  return true;
}
/// Diagnose uses of unavailable declarations. Returns true if a diagnostic
/// was emitted.
///
/// Runs, in order: inlinability/exportability checks, explicit
/// unavailability, async unavailability, deprecation (warning only), and
/// finally potential (version-gated) unavailability.
bool swift::diagnoseDeclAvailability(const ValueDecl *D, SourceRange R,
                                     const Expr *call,
                                     const ExportContext &Where,
                                     DeclAvailabilityFlags Flags) {
  assert(!Where.isImplicit());
  // Generic parameters are always available.
  if (isa<GenericTypeParamDecl>(D))
    return false;
  // Keep track if this is an accessor.
  auto accessor = dyn_cast<AccessorDecl>(D);
  if (accessor) {
    // If the property/subscript is unconditionally unavailable, don't bother
    // with any of the rest of this.
    if (AvailableAttr::isUnavailable(accessor->getStorage()))
      return false;
  }
  if (R.isValid()) {
    if (TypeChecker::diagnoseInlinableDeclRefAccess(R.Start, D, Where))
      return true;
    if (TypeChecker::diagnoseDeclRefExportability(R.Start, D, Where))
      return true;
  }
  if (diagnoseExplicitUnavailability(D, R, Where, call, Flags))
    return true;
  if (diagnoseDeclUnavailableFromAsync(D, R, call, Where))
    return true;
  // Make sure not to diagnose an accessor's deprecation if we already
  // complained about the property/subscript.
  bool isAccessorWithDeprecatedStorage =
      accessor && TypeChecker::getDeprecated(accessor->getStorage());
  // Diagnose for deprecation (a warning, so it does not return early).
  if (!isAccessorWithDeprecatedStorage)
    TypeChecker::diagnoseIfDeprecated(R, Where, D, call);
  // Callers may opt out of potential-unavailability checks for protocols.
  if (Flags.contains(DeclAvailabilityFlag::AllowPotentiallyUnavailableProtocol)
      && isa<ProtocolDecl>(D))
    return false;
  // Diagnose (and possibly signal) for potential unavailability
  auto maybeUnavail = TypeChecker::checkDeclarationAvailability(D, Where);
  if (maybeUnavail.hasValue()) {
    auto *DC = Where.getDeclContext();
    if (accessor) {
      bool forInout = Flags.contains(DeclAvailabilityFlag::ForInout);
      TypeChecker::diagnosePotentialAccessorUnavailability(accessor, R, DC,
                                                           maybeUnavail.getValue(),
                                                           forInout);
    } else {
      TypeChecker::diagnosePotentialUnavailability(D, R, DC, maybeUnavail.getValue());
    }
    if (!Flags.contains(DeclAvailabilityFlag::ContinueOnPotentialUnavailability))
      return true;
  }
  return false;
}
/// Returns true if the given type looks numeric, i.e. it conforms to
/// ExpressibleByIntegerLiteral or ExpressibleByFloatLiteral.
static bool isIntegerOrFloatingPointType(Type ty, ModuleDecl *M) {
  if (TypeChecker::conformsToKnownProtocol(
          ty, KnownProtocolKind::ExpressibleByIntegerLiteral, M))
    return true;
  return TypeChecker::conformsToKnownProtocol(
      ty, KnownProtocolKind::ExpressibleByFloatLiteral, M);
}
/// If this is a call to an unavailable ++ / -- operator, try to diagnose it
/// with a fixit hint and return true. If not, or if we fail, return false.
bool
ExprAvailabilityWalker::diagnoseIncDecRemoval(const ValueDecl *D, SourceRange R,
                                              const AvailableAttr *Attr) const {
  // We can only produce a fixit if we're talking about ++ or --.
  bool isInc = D->getBaseName() == "++";
  if (!isInc && D->getBaseName() != "--")
    return false;
  // We can only handle the simple cases of lvalue++ and ++lvalue.  This is
  // always modeled as:
  //   (postfix_unary_expr (declrefexpr ++), (inoutexpr (lvalue)))
  // if not, bail out.
  if (ExprStack.size() != 2 ||
      !isa<DeclRefExpr>(ExprStack[1]) ||
      !(isa<PostfixUnaryExpr>(ExprStack[0]) ||
        isa<PrefixUnaryExpr>(ExprStack[0])))
    return false;
  auto call = cast<ApplyExpr>(ExprStack[0]);
  // If the expression type is integer or floating point, then we can rewrite it
  // to "lvalue += 1".
  auto *DC = Where.getDeclContext();
  std::string replacement;
  if (isIntegerOrFloatingPointType(call->getType(), DC->getParentModule()))
    replacement = isInc ? " += 1" : " -= 1";
  else {
    // Otherwise, it must be an index type.  Rewrite to:
    // "lvalue = lvalue.successor()".
    auto &SM = Context.SourceMgr;
    auto CSR = Lexer::getCharSourceRangeFromSourceRange(
        SM, call->getArgs()->getSourceRange());
    replacement = " = " + SM.extractText(CSR).str();
    replacement += isInc ? ".successor()" : ".predecessor()";
  }
  if (!replacement.empty()) {
    DeclName Name;
    unsigned RawAccessorKind;
    std::tie(RawAccessorKind, Name) = getAccessorKindAndNameForDiagnostics(D);
    // If we emit a deprecation diagnostic, produce a fixit hint as well.
    auto diag = Context.Diags.diagnose(
        R.Start, diag::availability_decl_unavailable,
        RawAccessorKind, Name, true, "",
        "it has been removed in Swift 3");
    if (isa<PrefixUnaryExpr>(call)) {
      // Prefix: remove the ++ or --.
      diag.fixItRemove(call->getFn()->getSourceRange());
      diag.fixItInsertAfter(call->getArgs()->getEndLoc(), replacement);
    } else {
      // Postfix: replace the ++ or --.
      diag.fixItReplace(call->getFn()->getSourceRange(), replacement);
    }
    return true;
  }
  return false;
}
/// If this is a call to an unavailable sizeof family function, diagnose it
/// with a fixit hint and return true. If not, or if we fail, return false.
///
/// Rewrites the Swift 2 stdlib `sizeof`/`alignof`/`strideof` free functions
/// to the corresponding `MemoryLayout<T>.size/alignment/stride` properties.
bool
ExprAvailabilityWalker::diagnoseMemoryLayoutMigration(const ValueDecl *D,
                                                      SourceRange R,
                                                      const AvailableAttr *Attr,
                                                      const ApplyExpr *call) const {
  // Only the stdlib declarations get this migration treatment.
  if (!D->getModuleContext()->isStdlibModule())
    return false;
  // Map the old function name to the MemoryLayout property it became.
  StringRef Property;
  if (D->getBaseName() == "sizeof") {
    Property = "size";
  } else if (D->getBaseName() == "alignof") {
    Property = "alignment";
  } else if (D->getBaseName() == "strideof") {
    Property = "stride";
  }
  if (Property.empty())
    return false;
  // The old functions took exactly one unlabeled argument.
  auto *args = call->getArgs();
  auto *subject = args->getUnlabeledUnaryExpr();
  if (!subject)
    return false;
  DeclName Name;
  unsigned RawAccessorKind;
  std::tie(RawAccessorKind, Name) = getAccessorKindAndNameForDiagnostics(D);
  EncodedDiagnosticMessage EncodedMessage(Attr->Message);
  auto diag =
      Context.Diags.diagnose(
          R.Start, diag::availability_decl_unavailable, RawAccessorKind,
          Name, true, "", EncodedMessage.Message);
  diag.highlight(R);
  StringRef Prefix = "MemoryLayout<";
  StringRef Suffix = ">.";
  if (auto DTE = dyn_cast<DynamicTypeExpr>(subject)) {
    // Replace `sizeof(type(of: x))` with `MemoryLayout<X>.size`, where `X` is
    // the static type of `x`. The previous spelling misleadingly hinted that
    // `sizeof(_:)` might return the size of the *dynamic* type of `x`, when
    // it is not the case.
    auto valueType = DTE->getBase()->getType()->getRValueType();
    if (!valueType || valueType->hasError()) {
      // If we don't have a suitable argument, we can't emit a fixit.
      return true;
    }
    // Note that in rare circumstances we may be destructively replacing the
    // source text. For example, we'd replace `sizeof(type(of: doSomething()))`
    // with `MemoryLayout<T>.size`, if T is the return type of `doSomething()`.
    diag.fixItReplace(call->getSourceRange(),
                      (Prefix + valueType->getString() + Suffix + Property).str());
  } else {
    // Otherwise rewrite in place: `sizeof(` -> `MemoryLayout<`, and the
    // closing paren -> `>.size`.
    SourceRange PrefixRange(call->getStartLoc(), args->getLParenLoc());
    SourceRange SuffixRange(args->getRParenLoc());
    // We must remove `.self`.
    if (auto *DSE = dyn_cast<DotSelfExpr>(subject))
      SuffixRange.Start = DSE->getDotLoc();
    diag
        .fixItReplace(PrefixRange, Prefix)
        .fixItReplace(SuffixRange, (Suffix + Property).str());
  }
  return true;
}
/// Diagnose uses of unavailable declarations.
void swift::diagnoseExprAvailability(const Expr *E, DeclContext *DC) {
auto where = ExportContext::forFunctionBody(DC, E->getStartLoc());
if (where.isImplicit())
return;
ExprAvailabilityWalker walker(where);
const_cast<Expr*>(E)->walk(walker);
}
namespace {

/// Walks a statement, diagnosing availability of the types, patterns and
/// (when \c WalkRecursively is set) expressions it references.
class StmtAvailabilityWalker : public BaseDiagnosticWalker {
  /// The context availability is diagnosed against.
  DeclContext *DC;
  /// Whether to descend into nested brace statements and expressions.
  bool WalkRecursively;

public:
  explicit StmtAvailabilityWalker(DeclContext *dc, bool walkRecursively)
    : DC(dc), WalkRecursively(walkRecursively) {}

  std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
    // Nested braces are diagnosed when their individual statements are
    // checked, unless a fully recursive walk was requested.
    if (!WalkRecursively && isa<BraceStmt>(S))
      return std::make_pair(false, S);
    return std::make_pair(true, S);
  }

  std::pair<bool, Expr *> walkToExprPre(Expr *E) override {
    // Expressions are normally diagnosed separately; only handle them here
    // in recursive mode. Never descend into them from this walker.
    if (WalkRecursively)
      diagnoseExprAvailability(E, DC);
    return std::make_pair(false, E);
  }

  bool walkToTypeReprPre(TypeRepr *T) override {
    auto where = ExportContext::forFunctionBody(DC, T->getStartLoc());
    diagnoseTypeReprAvailability(T, where);
    return false;
  }

  std::pair<bool, Pattern *> walkToPatternPre(Pattern *P) override {
    // 'is' patterns reference a type directly; check its availability.
    if (auto *IP = dyn_cast<IsPattern>(P)) {
      auto where = ExportContext::forFunctionBody(DC, P->getLoc());
      diagnoseTypeAvailability(IP->getCastType(), P->getLoc(), where);
    }
    return std::make_pair(true, P);
  }
};
}
void swift::diagnoseStmtAvailability(const Stmt *S, DeclContext *DC,
bool walkRecursively) {
// We'll visit the individual statements when we check them.
if (!walkRecursively && isa<BraceStmt>(S))
return;
StmtAvailabilityWalker walker(DC, walkRecursively);
const_cast<Stmt*>(S)->walk(walker);
}
namespace {

/// Walks a TypeRepr, diagnosing availability of every identifier component
/// (and generic argument) it references.
class TypeReprAvailabilityWalker : public ASTWalker {
  /// The context references are diagnosed against.
  const ExportContext &where;
  /// Flags forwarded to the per-declaration availability check.
  DeclAvailabilityFlags flags;

  /// Check one identifier component. Returns true if a diagnostic was
  /// emitted for the bound decl or any of its generic arguments.
  bool checkComponentIdentTypeRepr(ComponentIdentTypeRepr *ITR) {
    if (auto *typeDecl = ITR->getBoundDecl()) {
      auto range = ITR->getNameLoc().getSourceRange();
      if (diagnoseDeclAvailability(typeDecl, range, nullptr, where, flags))
        return true;
    }
    bool foundAnyIssues = false;
    if (auto *GTR = dyn_cast<GenericIdentTypeRepr>(ITR)) {
      auto genericFlags = flags;
      // The protocol allowance only applies to the outermost reference,
      // never to generic arguments.
      genericFlags -= DeclAvailabilityFlag::AllowPotentiallyUnavailableProtocol;
      for (auto *genericArg : GTR->getGenericArgs()) {
        if (diagnoseTypeReprAvailability(genericArg, where, genericFlags))
          foundAnyIssues = true;
      }
    }
    return foundAnyIssues;
  }

public:
  /// Whether any availability diagnostic was emitted during the walk.
  bool foundAnyIssues = false;

  TypeReprAvailabilityWalker(const ExportContext &where,
                             DeclAvailabilityFlags flags)
      : where(where), flags(flags) {}

  bool walkToTypeReprPre(TypeRepr *T) override {
    if (auto *ITR = dyn_cast<IdentTypeRepr>(T)) {
      if (auto *CTR = dyn_cast<CompoundIdentTypeRepr>(ITR)) {
        for (auto *comp : CTR->getComponents()) {
          // If a parent type is unavailable, don't go on to diagnose
          // the member since that will just produce a redundant
          // diagnostic.
          if (checkComponentIdentTypeRepr(comp)) {
            foundAnyIssues = true;
            break;
          }
        }
      } else if (auto *GTR = dyn_cast<GenericIdentTypeRepr>(T)) {
        if (checkComponentIdentTypeRepr(GTR))
          foundAnyIssues = true;
      } else if (auto *STR = dyn_cast<SimpleIdentTypeRepr>(T)) {
        if (checkComponentIdentTypeRepr(STR))
          foundAnyIssues = true;
      }
      // We've already visited all the children above, so we don't
      // need to recurse.
      return false;
    }
    return true;
  }
};
}
bool swift::diagnoseTypeReprAvailability(const TypeRepr *T,
const ExportContext &where,
DeclAvailabilityFlags flags) {
if (!T)
return false;
TypeReprAvailabilityWalker walker(where, flags);
const_cast<TypeRepr*>(T)->walk(walker);
return walker.foundAnyIssues;
}
namespace {

/// Walks a semantic Type, diagnosing exportability of referenced type decls
/// and availability of the conformances in their substitution maps.
class ProblematicTypeFinder : public TypeDeclFinder {
  /// Location diagnostics are attached to.
  SourceLoc Loc;
  /// Context the type is used in.
  const ExportContext &Where;
  /// Flags forwarded to nested availability checks.
  DeclAvailabilityFlags Flags;

public:
  ProblematicTypeFinder(SourceLoc Loc, const ExportContext &Where,
                        DeclAvailabilityFlags Flags)
      : Loc(Loc), Where(Where), Flags(Flags) {}

  void visitTypeDecl(TypeDecl *decl) {
    // We only need to diagnose exportability here. Availability was
    // already checked on the TypeRepr.
    if (Where.mustOnlyReferenceExportedDecls())
      TypeChecker::diagnoseDeclRefExportability(Loc, decl, Where);
  }

  Action visitNominalType(NominalType *ty) override {
    visitTypeDecl(ty->getDecl());
    // If some generic parameters are missing, don't check conformances.
    if (ty->hasUnboundGenericType())
      return Action::Continue;
    // When the DeclContext parameter to getContextSubstitutionMap()
    // is a protocol declaration, the receiver must be a concrete
    // type, so it doesn't make sense to perform this check on
    // protocol types.
    if (isa<ProtocolType>(ty))
      return Action::Continue;
    ModuleDecl *useModule = Where.getDeclContext()->getParentModule();
    auto subs = ty->getContextSubstitutionMap(useModule, ty->getDecl());
    (void) diagnoseSubstitutionMapAvailability(Loc, subs, Where);
    return Action::Continue;
  }

  Action visitBoundGenericType(BoundGenericType *ty) override {
    visitTypeDecl(ty->getDecl());
    ModuleDecl *useModule = Where.getDeclContext()->getParentModule();
    auto subs = ty->getContextSubstitutionMap(useModule, ty->getDecl());
    (void) diagnoseSubstitutionMapAvailability(Loc, subs, Where);
    return Action::Continue;
  }

  Action visitTypeAliasType(TypeAliasType *ty) override {
    visitTypeDecl(ty->getDecl());
    auto subs = ty->getSubstitutionMap();
    (void) diagnoseSubstitutionMapAvailability(Loc, subs, Where);
    return Action::Continue;
  }

  // We diagnose unserializable Clang function types in the
  // post-visitor so that we diagnose any unexportable component
  // types first.
  Action walkToTypePost(Type T) override {
    if (Where.mustOnlyReferenceExportedDecls()) {
      if (auto fnType = T->getAs<AnyFunctionType>()) {
        if (auto clangType = fnType->getClangTypeInfo().getType()) {
          auto *DC = Where.getDeclContext();
          auto &ctx = DC->getASTContext();
          auto loader = ctx.getClangModuleLoader();
          // Serialization will serialize the sugared type if it can,
          // but we need the canonical type to be serializable or else
          // canonicalization (e.g. in SIL) might break things.
          if (!loader->isSerializable(clangType, /*check canonical*/ true)) {
            ctx.Diags.diagnose(Loc, diag::unexportable_clang_function_type, T);
          }
        }
      }
    }
    return TypeDeclFinder::walkToTypePost(T);
  }
};
}
/// Diagnose exportability/availability problems in a semantic Type.
void swift::diagnoseTypeAvailability(Type T, SourceLoc loc,
                                     const ExportContext &where,
                                     DeclAvailabilityFlags flags) {
  if (!T)
    return;
  ProblematicTypeFinder finder(loc, where, flags);
  T.walk(finder);
}
/// Diagnose availability problems for a type, preferring the written
/// TypeRepr; the semantic Type is only checked when the repr was clean
/// (or absent).
void swift::diagnoseTypeAvailability(const TypeRepr *TR, Type T, SourceLoc loc,
                                     const ExportContext &where,
                                     DeclAvailabilityFlags flags) {
  bool reprHadIssues = diagnoseTypeReprAvailability(TR, where, flags);
  if (reprHadIssues)
    return;
  diagnoseTypeAvailability(T, loc, where, flags);
}
/// Emit a diagnostic for a conformance that was needed but missing.
/// Currently only missing Sendable conformances are reported this way.
static void diagnoseMissingConformance(
    SourceLoc loc, Type type, ProtocolDecl *proto, const DeclContext *fromDC) {
  assert(proto->isSpecificProtocol(KnownProtocolKind::Sendable));
  diagnoseMissingSendableConformance(loc, type, fromDC);
}
/// Diagnose availability, exportability and deprecation problems with a
/// conformance reference. Returns true if an error diagnostic was emitted.
///
/// \param depTy / \param replacementTy when non-null, describe the
///   associated-type requirement that pulled this conformance in; used to
///   attach an explanatory note.
bool
swift::diagnoseConformanceAvailability(SourceLoc loc,
                                       ProtocolConformanceRef conformance,
                                       const ExportContext &where,
                                       Type depTy, Type replacementTy) {
  assert(!where.isImplicit());
  if (!conformance.isConcrete())
    return false;
  const ProtocolConformance *concreteConf = conformance.getConcrete();
  const RootProtocolConformance *rootConf = concreteConf->getRootConformance();
  // Diagnose "missing" conformances where we needed a conformance but
  // didn't have one.
  auto *DC = where.getDeclContext();
  if (auto builtinConformance = dyn_cast<BuiltinProtocolConformance>(rootConf)){
    if (builtinConformance->isMissing()) {
      diagnoseMissingConformance(loc, builtinConformance->getType(),
                                 builtinConformance->getProtocol(), DC);
    }
  }
  // Attaches a note pointing at the associated-type requirement that pulled
  // in the problematic conformance, when that information was supplied.
  auto maybeEmitAssociatedTypeNote = [&]() {
    // Guard each type separately: the previous `!depTy && !replacementTy`
    // check dereferenced a null depTy (or replacementTy below) whenever
    // only one of the two was provided.
    if (!depTy || !replacementTy)
      return;
    Type selfTy = rootConf->getProtocol()->getProtocolSelfType();
    if (!depTy->isEqual(selfTy)) {
      auto &ctx = DC->getASTContext();
      ctx.Diags.diagnose(
          loc,
          diag::assoc_conformance_from_implementation_only_module,
          depTy, replacementTy->getCanonicalType());
    }
  };
  if (auto *ext = dyn_cast<ExtensionDecl>(rootConf->getDeclContext())) {
    if (TypeChecker::diagnoseConformanceExportability(loc, rootConf, ext, where)) {
      maybeEmitAssociatedTypeNote();
      return true;
    }
    if (diagnoseExplicitUnavailability(loc, rootConf, ext, where)) {
      maybeEmitAssociatedTypeNote();
      return true;
    }
    // Diagnose (and possibly signal) for potential unavailability
    auto maybeUnavail = TypeChecker::checkConformanceAvailability(
        rootConf, ext, where);
    if (maybeUnavail.hasValue()) {
      TypeChecker::diagnosePotentialUnavailability(rootConf, ext, loc, DC,
                                                   maybeUnavail.getValue());
      maybeEmitAssociatedTypeNote();
      return true;
    }
    // Diagnose for deprecation
    if (TypeChecker::diagnoseIfDeprecated(loc, rootConf, ext, where)) {
      maybeEmitAssociatedTypeNote();
      // Deprecation is just a warning, so keep going with checking the
      // substitution map below.
    }
  }
  // Now, check associated conformances.
  SubstitutionMap subConformanceSubs =
      concreteConf->getSubstitutions(DC->getParentModule());
  if (diagnoseSubstitutionMapAvailability(loc, subConformanceSubs, where,
                                          depTy, replacementTy))
    return true;
  return false;
}
/// Diagnose availability of every conformance in a substitution map.
/// Deliberately visits all conformances (no early exit) so that every
/// problem is reported in one pass. Returns true if any issue was found.
bool
swift::diagnoseSubstitutionMapAvailability(SourceLoc loc,
                                           SubstitutionMap subs,
                                           const ExportContext &where,
                                           Type depTy, Type replacementTy) {
  bool foundIssue = false;
  for (ProtocolConformanceRef conformance : subs.getConformances())
    foundIssue |= diagnoseConformanceAvailability(loc, conformance, where,
                                                  depTy, replacementTy);
  return foundIssue;
}
/// Should we warn that \p decl needs an explicit availability annotation
/// in -require-explicit-availability mode?
static bool declNeedsExplicitAvailability(const Decl *decl) {
  // Only public (or usable-from-inline) declarations are interesting.
  if (auto valueDecl = dyn_cast<const ValueDecl>(decl)) {
    AccessScope scope =
        valueDecl->getFormalAccessScope(/*useDC*/ nullptr,
                                        /*treatUsableFromInlineAsPublic*/ true);
    if (!scope.isPublic())
      return false;
  }
  // Declarations emitted into clients, SPI, and implicit decls are exempt.
  bool exempt = decl->getAttrs().hasAttribute<AlwaysEmitIntoClientAttr>() ||
                decl->isSPI() ||
                decl->isImplicit();
  if (exempt)
    return false;
  // Warn only when there is no introduction version and the decl is not
  // explicitly unavailable.
  auto &ctx = decl->getASTContext();
  auto safeRangeUnderApprox = AvailabilityInference::availableRange(decl, ctx);
  if (safeRangeUnderApprox.getOSVersion().hasLowerEndpoint())
    return false;
  return !decl->getAttrs().isUnavailable(ctx);
}
/// In -require-explicit-availability mode, warn when a public declaration
/// lacks an availability annotation, attaching a fix-it when a target
/// platform was configured.
void swift::checkExplicitAvailability(Decl *decl) {
  // Skip if the command line option was not set and
  // accessors as we check the pattern binding decl instead.
  if (!decl->getASTContext().LangOpts.RequireExplicitAvailability ||
      isa<AccessorDecl>(decl))
    return;
  // Only look at decls at module level or in extensions.
  // This could be changed to force having attributes on all decls.
  if (!decl->getDeclContext()->isModuleScopeContext() &&
      !isa<ExtensionDecl>(decl->getDeclContext())) return;
  if (auto extension = dyn_cast<ExtensionDecl>(decl)) {
    // decl should be either a ValueDecl or an ExtensionDecl.
    auto extended = extension->getExtendedNominal();
    if (!extended || !extended->getFormalAccessScope().isPublic())
      return;
    // Skip extensions without public members or conformances.
    auto members = extension->getMembers();
    auto hasMembers = std::any_of(members.begin(), members.end(),
                                  [](const Decl *D) -> bool {
      if (auto VD = dyn_cast<ValueDecl>(D))
        if (declNeedsExplicitAvailability(VD))
          return true;
      return false;
    });
    auto protocols = extension->getLocalProtocols(ConformanceLookupKind::OnlyExplicit);
    auto hasProtocols = std::any_of(protocols.begin(), protocols.end(),
                                    [](const ProtocolDecl *PD) -> bool {
      AccessScope scope =
        PD->getFormalAccessScope(/*useDC*/nullptr,
                                 /*treatUsableFromInlineAsPublic*/true);
      return scope.isPublic();
    });
    if (!hasMembers && !hasProtocols) return;
  } else if (auto pbd = dyn_cast<PatternBindingDecl>(decl)) {
    // Check the first var instead.
    if (pbd->getNumPatternEntries() == 0)
      return;
    llvm::SmallVector<VarDecl *, 2> vars;
    pbd->getPattern(0)->collectVariables(vars);
    if (vars.empty())
      return;
    decl = vars.front();
  }
  if (declNeedsExplicitAvailability(decl)) {
    auto diag = decl->diagnose(diag::public_decl_needs_availability);
    // When a target platform was supplied, offer a fix-it inserting an
    // `@available(<platform>, *)` attribute above the declaration.
    auto suggestPlatform =
        decl->getASTContext().LangOpts.RequireExplicitAvailabilityTarget;
    if (!suggestPlatform.empty()) {
      auto InsertLoc = decl->getAttrs().getStartLoc(/*forModifiers=*/false);
      if (InsertLoc.isInvalid())
        InsertLoc = decl->getStartLoc();
      if (InsertLoc.isInvalid())
        return;
      std::string AttrText;
      {
        llvm::raw_string_ostream Out(AttrText);
        auto &ctx = decl->getASTContext();
        // Match the declaration's indentation so the fix-it applies cleanly.
        StringRef OriginalIndent = Lexer::getIndentationForLine(
          ctx.SourceMgr, InsertLoc);
        Out << "@available(" << suggestPlatform << ", *)\n"
            << OriginalIndent;
      }
      diag.fixItInsert(InsertLoc, AttrText);
    }
  }
}
| 51,683 |
398 | <filename>src/main/java/com/ruiyun/jvppeteer/core/page/Accessibility.java
package com.ruiyun.jvppeteer.core.page;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.ruiyun.jvppeteer.core.Constant;
import com.ruiyun.jvppeteer.protocol.accessbility.SerializedAXNode;
import com.ruiyun.jvppeteer.transport.CDPSession;
import com.ruiyun.jvppeteer.util.StringUtil;
import com.ruiyun.jvppeteer.util.ValidateUtil;
import java.beans.IntrospectionException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Provides access to the Chrome DevTools accessibility (AX) tree of a page
 * via the {@code Accessibility} CDP domain.
 */
public class Accessibility {

    // CDP session used to issue protocol commands for this page.
    private final CDPSession client;

    public Accessibility(CDPSession client) {
        this.client = client;
    }

    /**
     * Captures a snapshot of the accessibility tree.
     *
     * @param interestingOnly when true, prune the tree to "interesting"
     *     nodes only (as defined by {@code AXNode#isInteresting}).
     * @param root optional element to scope the snapshot to; when non-null,
     *     only the subtree rooted at that element is serialized.
     * @return the serialized root node, or null when {@code root} (or the
     *     scoped node, in interesting-only mode) cannot be located.
     */
    public SerializedAXNode snapshot(boolean interestingOnly , ElementHandle root) throws JsonProcessingException, IllegalAccessException, IntrospectionException, InvocationTargetException {
        JsonNode nodes = this.client.send("Accessibility.getFullAXTree",null,false);
        String backendNodeId = null;
        if (root != null) {
            Map<String,Object> params = new HashMap<>();
            params.put("objectId",root.getRemoteObject().getObjectId());
            JsonNode node = this.client.send("DOM.describeNode",params,true);
            // NOTE(review): reads backendNodeId directly off the describeNode
            // result root; CDP nests it as {node:{backendNodeId}} — confirm
            // what CDPSession.send returns here.
            backendNodeId = node.get("backendNodeId").asText();
        }
        // Deserialize every protocol AXNode payload, then rebuild the tree.
        Iterator<JsonNode> elements = nodes.elements();
        List<com.ruiyun.jvppeteer.protocol.accessbility.AXNode> payloads = new ArrayList<>();
        while(elements.hasNext()){
            payloads.add(Constant.OBJECTMAPPER.treeToValue(elements.next(),com.ruiyun.jvppeteer.protocol.accessbility.AXNode.class));
        }
        AXNode defaultRoot = AXNode.createTree(payloads);
        AXNode needle = defaultRoot;
        if (StringUtil.isNotEmpty(backendNodeId)){
            // Scope the snapshot to the subtree matching the requested element.
            String finalBackendNodeId = backendNodeId;
            needle = defaultRoot.find(node -> finalBackendNodeId.equals(node.getPayload().getBackendDOMNodeId()+""));
            if (needle == null)
                return null;
        }
        if (!interestingOnly)
            return serializeTree(needle,null).get(0);
        // Interesting-only mode: collect the allowed nodes from the full
        // tree, then serialize only those under the needle.
        Set<AXNode> interestingNodes = new HashSet<>();
        collectInterestingNodes(interestingNodes, defaultRoot, false);
        if (!interestingNodes.contains(needle))
            return null;
        return serializeTree(needle, interestingNodes).get(0);
    }

    /**
     * Recursively gathers nodes considered interesting, tracking whether the
     * walk is currently inside a control (which affects interestingness).
     */
    private void collectInterestingNodes(Set<AXNode> collection, AXNode node, boolean insideControl) {
        if (node.isInteresting(insideControl))
            collection.add(node);
        if (node.isLeafNode())
            return;
        insideControl = insideControl || node.isControl();
        for (AXNode child :
                node.getChildren())
            collectInterestingNodes(collection, child, insideControl);
    }

    /**
     * Serializes a subtree, keeping only nodes present in
     * {@code whitelistedNodes} when that set is non-empty; filtered-out nodes
     * are replaced by their (flattened) serialized children.
     */
    public List<SerializedAXNode> serializeTree(AXNode node, Set<AXNode> whitelistedNodes) throws IllegalAccessException, IntrospectionException, InvocationTargetException {
        List<SerializedAXNode> children = new ArrayList<>();
        for (AXNode child : node.getChildren())
            children.addAll(serializeTree(child, whitelistedNodes));
        if (ValidateUtil.isNotEmpty(whitelistedNodes) && !whitelistedNodes.contains(node))
            return children;
        SerializedAXNode serializedNode = node.serialize();
        if (ValidateUtil.isNotEmpty(children))
            serializedNode.setChildren(children);
        List<SerializedAXNode> result = new ArrayList<>();
        result.add(serializedNode);
        return result;
    }
}
| 1,485 |
589 | <filename>inspectit.shared.cs/src/test/java/rocks/inspectit/shared/cs/ci/business/valuesource/impl/HttpParameterValueSourceTest.java<gh_stars>100-1000
package rocks.inspectit.shared.cs.ci.business.valuesource.impl;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItemInArray;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import java.util.HashMap;
import java.util.Map;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.testng.annotations.Test;
import rocks.inspectit.shared.all.communication.data.HttpTimerData;
import rocks.inspectit.shared.all.communication.data.InvocationSequenceData;
import rocks.inspectit.shared.all.communication.data.TimerData;
import rocks.inspectit.shared.all.testbase.TestBase;
import rocks.inspectit.shared.cs.cmr.service.cache.CachedDataService;
/**
* @author <NAME>
*
*/
@SuppressWarnings("PMD")
public class HttpParameterValueSourceTest extends TestBase {
@InjectMocks
HttpParameterValueSource valueSource;
@Mock
CachedDataService cachedDataService;
@Mock
InvocationSequenceData invocationSeuence;
@Mock
HttpTimerData httpTimerData;
/**
* Test
* {@link HttpParameterValueSource#getStringValues(InvocationSequenceData, rocks.inspectit.shared.all.cmr.service.ICachedDataService)}
* .
*/
public static class GetStringVlaues extends HttpParameterValueSourceTest {
private static final String PARAMETER_NAME_1 = "parameter1";
private static final String PARAMETER_NAME_2 = "parameter2";
private static final String VALUE_A = "valueA";
private static final String VALUE_B = "valueB";
private static final String VALUE_C = "valueC";
private static final String VALUE_D = "valueD";
/**
 * All values of the first parameter must be returned, and none of the
 * second parameter's values.
 */
@Test
public void retrieveFirstParameter() {
    Map<String, String[]> httpParameters = new HashMap<>();
    httpParameters.put(PARAMETER_NAME_1, new String[] { VALUE_A, VALUE_B });
    httpParameters.put(PARAMETER_NAME_2, new String[] { VALUE_C, VALUE_D });
    Mockito.doReturn(httpTimerData).when(invocationSeuence).getTimerData();
    Mockito.doReturn(httpParameters).when(httpTimerData).getParameters();
    valueSource.setParameterName(PARAMETER_NAME_1);

    String[] result = valueSource.getStringValues(invocationSeuence, cachedDataService);

    assertThat(result, hasItemInArray(VALUE_A));
    assertThat(result, hasItemInArray(VALUE_B));
    assertThat(result, not(hasItemInArray(VALUE_C)));
    assertThat(result, not(hasItemInArray(VALUE_D)));
}
@Test
public void retrieveSecondParameter() {
Mockito.doReturn(httpTimerData).when(invocationSeuence).getTimerData();
Map<String, String[]> parameterMap = new HashMap<>();
parameterMap.put(PARAMETER_NAME_1, new String[] { VALUE_A, VALUE_B });
parameterMap.put(PARAMETER_NAME_2, new String[] { VALUE_C, VALUE_D });
Mockito.doReturn(parameterMap).when(httpTimerData).getParameters();
valueSource.setParameterName(PARAMETER_NAME_2);
String[] values = valueSource.getStringValues(invocationSeuence, cachedDataService);
assertThat(values, hasItemInArray(VALUE_C));
assertThat(values, hasItemInArray(VALUE_D));
assertThat(values, not(hasItemInArray(VALUE_A)));
assertThat(values, not(hasItemInArray(VALUE_B)));
}
@Test
public void noHttpData() {
Mockito.doReturn(new TimerData()).when(invocationSeuence).getTimerData();
valueSource.setParameterName(PARAMETER_NAME_2);
String[] values = valueSource.getStringValues(invocationSeuence, cachedDataService);
assertThat(values, is(notNullValue()));
assertThat(values.length, is(equalTo(0)));
}
@Test
public void noHttpParameter() {
Mockito.doReturn(httpTimerData).when(invocationSeuence).getTimerData();
Mockito.doReturn(new HashMap<>()).when(httpTimerData).getParameters();
valueSource.setParameterName(PARAMETER_NAME_2);
String[] values = valueSource.getStringValues(invocationSeuence, cachedDataService);
assertThat(values, is(notNullValue()));
assertThat(values.length, is(equalTo(0)));
}
@Test
public void noRequiredHttpParameter() {
Mockito.doReturn(httpTimerData).when(invocationSeuence).getTimerData();
Map<String, String[]> parameterMap = new HashMap<>();
parameterMap.put(PARAMETER_NAME_1, new String[] { VALUE_A, VALUE_B });
Mockito.doReturn(parameterMap).when(httpTimerData).getParameters();
valueSource.setParameterName(PARAMETER_NAME_2);
String[] values = valueSource.getStringValues(invocationSeuence, cachedDataService);
assertThat(values, is(notNullValue()));
assertThat(values.length, is(equalTo(0)));
}
}
}
| 1,668 |
3,200 | <reponame>PowerOlive/mindspore
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""builtin_operations"""
import numpy as np
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
import mindspore.common.dtype as mstype
from mindspore.common.dtype import dtype_to_nptype, get_py_obj_dtype
def ScalarAdd(x, y):
    """Implement `scalar_add`: return the sum ``x + y``."""
    return x + y


def ScalarMul(x, y):
    """Implement `scalar_mul`: return the product ``x * y``."""
    return x * y


def ScalarMod(x, y):
    """Implement `scalar_mod`: return ``x % y`` (Python modulo semantics)."""
    # Docstring previously said `scalar_mul` (copy-paste error); this is modulo.
    return x % y


def ScalarSub(x, y):
    """Implement `scalar_sub`: return the difference ``x - y``."""
    return x - y


def ScalarUsub(x):
    """Implement `scalar_usub`: return the arithmetic negation ``-x``."""
    return -x
def TupleGetItem(x, index):
    """Implement `tuple_getitem`.

    Tensor inputs are indexed through their numpy view and re-wrapped in a
    Tensor; any other subscriptable value is indexed directly.
    """
    if isinstance(x, Tensor):
        return Tensor(x.asnumpy()[index])
    return x[index]
def scalar_gt(x, y):
    """Implement `scalar_gt`: strict greater-than comparison."""
    return x > y


def scalar_ne(x, y):
    """Implement `scalar_ne`: inequality comparison."""
    return x != y


def scalar_eq(x, y):
    """Implement `scalar_eq`: equality comparison."""
    return x == y


def scalar_le(x, y):
    """Implement `scalar_le`: less-than-or-equal comparison."""
    return x <= y


def scalar_lt(x, y):
    """Implement `scalar_lt`: strict less-than comparison."""
    return x < y


def identity(x):
    """Implement `identity`: return the argument unchanged."""
    return x
def zeros_like_tensor(x):
    """Implement `zeros_like_tensor`: float32 all-zero Tensor shaped like ``x``."""
    shape = x.asnumpy().shape
    return Tensor(np.zeros(shape, dtype=np.float32))
def Switch(c, x, y):
    """Implement `switch`: select ``x`` when ``c`` is truthy, else ``y``."""
    if c:
        return x
    return y


def list_getitem(data, item):
    """Implement `list_getitem`: subscript ``data`` with ``item``."""
    return data[item]


def bool_not(x):
    """Implement `bool_not`: logical negation of ``x``."""
    return not x


def bool_and(x, y):
    """Implement `bool_and` with Python's short-circuit semantics.

    Returns ``x`` when it is falsy, otherwise ``y`` (same as ``x and y``).
    """
    return y if x else x


def bool_or(x, y):
    """Implement `bool_or` with Python's short-circuit semantics.

    Returns ``x`` when it is truthy, otherwise ``y`` (same as ``x or y``).
    """
    return x if x else y


def make_list(*xs):
    """Implement `make_list`: collect the positional arguments into a list."""
    return [*xs]


def list_len(x):
    """Implement `list_len`: length of ``x``."""
    return len(x)
def Depend(value, expr):
    """Implement `Depend`: the dependency edge ``expr`` is ignored here."""
    return value


def UpdateState(monad, *exprs):
    """Implement `UpdateState`: attached expressions are ignored here."""
    return monad


def Load(value, u=None):
    """Implement `Load`: the monad argument ``u`` is ignored here."""
    return value


# only used in PyNative mode
def make_ref(key, value, ref):
    """Return ``value``; ``key`` and ``ref`` are ignored in this implementation."""
    return value
def scalar_cast(x, t):
    """Implement scalar_cast: cast scalar ``x`` to the numpy type behind dtype ``t``
    and return it as a plain Python scalar."""
    # Map the mindspore dtype to its numpy counterpart (e.g. int32 -> np.int32).
    np_type = dtype_to_nptype(t)
    value = np_type(x)
    # NOTE(review): np.ndarray.item is invoked unbound on `value`, which for a
    # scalar input is a numpy scalar (np.generic), not an ndarray -- presumably
    # this works for the values actually passed here; confirm before changing.
    cast_value = np.ndarray.item(value)
    return cast_value
def typeof(x):
    """Implement typeof."""
    # Delegates to mindspore's dtype lookup for arbitrary Python objects.
    return get_py_obj_dtype(x)
def tuple_to_array(x):
    """Implement `tuple_to_array`."""
    # Materialize the tuple as a numpy array, then wrap it in a Tensor.
    return Tensor(np.array(x))
def stop_gradient(x):
    """Implement `stop_gradient`."""
    # Gradient cutting happens in the compiled graph; at the Python level this
    # is an identity function.
    return x
# Shared HyperMap instance used to apply a function element-wise over nested
# structures (e.g. tuples/lists of tensors).
hyper_map = C.HyperMap()
def mixed_precision_cast(dst_type, x):
    """Implement `mixed_precision_cast`: cast all floating-point tensors in
    (possibly nested) ``x`` to ``dst_type``; leave everything else untouched."""
    def cast_inner(data):
        # Only float16/float32/float64 tensors are re-cast; other values pass
        # through unchanged.
        if isinstance(data, Tensor) and data.dtype in (mstype.float32, mstype.float16, mstype.float64):
            return F.cast(data, dst_type)
        return data
    # hyper_map recurses into nested containers and applies cast_inner leaf-wise.
    return hyper_map(cast_inner, x)
| 1,491 |
/* $FreeBSD: head/sys/dev/usb/template/usb_template_mouse.c 246125 2013-01-30 16:05:54Z hselasky $ */
/*-
* Copyright (c) 2010 <NAME>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This file contains the USB template for an USB Mouse Device.
*/
#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/stdint.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <bus/u4b/usb.h>
#include <bus/u4b/usbdi.h>
#include <bus/u4b/usb_core.h>
#include <bus/u4b/usb_cdc.h>
#include <bus/u4b/template/usb_template.h>
#endif /* USB_GLOBAL_INCLUDE_FILE */
/*
 * String descriptor indices for this template.  Index 0 is reserved by the
 * USB specification for the language ID table.
 */
enum {
	INDEX_LANG,
	INDEX_MOUSE,
	INDEX_PRODUCT,
	INDEX_MAX,
};

/* UTF-16LE string payloads (each character followed by a NUL high byte). */
#define	STRING_PRODUCT \
  "M\0o\0u\0s\0e\0 \0T\0e\0s\0t\0 \0D\0e\0v\0i\0c\0e"

#define	STRING_MOUSE \
  "M\0o\0u\0s\0e\0 \0i\0n\0t\0e\0r\0f\0a\0c\0e"

/* make the real string descriptors */

USB_MAKE_STRING_DESC(STRING_MOUSE, string_mouse);
USB_MAKE_STRING_DESC(STRING_PRODUCT, string_product);

/* prototypes */

/* The following HID descriptor was dumped from a HP mouse. */

static uint8_t mouse_hid_descriptor[] = {
	0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0x09, 0x01,
	0xa1, 0x00, 0x05, 0x09, 0x19, 0x01, 0x29, 0x03,
	0x15, 0x00, 0x25, 0x01, 0x95, 0x03, 0x75, 0x01,
	0x81, 0x02, 0x95, 0x05, 0x81, 0x03, 0x05, 0x01,
	0x09, 0x30, 0x09, 0x31, 0x09, 0x38, 0x15, 0x81,
	0x25, 0x7f, 0x75, 0x08, 0x95, 0x03, 0x81, 0x06,
	0xc0, 0xc0
};

/* Interrupt IN endpoint: 8-byte max packet size at every supported speed. */
static const struct usb_temp_packet_size mouse_intr_mps = {
	.mps[USB_SPEED_LOW] = 8,
	.mps[USB_SPEED_FULL] = 8,
	.mps[USB_SPEED_HIGH] = 8,
};

static const struct usb_temp_interval mouse_intr_interval = {
	.bInterval[USB_SPEED_LOW] = 2,	/* 2ms */
	.bInterval[USB_SPEED_FULL] = 2,	/* 2ms */
	.bInterval[USB_SPEED_HIGH] = 5,	/* 2ms */
};

static const struct usb_temp_endpoint_desc mouse_ep_0 = {
	.ppRawDesc = NULL,		/* no raw descriptors */
	.pPacketSize = &mouse_intr_mps,
	.pIntervals = &mouse_intr_interval,
	.bEndpointAddress = UE_DIR_IN,
	.bmAttributes = UE_INTERRUPT,
};

static const struct usb_temp_endpoint_desc *mouse_endpoints[] = {
	&mouse_ep_0,
	NULL,
};

/*
 * HID class descriptor: bLength 9, bDescriptorType 0x21 (HID), bcdHID 1.10,
 * bCountryCode 0, one class descriptor of type 0x22 (Report) whose length is
 * sizeof(mouse_hid_descriptor).
 */
static const uint8_t mouse_raw_desc[] = {
	0x09, 0x21, 0x10, 0x01, 0x00, 0x01, 0x22, sizeof(mouse_hid_descriptor),
	0x00
};

static const void *mouse_iface_0_desc[] = {
	mouse_raw_desc,
	NULL,
};

/* HID interface (class 3), boot subclass (1), mouse protocol (2). */
static const struct usb_temp_interface_desc mouse_iface_0 = {
	.ppRawDesc = mouse_iface_0_desc,
	.ppEndpoints = mouse_endpoints,
	.bInterfaceClass = 3,
	.bInterfaceSubClass = 1,
	.bInterfaceProtocol = 2,
	.iInterface = INDEX_MOUSE,
};

static const struct usb_temp_interface_desc *mouse_interfaces[] = {
	&mouse_iface_0,
	NULL,
};

static const struct usb_temp_config_desc mouse_config_desc = {
	.ppIfaceDesc = mouse_interfaces,
	.bmAttributes = UC_BUS_POWERED,
	.bMaxPower = 25,		/* 50 mA */
	.iConfiguration = INDEX_PRODUCT,
};

static const struct usb_temp_config_desc *mouse_configs[] = {
	&mouse_config_desc,
	NULL,
};

static usb_temp_get_string_desc_t mouse_get_string_desc;
static usb_temp_get_vendor_desc_t mouse_get_vendor_desc;

/* Top-level device template consumed by the usb_template framework. */
const struct usb_temp_device_desc usb_template_mouse = {
	.getStringDesc = &mouse_get_string_desc,
	.getVendorDesc = &mouse_get_vendor_desc,
	.ppConfigDesc = mouse_configs,
	.idVendor = USB_TEMPLATE_VENDOR,
	.idProduct = 0x00AE,
	.bcdDevice = 0x0100,
	/*
	 * NOTE(review): UDCLASS_COMM (communications) looks odd for a HID
	 * mouse; the HID class is announced at interface level above, so this
	 * is presumably a leftover from a CDC template -- confirm.
	 */
	.bDeviceClass = UDCLASS_COMM,
	.bDeviceSubClass = 0,
	.bDeviceProtocol = 0,
	.iManufacturer = 0,
	.iProduct = INDEX_PRODUCT,
	.iSerialNumber = 0,
};
/*------------------------------------------------------------------------*
* mouse_get_vendor_desc
*
* Return values:
* NULL: Failure. No such vendor descriptor.
* Else: Success. Pointer to vendor descriptor is returned.
*------------------------------------------------------------------------*/
static const void *
mouse_get_vendor_desc(const struct usb_device_request *req, uint16_t *plen)
{
	/*
	 * Only answer a device-to-host GET_DESCRIPTOR (0x06) for descriptor
	 * type 0x22 (HID report), index 0, on interface 0.
	 */
	if (req->bmRequestType != 0x81 || req->bRequest != 0x06)
		return (NULL);
	if (req->wValue[0] != 0x00 || req->wValue[1] != 0x22)
		return (NULL);
	if (req->wIndex[0] != 0 || req->wIndex[1] != 0)
		return (NULL);

	*plen = sizeof(mouse_hid_descriptor);
	return (mouse_hid_descriptor);
}
/*------------------------------------------------------------------------*
* mouse_get_string_desc
*
* Return values:
* NULL: Failure. No such string.
* Else: Success. Pointer to string descriptor is returned.
*------------------------------------------------------------------------*/
static const void *
mouse_get_string_desc(uint16_t lang_id, uint8_t string_index)
{
static const void *ptr[INDEX_MAX] = {
[INDEX_LANG] = &usb_string_lang_en,
[INDEX_MOUSE] = &string_mouse,
[INDEX_PRODUCT] = &string_product,
};
if (string_index == 0) {
return (&usb_string_lang_en);
}
if (lang_id != 0x0409) {
return (NULL);
}
if (string_index < INDEX_MAX) {
return (ptr[string_index]);
}
return (NULL);
}
| 2,519 |
import re
import json
import fnmatch
import sys
# Python 2 script: reads a test-runner log from stdin, cross-checks it against
# config.json, and prints summary statistics.
config = json.load(open('config.json'))
# Names listed in TestErrorMap; entries still present after the loop below were
# never mentioned in the log and are reported as unknown at the end.
test_error_set = set(config['TestErrorMap'].keys())
# NOTE(review): never populated or read anywhere below -- presumably dead code;
# confirm before removing.
obsolete_disabled_tests = set()
all_tests = set()
failing_tests = set()
unimpl_tests = set()
disabled_tests = set()
passed_tests = set()
for line in sys.stdin:
    # Expected log line shape: "STATUS (test.name)".
    m = re.match('^(PASSED|UNIMPLEMENTED|FAILED|DISABLED) \((.*)\)$', line.strip())
    if m:
        status, name = m.groups()
        if name in test_error_set:
            test_error_set.remove(name)
        all_tests.add(name)
        # Bucket the test by its reported status.
        if status == 'FAILED':
            failing_tests.add(name)
        elif status == 'UNIMPLEMENTED':
            unimpl_tests.add(name)
        elif status == 'DISABLED':
            disabled_tests.add(name)
        elif status == 'PASSED':
            passed_tests.add(name)
# Every DisabledTests glob in the config should match at least one test that
# the log reported as DISABLED; warn about globs that match nothing.
if disabled_tests:
    for disabled_glob in sorted(config['DisabledTests'].keys()):
        tests_matching_glob = fnmatch.filter(disabled_tests, disabled_glob)
        if not tests_matching_glob:
            print 'DisabledTests glob', disabled_glob, 'matches no tests'
else:
    print '(DisabledTests unchecked)'
print len(all_tests), 'total tests'
print len(passed_tests), 'passed'
print len(failing_tests), 'tests failing'
print len(unimpl_tests), 'tests not supported'
if test_error_set:
    print 'unknown TestErrorMap keys', test_error_set
5,383 | <filename>test/ext/mypy/files/inspect.py<gh_stars>1000+
"""
test inspect()
however this is not really working
"""
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Mapper
Base = declarative_base()


class A(Base):
    # Minimal mapped model used as the inspection target.
    __tablename__ = "a"

    id = Column(Integer, primary_key=True)
    data = Column(String)


# Instance not yet attached to any session.
a1 = A(data="d")

e = create_engine("sqlite://")

# TODO: I can't get these to work, pylance and mypy both don't want
# to accommodate for different types for the first argument

# inspect() on an instance: the returned state exposes `.transient`.
t: bool = inspect(a1).transient

# inspect() on a mapped class yields its Mapper (per the annotation).
m: Mapper = inspect(A)

# inspect() on an Engine yields an Inspector (per the annotation below).
inspect(e).get_table_names()
i: Inspector = inspect(e)

with e.connect() as conn:
    inspect(conn).get_table_names()
1,037 | <reponame>fgdadiaonan/android-open-project-demo<filename>sticky-header-listview-demo-sunfusheng/app/src/main/java/com/sunfusheng/StickyHeaderListView/model/OperationEntity.java
package com.sunfusheng.StickyHeaderListView.model;
import java.io.Serializable;
/**
* Created by sunfusheng on 16/4/20.
*/
/**
 * Serializable value object describing a single operation entry: a title, a
 * subtitle and the URL of its image.
 */
public class OperationEntity implements Serializable {

    private String title;
    private String subtitle;
    private String image_url;

    /** No-arg constructor (required for serialization frameworks). */
    public OperationEntity() {
    }

    /** Convenience constructor populating all three fields at once. */
    public OperationEntity(String title, String subtitle, String image_url) {
        this.title = title;
        this.subtitle = subtitle;
        this.image_url = image_url;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public String getSubtitle() {
        return subtitle;
    }

    public void setSubtitle(String subtitle) {
        this.subtitle = subtitle;
    }

    public String getImage_url() {
        return image_url;
    }

    public void setImage_url(String image_url) {
        this.image_url = image_url;
    }
}
| 425 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/ash/chrome_browser_main_extra_parts_ash.h"
#include <utility>
#include "ash/public/cpp/ash_switches.h"
#include "ash/public/cpp/mus_property_mirror_ash.h"
#include "ash/public/cpp/shelf_model.h"
#include "ash/public/cpp/window_properties.h"
#include "ash/public/interfaces/constants.mojom.h"
#include "ash/public/interfaces/process_creation_time_recorder.mojom.h"
#include "ash/shell.h"
#include "base/command_line.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chromeos/ash_config.h"
#include "chrome/browser/chromeos/night_light/night_light_client.h"
#include "chrome/browser/profiles/profile_manager.h"
#include "chrome/browser/ui/app_list/app_list_client_impl.h"
#include "chrome/browser/ui/ash/accessibility/accessibility_controller_client.h"
#include "chrome/browser/ui/ash/ash_shell_init.h"
#include "chrome/browser/ui/ash/auto_connect_notifier.h"
#include "chrome/browser/ui/ash/cast_config_client_media_router.h"
#include "chrome/browser/ui/ash/chrome_new_window_client.h"
#include "chrome/browser/ui/ash/chrome_shell_content_state.h"
#include "chrome/browser/ui/ash/ime_controller_client.h"
#include "chrome/browser/ui/ash/launcher/chrome_launcher_controller.h"
#include "chrome/browser/ui/ash/login_screen_client.h"
#include "chrome/browser/ui/ash/media_client.h"
#include "chrome/browser/ui/ash/network/data_promo_notification.h"
#include "chrome/browser/ui/ash/network/network_connect_delegate_chromeos.h"
#include "chrome/browser/ui/ash/network/network_portal_notification_controller.h"
#include "chrome/browser/ui/ash/session_controller_client.h"
#include "chrome/browser/ui/ash/system_tray_client.h"
#include "chrome/browser/ui/ash/tab_scrubber.h"
#include "chrome/browser/ui/ash/tablet_mode_client.h"
#include "chrome/browser/ui/ash/volume_controller.h"
#include "chrome/browser/ui/ash/vpn_list_forwarder.h"
#include "chrome/browser/ui/ash/wallpaper_controller_client.h"
#include "chrome/browser/ui/views/frame/immersive_context_mus.h"
#include "chrome/browser/ui/views/frame/immersive_handler_factory_mus.h"
#include "chrome/browser/ui/views/ime_driver/ime_driver_mus.h"
#include "chrome/browser/ui/views/select_file_dialog_extension.h"
#include "chrome/browser/ui/views/select_file_dialog_extension_factory.h"
#include "chromeos/network/network_connect.h"
#include "chromeos/network/network_handler.h"
#include "chromeos/network/portal_detector/network_portal_detector.h"
#include "components/session_manager/core/session_manager.h"
#include "components/session_manager/core/session_manager_observer.h"
#include "components/startup_metric_utils/browser/startup_metric_utils.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/service_manager_connection.h"
#include "services/service_manager/public/cpp/connector.h"
#include "services/ui/public/interfaces/constants.mojom.h"
#include "services/ui/public/interfaces/user_activity_monitor.mojom.h"
#include "ui/aura/mus/property_converter.h"
#include "ui/aura/mus/user_activity_forwarder.h"
#include "ui/base/ime/chromeos/input_method_manager.h"
#include "ui/base/user_activity/user_activity_detector.h"
#include "ui/views/mus/mus_client.h"
#if BUILDFLAG(ENABLE_WAYLAND_SERVER)
#include "chrome/browser/exo_parts.h"
#endif
#if BUILDFLAG(ENABLE_CROS_ASSISTANT)
#include "chrome/browser/ui/ash/assistant/assistant_client.h"
#endif
namespace {

// Sends the browser's main-entry-point timestamp to ash over mojo so ash can
// record metrics relative to process creation.
void PushProcessCreationTimeToAsh() {
  ash::mojom::ProcessCreationTimeRecorderPtr recorder;
  content::ServiceManagerConnection::GetForProcess()
      ->GetConnector()
      ->BindInterface(ash::mojom::kServiceName, &recorder);
  // The main entry point tick is recorded very early in startup; it must be
  // set before this function runs.
  DCHECK(!startup_metric_utils::MainEntryPointTicks().is_null());
  recorder->SetMainProcessCreationTime(
      startup_metric_utils::MainEntryPointTicks());
}

}  // namespace
namespace internal {

// Creates a ChromeLauncherController on the first active session notification.
// Used to avoid constructing a ChromeLauncherController with no active profile.
class ChromeLauncherControllerInitializer
    : public session_manager::SessionManagerObserver {
 public:
  ChromeLauncherControllerInitializer() {
    session_manager::SessionManager::Get()->AddObserver(this);
  }

  ~ChromeLauncherControllerInitializer() override {
    // If the controller was created, OnSessionStateChanged() already removed
    // this observer; removing twice must be avoided.
    if (!chrome_launcher_controller_)
      session_manager::SessionManager::Get()->RemoveObserver(this);
  }

  // session_manager::SessionManagerObserver:
  void OnSessionStateChanged() override {
    DCHECK(!chrome_launcher_controller_);
    DCHECK(!ChromeLauncherController::instance());

    if (session_manager::SessionManager::Get()->session_state() ==
        session_manager::SessionState::ACTIVE) {
      // Chrome keeps its own ShelfModel copy in sync with Ash's ShelfModel.
      chrome_shelf_model_ = std::make_unique<ash::ShelfModel>();
      chrome_launcher_controller_ = std::make_unique<ChromeLauncherController>(
          nullptr, chrome_shelf_model_.get());
      chrome_launcher_controller_->Init();

      // One-shot: stop observing once the controller exists.
      session_manager::SessionManager::Get()->RemoveObserver(this);
    }
  }

 private:
  // By default |chrome_shelf_model_| is synced with Ash's ShelfController
  // instance in Mash and in Classic Ash; otherwise this is not created and
  // Ash's ShelfModel instance is used directly.
  std::unique_ptr<ash::ShelfModel> chrome_shelf_model_;
  std::unique_ptr<ChromeLauncherController> chrome_launcher_controller_;

  DISALLOW_COPY_AND_ASSIGN(ChromeLauncherControllerInitializer);
};

}  // namespace internal
// Defaulted instead of empty-brace bodies (clang-tidy:
// modernize-use-equals-default); behavior is unchanged.
ChromeBrowserMainExtraPartsAsh::ChromeBrowserMainExtraPartsAsh() = default;

ChromeBrowserMainExtraPartsAsh::~ChromeBrowserMainExtraPartsAsh() = default;
// Called once the service manager connection is up. Only does work in MASH,
// where ash runs out-of-process and window properties must cross the service
// boundary.
void ChromeBrowserMainExtraPartsAsh::ServiceManagerConnectionStarted(
    content::ServiceManagerConnection* connection) {
  if (chromeos::GetAshConfig() == ash::Config::MASH) {
    // ash::Shell will not be created because ash is running out-of-process.
    ash::Shell::SetIsBrowserProcessWithMash();
    DCHECK(views::MusClient::Exists());
    views::MusClient* mus_client = views::MusClient::Get();
    aura::WindowTreeClientDelegate* delegate = mus_client;
    // Register ash-specific window properties with Chrome's property converter.
    // Values of registered properties will be transported between the services.
    ash::RegisterWindowProperties(delegate->GetPropertyConverter());
    mus_client->SetMusPropertyMirror(
        std::make_unique<ash::MusPropertyMirrorAsh>());
  }
}
// Runs before the user profile exists. Everything constructed here must work
// at the login screen; teardown happens (roughly in reverse order) in
// PostMainMessageLoopRun(). The construction order below matters.
void ChromeBrowserMainExtraPartsAsh::PreProfileInit() {
  // IME driver must be available at login screen, so initialize before profile.
  IMEDriver::Register();

  // NetworkConnect handles the network connection state machine for the UI.
  network_connect_delegate_ =
      std::make_unique<NetworkConnectDelegateChromeOS>();
  chromeos::NetworkConnect::Initialize(network_connect_delegate_.get());

  if (chromeos::GetAshConfig() != ash::Config::MASH) {
    // Classic ash: the Shell lives in this process.
    ash_shell_init_ = std::make_unique<AshShellInit>();
  } else {
    immersive_context_ = std::make_unique<ImmersiveContextMus>();
    immersive_handler_factory_ = std::make_unique<ImmersiveHandlerFactoryMus>();

    // Enterprise support in the browser can monitor user activity. Connect to
    // the UI service to monitor activity. The ash process has its own monitor.
    user_activity_detector_ = std::make_unique<ui::UserActivityDetector>();
    ui::mojom::UserActivityMonitorPtr user_activity_monitor;
    content::ServiceManagerConnection::GetForProcess()
        ->GetConnector()
        ->BindInterface(ui::mojom::kServiceName, &user_activity_monitor);
    user_activity_forwarder_ = std::make_unique<aura::UserActivityForwarder>(
        std::move(user_activity_monitor), user_activity_detector_.get());
  }

  app_list_client_ = std::make_unique<AppListClientImpl>();

  // Must be available at login screen, so initialize before profile.
  accessibility_controller_client_ =
      std::make_unique<AccessibilityControllerClient>();
  accessibility_controller_client_->Init();

  chrome_new_window_client_ = std::make_unique<ChromeNewWindowClient>();

  ime_controller_client_ = std::make_unique<ImeControllerClient>(
      chromeos::input_method::InputMethodManager::Get());
  ime_controller_client_->Init();

  session_controller_client_ = std::make_unique<SessionControllerClient>();
  session_controller_client_->Init();

  system_tray_client_ = std::make_unique<SystemTrayClient>();

  // Makes mojo request to TabletModeController in ash.
  tablet_mode_client_ = std::make_unique<TabletModeClient>();
  tablet_mode_client_->Init();

  volume_controller_ = std::make_unique<VolumeController>();

  vpn_list_forwarder_ = std::make_unique<VpnListForwarder>();

  wallpaper_controller_client_ = std::make_unique<WallpaperControllerClient>();
  wallpaper_controller_client_->Init();

  // Defers ChromeLauncherController creation until a session becomes active.
  chrome_launcher_controller_initializer_ =
      std::make_unique<internal::ChromeLauncherControllerInitializer>();

  // Route file-picker requests through the extension-based dialog.
  ui::SelectFileDialog::SetFactory(new SelectFileDialogExtensionFactory);

#if BUILDFLAG(ENABLE_WAYLAND_SERVER)
  exo_parts_ = ExoParts::CreateIfNecessary();
#endif

  PushProcessCreationTimeToAsh();
}
// Runs after the initial profile is created; clients constructed here may
// depend on profile state.
void ChromeBrowserMainExtraPartsAsh::PostProfileInit() {
  if (chromeos::GetAshConfig() == ash::Config::MASH)
    chrome_shell_content_state_ = std::make_unique<ChromeShellContentState>();

  cast_config_client_media_router_ =
      std::make_unique<CastConfigClientMediaRouter>();
  login_screen_client_ = std::make_unique<LoginScreenClient>();
  media_client_ = std::make_unique<MediaClient>();

  // Do not create a NetworkPortalNotificationController for tests since the
  // NetworkPortalDetector instance may be replaced.
  if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
          ::switches::kTestType)) {
    chromeos::NetworkPortalDetector* detector =
        chromeos::network_portal_detector::GetInstance();
    CHECK(detector);
    network_portal_notification_controller_ =
        std::make_unique<chromeos::NetworkPortalNotificationController>(
            detector);
  }

  // TODO(mash): Port TabScrubber.
  if (chromeos::GetAshConfig() != ash::Config::MASH) {
    // Initialize TabScrubber after the Ash Shell has been initialized.
    TabScrubber::GetInstance();
  }

  // Only create the notifier when the network stack and its auto-connect
  // handler are actually available.
  if (chromeos::NetworkHandler::IsInitialized() &&
      chromeos::NetworkHandler::Get()->auto_connect_handler()) {
    auto_connect_notifier_ = std::make_unique<AutoConnectNotifier>(
        ProfileManager::GetActiveUserProfile(),
        chromeos::NetworkHandler::Get()->network_connection_handler(),
        chromeos::NetworkHandler::Get()->network_state_handler(),
        chromeos::NetworkHandler::Get()->auto_connect_handler());
  }

#if BUILDFLAG(ENABLE_CROS_ASSISTANT)
  assistant_client_ = std::make_unique<AssistantClient>();
#endif
}
// Runs once the browser UI is fully started.
void ChromeBrowserMainExtraPartsAsh::PostBrowserStart() {
  data_promo_notification_ = std::make_unique<DataPromoNotification>();

  if (ash::switches::IsNightLightEnabled()) {
    // NightLightClient is handed the system request context so it can issue
    // network requests, then started immediately.
    night_light_client_ = std::make_unique<NightLightClient>(
        g_browser_process->system_request_context());
    night_light_client_->Start();
  }
}
// Teardown. Members are released roughly in reverse order of construction;
// the comments below call out the orderings that must not change.
void ChromeBrowserMainExtraPartsAsh::PostMainMessageLoopRun() {
#if BUILDFLAG(ENABLE_WAYLAND_SERVER)
  // ExoParts uses state from ash, delete it before ash so that exo can
  // uninstall correctly.
  exo_parts_.reset();
#endif

  night_light_client_.reset();
  data_promo_notification_.reset();
#if BUILDFLAG(ENABLE_CROS_ASSISTANT)
  assistant_client_.reset();
#endif
  chrome_launcher_controller_initializer_.reset();
  wallpaper_controller_client_.reset();
  vpn_list_forwarder_.reset();
  volume_controller_.reset();
  system_tray_client_.reset();
  session_controller_client_.reset();
  chrome_new_window_client_.reset();
  network_portal_notification_controller_.reset();
  media_client_.reset();
  login_screen_client_.reset();
  ime_controller_client_.reset();
  auto_connect_notifier_.reset();
  cast_config_client_media_router_.reset();
  accessibility_controller_client_.reset();
  // AppListClientImpl indirectly holds WebContents for answer card and
  // needs to be released before destroying the profile.
  app_list_client_.reset();

  ash_shell_init_.reset();

  chromeos::NetworkConnect::Shutdown();
  network_connect_delegate_.reset();

  // Views code observes TabletModeClient and may not be destroyed until
  // ash::Shell is so destroy |tablet_mode_client_| after ash::Shell.
  tablet_mode_client_.reset();
}
| 4,095 |
package us.koller.cameraroll.ui;
import android.annotation.SuppressLint;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.drawable.Animatable;
import android.graphics.drawable.AnimatedVectorDrawable;
import android.graphics.drawable.Drawable;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.annotation.RequiresApi;
import android.support.design.widget.BottomSheetDialog;
import android.support.v4.content.ContextCompat;
import android.support.v4.content.LocalBroadcastManager;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.ViewTreeObserver;
import android.view.WindowInsets;
import android.widget.Button;
import android.widget.Toast;
import java.io.IOException;
import java.io.OutputStream;
import us.koller.cameraroll.R;
import us.koller.cameraroll.data.Settings;
import us.koller.cameraroll.data.fileOperations.FileOperation;
import us.koller.cameraroll.data.provider.retriever.MediaStoreRetriever;
import us.koller.cameraroll.ui.widget.CropImageView;
import us.koller.cameraroll.util.ExifUtil;
import us.koller.cameraroll.util.InfoUtil;
import us.koller.cameraroll.util.MediaType;
import us.koller.cameraroll.util.Util;
public class EditImageActivity extends AppCompatActivity {
    /** Intent extra key: file-system path of the image being edited. */
    public static final String IMAGE_PATH = "IMAGE_PATH";
    /** Saved-instance-state key for the {@link CropImageView} state. */
    public static final String IMAGE_VIEW_STATE = "IMAGE_VIEW_STATE";
    /** Request code for the Storage Access Framework document picker. */
    public static final int STORAGE_FRAMEWORK_REQUEST_CODE = 69;
    // Presumably the compression quality used when exporting the edited JPEG
    // -- confirm against the save path.
    public static final int JPEG_QUALITY = 90;

    // Path of the source image, read from the launching intent in onCreate().
    private String imagePath;
    // Pending crop result; presumably held between cropping and saving --
    // confirm against done()/save code.
    private CropImageView.Result result;
    // EXIF data of the source image, retrieved in done().
    private ExifUtil.ExifItem[] exifData;
@Override
protected void onCreate(@Nullable final Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_edit_image);
Intent intent = getIntent();
if (intent == null) {
return;
}
final Toolbar toolbar = findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setTitle("");
actionBar.setDisplayHomeAsUpEnabled(true);
}
Uri uri = intent.getData();
if (uri == null) {
finish();
return;
}
String mimeType = MediaType.getMimeType(this, uri);
if (!(MediaType.checkImageMimeType(mimeType) || MediaType.checkRAWMimeType(mimeType))) {
Toast.makeText(this, R.string.editing_file_format_not_supported, Toast.LENGTH_SHORT).show();
finish();
}
imagePath = intent.getStringExtra(IMAGE_PATH);
final CropImageView imageView = findViewById(R.id.cropImageView);
CropImageView.State state = null;
if (savedInstanceState != null) {
state = (CropImageView.State) savedInstanceState.getSerializable(IMAGE_VIEW_STATE);
}
imageView.loadImage(uri, state);
final Button doneButton = findViewById(R.id.done_button);
doneButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
done(view);
}
});
//setting window insets manually
final ViewGroup rootView = findViewById(R.id.root_view);
final View actionArea = findViewById(R.id.action_area);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT_WATCH) {
rootView.setOnApplyWindowInsetsListener(new View.OnApplyWindowInsetsListener() {
@Override
@RequiresApi(api = Build.VERSION_CODES.KITKAT_WATCH)
public WindowInsets onApplyWindowInsets(View view, WindowInsets insets) {
// clear this listener so insets aren't re-applied
rootView.setOnApplyWindowInsetsListener(null);
toolbar.setPadding(toolbar.getPaddingStart() + insets.getSystemWindowInsetLeft(),
toolbar.getPaddingTop() + insets.getSystemWindowInsetTop(),
toolbar.getPaddingEnd() + insets.getSystemWindowInsetRight(),
toolbar.getPaddingBottom());
actionArea.setPadding(actionArea.getPaddingStart() + insets.getSystemWindowInsetLeft(),
actionArea.getPaddingTop(),
actionArea.getPaddingEnd() + insets.getSystemWindowInsetRight(),
actionArea.getPaddingBottom() + insets.getSystemWindowInsetBottom());
imageView.setPadding(imageView.getPaddingStart() + insets.getSystemWindowInsetLeft(),
imageView.getPaddingTop()/* + insets.getSystemWindowInsetTop()*/,
imageView.getPaddingEnd() + insets.getSystemWindowInsetRight(),
imageView.getPaddingBottom()/* + insets.getSystemWindowInsetBottom()*/);
return insets.consumeSystemWindowInsets();
}
});
} else {
rootView.getViewTreeObserver()
.addOnGlobalLayoutListener(
new ViewTreeObserver.OnGlobalLayoutListener() {
@Override
public void onGlobalLayout() {
rootView.getViewTreeObserver().removeOnGlobalLayoutListener(this);
// hacky way of getting window insets on pre-Lollipop
// somewhat works...
int[] screenSize = Util.getScreenSize(EditImageActivity.this);
int[] windowInsets = new int[]{
Math.abs(screenSize[0] - rootView.getLeft()),
Math.abs(screenSize[1] - rootView.getTop()),
Math.abs(screenSize[2] - rootView.getRight()),
Math.abs(screenSize[3] - rootView.getBottom())};
toolbar.setPadding(toolbar.getPaddingStart() + windowInsets[0],
toolbar.getPaddingTop() + windowInsets[1],
toolbar.getPaddingEnd() + windowInsets[2],
toolbar.getPaddingBottom());
actionArea.setPadding(actionArea.getPaddingStart() + windowInsets[0],
actionArea.getPaddingTop(),
actionArea.getPaddingEnd() + windowInsets[2],
actionArea.getPaddingBottom() + windowInsets[3]);
imageView.setPadding(imageView.getPaddingStart() + windowInsets[0],
imageView.getPaddingTop()/* + windowInsets[1]*/,
imageView.getPaddingEnd() + windowInsets[2],
imageView.getPaddingBottom()/* + windowInsets[3]*/);
}
});
}
imageView.getViewTreeObserver().addOnGlobalLayoutListener(
new ViewTreeObserver.OnGlobalLayoutListener() {
@Override
public void onGlobalLayout() {
imageView.getViewTreeObserver().removeOnGlobalLayoutListener(this);
imageView.setPadding(imageView.getPaddingStart(),
imageView.getPaddingTop() + toolbar.getHeight(),
imageView.getPaddingEnd(),
imageView.getPaddingBottom() + actionArea.getHeight());
}
});
//needed to achieve transparent navBar
getWindow().getDecorView().setSystemUiVisibility(
View.SYSTEM_UI_FLAG_LAYOUT_STABLE
| View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION
| View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN
| View.SYSTEM_UI_FLAG_IMMERSIVE);
}
/**
 * Crops the image and asks the user how to persist the result: "save" writes
 * back through the image's own Uri, "export" opens the Storage Access
 * Framework so the user can pick a new destination (finished later in
 * onActivityResult()).
 *
 * @param v the view that triggered the action (unused)
 */
public void done(View v) {
    CropImageView cropImageView = findViewById(R.id.cropImageView);
    // Capture the Exif tags before cropping so they can be re-applied to the result.
    final ExifUtil.ExifItem[] exifData = ExifUtil.retrieveExifData(this, cropImageView.getImageUri());
    cropImageView.getCroppedBitmap(new CropImageView.OnResultListener() {
        @Override
        public void onResult(final CropImageView.Result result) {
            // Bottom sheet offering the two persistence options.
            final BottomSheetDialog dialog = new BottomSheetDialog(EditImageActivity.this);
            @SuppressLint("InflateParams")
            View sheetView = EditImageActivity.this.getLayoutInflater()
                    .inflate(R.layout.edit_image_export_dialog, null);
            View save = sheetView.findViewById(R.id.save);
            View export = sheetView.findViewById(R.id.export);
            View.OnClickListener clickListener = new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    dialog.dismiss();
                    switch (view.getId()) {
                        case R.id.save:
                            // Overwrite the image at its original Uri.
                            saveCroppedImage(result.getImageUri(), result.getCroppedBitmap(), exifData);
                            break;
                        case R.id.export:
                            // Stash crop result + exif; written once the user has
                            // picked a destination (see onActivityResult()).
                            EditImageActivity.this.result = result;
                            EditImageActivity.this.exifData = exifData;
                            Uri imageUri = getIntent().getData();
                            String filename = InfoUtil.retrieveFileName(EditImageActivity.this, imageUri);
                            if (filename == null) {
                                filename = "image_edit.jpeg";
                            }
                            Intent intent = new Intent(Intent.ACTION_CREATE_DOCUMENT);
                            intent.setType("image/jpeg");
                            intent.putExtra(Intent.EXTRA_TITLE, filename);
                            startActivityForResult(intent, STORAGE_FRAMEWORK_REQUEST_CODE);
                            break;
                        default:
                            break;
                    }
                }
            };
            save.setOnClickListener(clickListener);
            if (imagePath == null) {
                // Without a resolvable file path in-place saving is not offered;
                // the button is dimmed and disabled, export remains available.
                save.setEnabled(false);
                save.setAlpha(0.5f);
            }
            export.setOnClickListener(clickListener);
            dialog.setContentView(sheetView);
            dialog.show();
        }
    });
}
/**
 * Receives the destination Uri chosen through the Storage Access Framework
 * and writes the previously stashed crop result there.
 *
 * @param requestCode only {@code STORAGE_FRAMEWORK_REQUEST_CODE} is handled
 * @param resultCode  RESULT_OK when the user picked a destination
 * @param data        intent carrying the chosen document Uri
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    switch (requestCode) {
        case STORAGE_FRAMEWORK_REQUEST_CODE:
            // Guard against a null intent/uri (cancelled picker) and against a
            // lost crop result (e.g. process recreation) instead of crashing
            // with a NullPointerException.
            if (resultCode == RESULT_OK && data != null && data.getData() != null
                    && result != null) {
                Uri uri = data.getData();
                saveCroppedImage(uri, result.getCroppedBitmap(), exifData);
            }
            break;
        default:
            break;
    }
}
/**
 * Writes the cropped bitmap as a JPEG to the given Uri on a background
 * thread, re-applies the captured Exif tags, triggers a media scan (when a
 * local file path is known) and finishes the activity on success.
 *
 * @param uri      destination to write to; shows an error toast and returns
 *                 when null
 * @param bitmap   cropped bitmap to persist; same null handling as uri
 * @param exifData Exif tags to copy onto the written file, may be null
 */
private void saveCroppedImage(final Uri uri, final Bitmap bitmap, final ExifUtil.ExifItem[] exifData) {
    if (uri == null || bitmap == null) {
        Toast.makeText(EditImageActivity.this, R.string.error, Toast.LENGTH_SHORT).show();
        return;
    }
    final String newPath = MediaStoreRetriever.getPathForUri(EditImageActivity.this, uri);
    AsyncTask.execute(new Runnable() {
        @Override
        public void run() {
            try {
                OutputStream outputStream;
                try {
                    outputStream = getContentResolver().openOutputStream(uri);
                } catch (SecurityException e) {
                    // No write permission for this uri; handled like a missing stream below.
                    outputStream = null;
                }
                if (outputStream != null) {
                    bitmap.compress(Bitmap.CompressFormat.JPEG, JPEG_QUALITY, outputStream);
                    outputStream.flush();
                    outputStream.close();
                } else {
                    // Nothing was written: skip exif copy, media scan and success toast.
                    return;
                }
                //save Exif-Data
                if (exifData != null) {
                    ExifUtil.saveExifData(newPath, exifData);
                }
                //scan path
                if (imagePath != null) {
                    FileOperation.Util.scanPaths(EditImageActivity.this, new String[]{newPath},
                            new FileOperation.Util.MediaScannerCallback() {
                                @Override
                                public void onAllPathsScanned() {
                                    // Broadcast completion to in-app listeners.
                                    Intent intent = new Intent(FileOperation.RESULT_DONE);
                                    LocalBroadcastManager.getInstance(EditImageActivity.this).sendBroadcast(intent);
                                }
                            });
                }
                EditImageActivity.this.runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        Toast.makeText(EditImageActivity.this, R.string.success, Toast.LENGTH_SHORT).show();
                        finish();
                    }
                });
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    });
}
/**
 * Inflates the editor menu and, on Lollipop and above, replaces the rotate
 * item's icon with its animated vector drawable counterpart.
 */
@Override
public boolean onCreateOptionsMenu(Menu menu) {
    super.onCreateOptionsMenu(menu);
    getMenuInflater().inflate(R.menu.image_edit, menu);
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
        AnimatedVectorDrawable rotateIcon = (AnimatedVectorDrawable)
                ContextCompat.getDrawable(this, R.drawable.ic_rotate_90_avd);
        menu.findItem(R.id.rotate).setIcon(rotateIcon);
    }
    return true;
}
/**
 * Handles toolbar actions: back navigation, rotation, crop confirmation,
 * the aspect-ratio choices and restoring the original crop rectangle.
 */
@Override
public boolean onOptionsItemSelected(MenuItem item) {
    final CropImageView imageView = findViewById(R.id.cropImageView);
    switch (item.getItemId()) {
        case android.R.id.home:
            onBackPressed();
            break;
        case R.id.rotate:
            boolean showAnimations = Settings.getInstance(this).showAnimations();
            Drawable d = item.getIcon();
            // Play the icon animation only when animations are enabled and it
            // is not already running; the rotation itself happens regardless.
            if (showAnimations && d instanceof Animatable && !((Animatable) d).isRunning()) {
                ((Animatable) d).start();
            }
            rotate90Degrees();
            break;
        case R.id.done:
            done(item.getActionView());
            break;
        // Aspect-ratio group: each entry checks itself and updates the crop view.
        case R.id.aspect_ratio_free:
            item.setChecked(true);
            imageView.setFreeAspectRatio();
            break;
        case R.id.aspect_ratio_original:
            item.setChecked(true);
            imageView.setOriginalAspectRatioFixed();
            break;
        case R.id.aspect_ratio_square:
            item.setChecked(true);
            imageView.setAspectRatio(1.0);
            break;
        case R.id.aspect_ratio_3_2:
            item.setChecked(true);
            imageView.setAspectRatio(3.0 / 2.0);
            break;
        case R.id.aspect_ratio_4_3:
            item.setChecked(true);
            imageView.setAspectRatio(4.0 / 3.0);
            break;
        case R.id.aspect_ratio_16_9:
            item.setChecked(true);
            imageView.setAspectRatio(16.0 / 9.0);
            break;
        case R.id.restore:
            imageView.restore();
            break;
        default:
            break;
    }
    return super.onOptionsItemSelected(item);
}
/** Rotates the displayed image by 90 degrees. */
private void rotate90Degrees() {
    ((CropImageView) findViewById(R.id.cropImageView)).rotate90Degree();
}
/**
 * Persists the crop view's state so the crop selection survives
 * configuration changes and process recreation.
 */
@Override
protected void onSaveInstanceState(Bundle outState) {
    super.onSaveInstanceState(outState);
    CropImageView cropView = findViewById(R.id.cropImageView);
    outState.putSerializable(IMAGE_VIEW_STATE, cropView.getCropImageViewState());
}
}
| 9,003 |
828 | <filename>hasor-db/src/main/java/net/hasor/db/dal/dynamic/nodes/ArrayDynamicSql.java<gh_stars>100-1000
/*
* Copyright 2008-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.hasor.db.dal.dynamic.nodes;
import net.hasor.db.dal.dynamic.BuilderContext;
import net.hasor.db.dal.dynamic.DynamicSql;
import net.hasor.db.dal.dynamic.QuerySqlBuilder;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
* 多个 SQL 节点组合成一个 SqlNode
* @author 赵永春 (<EMAIL>)
* @version : 2021-05-24
*/
public class ArrayDynamicSql implements DynamicSql {
/** 子节点 */
protected List<DynamicSql> subNodes = new ArrayList<>();
/** Appends a child node to this composite. */
public void addChildNode(DynamicSql node) {
    this.subNodes.add(node);
}
/**
 * Returns whether the last child node is a plain text node.
 * Returns false when there are no children (the original implementation
 * threw IndexOutOfBoundsException on an empty node list).
 */
public boolean lastIsText() {
    if (this.subNodes.isEmpty()) {
        return false;
    }
    return this.subNodes.get(this.subNodes.size() - 1) instanceof TextDynamicSql;
}
/**
 * Appends literal SQL text, merging it into a trailing text node when one
 * exists so consecutive text fragments collapse into a single node.
 */
public void appendText(String text) {
    int size = this.subNodes.size();
    DynamicSql tail = size > 0 ? this.subNodes.get(size - 1) : null;
    if (tail instanceof TextDynamicSql) {
        ((TextDynamicSql) tail).appendText(text);
    } else {
        this.addChildNode(new TextDynamicSql(text));
    }
}
@Override
public boolean isHavePlaceholder() {
    // The composite contains a placeholder iff any child does.
    return this.subNodes.stream().anyMatch(DynamicSql::isHavePlaceholder);
}
@Override
public void buildQuery(BuilderContext builderContext, QuerySqlBuilder querySqlBuilder) throws SQLException {
    // Render each child in declaration order; visitItem() lets subclasses
    // decide per child whether it should be rendered at all.
    for (int i = 0; i < this.subNodes.size(); i++) {
        DynamicSql dynamicSql = this.subNodes.get(i);
        if (visitItem(i, dynamicSql, builderContext, querySqlBuilder)) {
            dynamicSql.buildQuery(builderContext, querySqlBuilder);
        }
    }
}
/**
 * Hook invoked for each child before it is rendered by buildQuery();
 * returning false skips that child. The base implementation accepts all.
 */
protected boolean visitItem(int i, DynamicSql dynamicSql, BuilderContext builderContext, QuerySqlBuilder querySqlBuilder) {
    return true;
}
} | 1,106 |
852 | <gh_stars>100-1000
/*
<NAME>, <NAME>, <NAME> (C) JINR/Dubna
<EMAIL>, <EMAIL>, <EMAIL>
November. 2, 2005
*/
#ifndef HADRONDECAYER_INCLUDED
#define HADRONDECAYER_INCLUDED
#include "DatabasePDG.h"
#include "Particle.h"
// Decay time for particle p; weakDecayLimit presumably acts as a cutoff
// controlling which weak decays are performed — TODO confirm against the
// implementation.
double GetDecayTime(const Particle &p, double weakDecayLimit);
// Decays p, appending the decay products to `output` (allocated through
// `allocator`), using particle properties from the PDG database.
void Decay(List_t &output, Particle &p, ParticleAllocator &allocator, DatabasePDG *database);
#endif
| 342 |
21,684 | // Copyright 2010-2014 RethinkDB, all rights reserved.
#include "rdb_protocol/geo/distances.hpp"
#include "rdb_protocol/geo/ellipsoid.hpp"
#include "rdb_protocol/geo/exceptions.hpp"
#include "rdb_protocol/geo/geojson.hpp"
#include "rdb_protocol/geo/geo_visitor.hpp"
#include "rdb_protocol/geo/karney/geodesic.h"
#include "rdb_protocol/geo/s2/s2.h"
#include "rdb_protocol/geo/s2/s2latlng.h"
#include "rdb_protocol/geo/s2/s2latlngrect.h"
#include "rdb_protocol/geo/s2/s2polygon.h"
#include "rdb_protocol/geo/s2/s2polyline.h"
// Geodesic (surface) distance in meters between two longitude/latitude
// points on the given ellipsoid, using Karney's GeographicLib algorithm.
double geodesic_distance(const lon_lat_point_t &p1,
                         const lon_lat_point_t &p2,
                         const ellipsoid_spec_t &e) {
    // Use Karney's algorithm
    struct geod_geodesic g;
    geod_init(&g, e.equator_radius(), e.flattening());
    double dist;
    // Note: geod_inverse takes latitude before longitude.
    geod_inverse(&g, p1.latitude, p1.longitude, p2.latitude, p2.longitude, &dist, NULL, NULL);
    return dist;
}
// Geodesic distance in meters from the point p to the GeoJSON geometry g
// (0.0 when p lies on a line or on/inside a polygon). Dispatches on the
// geometry type through a visitor.
double geodesic_distance(const geo::S2Point &p,
                         const ql::datum_t &g,
                         const ellipsoid_spec_t &e) {
    // Visitor measuring the distance from a fixed reference point to each
    // supported geometry kind.
    class distance_estimator_t : public s2_geo_visitor_t<double> {
    public:
        distance_estimator_t(
                lon_lat_point_t r, const geo::S2Point &r_s2, const ellipsoid_spec_t &_e)
            : ref_(r), ref_s2_(r_s2), e_(_e) { }
        // Point-to-point: exact ellipsoidal distance.
        double on_point(const geo::S2Point &point) {
            lon_lat_point_t llpoint =
                lon_lat_point_t(geo::S2LatLng::Longitude(point).degrees(),
                                geo::S2LatLng::Latitude(point).degrees());
            return geodesic_distance(ref_, llpoint, e_);
        }
        // Point-to-line: project onto the line (spherically), then measure
        // the ellipsoidal distance to the projection.
        double on_line(const geo::S2Polyline &line) {
            // This sometimes over-estimates large distances, because the
            // projection assumes spherical rather than ellipsoid geometry.
            int next_vertex;
            geo::S2Point prj = line.Project(ref_s2_, &next_vertex);
            if (prj == ref_s2_) {
                // ref_ is on the line
                return 0.0;
            } else {
                lon_lat_point_t llprj =
                    lon_lat_point_t(geo::S2LatLng::Longitude(prj).degrees(),
                                    geo::S2LatLng::Latitude(prj).degrees());
                return geodesic_distance(ref_, llprj, e_);
            }
        }
        // Point-to-polygon: same projection approach; 0 when inside/on it.
        double on_polygon(const geo::S2Polygon &polygon) {
            // This sometimes over-estimates large distances, because the
            // projection assumes spherical rather than ellipsoid geometry.
            geo::S2Point prj = polygon.Project(ref_s2_);
            if (prj == ref_s2_) {
                // ref_ is inside/on the polygon
                return 0.0;
            } else {
                lon_lat_point_t llprj =
                    lon_lat_point_t(geo::S2LatLng::Longitude(prj).degrees(),
                                    geo::S2LatLng::Latitude(prj).degrees());
                return geodesic_distance(ref_, llprj, e_);
            }
        }
        double on_latlngrect(const geo::S2LatLngRect &) {
            throw geo_exception_t("Distance calculation not implemented on LatLngRect.");
        }
        lon_lat_point_t ref_;          // reference point in lon/lat degrees
        const geo::S2Point &ref_s2_;   // same point as an S2 unit vector
        const ellipsoid_spec_t &e_;    // ellipsoid used for distances
    };
    distance_estimator_t estimator(
        lon_lat_point_t(geo::S2LatLng::Longitude(p).degrees(),
                        geo::S2LatLng::Latitude(p).degrees()),
        p, e);
    return visit_geojson(&estimator, g);
}
// Solves the direct geodesic problem: the point reached by traveling `dist`
// meters from `p` along the initial bearing `azimuth` (degrees, per the
// GeographicLib C API) on the ellipsoid e.
lon_lat_point_t geodesic_point_at_dist(const lon_lat_point_t &p,
                                       double dist,
                                       double azimuth,
                                       const ellipsoid_spec_t &e) {
    // Use Karney's algorithm
    struct geod_geodesic g;
    geod_init(&g, e.equator_radius(), e.flattening());
    double lat, lon;
    geod_direct(&g, p.latitude, p.longitude, azimuth, dist, &lat, &lon, NULL);
    return lon_lat_point_t(lon, lat);
}
// Maps a unit abbreviation ("m", "km", "mi", "nm", "ft") to its enum value.
// Throws geo_exception_t for any other input.
dist_unit_t parse_dist_unit(const std::string &s) {
    struct unit_name_t {
        const char *name;
        dist_unit_t unit;
    };
    static const unit_name_t units[] = {
        { "m", dist_unit_t::M },
        { "km", dist_unit_t::KM },
        { "mi", dist_unit_t::MI },
        { "nm", dist_unit_t::NM },
        { "ft", dist_unit_t::FT },
    };
    for (size_t i = 0; i < sizeof(units) / sizeof(units[0]); ++i) {
        if (s == units[i].name) {
            return units[i].unit;
        }
    }
    throw geo_exception_t("Unrecognized distance unit `" + s + "`.");
}
// Number of meters in one unit of `u`.
double unit_to_meters(dist_unit_t u) {
    if (u == dist_unit_t::M) {
        return 1.0;
    } else if (u == dist_unit_t::KM) {
        return 1000.0;
    } else if (u == dist_unit_t::MI) {
        return 1609.344;
    } else if (u == dist_unit_t::NM) {
        return 1852.0;
    } else if (u == dist_unit_t::FT) {
        return 0.3048;
    } else {
        unreachable();
    }
}
// Converts the distance d from unit `from` to unit `to`, scaling through
// meters as the common base unit.
double convert_dist_unit(double d, dist_unit_t from, dist_unit_t to) {
    const double factor = unit_to_meters(from) / unit_to_meters(to);
    return d * factor;
}
| 2,643 |
4,857 | <gh_stars>1000+
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.stream.Collectors;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
/**
* This class provides API to access backup system table<br>
* Backup system table schema:<br>
* <p>
* <ul>
* <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
* <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
* <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]</li>
* <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
* timestamp]</li>
* <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp</li>
* <li>6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file
* name</li>
* </ul>
* </p>
*/
@InterfaceAudience.Private
public final class BackupSystemTable implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
/**
 * Value object describing one WAL file recorded for a backup session under
 * a given backup root directory.
 */
static class WALItem {
    String backupId;   // id of the backup session the WAL belongs to
    String walFile;    // WAL file name
    String backupRoot; // backup destination root directory
    WALItem(String backupId, String walFile, String backupRoot) {
        this.backupId = backupId;
        this.walFile = walFile;
        this.backupRoot = backupRoot;
    }
    public String getBackupId() {
        return backupId;
    }
    public String getWalFile() {
        return walFile;
    }
    public String getBackupRoot() {
        return backupRoot;
    }
    /** Renders the item as a path-like string: /backupRoot/backupId/walFile. */
    @Override
    public String toString() {
        return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
    }
}
/**
* Backup system table (main) name
*/
private TableName tableName;
/**
* Backup System table name for bulk loaded files. We keep all bulk loaded file references in a
* separate table because we have to isolate general backup operations: create, merge etc from
* activity of RegionObserver, which controls process of a bulk loading
* {@link org.apache.hadoop.hbase.backup.BackupObserver}
*/
private TableName bulkLoadTableName;
/**
* Stores backup sessions (contexts)
*/
final static byte[] SESSIONS_FAMILY = Bytes.toBytes("session");
/**
* Stores other meta
*/
final static byte[] META_FAMILY = Bytes.toBytes("meta");
final static byte[] BULK_LOAD_FAMILY = Bytes.toBytes("bulk");
/**
* Connection to HBase cluster, shared among all instances
*/
private final Connection connection;
private final static String BACKUP_INFO_PREFIX = "session:";
private final static String START_CODE_ROW = "startcode:";
private final static byte[] ACTIVE_SESSION_ROW = Bytes.toBytes("activesession:");
private final static byte[] ACTIVE_SESSION_COL = Bytes.toBytes("c");
private final static byte[] ACTIVE_SESSION_YES = Bytes.toBytes("yes");
private final static byte[] ACTIVE_SESSION_NO = Bytes.toBytes("no");
private final static String INCR_BACKUP_SET = "incrbackupset:";
private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:";
private final static String RS_LOG_TS_PREFIX = "rslogts:";
private final static String BULK_LOAD_PREFIX = "bulk:";
private final static byte[] BULK_LOAD_PREFIX_BYTES = Bytes.toBytes(BULK_LOAD_PREFIX);
private final static byte[] DELETE_OP_ROW = Bytes.toBytes("delete_op_row");
private final static byte[] MERGE_OP_ROW = Bytes.toBytes("merge_op_row");
final static byte[] TBL_COL = Bytes.toBytes("tbl");
final static byte[] FAM_COL = Bytes.toBytes("fam");
final static byte[] PATH_COL = Bytes.toBytes("path");
final static byte[] STATE_COL = Bytes.toBytes("state");
// the two states a bulk loaded file can be
final static byte[] BL_PREPARE = Bytes.toBytes("R");
final static byte[] BL_COMMIT = Bytes.toBytes("D");
private final static String SET_KEY_PREFIX = "backupset:";
// separator between BULK_LOAD_PREFIX and ordinals
private final static String BLK_LD_DELIM = ":";
private final static byte[] EMPTY_VALUE = new byte[] {};
// Safe delimiter in a string
private final static String NULL = "\u0000";
/**
 * Creates a facade over the backup system tables, resolving the table names
 * from configuration and creating the tables/namespace on first use.
 *
 * @param conn shared cluster connection; NOT owned (and never closed) by
 *             this instance
 * @throws IOException if the system tables cannot be created or probed
 */
public BackupSystemTable(Connection conn) throws IOException {
    this.connection = conn;
    Configuration conf = this.connection.getConfiguration();
    tableName = BackupSystemTable.getTableName(conf);
    bulkLoadTableName = BackupSystemTable.getTableNameForBulkLoadedData(conf);
    checkSystemTable();
}
/**
 * Ensures the backup namespace and both system tables (sessions/meta and
 * bulk-load tracking) exist and are available before any operation runs.
 */
private void checkSystemTable() throws IOException {
    try (Admin admin = connection.getAdmin()) {
        verifyNamespaceExists(admin);
        Configuration conf = connection.getConfiguration();
        if (!admin.tableExists(tableName)) {
            TableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(conf);
            admin.createTable(backupHTD);
        }
        if (!admin.tableExists(bulkLoadTableName)) {
            TableDescriptor blHTD = BackupSystemTable.getSystemTableForBulkLoadedDataDescriptor(conf);
            admin.createTable(blHTD);
        }
        // createTable() returning does not guarantee availability; wait for both.
        waitForSystemTable(admin, tableName);
        waitForSystemTable(admin, bulkLoadTableName);
    }
}
/**
 * Creates the backup tables' namespace when it does not exist yet.
 *
 * @param admin admin used for the namespace operations
 * @throws IOException if listing or creating the namespace fails
 */
private void verifyNamespaceExists(Admin admin) throws IOException {
    String namespaceName = tableName.getNamespaceAsString();
    NamespaceDescriptor ns = NamespaceDescriptor.create(namespaceName).build();
    // Probe the existing namespaces rather than relying on create failing.
    boolean exists = Arrays.stream(admin.listNamespaceDescriptors())
        .anyMatch(nsd -> nsd.getName().equals(ns.getName()));
    if (!exists) {
        admin.createNamespace(ns);
    }
}
/**
 * Blocks until the given system table exists and is available, polling every
 * 100 ms, or fails after a 60 s timeout.
 *
 * @param admin     admin used to probe the table
 * @param tableName table to wait for
 * @throws IOException if the table does not become available in time, or if
 *                     the waiting thread is interrupted
 */
private void waitForSystemTable(Admin admin, TableName tableName) throws IOException {
    // Return fast if the table is available and avoid a log message
    if (admin.tableExists(tableName) && admin.isTableAvailable(tableName)) {
        return;
    }
    long TIMEOUT = 60000;
    long startTime = EnvironmentEdgeManager.currentTime();
    LOG.debug("Backup table {} is not present and available, waiting for it to become so",
        tableName);
    while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Previously the interrupt was silently swallowed and the loop kept
            // busy-waiting; restore the interrupt status and abort instead.
            Thread.currentThread().interrupt();
            java.io.InterruptedIOException iioe = new java.io.InterruptedIOException(
                "Interrupted while waiting for backup system table " + tableName);
            iioe.initCause(e);
            throw iioe;
        }
        if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) {
            throw new IOException(
                "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms");
        }
    }
    LOG.debug("Backup table {} exists and available", tableName);
}
/**
 * No-op: the cluster connection is shared and owned by the caller, so there
 * is nothing for this instance to release.
 */
@Override
public void close() {
    // do nothing
}
/**
 * Updates the status (state) of a backup session in the backup system table
 * by overwriting its serialized {@link BackupInfo} row.
 * @param info backup info
 * @throws IOException exception
 */
public void updateBackupInfo(BackupInfo info) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("update backup status in backup system table for: " + info.getBackupId()
            + " set status=" + info.getState());
    }
    try (Table table = connection.getTable(tableName)) {
        Put put = createPutForBackupInfo(info);
        table.put(put);
    }
}
/*
 * Reads the bulk-load records written for one backup session.
 * @param backupId the backup Id
 * @return Map of row key to path of bulk loaded hfile
 */
Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
    try (Table table = connection.getTable(bulkLoadTableName);
        ResultScanner scanner = table.getScanner(scan)) {
        Result res = null;
        // Keyed by raw row bytes, hence the explicit byte-array comparator.
        Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        while ((res = scanner.next()) != null) {
            res.advance();
            byte[] row = CellUtil.cloneRow(res.listCells().get(0));
            for (Cell cell : res.listCells()) {
                // Only the "path" column carries the hfile location.
                if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
                    BackupSystemTable.PATH_COL.length) == 0) {
                    map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
                }
            }
        }
        return map;
    }
}
/*
 * Used during restore: regroups a session's bulk-load records by source table.
 * @param backupId the backup Id
 * @param sTableList List of tables; indexes of the returned array line up with it
 * @return array of Map of family to List of Paths
 */
public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
    throws IOException {
    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
    // One slot per requested table (single slot when no list was given).
    Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
    try (Table table = connection.getTable(bulkLoadTableName);
        ResultScanner scanner = table.getScanner(scan)) {
        Result res = null;
        while ((res = scanner.next()) != null) {
            res.advance();
            TableName tbl = null;
            byte[] fam = null;
            String path = null;
            // Each row stores the table, family and hfile path in separate columns.
            for (Cell cell : res.listCells()) {
                if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
                    BackupSystemTable.TBL_COL.length) == 0) {
                    tbl = TableName.valueOf(CellUtil.cloneValue(cell));
                } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
                    BackupSystemTable.FAM_COL.length) == 0) {
                    fam = CellUtil.cloneValue(cell);
                } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
                    BackupSystemTable.PATH_COL.length) == 0) {
                    path = Bytes.toString(CellUtil.cloneValue(cell));
                }
            }
            int srcIdx = IncrementalTableBackupClient.getIndex(tbl, sTableList);
            if (srcIdx == -1) {
                // the table is not among the query
                continue;
            }
            if (mapForSrc[srcIdx] == null) {
                mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
            }
            List<Path> files;
            if (!mapForSrc[srcIdx].containsKey(fam)) {
                files = new ArrayList<Path>();
                mapForSrc[srcIdx].put(fam, files);
            } else {
                files = mapForSrc[srcIdx].get(fam);
            }
            files.add(new Path(path));
            if (LOG.isDebugEnabled()) {
                LOG.debug("found bulk loaded file : " + tbl + " " + Bytes.toString(fam) + " " + path);
            }
        }
        return mapForSrc;
    }
}
/**
 * Removes the backup-session row for the given backup id from the backup
 * system table.
 *
 * @param backupId backup id
 * @throws IOException exception
 */
public void deleteBackupInfo(String backupId) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("delete backup status in backup system table for " + backupId);
    }
    try (Table table = connection.getTable(tableName)) {
        table.delete(createDeleteForBackupInfo(backupId));
    }
}
/*
 * For postBulkLoadHFile() hook: records committed bulk-loaded hfiles.
 * @param tabName table name
 * @param region the region receiving hfile
 * @param finalPaths family and associated hfiles
 */
public void writePathsPostBulkLoad(TableName tabName, byte[] region,
    Map<byte[], List<Path>> finalPaths) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size()
            + " entries");
    }
    try (Table table = connection.getTable(bulkLoadTableName)) {
        // One row per (family, hfile) pair, marked as committed.
        List<Put> puts = BackupSystemTable.createPutForCommittedBulkload(tabName, region, finalPaths);
        table.put(puts);
        LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
    }
}
/*
 * For preCommitStoreFile() hook: records hfiles about to be bulk loaded.
 * @param tabName table name
 * @param region the region receiving hfile
 * @param family column family
 * @param pairs list of paths for hfiles
 */
public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
    final List<Pair<Path, Path>> pairs) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug(
            "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
    }
    try (Table table = connection.getTable(bulkLoadTableName)) {
        // One row per hfile, marked as prepared (not yet committed).
        List<Put> puts =
            BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
        table.put(puts);
        LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
    }
}
/**
 * Removes the given bulk-load tracking rows from the bulk-load system table.
 *
 * @param rows row keys to delete
 * @throws IOException if the batched delete fails
 */
public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
    try (Table table = connection.getTable(bulkLoadTableName)) {
        List<Delete> deletes = new ArrayList<>(rows.size());
        for (byte[] rowKey : rows) {
            LOG.debug("orig deleting the row: " + Bytes.toString(rowKey));
            deletes.add(new Delete(rowKey));
        }
        table.delete(deletes);
        LOG.debug("deleted " + rows.size() + " original bulkload rows");
    }
}
/*
 * Reads the rows from backup table recording bulk loaded hfiles.
 * @param tableList list of table names
 * @return Pair of (nested map keyed by table -> region -> column family with
 * (path, prepared?) pairs, where "prepared" means the hfile was recorded by
 * the preCommitStoreFile hook) and the list of raw row keys read.
 */
public Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>>
    readBulkloadRows(List<TableName> tableList) throws IOException {
    Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = new HashMap<>();
    List<byte[]> rows = new ArrayList<>();
    for (TableName tTable : tableList) {
        Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable);
        Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable);
        try (Table table = connection.getTable(bulkLoadTableName);
            ResultScanner scanner = table.getScanner(scan)) {
            Result res = null;
            while ((res = scanner.next()) != null) {
                res.advance();
                String fam = null;
                String path = null;
                boolean raw = false;
                byte[] row;
                String region = null;
                // Each cell of the row contributes one attribute; the row key
                // itself encodes the region name.
                for (Cell cell : res.listCells()) {
                    row = CellUtil.cloneRow(cell);
                    rows.add(row);
                    String rowStr = Bytes.toString(row);
                    region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
                    if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
                        BackupSystemTable.FAM_COL.length) == 0) {
                        fam = Bytes.toString(CellUtil.cloneValue(cell));
                    } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
                        BackupSystemTable.PATH_COL.length) == 0) {
                        path = Bytes.toString(CellUtil.cloneValue(cell));
                    } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
                        BackupSystemTable.STATE_COL.length) == 0) {
                        byte[] state = CellUtil.cloneValue(cell);
                        // BL_PREPARE => recorded by preCommitStoreFile (raw=true).
                        if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
                            raw = true;
                        } else {
                            raw = false;
                        }
                    }
                }
                // Lazily create the nested table -> region -> family maps.
                if (map.get(tTable) == null) {
                    map.put(tTable, new HashMap<>());
                    tblMap = map.get(tTable);
                }
                if (tblMap.get(region) == null) {
                    tblMap.put(region, new HashMap<>());
                }
                Map<String, List<Pair<String, Boolean>>> famMap = tblMap.get(region);
                if (famMap.get(fam) == null) {
                    famMap.put(fam, new ArrayList<>());
                }
                famMap.get(fam).add(new Pair<>(path, raw));
                LOG.debug("found orig " + path + " for " + fam + " of table " + region);
            }
        }
    }
    return new Pair<>(map, rows);
}
/*
 * Records the bulk-loaded files of a backup session: one row per
 * (table, family, path), sharing a single timestamp and a running counter.
 * @param sTableList List of tables; indexes line up with `maps`
 * @param maps array of Map of family to List of Paths
 * @param backupId the backup Id
 */
public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
    String backupId) throws IOException {
    try (Table table = connection.getTable(bulkLoadTableName)) {
        long ts = EnvironmentEdgeManager.currentTime();
        int cnt = 0;
        List<Put> puts = new ArrayList<>();
        for (int idx = 0; idx < maps.length; idx++) {
            Map<byte[], List<Path>> map = maps[idx];
            TableName tn = sTableList.get(idx);
            if (map == null) {
                // No bulk-loaded files for this table.
                continue;
            }
            for (Map.Entry<byte[], List<Path>> entry : map.entrySet()) {
                byte[] fam = entry.getKey();
                List<Path> paths = entry.getValue();
                for (Path p : paths) {
                    // The counter keeps row keys unique within the shared timestamp.
                    Put put = BackupSystemTable.createPutForBulkLoadedFile(tn, fam, p.toString(), backupId,
                        ts, cnt++);
                    puts.add(put);
                }
            }
        }
        if (!puts.isEmpty()) {
            table.put(puts);
        }
    }
}
/**
 * Reads the backup status object (instance of backup info) for the given
 * session from the backup system table.
 *
 * @param backupId backup id
 * @return Current status of backup session or null when no such session exists
 */
public BackupInfo readBackupInfo(String backupId) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("read backup status from backup system table for: " + backupId);
    }
    try (Table table = connection.getTable(tableName)) {
        Result res = table.get(createGetForBackupInfo(backupId));
        return res.isEmpty() ? null : resultToBackupInfo(res);
    }
}
/**
 * Read the last backup start code (timestamp) of last successful backup. Will return null if
 * there is no start code stored on hbase or the value is of length 0. These two cases indicate
 * there is no successful backup completed so far.
 * @param backupRoot directory path to backup destination
 * @return the timestamp of last successful backup
 * @throws IOException exception
 */
public String readBackupStartCode(String backupRoot) throws IOException {
    LOG.trace("read backup start code from backup system table");
    try (Table table = connection.getTable(tableName)) {
        Get get = createGetForStartCode(backupRoot);
        Result res = table.get(get);
        if (res.isEmpty()) {
            return null;
        }
        Cell cell = res.listCells().get(0);
        byte[] val = CellUtil.cloneValue(cell);
        if (val.length == 0) {
            return null;
        }
        // Decode with HBase's UTF-8 helper; new String(byte[]) would use the
        // platform default charset and is inconsistent with how the value is
        // encoded elsewhere via Bytes.toBytes().
        return Bytes.toString(val);
    }
}
/**
* Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte.
* @param startCode start code
* @param backupRoot root directory path to backup
* @throws IOException exception
*/
public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("write backup start code to backup system table " + startCode);
}
try (Table table = connection.getTable(tableName)) {
Put put = createPutForStartCode(startCode.toString(), backupRoot);
table.put(put);
}
}
  /**
   * Atomically claims the cluster-wide backup exclusive-operation lock. Exclusive operations are:
   * create, delete, merge. The lock is a single marker row (ACTIVE_SESSION_ROW) whose value
   * toggles between ACTIVE_SESSION_YES and ACTIVE_SESSION_NO; checkAndMutate makes the claim
   * atomic across concurrent clients.
   * @throws IOException if a table operation fails or an active backup exclusive operation is
   *                     already underway
   */
  public void startBackupExclusiveOperation() throws IOException {
    LOG.debug("Start new backup exclusive operation");
    try (Table table = connection.getTable(tableName)) {
      Put put = createPutForStartBackupSession();
      // First try to put if row does not exist
      if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
        .ifNotExists().thenPut(put)) {
        // Row exists, try to put if value == ACTIVE_SESSION_NO
        if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
          .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
          // Neither branch won the race: another client holds the lock.
          throw new ExclusiveOperationException();
        }
      }
    }
  }
private Put createPutForStartBackupSession() {
Put put = new Put(ACTIVE_SESSION_ROW);
put.addColumn(SESSIONS_FAMILY, ACTIVE_SESSION_COL, ACTIVE_SESSION_YES);
return put;
}
  /**
   * Releases the backup exclusive-operation lock by flipping the marker row back to
   * ACTIVE_SESSION_NO. The checkAndMutate guard ensures we only release a lock that is actually
   * held (current value == ACTIVE_SESSION_YES).
   * @throws IOException if no exclusive operation is active, or the table operation fails
   */
  public void finishBackupExclusiveOperation() throws IOException {
    LOG.debug("Finish backup exclusive operation");
    try (Table table = connection.getTable(tableName)) {
      Put put = createPutForStopBackupSession();
      if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
        .ifEquals(ACTIVE_SESSION_YES).thenPut(put)) {
        throw new IOException("There is no active backup exclusive operation");
      }
    }
  }
private Put createPutForStopBackupSession() {
Put put = new Put(ACTIVE_SESSION_ROW);
put.addColumn(SESSIONS_FAMILY, ACTIVE_SESSION_COL, ACTIVE_SESSION_NO);
return put;
}
/**
* Get the Region Servers log information after the last log roll from backup system table.
* @param backupRoot root directory path to backup
* @return RS log info
* @throws IOException exception
*/
public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
throws IOException {
LOG.trace("read region server last roll log result to backup system table");
Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot);
try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) {
Result res;
HashMap<String, Long> rsTimestampMap = new HashMap<>();
while ((res = scanner.next()) != null) {
res.advance();
Cell cell = res.current();
byte[] row = CellUtil.cloneRow(cell);
String server = getServerNameForReadRegionServerLastLogRollResult(row);
byte[] data = CellUtil.cloneValue(cell);
rsTimestampMap.put(server, Bytes.toLong(data));
}
return rsTimestampMap;
}
}
/**
* Writes Region Server last roll log result (timestamp) to backup system table table
* @param server Region Server name
* @param ts last log timestamp
* @param backupRoot root directory path to backup
* @throws IOException exception
*/
public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
throws IOException {
LOG.trace("write region server last roll log result to backup system table");
try (Table table = connection.getTable(tableName)) {
Put put = createPutForRegionServerLastLogRollResult(server, ts, backupRoot);
table.put(put);
}
}
/**
* Get all completed backup information (in desc order by time)
* @param onlyCompleted true, if only successfully completed sessions
* @return history info of BackupCompleteData
* @throws IOException exception
*/
public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException {
LOG.trace("get backup history from backup system table");
BackupState state = onlyCompleted ? BackupState.COMPLETE : BackupState.ANY;
ArrayList<BackupInfo> list = getBackupInfos(state);
return BackupUtils.sortHistoryListDesc(list);
}
/**
* Get all backups history
* @return list of backup info
* @throws IOException if getting the backup history fails
*/
public List<BackupInfo> getBackupHistory() throws IOException {
return getBackupHistory(false);
}
/**
* Get first n backup history records
* @param n number of records, if n== -1 - max number is ignored
* @return list of records
* @throws IOException if getting the backup history fails
*/
public List<BackupInfo> getHistory(int n) throws IOException {
List<BackupInfo> history = getBackupHistory();
if (n == -1 || history.size() <= n) {
return history;
}
return Collections.unmodifiableList(history.subList(0, n));
}
/**
* Get backup history records filtered by list of filters.
* @param n max number of records, if n == -1 , then max number is ignored
* @param filters list of filters
* @return backup records
* @throws IOException if getting the backup history fails
*/
public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter... filters) throws IOException {
if (filters.length == 0) {
return getHistory(n);
}
List<BackupInfo> history = getBackupHistory();
List<BackupInfo> result = new ArrayList<>();
for (BackupInfo bi : history) {
if (n >= 0 && result.size() == n) {
break;
}
boolean passed = true;
for (int i = 0; i < filters.length; i++) {
if (!filters[i].apply(bi)) {
passed = false;
break;
}
}
if (passed) {
result.add(bi);
}
}
return result;
}
/*
* Retrieve TableName's for completed backup of given type
* @param type backup type
* @return List of table names
*/
public List<TableName> getTablesForBackupType(BackupType type) throws IOException {
Set<TableName> names = new HashSet<>();
List<BackupInfo> infos = getBackupHistory(true);
for (BackupInfo info : infos) {
if (info.getType() == type) {
names.addAll(info.getTableNames());
}
}
return new ArrayList<>(names);
}
/**
* Get history for backup destination
* @param backupRoot backup destination path
* @return List of backup info
* @throws IOException if getting the backup history fails
*/
public List<BackupInfo> getBackupHistory(String backupRoot) throws IOException {
ArrayList<BackupInfo> history = getBackupHistory(false);
for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
BackupInfo info = iterator.next();
if (!backupRoot.equals(info.getBackupRootDir())) {
iterator.remove();
}
}
return history;
}
/**
* Get history for a table
* @param name table name
* @return history for a table
* @throws IOException if getting the backup history fails
*/
public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException {
List<BackupInfo> history = getBackupHistory();
List<BackupInfo> tableHistory = new ArrayList<>();
for (BackupInfo info : history) {
List<TableName> tables = info.getTableNames();
if (tables.contains(name)) {
tableHistory.add(info);
}
}
return tableHistory;
}
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
String backupRoot) throws IOException {
List<BackupInfo> history = getBackupHistory(backupRoot);
Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
BackupInfo info = iterator.next();
if (!backupRoot.equals(info.getBackupRootDir())) {
continue;
}
List<TableName> tables = info.getTableNames();
for (TableName tableName : tables) {
if (set.contains(tableName)) {
ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
if (list == null) {
list = new ArrayList<>();
tableHistoryMap.put(tableName, list);
}
list.add(info);
}
}
}
return tableHistoryMap;
}
/**
* Get all backup sessions with a given state (in descending order by time)
* @param state backup session state
* @return history info of backup info objects
* @throws IOException exception
*/
public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOException {
LOG.trace("get backup infos from backup system table");
Scan scan = createScanForBackupHistory();
ArrayList<BackupInfo> list = new ArrayList<>();
try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) {
Result res;
while ((res = scanner.next()) != null) {
res.advance();
BackupInfo context = cellToBackupInfo(res.current());
if (state != BackupState.ANY && context.getState() != state) {
continue;
}
list.add(context);
}
return list;
}
}
/**
* Write the current timestamps for each regionserver to backup system table after a successful
* full or incremental backup. The saved timestamp is of the last log file that was backed up
* already.
* @param tables tables
* @param newTimestamps timestamps
* @param backupRoot root directory path to backup
* @throws IOException exception
*/
public void writeRegionServerLogTimestamp(Set<TableName> tables,
Map<String, Long> newTimestamps, String backupRoot) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("write RS log time stamps to backup system table for tables ["
+ StringUtils.join(tables, ",") + "]");
}
List<Put> puts = new ArrayList<>();
for (TableName table : tables) {
byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
Put put = createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
puts.add(put);
}
try (Table table = connection.getTable(tableName)) {
table.put(puts);
}
}
/**
* Read the timestamp for each region server log after the last successful backup. Each table has
* its own set of the timestamps. The info is stored for each table as a concatenated string of
* rs->timestapmp
* @param backupRoot root directory path to backup
* @return the timestamp for each region server. key: tableName value:
* RegionServer,PreviousTimeStamp
* @throws IOException exception
*/
public Map<TableName, Map<String, Long>> readLogTimestampMap(String backupRoot)
throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
}
Map<TableName, Map<String, Long>> tableTimestampMap = new HashMap<>();
Scan scan = createScanForReadLogTimestampMap(backupRoot);
try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) {
Result res;
while ((res = scanner.next()) != null) {
res.advance();
Cell cell = res.current();
byte[] row = CellUtil.cloneRow(cell);
String tabName = getTableNameForReadLogTimestampMap(row);
TableName tn = TableName.valueOf(tabName);
byte[] data = CellUtil.cloneValue(cell);
if (data == null) {
throw new IOException("Data of last backup data from backup system table "
+ "is empty. Create a backup first.");
}
if (data != null && data.length > 0) {
HashMap<String, Long> lastBackup =
fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
tableTimestampMap.put(tn, lastBackup);
}
}
return tableTimestampMap;
}
}
private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
Map<String, Long> map) {
BackupProtos.TableServerTimestamp.Builder tstBuilder =
BackupProtos.TableServerTimestamp.newBuilder();
tstBuilder
.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
for (Entry<String, Long> entry : map.entrySet()) {
BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder();
ServerName sn = ServerName.parseServerName(entry.getKey());
snBuilder.setHostName(sn.getHostname());
snBuilder.setPort(sn.getPort());
builder.setServerName(snBuilder.build());
builder.setTimestamp(entry.getValue());
tstBuilder.addServerTimestamp(builder.build());
}
return tstBuilder.build();
}
private HashMap<String, Long>
fromTableServerTimestampProto(BackupProtos.TableServerTimestamp proto) {
HashMap<String, Long> map = new HashMap<>();
List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
for (BackupProtos.ServerTimestamp st : list) {
ServerName sn =
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName());
map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp());
}
return map;
}
/**
* Return the current tables covered by incremental backup.
* @param backupRoot root directory path to backup
* @return set of tableNames
* @throws IOException exception
*/
public Set<TableName> getIncrementalBackupTableSet(String backupRoot) throws IOException {
LOG.trace("get incremental backup table set from backup system table");
TreeSet<TableName> set = new TreeSet<>();
try (Table table = connection.getTable(tableName)) {
Get get = createGetForIncrBackupTableSet(backupRoot);
Result res = table.get(get);
if (res.isEmpty()) {
return set;
}
List<Cell> cells = res.listCells();
for (Cell cell : cells) {
// qualifier = table name - we use table names as qualifiers
set.add(TableName.valueOf(CellUtil.cloneQualifier(cell)));
}
return set;
}
}
/**
* Add tables to global incremental backup set
* @param tables set of tables
* @param backupRoot root directory path to backup
* @throws IOException exception
*/
public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot
+ " tables [" + StringUtils.join(tables, " ") + "]");
}
if (LOG.isDebugEnabled()) {
tables.forEach(table -> LOG.debug(Objects.toString(table)));
}
try (Table table = connection.getTable(tableName)) {
Put put = createPutForIncrBackupTableSet(tables, backupRoot);
table.put(put);
}
}
/**
* Deletes incremental backup set for a backup destination
* @param backupRoot backup root
*/
public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Delete incremental backup table set to backup system table. ROOT=" + backupRoot);
}
try (Table table = connection.getTable(tableName)) {
Delete delete = createDeleteForIncrBackupTableSet(backupRoot);
table.delete(delete);
}
}
/**
* Checks if we have at least one backup session in backup system table This API is used by
* BackupLogCleaner
* @return true, if - at least one session exists in backup system table table
* @throws IOException exception
*/
public boolean hasBackupSessions() throws IOException {
LOG.trace("Has backup sessions from backup system table");
boolean result = false;
Scan scan = createScanForBackupHistory();
scan.setCaching(1);
try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) {
if (scanner.next() != null) {
result = true;
}
return result;
}
}
  /*
   * BACKUP SETS: operations on named, persisted sets of tables used as backup targets.
   */
/**
* Get backup set list
* @return backup set list
* @throws IOException if a table or scanner operation fails
*/
public List<String> listBackupSets() throws IOException {
LOG.trace("Backup set list");
List<String> list = new ArrayList<>();
try (Table table = connection.getTable(tableName)) {
Scan scan = createScanForBackupSetList();
scan.readVersions(1);
try (ResultScanner scanner = table.getScanner(scan)) {
Result res;
while ((res = scanner.next()) != null) {
res.advance();
list.add(cellKeyToBackupSetName(res.current()));
}
return list;
}
}
}
/**
* Get backup set description (list of tables)
* @param name set's name
* @return list of tables in a backup set
* @throws IOException if a table operation fails
*/
public List<TableName> describeBackupSet(String name) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace(" Backup set describe: " + name);
}
try (Table table = connection.getTable(tableName)) {
Get get = createGetForBackupSet(name);
Result res = table.get(get);
if (res.isEmpty()) {
return null;
}
res.advance();
String[] tables = cellValueToBackupSet(res.current());
return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item))
.collect(Collectors.toList());
}
}
/**
* Add backup set (list of tables)
* @param name set name
* @param newTables list of tables, comma-separated
* @throws IOException if a table operation fails
*/
public void addToBackupSet(String name, String[] newTables) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]");
}
String[] union = null;
try (Table table = connection.getTable(tableName)) {
Get get = createGetForBackupSet(name);
Result res = table.get(get);
if (res.isEmpty()) {
union = newTables;
} else {
res.advance();
String[] tables = cellValueToBackupSet(res.current());
union = merge(tables, newTables);
}
Put put = createPutForBackupSet(name, union);
table.put(put);
}
}
  /**
   * Remove tables from backup set (list of tables)
   * @param name set name
   * @param toRemove list of tables
   * @throws IOException if a table operation or deleting the backup set fails
   */
  public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
    if (LOG.isTraceEnabled()) {
      LOG.trace(
        " Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ") + "]");
    }
    String[] disjoint;
    String[] tables;
    try (Table table = connection.getTable(tableName)) {
      Get get = createGetForBackupSet(name);
      Result res = table.get(get);
      if (res.isEmpty()) {
        LOG.warn("Backup set '" + name + "' not found.");
        return;
      } else {
        res.advance();
        tables = cellValueToBackupSet(res.current());
        disjoint = disjoin(tables, toRemove);
      }
      // Three outcomes based on how many tables survive the removal:
      //  1. some-but-not-all removed -> rewrite the set with the remainder,
      //  2. nothing removed (disjoint == tables) -> warn, set untouched,
      //  3. everything removed -> the set is empty, delete it entirely.
      if (disjoint.length > 0 && disjoint.length != tables.length) {
        Put put = createPutForBackupSet(name, disjoint);
        table.put(put);
      } else if (disjoint.length == tables.length) {
        LOG.warn("Backup set '" + name + "' does not contain tables ["
          + StringUtils.join(toRemove, " ") + "]");
      } else { // disjoint.length == 0 and tables.length >0
        // Delete backup set
        LOG.info("Backup set '" + name + "' is empty. Deleting.");
        deleteBackupSet(name);
      }
    }
  }
private String[] merge(String[] existingTables, String[] newTables) {
Set<String> tables = new HashSet<>(Arrays.asList(existingTables));
tables.addAll(Arrays.asList(newTables));
return tables.toArray(new String[0]);
}
private String[] disjoin(String[] existingTables, String[] toRemove) {
Set<String> tables = new HashSet<>(Arrays.asList(existingTables));
Arrays.asList(toRemove).forEach(table -> tables.remove(table));
return tables.toArray(new String[0]);
}
/**
* Delete backup set
* @param name set's name
* @throws IOException if getting or deleting the table fails
*/
public void deleteBackupSet(String name) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace(" Backup set delete: " + name);
}
try (Table table = connection.getTable(tableName)) {
Delete del = createDeleteForBackupSet(name);
table.delete(del);
}
}
/**
* Get backup system table descriptor
* @return table's descriptor
*/
public static TableDescriptor getSystemTableDescriptor(Configuration conf) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));
ColumnFamilyDescriptorBuilder colBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
colBuilder.setMaxVersions(1);
Configuration config = HBaseConfiguration.create();
int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
colBuilder.setTimeToLive(ttl);
ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
builder.setColumnFamily(colSessionsDesc);
colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
colBuilder.setTimeToLive(ttl);
builder.setColumnFamily(colBuilder.build());
return builder.build();
}
public static TableName getTableName(Configuration conf) {
String name = conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT);
return TableName.valueOf(name);
}
public static String getTableNameAsString(Configuration conf) {
return getTableName(conf).getNameAsString();
}
public static String getSnapshotName(Configuration conf) {
return "snapshot_" + getTableNameAsString(conf).replace(":", "_");
}
/**
* Get backup system table descriptor
* @return table's descriptor
*/
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
TableDescriptorBuilder builder =
TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
ColumnFamilyDescriptorBuilder colBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
colBuilder.setMaxVersions(1);
Configuration config = HBaseConfiguration.create();
int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
colBuilder.setTimeToLive(ttl);
ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
builder.setColumnFamily(colSessionsDesc);
colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
colBuilder.setTimeToLive(ttl);
builder.setColumnFamily(colBuilder.build());
return builder.build();
}
public static TableName getTableNameForBulkLoadedData(Configuration conf) {
String name = conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT) + "_bulk";
return TableName.valueOf(name);
}
/**
* Creates Put operation for a given backup info object
* @param context backup info
* @return put operation
* @throws IOException exception
*/
private Put createPutForBackupInfo(BackupInfo context) throws IOException {
Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId()));
put.addColumn(BackupSystemTable.SESSIONS_FAMILY, Bytes.toBytes("context"),
context.toByteArray());
return put;
}
/**
* Creates Get operation for a given backup id
* @param backupId backup's ID
* @return get operation
* @throws IOException exception
*/
private Get createGetForBackupInfo(String backupId) throws IOException {
Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId));
get.addFamily(BackupSystemTable.SESSIONS_FAMILY);
get.readVersions(1);
return get;
}
/**
* Creates Delete operation for a given backup id
* @param backupId backup's ID
* @return delete operation
*/
private Delete createDeleteForBackupInfo(String backupId) {
Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId));
del.addFamily(BackupSystemTable.SESSIONS_FAMILY);
return del;
}
/**
* Converts Result to BackupInfo
* @param res HBase result
* @return backup info instance
* @throws IOException exception
*/
private BackupInfo resultToBackupInfo(Result res) throws IOException {
res.advance();
Cell cell = res.current();
return cellToBackupInfo(cell);
}
/**
* Creates Get operation to retrieve start code from backup system table
* @return get operation
* @throws IOException exception
*/
private Get createGetForStartCode(String rootPath) throws IOException {
Get get = new Get(rowkey(START_CODE_ROW, rootPath));
get.addFamily(BackupSystemTable.META_FAMILY);
get.readVersions(1);
return get;
}
/**
* Creates Put operation to store start code to backup system table
* @return put operation
*/
private Put createPutForStartCode(String startCode, String rootPath) {
Put put = new Put(rowkey(START_CODE_ROW, rootPath));
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("startcode"),
Bytes.toBytes(startCode));
return put;
}
/**
* Creates Get to retrieve incremental backup table set from backup system table
* @return get operation
* @throws IOException exception
*/
private Get createGetForIncrBackupTableSet(String backupRoot) throws IOException {
Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot));
get.addFamily(BackupSystemTable.META_FAMILY);
get.readVersions(1);
return get;
}
/**
* Creates Put to store incremental backup table set
* @param tables tables
* @return put operation
*/
private Put createPutForIncrBackupTableSet(Set<TableName> tables, String backupRoot) {
Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot));
for (TableName table : tables) {
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()),
EMPTY_VALUE);
}
return put;
}
/**
* Creates Delete for incremental backup table set
* @param backupRoot backup root
* @return delete operation
*/
private Delete createDeleteForIncrBackupTableSet(String backupRoot) {
Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot));
delete.addFamily(BackupSystemTable.META_FAMILY);
return delete;
}
/**
* Creates Scan operation to load backup history
* @return scan operation
*/
private Scan createScanForBackupHistory() {
Scan scan = new Scan();
byte[] startRow = Bytes.toBytes(BACKUP_INFO_PREFIX);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.SESSIONS_FAMILY);
scan.readVersions(1);
return scan;
}
/**
* Converts cell to backup info instance.
* @param current current cell
* @return backup backup info instance
* @throws IOException exception
*/
private BackupInfo cellToBackupInfo(Cell current) throws IOException {
byte[] data = CellUtil.cloneValue(current);
return BackupInfo.fromByteArray(data);
}
/**
* Creates Put to write RS last roll log timestamp map
* @param table table
* @param smap map, containing RS:ts
* @return put operation
*/
private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap,
String backupRoot) {
Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap);
return put;
}
/**
* Creates Scan to load table-> { RS -> ts} map of maps
* @return scan operation
*/
private Scan createScanForReadLogTimestampMap(String backupRoot) {
Scan scan = new Scan();
byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.META_FAMILY);
return scan;
}
/**
* Get table name from rowkey
* @param cloneRow rowkey
* @return table name
*/
private String getTableNameForReadLogTimestampMap(byte[] cloneRow) {
String s = Bytes.toString(cloneRow);
int index = s.lastIndexOf(NULL);
return s.substring(index + 1);
}
/**
* Creates Put to store RS last log result
* @param server server name
* @param timestamp log roll result (timestamp)
* @return put operation
*/
private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp,
String backupRoot) {
Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"),
Bytes.toBytes(timestamp));
return put;
}
/**
* Creates Scan operation to load last RS log roll results
* @return scan operation
*/
private Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) {
Scan scan = new Scan();
byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.META_FAMILY);
scan.readVersions(1);
return scan;
}
/**
* Get server's name from rowkey
* @param row rowkey
* @return server's name
*/
private String getServerNameForReadRegionServerLastLogRollResult(byte[] row) {
String s = Bytes.toString(row);
int index = s.lastIndexOf(NULL);
return s.substring(index + 1);
}
/*
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles
*/
static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
Map<byte[], List<Path>> finalPaths) {
List<Put> puts = new ArrayList<>();
for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
for (Path path : entry.getValue()) {
String file = path.toString();
int lastSlash = file.lastIndexOf("/");
String filename = file.substring(lastSlash + 1);
Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
Bytes.toString(region), BLK_LD_DELIM, filename));
put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey());
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
puts.add(put);
LOG.debug(
"writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
}
}
return puts;
}
public static void snapshot(Connection conn) throws IOException {
try (Admin admin = conn.getAdmin()) {
Configuration conf = conn.getConfiguration();
admin.snapshot(BackupSystemTable.getSnapshotName(conf), BackupSystemTable.getTableName(conf));
}
}
public static void restoreFromSnapshot(Connection conn) throws IOException {
Configuration conf = conn.getConfiguration();
LOG.debug("Restoring " + BackupSystemTable.getTableNameAsString(conf) + " from snapshot");
try (Admin admin = conn.getAdmin()) {
String snapshotName = BackupSystemTable.getSnapshotName(conf);
if (snapshotExists(admin, snapshotName)) {
admin.disableTable(BackupSystemTable.getTableName(conf));
admin.restoreSnapshot(snapshotName);
admin.enableTable(BackupSystemTable.getTableName(conf));
LOG.debug("Done restoring backup system table");
} else {
// Snapshot does not exists, i.e completeBackup failed after
// deleting backup system table snapshot
// In this case we log WARN and proceed
LOG.warn(
"Could not restore backup system table. Snapshot " + snapshotName + " does not exists.");
}
}
}
private static boolean snapshotExists(Admin admin, String snapshotName) throws IOException {
List<SnapshotDescription> list = admin.listSnapshots();
for (SnapshotDescription desc : list) {
if (desc.getName().equals(snapshotName)) {
return true;
}
}
return false;
}
public static boolean snapshotExists(Connection conn) throws IOException {
return snapshotExists(conn.getAdmin(), getSnapshotName(conn.getConfiguration()));
}
public static void deleteSnapshot(Connection conn) throws IOException {
Configuration conf = conn.getConfiguration();
LOG.debug("Deleting " + BackupSystemTable.getSnapshotName(conf) + " from the system");
try (Admin admin = conn.getAdmin()) {
String snapshotName = BackupSystemTable.getSnapshotName(conf);
if (snapshotExists(admin, snapshotName)) {
admin.deleteSnapshot(snapshotName);
LOG.debug("Done deleting backup system table snapshot");
} else {
LOG.error("Snapshot " + snapshotName + " does not exists");
}
}
}
/*
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles
*/
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
final List<Pair<Path, Path>> pairs) {
List<Put> puts = new ArrayList<>(pairs.size());
for (Pair<Path, Path> pair : pairs) {
Path path = pair.getSecond();
String file = path.toString();
int lastSlash = file.lastIndexOf("/");
String filename = file.substring(lastSlash + 1);
Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
Bytes.toString(region), BLK_LD_DELIM, filename));
put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
puts.add(put);
LOG.debug("writing raw bulk path " + file + " for " + table + " " + Bytes.toString(region));
}
return puts;
}
public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst) {
List<Delete> lstDels = new ArrayList<>(lst.size());
for (TableName table : lst) {
Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM));
del.addFamily(BackupSystemTable.META_FAMILY);
lstDels.add(del);
}
return lstDels;
}
private Put createPutForDeleteOperation(String[] backupIdList) {
byte[] value = Bytes.toBytes(StringUtils.join(backupIdList, ","));
Put put = new Put(DELETE_OP_ROW);
put.addColumn(META_FAMILY, FAM_COL, value);
return put;
}
private Delete createDeleteForBackupDeleteOperation() {
Delete delete = new Delete(DELETE_OP_ROW);
delete.addFamily(META_FAMILY);
return delete;
}
private Get createGetForDeleteOperation() {
Get get = new Get(DELETE_OP_ROW);
get.addFamily(META_FAMILY);
return get;
}
public void startDeleteOperation(String[] backupIdList) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Start delete operation for backups: " + StringUtils.join(backupIdList));
}
Put put = createPutForDeleteOperation(backupIdList);
try (Table table = connection.getTable(tableName)) {
table.put(put);
}
}
public void finishDeleteOperation() throws IOException {
LOG.trace("Finsih delete operation for backup ids");
Delete delete = createDeleteForBackupDeleteOperation();
try (Table table = connection.getTable(tableName)) {
table.delete(delete);
}
}
public String[] getListOfBackupIdsFromDeleteOperation() throws IOException {
LOG.trace("Get delete operation for backup ids");
Get get = createGetForDeleteOperation();
try (Table table = connection.getTable(tableName)) {
Result res = table.get(get);
if (res.isEmpty()) {
return null;
}
Cell cell = res.listCells().get(0);
byte[] val = CellUtil.cloneValue(cell);
if (val.length == 0) {
return null;
}
return new String(val).split(",");
}
}
private Put createPutForMergeOperation(String[] backupIdList) {
byte[] value = Bytes.toBytes(StringUtils.join(backupIdList, ","));
Put put = new Put(MERGE_OP_ROW);
put.addColumn(META_FAMILY, FAM_COL, value);
return put;
}
public boolean isMergeInProgress() throws IOException {
Get get = new Get(MERGE_OP_ROW);
try (Table table = connection.getTable(tableName)) {
Result res = table.get(get);
return (!res.isEmpty());
}
}
private Put createPutForUpdateTablesForMerge(List<TableName> tables) {
byte[] value = Bytes.toBytes(StringUtils.join(tables, ","));
Put put = new Put(MERGE_OP_ROW);
put.addColumn(META_FAMILY, PATH_COL, value);
return put;
}
private Delete createDeleteForBackupMergeOperation() {
Delete delete = new Delete(MERGE_OP_ROW);
delete.addFamily(META_FAMILY);
return delete;
}
private Get createGetForMergeOperation() {
Get get = new Get(MERGE_OP_ROW);
get.addFamily(META_FAMILY);
return get;
}
public void startMergeOperation(String[] backupIdList) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Start merge operation for backups: " + StringUtils.join(backupIdList));
}
Put put = createPutForMergeOperation(backupIdList);
try (Table table = connection.getTable(tableName)) {
table.put(put);
}
}
public void updateProcessedTablesForMerge(List<TableName> tables) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Update tables for merge : " + StringUtils.join(tables, ","));
}
Put put = createPutForUpdateTablesForMerge(tables);
try (Table table = connection.getTable(tableName)) {
table.put(put);
}
}
public void finishMergeOperation() throws IOException {
LOG.trace("Finish merge operation for backup ids");
Delete delete = createDeleteForBackupMergeOperation();
try (Table table = connection.getTable(tableName)) {
table.delete(delete);
}
}
public String[] getListOfBackupIdsFromMergeOperation() throws IOException {
LOG.trace("Get backup ids for merge operation");
Get get = createGetForMergeOperation();
try (Table table = connection.getTable(tableName)) {
Result res = table.get(get);
if (res.isEmpty()) {
return null;
}
Cell cell = res.listCells().get(0);
byte[] val = CellUtil.cloneValue(cell);
if (val.length == 0) {
return null;
}
return new String(val).split(",");
}
}
static Scan createScanForOrigBulkLoadedFiles(TableName table) {
Scan scan = new Scan();
byte[] startRow = rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.META_FAMILY);
scan.readVersions(1);
return scan;
}
static String getTableNameFromOrigBulkLoadRow(String rowStr) {
String[] parts = rowStr.split(BLK_LD_DELIM);
return parts[1];
}
static String getRegionNameFromOrigBulkLoadRow(String rowStr) {
// format is bulk : namespace : table : region : file
String[] parts = rowStr.split(BLK_LD_DELIM);
int idx = 3;
if (parts.length == 4) {
// the table is in default namespace
idx = 2;
}
LOG.debug("bulk row string " + rowStr + " region " + parts[idx]);
return parts[idx];
}
/*
* Used to query bulk loaded hfiles which have been copied by incremental backup
* @param backupId the backup Id. It can be null when querying for all tables
* @return the Scan object
*/
static Scan createScanForBulkLoadedFiles(String backupId) {
Scan scan = new Scan();
byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
: rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.META_FAMILY);
scan.readVersions(1);
return scan;
}
static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId,
long ts, int idx) {
Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx));
put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(p));
return put;
}
/**
* Creates Scan operation to load backup set list
* @return scan operation
*/
private Scan createScanForBackupSetList() {
Scan scan = new Scan();
byte[] startRow = Bytes.toBytes(SET_KEY_PREFIX);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
scan.withStopRow(stopRow);
scan.addFamily(BackupSystemTable.META_FAMILY);
return scan;
}
/**
* Creates Get operation to load backup set content
* @return get operation
*/
private Get createGetForBackupSet(String name) {
Get get = new Get(rowkey(SET_KEY_PREFIX, name));
get.addFamily(BackupSystemTable.META_FAMILY);
return get;
}
/**
* Creates Delete operation to delete backup set content
* @param name backup set's name
* @return delete operation
*/
private Delete createDeleteForBackupSet(String name) {
Delete del = new Delete(rowkey(SET_KEY_PREFIX, name));
del.addFamily(BackupSystemTable.META_FAMILY);
return del;
}
/**
* Creates Put operation to update backup set content
* @param name backup set's name
* @param tables list of tables
* @return put operation
*/
private Put createPutForBackupSet(String name, String[] tables) {
Put put = new Put(rowkey(SET_KEY_PREFIX, name));
byte[] value = convertToByteArray(tables);
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("tables"), value);
return put;
}
private byte[] convertToByteArray(String[] tables) {
return Bytes.toBytes(StringUtils.join(tables, ","));
}
/**
* Converts cell to backup set list.
* @param current current cell
* @return backup set as array of table names
*/
private String[] cellValueToBackupSet(Cell current) {
byte[] data = CellUtil.cloneValue(current);
if (!ArrayUtils.isEmpty(data)) {
return Bytes.toString(data).split(",");
}
return new String[0];
}
/**
* Converts cell key to backup set name.
* @param current current cell
* @return backup set name
*/
private String cellKeyToBackupSetName(Cell current) {
byte[] data = CellUtil.cloneRow(current);
return Bytes.toString(data).substring(SET_KEY_PREFIX.length());
}
private static byte[] rowkey(String s, String... other) {
StringBuilder sb = new StringBuilder(s);
for (String ss : other) {
sb.append(ss);
}
return Bytes.toBytes(sb.toString());
}
}
| 24,228 |
442 | <filename>jmqtt-mqtt/src/main/java/org/jmqtt/mqtt/protocol/impl/PubCompProcessor.java
package org.jmqtt.mqtt.protocol.impl;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.mqtt.MqttMessage;
import org.jmqtt.mqtt.MQTTConnection;
import org.jmqtt.mqtt.netty.MqttNettyUtils;
import org.jmqtt.mqtt.protocol.RequestProcessor;
/**
* 出栈消息接收到的qos2消息的pubComp报文:清除缓存的出栈消息
*/
public class PubCompProcessor implements RequestProcessor {
@Override
public void processRequest(ChannelHandlerContext ctx, MqttMessage mqttMessage) {
MQTTConnection mqttConnection = MqttNettyUtils.mqttConnection(ctx.channel());
mqttConnection.processPubComp(mqttMessage);
}
}
| 331 |
3,075 | package org.powermock.core.classloader;
import org.assertj.core.api.ThrowableAssert.ThrowingCallable;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.powermock.core.classloader.MockClassLoaderFactoryTest.TestContainer.ExceptionTestClass;
import org.powermock.core.classloader.MockClassLoaderFactoryTest.TestContainer.JavassistTestClass;
import org.powermock.core.classloader.MockClassLoaderFactoryTest.TestContainer.PrepareEverythingForTestTestClass;
import org.powermock.core.classloader.MockClassLoaderFactoryTest.TestContainer.SuppressStaticInitializationForTestClass;
import org.powermock.core.classloader.annotations.PrepareEverythingForTest;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.core.classloader.javassist.JavassistMockClassLoader;
import org.powermock.reflect.Whitebox;
import powermock.test.support.MainMockTransformerTestSupport.SupportClasses;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import static org.assertj.core.api.Java6Assertions.assertThat;
import static org.assertj.core.api.Java6Assertions.assertThatThrownBy;
import static org.powermock.core.test.ContainsCondition.contains;
@RunWith(Enclosed.class)
public class MockClassLoaderFactoryTest {
@RunWith(Parameterized.class)
public static class AnnotationTestOnClassLevelCases extends BasePrepareForTestCases {
public AnnotationTestOnClassLevelCases(final Class<?> testClass, String expectedClassToModify) {
super(testClass, expectedClassToModify);
}
@Test
public void should_extract_classes_to_modify_from_class_level_annotation() {
final ClassLoader classLoader = objectUnderTest.createForClass();
assertThat(classLoader)
.as("An instance of MockClassLoader is created")
.isInstanceOf(MockClassLoader.class);
assertThat(((MockClassLoader) classLoader).getConfiguration())
.as("MockClassLoader configuration contains expected class: %s", expectedClassToModify)
.extracting("modify")
.are(contains(expectedClassToModify));
}
}
@RunWith(Parameterized.class)
public static class AnnotationOnMethodLevelCases extends BasePrepareForTestCases {
@Parameters(name = "Test parameter: {0}")
public static Collection<Object[]> parameters() {
final ArrayList<Object[]> parameters = new ArrayList<Object[]>();
parameters.add(new Object[]{JavassistTestClass.class, "powermock.test.support.MainMockTransformerTestSupport$SupportClasses$FinalInnerClass"});
parameters.add(new Object[]{SuppressStaticInitializationForTestClass.class, "SupportClasses.FinalInnerClass"});
parameters.add(new Object[]{PrepareEverythingForTestTestClass.class, "*"});
return parameters;
}
public AnnotationOnMethodLevelCases(final Class<?> testClass, String expectedClassToModify) {
super(testClass, expectedClassToModify);
}
@Test
public void should_extract_classes_to_modify_method_level_annotation_if_exist() {
final Method method = Whitebox.getMethod(testClass, "someTestWithPrepareForTest");
final ClassLoader classLoader = objectUnderTest.createForMethod(method);
assertThat(classLoader)
.as("An instance of MockClassLoader is created")
.isInstanceOf(MockClassLoader.class);
assertThat(((MockClassLoader) classLoader).getConfiguration())
.as("MockClassLoader configuration contains expected class: %s", expectedClassToModify)
.extracting("modify")
.are(contains(expectedClassToModify));
}
}
@RunWith(Parameterized.class)
public static class DifferentByteCodeFrameworkCases {
@Parameters(name = "Test parameter: {0}")
public static Collection<Object[]> parameters() {
final ArrayList<Object[]> parameters = new ArrayList<Object[]>();
parameters.add(new Object[]{JavassistTestClass.class, JavassistMockClassLoader.class});
return parameters;
}
private final Class<?> testClass;
private final Class<? extends MockClassLoader> expectedClassLoaderClass;
private MockClassLoaderFactory objectUnderTest;
public DifferentByteCodeFrameworkCases(final Class<?> testClass, final Class<? extends MockClassLoader> expectedClassLoaderClass) {
this.testClass = testClass;
this.expectedClassLoaderClass = expectedClassLoaderClass;
}
@Before
public void setUp() {
objectUnderTest = new MockClassLoaderFactory(testClass);
}
@Test
public void should_create_a_correct_instance_of_class_loader_depends_on_PrepareForTest_parameter_of_class() {
assertThat(objectUnderTest.createForClass())
.as("A classloader of the expected classes %s is created.", expectedClassLoaderClass.getName())
.isExactlyInstanceOf(expectedClassLoaderClass);
}
@Test
public void should_create_a_correct_instance_of_class_loader_depends_on_PrepareForTest_parameter_of_method() {
final Method method = Whitebox.getMethod(testClass, "someTestWithPrepareForTest");
assertThat(objectUnderTest.createForMethod(method))
.as("A classloader of the expected classes %s is created.", expectedClassLoaderClass.getName())
.isExactlyInstanceOf(expectedClassLoaderClass);
}
@Test
public void should_create_a_correct_instance_of_class_loader_depends_and_use_PrepareForTest_from_class_if_method_does_not_have_annotation() {
final Method method = Whitebox.getMethod(testClass, "someTestWithoutPrepareForTest");
assertThat(objectUnderTest.createForMethod(method))
.as("A classloader of the expected classes %s is created.", expectedClassLoaderClass.getName())
.isExactlyInstanceOf(expectedClassLoaderClass);
}
}
public static class ExceptionCases{
MockClassLoaderFactory objectUnderTest;
private Class<?> testClass;
@Before
public void setUp() {
testClass = ExceptionTestClass.class;
objectUnderTest = new MockClassLoaderFactory(testClass);
}
@Test
public void should_throw_exception_if_trying_to_create_an_instance_of_class_loader_for_method_without_annotations_and_class_without_annotation() {
final Method method = Whitebox.getMethod(testClass, "someTestWithoutPrepareForTest");
assertThatThrownBy(new ThrowingCallable() {
@Override
public void call() {
objectUnderTest.createForMethod(method);
}
}).as("Exception is thrown.")
.isExactlyInstanceOf(IllegalArgumentException.class);
}
}
public abstract static class BasePrepareForTestCases {
@Parameters(name = "Test parameter: {0}")
public static Collection<Object[]> parameters() {
final ArrayList<Object[]> parameters = new ArrayList<Object[]>();
parameters.add(new Object[]{JavassistTestClass.class, "powermock.test.support.MainMockTransformerTestSupport$SupportClasses"});
parameters.add(new Object[]{SuppressStaticInitializationForTestClass.class, "SupportClasses.FinalInnerClass"});
parameters.add(new Object[]{PrepareEverythingForTestTestClass.class, "*"});
return parameters;
}
MockClassLoaderFactory objectUnderTest;
final String expectedClassToModify;
final Class<?> testClass;
BasePrepareForTestCases(final Class<?> testClass, String expectedClassToModify) {
this.testClass = testClass;
this.expectedClassToModify = expectedClassToModify;
}
@Before
public void setUp() {
objectUnderTest = new MockClassLoaderFactory(testClass);
}
}
@SuppressWarnings("WeakerAccess")
public abstract static class TestContainer {
@PrepareForTest(SupportClasses.class)
public static class JavassistTestClass {
@Test
@PrepareForTest(SupportClasses.FinalInnerClass.class)
public void someTestWithPrepareForTest() {
}
@Test
public void someTestWithoutPrepareForTest() {
}
}
@PrepareEverythingForTest
public static class PrepareEverythingForTestTestClass {
@Test
@PrepareEverythingForTest
public void someTestWithPrepareForTest() {
}
@Test
public void someTestWithoutPrepareForTest() {
}
}
@SuppressStaticInitializationFor("SupportClasses.FinalInnerClass")
public static class SuppressStaticInitializationForTestClass {
@Test
@SuppressStaticInitializationFor("SupportClasses.FinalInnerClass")
public void someTestWithPrepareForTest() {
}
@Test
public void someTestWithoutPrepareForTest() {
}
}
public static class ExceptionTestClass {
@Test
public void someTestWithoutPrepareForTest() {
}
}
}
} | 4,337 |
13,885 | /*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined (WIN32)
#ifdef max
#undef max
#endif
#ifdef min
#undef min
#endif
#ifdef far
#undef far
#endif
#ifdef near
#undef near
#endif
#ifdef ERROR
#undef ERROR
#endif
#ifdef OPAQUE
#undef OPAQUE
#endif
#ifdef TRANSPARENT
#undef TRANSPARENT
#endif
#ifdef PURE
#undef PURE
#endif
#endif
| 298 |
5,133 | <reponame>Saljack/mapstruct
/*
* Copyright MapStruct Authors.
*
* Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package org.mapstruct.ap.test.inheritfromconfig;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;
/**
* @author <NAME>
*/
@Mapper(
config = AutoInheritedReverseConfig.class
)
public abstract class CarMapperReverseWithAutoInheritance {
public static final CarMapperReverseWithAutoInheritance INSTANCE =
Mappers.getMapper( CarMapperReverseWithAutoInheritance.class );
@Mapping( target = "colour", source = "color" )
public abstract CarDto toCarDto(CarEntity entity);
}
| 248 |
839 | <filename>microprofile-opentracing/src/main/java/org/wildfly/quickstarts/microprofile/opentracing/ExplicitlyTracedBean.java
/*
* JBoss, Home of Professional Open Source
* Copyright 2020, Red Hat, Inc. and/or its affiliates, and individual
* contributors by the @authors tag. See the copyright.txt in the
* distribution for a full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wildfly.quickstarts.microprofile.opentracing;
import io.opentracing.Span;
import io.opentracing.Tracer;
import org.eclipse.microprofile.opentracing.Traced;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
@ApplicationScoped
public class ExplicitlyTracedBean {
@Inject
private Tracer tracer;
@Traced
public String getHello() {
Span prepareHelloSpan = tracer.buildSpan("prepare-hello").start();
String hello = "hello";
Span processHelloSpan = tracer.buildSpan("process-hello").start();
hello = hello.toUpperCase();
processHelloSpan.finish();
prepareHelloSpan.finish();
return hello;
}
}
| 513 |
2,453 | //
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 30 2020 21:18:12).
//
// Copyright (C) 1997-2019 <NAME>.
//
#import <AppKit/NSView.h>
@class NSString;
@interface IDEAnalyzeSubActionPlaceholderView : NSView
{
NSString *_placeholderMessage;
}
- (void).cxx_destruct;
@property(copy) NSString *placeholderMessage; // @synthesize placeholderMessage=_placeholderMessage;
- (void)drawRect:(struct CGRect)arg1;
@end
| 160 |
1,108 | <filename>rest/tests/test_rest_service.py<gh_stars>1000+
'''
Offline tests
'''
from unittest import TestCase
from mock import MagicMock
from mock import mock_open
from rest_service import RestService
from rest_service import (log_call, error_catch, validate_json,
validate_schema)
import mock
import json
import flask
import six
from kafka.common import OffsetOutOfRangeError
from kafka.conn import ConnectionStates
from kafka.common import KafkaError
from redis.exceptions import ConnectionError
class Override(RestService):
@log_call("test logger")
def test_log_call(self):
pass
@error_catch
def test_error1(self):
raise Exception()
pass
@error_catch
def test_error2(self):
return "test data"
@error_catch
def test_error3(self):
return "test data", 109
@validate_json
def test_json(self):
return 'data'
@validate_schema('key')
def test_schema(self):
return 'data'
class TestRestService(TestCase):
def setUp(self):
self.rest_service = RestService("settings.py")
self.rest_service.settings = self.rest_service.wrapper.load("settings.py")
self.rest_service.logger = MagicMock()
@mock.patch('os.listdir', MagicMock(return_value=['hey.json']))
@mock.patch('six.moves.builtins.open', mock_open(read_data='bibble'), create=True)
def test_load_schemas_bad(self):
with self.assertRaises(ValueError):
self.rest_service._load_schemas()
@mock.patch('os.listdir', MagicMock(return_value=['hey2.json']))
@mock.patch('six.moves.builtins.open', mock_open(read_data='{\"stuff\":\"value\"}'), create=True)
def test_load_schemas_bad(self):
self.rest_service._load_schemas()
self.assertEqual(self.rest_service.schemas,
{'hey2': {u'stuff': u'value'}})
def test_process_messages(self):
self.rest_service.consumer = MagicMock()
self.rest_service._check_kafka_disconnect = MagicMock()
# handle kafka offset errors
self.rest_service.consumer = MagicMock(
side_effect=OffsetOutOfRangeError("1"))
try:
self.rest_service._process_messages()
except OffsetOutOfRangeError:
self.fail("_process_messages did not handle Kafka Offset Error")
# handle bad json errors
message_string = "{\"sdasdf sd}"
# fake class so we can use dot notation
class a(object):
pass
m = a()
m.value = message_string
messages = [m]
self.rest_service.consumer = MagicMock()
self.rest_service.consumer.__iter__.return_value = messages
try:
self.rest_service._process_messages()
except OffsetOutOfRangeError:
self.fail("_process_messages did not handle bad json")
# test got poll result
self.rest_service.uuids = {'abc123': 'poll'}
self.rest_service._send_result_to_redis = MagicMock()
message_string = "{\"uuid\":\"abc123\"}"
m.value = message_string
messages = [m]
self.rest_service._process_messages()
self.assertTrue(self.rest_service._send_result_to_redis.called)
# test got in process call result
self.rest_service.uuids = {'abc123': None}
self.rest_service._send_result_to_redis = MagicMock()
message_string = "{\"uuid\":\"abc123\"}"
m.value = message_string
messages = [m]
self.rest_service._process_messages()
self.assertEqual(self.rest_service.uuids, {'abc123': {u'uuid': u'abc123'}})
def test_send_result_to_redis(self):
# test not connected
self.rest_service.redis_connected = False
self.rest_service.logger.warning = MagicMock()
self.rest_service._send_result_to_redis('stuff')
self.assertTrue(self.rest_service.logger.warning.called)
# test connected
self.rest_service.redis_connected = True
self.rest_service.redis_conn = MagicMock()
self.rest_service.redis_conn.set = MagicMock()
self.rest_service._send_result_to_redis({'uuid': 'abc'})
self.rest_service.redis_conn.set.assert_called_with('rest:poll:abc',
'{"uuid": "abc"}')
# throw error
self.rest_service._spawn_redis_connection_thread = MagicMock()
self.rest_service.redis_conn.set = MagicMock(side_effect=ConnectionError)
self.rest_service._send_result_to_redis({'uuid': 'abc'})
self.assertTrue(self.rest_service._spawn_redis_connection_thread.called)
def test_check_kafka_disconnect(self):
# connection setup
class State(object):
pass
d1 = State()
d1.state = ConnectionStates.DISCONNECTED
d2 = State()
d2.state = ConnectionStates.DISCONNECTING
d3 = State()
d3.state = ConnectionStates.CONNECTED
class Client(object):
pass
self.rest_service._spawn_kafka_connection_thread = MagicMock()
self.rest_service.consumer = MagicMock()
# all connected
client = Client()
client._conns = {'1': d3}
self.rest_service.consumer._client = client
self.rest_service._check_kafka_disconnect()
self.assertFalse(self.rest_service._spawn_kafka_connection_thread.called)
# disconnecting
client = Client()
client._conns = {'1': d2}
self.rest_service.consumer._client = client
self.rest_service._check_kafka_disconnect()
self.assertTrue(self.rest_service._spawn_kafka_connection_thread.called)
self.rest_service._spawn_kafka_connection_thread.reset_mock()
# disconnected
client = Client()
client._conns = {'1': d1}
self.rest_service.consumer._client = client
self.rest_service._check_kafka_disconnect()
self.assertTrue(self.rest_service._spawn_kafka_connection_thread.called)
@mock.patch('socket.gethostname', return_value='host')
def test_report_self(self, h):
# test not connected
self.rest_service.redis_connected = False
self.rest_service.logger.warn = MagicMock()
self.rest_service._report_self()
self.assertTrue(self.rest_service.logger.warn.called)
# test connected
self.rest_service.my_uuid = 'abc999'
self.rest_service.get_time = MagicMock(return_value=5)
self.rest_service.redis_connected = True
self.rest_service.redis_conn = MagicMock()
self.rest_service.redis_conn.set = MagicMock()
self.rest_service._report_self()
self.rest_service.redis_conn.set.assert_called_with('stats:rest:self:host:abc999',
5)
# throw error
self.rest_service._spawn_redis_connection_thread = MagicMock()
self.rest_service.redis_conn.expire = MagicMock(side_effect=ConnectionError)
self.rest_service._report_self()
self.assertTrue(self.rest_service._spawn_redis_connection_thread.called)
def test_setup_kafka(self):
self.rest_service._create_producer = MagicMock()
self.rest_service._spawn_kafka_consumer_thread = MagicMock()
# consumer/producer != None
self.rest_service.consumer = MagicMock()
self.rest_service.consumer.close = MagicMock()
self.rest_service.producer = MagicMock()
self.rest_service.producer.close = MagicMock()
# eary exit to ensure everything is closed
self.rest_service._create_consumer = MagicMock(side_effect=Exception())
try:
self.rest_service._setup_kafka()
except:
pass
self.assertEqual(self.rest_service.consumer, None)
self.assertEqual(self.rest_service.producer, None)
# test if everything flows through
self.rest_service._create_consumer = MagicMock()
self.rest_service._setup_kafka()
self.assertTrue(self.rest_service.kafka_connected)
self.assertTrue(self.rest_service._spawn_kafka_consumer_thread.called)
def test_create_ret_object(self):
# failure
r = {
"status": "FAILURE",
"data": None,
"error": None
}
self.assertEqual(self.rest_service._create_ret_object(status=self.rest_service.FAILURE), r)
# success
r = {
"status": "SUCCESS",
"data": None,
"error": None
}
self.assertEqual(self.rest_service._create_ret_object(status=self.rest_service.SUCCESS), r)
# data
r = {
"status": "SUCCESS",
"data": 'blah',
"error": None
}
self.assertEqual(self.rest_service._create_ret_object(status=self.rest_service.SUCCESS, data='blah'), r)
# error message
r = {
"status": "FAILURE",
"data": None,
"error": {
"message": 'err'
}
}
self.assertEqual(self.rest_service._create_ret_object(status=self.rest_service.FAILURE,
error=True,
error_message='err'), r)
# error cause
r = {
"status": "FAILURE",
"data": None,
"error": {
"message": 'err',
"cause": "the cause"
}
}
self.assertEqual(self.rest_service._create_ret_object(status=self.rest_service.FAILURE,
error=True,
error_message='err',
error_cause="the cause"), r)
def test_close_thread(self):
thread = MagicMock()
results = [True, False]
def ret_val(*args):
return results.pop(0)
thread.isAlive = MagicMock(side_effect=ret_val)
# closed fine
self.rest_service.logger.warn = MagicMock()
self.rest_service._close_thread(thread, "blah")
self.assertFalse(self.rest_service.logger.warn.called)
# didnt close
results = [True, True]
self.rest_service.logger.warn = MagicMock()
self.rest_service._close_thread(thread, "blah2")
self.assertTrue(self.rest_service.logger.warn.called)
def test_close(self):
self.rest_service.consumer = MagicMock()
self.rest_service.consumer.close = MagicMock()
self.rest_service.producer = MagicMock()
self.rest_service.producer.close = MagicMock()
self.rest_service._close_thread = MagicMock()
self.rest_service.close()
self.assertEqual(self.rest_service._close_thread.call_count, 4)
self.assertTrue(self.rest_service.closed)
self.assertTrue(self.rest_service.consumer.close.called)
self.assertTrue(self.rest_service.producer.close.called)
def test_calculate_health(self):
self.rest_service.redis_connected = False
self.rest_service.kafka_connected = False
self.assertEqual(self.rest_service._calculate_health(), "RED")
self.rest_service.redis_connected = True
self.rest_service.kafka_connected = False
self.assertEqual(self.rest_service._calculate_health(), "YELLOW")
self.rest_service.redis_connected = False
self.rest_service.kafka_connected = True
self.assertEqual(self.rest_service._calculate_health(), "YELLOW")
self.rest_service.redis_connected = True
self.rest_service.kafka_connected = True
self.assertEqual(self.rest_service._calculate_health(), "GREEN")
def test_feed_to_kafka(self):
self.rest_service.producer = MagicMock()
# test good
self.assertTrue(self.rest_service._feed_to_kafka({}))
# test bad
self.rest_service._spawn_kafka_connection_thread = MagicMock()
self.rest_service.logger.error = MagicMock()
bad_future = MagicMock()
bad_future.get = MagicMock(side_effect=KafkaError)
self.rest_service.producer.send = MagicMock(return_value=bad_future)
self.assertFalse(self.rest_service._feed_to_kafka({}))
self.assertTrue(self.rest_service.logger.error.called)
self.assertTrue(self.rest_service._spawn_kafka_connection_thread.called)
# Route decorators --------
def test_log_call(self):
override = Override('settings.py')
override.logger = MagicMock()
override.logger.info = MagicMock()
with self.rest_service.app.test_request_context():
override.test_log_call()
self.assertTrue(override.logger.info.called)
self.assertEqual(override.logger.info.call_args[0][0], "test logger")
def test_error_catch(self):
override = Override('settings.py')
override.logger = MagicMock()
# test uncaught exception thrown
with self.rest_service.app.test_request_context():
override.logger.error = MagicMock()
results = override.test_error1()
self.assertTrue(override.logger.error.called)
self.assertEqual(override.logger.error.call_args[0][0],
"Uncaught Exception Thrown")
d = {
u'data': None,
u'error': {
u'message': u'An error occurred while processing your request.'
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 500)
# test normal response
with self.rest_service.app.test_request_context():
override.logger.error.reset_mock()
results = override.test_error2()
self.assertFalse(override.logger.error.called)
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, 'test data')
self.assertEqual(results[1], 200)
# test normal response with alternate response code
with self.rest_service.app.test_request_context():
override.logger.error.reset_mock()
results = override.test_error3()
self.assertFalse(override.logger.error.called)
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, 'test data')
self.assertEqual(results[1], 109)
def test_validate_json(self):
override = Override('settings.py')
override.logger = MagicMock()
# bad json
data = '["a list", ashdasd ,\\ !]'
with self.rest_service.app.test_request_context(data=data,
content_type='application/json'):
results = override.test_json()
self.assertTrue(override.logger.error.called)
self.assertEqual(override.logger.error.call_args[0][0],
'The payload must be valid JSON.')
d = {
u'data': None,
u'error': {
u'message': u'The payload must be valid JSON.'
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 400)
# no json
data = '["a list", ashdasd ,\\ !]'
with self.rest_service.app.test_request_context(data=data):
self.rest_service.logger.error.reset_mock()
results = override.test_json()
self.assertTrue(override.logger.error.called)
self.assertEqual(override.logger.error.call_args[0][0],
'The payload must be valid JSON.')
d = {
u'data': None,
u'error': {
u'message': u'The payload must be valid JSON.'
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 400)
# good json
data = '["a list", "2", "3"]'
with self.rest_service.app.test_request_context(data=data,
content_type='application/json'):
override.logger.reset_mock()
results = override.test_json()
self.assertFalse(override.logger.error.called)
self.assertEqual(results, 'data')
    def test_validate_schema(self):
        """Schema-validation decorator: payloads matching the registered
        jsonschema pass through to the endpoint; mismatches return a 400
        FAILURE with the validator's cause message."""
        override = Override('settings.py')
        override.logger = MagicMock()
        override.logger.error = MagicMock()
        # Register a throwaway schema: one required string field,
        # no additional properties allowed.
        override.schemas['key'] = {
            "type": "object",
            "properties": {
                "value": {
                    "type": "string",
                    "minLength": 1,
                    "maxLength": 100
                }
            },
            "required": [
                "value"
            ],
            "additionalProperties": False
        }
        # valid schema
        data = '{"value": "data here"}'
        with self.rest_service.app.test_request_context(data=data,
                                            content_type='application/json'):
            results = override.test_schema()
            self.assertFalse(override.logger.error.called)
            self.assertEqual(results, 'data')
        # invalid schema
        data = u'{"value": "data here", "otherkey": "bad data"}'
        with self.rest_service.app.test_request_context(data=data,
                                            content_type='application/json'):
            results = override.test_schema()
            self.assertTrue(override.logger.error.called)
            self.assertEqual(override.logger.error.call_args[0][0],
                             "Invalid Schema")
            # jsonschema quotes the offending key differently on py2 vs py3
            if six.PY3:
                cause_text = u"Additional properties are not allowed ('otherkey' was unexpected)"
            else:
                cause_text = u"Additional properties are not allowed (u'otherkey' was unexpected)"
            d = {
                u'data': None,
                u'error': {
                    u'message': u"JSON did not validate against schema.",
                    u'cause': cause_text
                },
                u'status': u'FAILURE'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 400)
# Routes ------------------
def test_index(self):
with self.rest_service.app.test_request_context():
self.rest_service.get_time = MagicMock(return_value=5)
self.rest_service.my_uuid = 'a908'
results = self.rest_service.index()
d = {
"kafka_connected": False,
"redis_connected": False,
"uptime_sec": 5,
"my_id": 'a908',
"node_health": 'RED'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
    def test_feed(self):
        """The /feed endpoint: rejected while kafka is down or the send
        fails, fire-and-forget success without a uuid, and request/response
        polling behavior when a uuid is supplied."""
        # test not connected
        self.rest_service.kafka_connected = False
        self.rest_service.logger.warn = MagicMock()
        with self.rest_service.app.test_request_context(data='{}',
                                            content_type='application/json'):
            results = self.rest_service.feed()
            self.assertTrue(self.rest_service.logger.warn.called)
            d = {
                u'data': None,
                u'error': {
                    u'message': u"Unable to connect to Kafka",
                },
                u'status': u'FAILURE'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 500)
        # connected
        self.rest_service.kafka_connected = True
        # test failed to send to kafka
        self.rest_service._feed_to_kafka = MagicMock(return_value=False)
        with self.rest_service.app.test_request_context(data='{}',
                                            content_type='application/json'):
            self.rest_service.logger.warn.reset_mock()
            results = self.rest_service.feed()
            self.assertTrue(self.rest_service.logger.warn.called)
            d = {
                u'data': None,
                u'error': {
                    u'message': u"Unable to connect to Kafka",
                },
                u'status': u'FAILURE'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 500)
        # test no uuid
        self.rest_service._feed_to_kafka = MagicMock(return_value=True)
        with self.rest_service.app.test_request_context(data='{}',
                                            content_type='application/json'):
            results = self.rest_service.feed()
            d = {
                u'data': None,
                u'error': None,
                u'status': u'SUCCESS'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 200)
        # test with uuid, got response
        # get_time() is mocked so feed()'s wait loop sees time advancing;
        # once the mocked clock passes 4 we drop the kafka "response" into
        # self.rest_service.uuids, simulating the consumer thread.
        time_list = [0, 1, 2, 3, 4, 5]
        def fancy_get_time():
            r = time_list.pop(0)
            # fake multithreaded response from kafka
            if r > 4:
                self.rest_service.uuids['key'] = 'data'
            return r
        with self.rest_service.app.test_request_context(data='{"uuid":"key"}',
                                            content_type='application/json'):
            self.rest_service.get_time = MagicMock(side_effect=fancy_get_time)
            results = self.rest_service.feed()
            d = {
                u'data': 'data',
                u'error': None,
                u'status': u'SUCCESS'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 200)
            # the delivered uuid entry must be consumed and removed
            self.assertFalse('key' in self.rest_service.uuids)
        # test with uuid, no response
        # Here the mocked clock runs out without a consumer response, so
        # feed() returns the poll_id and parks the uuid in state 'poll'.
        time_list = [0, 1, 2, 3, 4, 5, 6]
        def fancy_get_time2():
            return time_list.pop(0)
        with self.rest_service.app.test_request_context(data='{"uuid":"key"}',
                                            content_type='application/json'):
            self.rest_service.get_time = MagicMock(side_effect=fancy_get_time2)
            results = self.rest_service.feed()
            d = {
                u'data': {'poll_id': "key"},
                u'error': None,
                u'status': u'SUCCESS'
            }
            data = json.loads(results[0].data.decode('utf-8'))
            self.assertEqual(data, d)
            self.assertEqual(results[1], 200)
            self.assertTrue('key' in self.rest_service.uuids)
            self.assertEqual(self.rest_service.uuids['key'], 'poll')
def test_poll(self):
orig = self.rest_service.validator
self.rest_service.validator = MagicMock()
self.rest_service.schemas['poll'] = MagicMock()
# test not connected
self.rest_service.redis_connected = False
self.rest_service.logger.warn = MagicMock()
with self.rest_service.app.test_request_context(data='{}',
content_type='application/json'):
results = self.rest_service.poll()
self.assertTrue(self.rest_service.logger.warn.called)
d = {
u'data': None,
u'error': {
u'message': u"Unable to connect to Redis",
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 500)
# test connected found poll key
self.rest_service.redis_conn = MagicMock()
self.rest_service.redis_conn.get = MagicMock(return_value='["data"]')
self.rest_service.redis_connected = True
with self.rest_service.app.test_request_context(data='{"poll_id":"key"}',
content_type='application/json'):
results = self.rest_service.poll()
d = {
u'data': ['data'],
u'error': None,
u'status': u'SUCCESS'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 200)
# test connected didnt find poll key
self.rest_service.redis_conn.get = MagicMock(return_value=None)
self.rest_service.redis_connected = True
with self.rest_service.app.test_request_context(data='{"poll_id":"key"}',
content_type='application/json'):
results = self.rest_service.poll()
d = {
u'data': None,
u'error': {
"message": "Could not find matching poll_id"
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 404)
# test connection error
self.rest_service._spawn_redis_connection_thread = MagicMock()
self.rest_service.logger.error = MagicMock()
with self.rest_service.app.test_request_context(data='{"poll_id":"key"}',
content_type='application/json'):
self.rest_service.redis_conn.get = MagicMock(side_effect=ConnectionError)
results = self.rest_service.poll()
self.assertTrue(self.rest_service.logger.error.called)
self.assertEqual(self.rest_service.logger.error.call_args[0][0], "Lost connection to Redis")
self.assertTrue(self.rest_service._spawn_redis_connection_thread.called)
d = {
u'data': None,
u'error': {
u'message': u"Unable to connect to Redis",
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 500)
# test value error
self.rest_service.logger.warning = MagicMock()
with self.rest_service.app.test_request_context(data='{"poll_id":"key"}',
content_type='application/json'):
self.rest_service.redis_conn.get = MagicMock(side_effect=ValueError)
results = self.rest_service.poll()
self.assertTrue(self.rest_service.logger.warning.called)
self.assertEqual(self.rest_service.logger.warning.call_args[0][0], "Unparseable JSON Received from redis")
d = {
u'data': None,
u'error': {
u'message': u"Unparseable JSON Received from redis",
},
u'status': u'FAILURE'
}
data = json.loads(results[0].data.decode('utf-8'))
self.assertEqual(data, d)
self.assertEqual(results[1], 500)
self.rest_service.validator = orig
| 14,027 |
782 | <filename>ch04/src/main/java/coprocessor/RowCountEndpoint.java
package coprocessor;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import coprocessor.generated.RowCounterProtos;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
// cc RowCountEndpoint Example endpoint implementation, adding a row and cell count method.
// vv RowCountEndpoint
public class RowCountEndpoint extends RowCounterProtos.RowCountService
    implements Coprocessor, CoprocessorService {

  /** Environment of the region this endpoint instance is deployed on. */
  private RegionCoprocessorEnvironment regionEnv;

  /**
   * Captures the region environment. Endpoints may only be deployed on
   * table regions; anything else is rejected.
   */
  @Override
  public void start(CoprocessorEnvironment environment) throws IOException {
    if (!(environment instanceof RegionCoprocessorEnvironment)) {
      throw new CoprocessorException("Must be loaded on a table region!");
    }
    this.regionEnv = (RegionCoprocessorEnvironment) environment;
  }

  @Override
  public void stop(CoprocessorEnvironment environment) throws IOException {
    // nothing to do when coprocessor is shutting down
  }

  @Override
  public Service getService() {
    return this;
  }

  /** RPC: counts distinct rows, scanning only the first key of each row. */
  @Override
  public void getRowCount(RpcController controller,
      RowCounterProtos.CountRequest request,
      RpcCallback<RowCounterProtos.CountResponse> done) {
    RowCounterProtos.CountResponse response = null;
    try {
      response = RowCounterProtos.CountResponse.newBuilder()
          .setCount(scanAndCount(new FirstKeyOnlyFilter(), false)).build();
    } catch (IOException ioe) {
      ResponseConverter.setControllerException(controller, ioe);
    }
    done.run(response);
  }

  /** RPC: counts every cell (latest version only) in the region. */
  @Override
  public void getCellCount(RpcController controller,
      RowCounterProtos.CountRequest request,
      RpcCallback<RowCounterProtos.CountResponse> done) {
    RowCounterProtos.CountResponse response = null;
    try {
      response = RowCounterProtos.CountResponse.newBuilder()
          .setCount(scanAndCount(null, true)).build();
    } catch (IOException ioe) {
      ResponseConverter.setControllerException(controller, ioe);
    }
    done.run(response);
  }

  /**
   * Scans the region once and counts rows or cells.
   *
   * @param filter optional scan filter, may be null
   * @param countCells true to count individual cells, false to count rows
   * @return the number of rows or cells seen by the scan
   * @throws IOException when the region scan fails
   */
  private long scanAndCount(Filter filter, boolean countCells)
      throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions(1);
    if (filter != null) {
      scan.setFilter(filter);
    }
    long total = 0;
    try (InternalScanner scanner = regionEnv.getRegion().getScanner(scan)) {
      List<Cell> batch = new ArrayList<Cell>();
      byte[] currentRow = null;
      boolean moreRows;
      do {
        moreRows = scanner.next(batch);
        for (Cell cell : batch) {
          if (countCells) {
            total++;
          } else if (currentRow == null || !CellUtil.matchingRow(cell, currentRow)) {
            // first cell of a new row: remember the row key and count it
            currentRow = CellUtil.cloneRow(cell);
            total++;
          }
        }
        batch.clear();
      } while (moreRows);
    }
    return total;
  }
}
// ^^ RowCountEndpoint
| 1,393 |
3,913 | <gh_stars>1000+
package cn.iocoder.yudao.framework.sms.config;
import cn.iocoder.yudao.framework.sms.core.client.SmsClientFactory;
import cn.iocoder.yudao.framework.sms.core.client.impl.SmsClientFactoryImpl;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * SMS auto-configuration class.
 * <p>
 * Registers the {@link SmsClientFactory} bean used by the framework to
 * obtain SMS clients.
 *
 * @author 芋道源码
 */
@Configuration
public class YudaoSmsAutoConfiguration {
    /**
     * @return the {@link SmsClientFactory} bean, implemented by
     *         {@link SmsClientFactoryImpl}
     */
    @Bean
    public SmsClientFactory smsClientFactory() {
        return new SmsClientFactoryImpl();
    }
}
| 213 |
679 | <filename>main/ucb/source/ucp/webdav/SerfSession.hxx<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef INCLUDED_SERFSESSION_HXX
#define INCLUDED_SERFSESSION_HXX
#include <vector>
#include <boost/shared_ptr.hpp>
#include <osl/mutex.hxx>
#include "DAVResource.hxx"
#include "DAVSession.hxx"
#include "SerfTypes.hxx"
#include "SerfLockStore.hxx"
#include "SerfUri.hxx"
#include <com/sun/star/lang/XMultiServiceFactory.hpp>
namespace ucbhelper { class ProxyDecider; }
namespace http_dav_ucp
{
class SerfRequestProcessor;
// -------------------------------------------------------------------
// SerfSession
// A DAVSession implementation using the serf library
// -------------------------------------------------------------------
// Implements the DAVSession interface on top of the serf HTTP client
// library (see SerfTypes.hxx / SerfRequestProcessor).
class SerfSession : public DAVSession
{
private:
    // Mutex for this session; grab it before touching session state.
    osl::Mutex m_aMutex;
    SerfUri m_aUri;
    rtl::OUString m_aProxyName;
    sal_Int32 m_nProxyPort;
    // The server, according RFC7231
    // http://tools.ietf.org/html/rfc7231#section-7.4.2
    rtl::OUString m_aServerHeaderField;
    // serf connection/context/bucket-allocator handles used by this session
    // (exposed below via getSerfConnection()/getSerfContext()/getSerfBktAlloc()).
    SerfConnection* m_pSerfConnection;
    serf_context_t* m_pSerfContext;
    serf_bucket_alloc_t* m_pSerfBucket_Alloc;
    // true while a HEAD request is running, see isHeadRequestInProgress()
    bool m_bIsHeadRequestInProgress;
    bool m_bUseChunkedEncoding;
    // NOTE(review): despite the 'm_b' prefix this is a counter, not a bool.
    sal_Int16 m_bNoOfTransferEncodingSwitches;
    const ucbhelper::InternetProxyDecider & m_rProxyDecider;
    DAVRequestEnvironment m_aEnv;
    // Lock store shared by all sessions; it calls the private LOCK/UNLOCK
    // overloads below to refresh and release WebDAV locks.
    static SerfLockStore m_aSerfLockStore;
    char* getHostinfo();
    bool isSSLNeeded();
    SerfRequestProcessor* createReqProc( const rtl::OUString & inPath );
    rtl::OUString composeCurrentUri( const rtl::OUString & inPath );
protected:
    virtual ~SerfSession();
public:
    SerfSession( const rtl::Reference< DAVSessionFactory > & rSessionFactory,
                 const rtl::OUString& inUri,
                 const ucbhelper::InternetProxyDecider & rProxyDecider )
        throw ( DAVException );
    // Serf library callbacks
    apr_status_t setupSerfConnection( apr_socket_t * inAprSocket,
                                      serf_bucket_t **outSerfInputBucket,
                                      serf_bucket_t **outSerfOutputBucket,
                                      apr_pool_t* inAprPool );
    apr_status_t provideSerfCredentials( bool bGiveProvidedCredentialsASecondTry,
                                         char ** outUsername,
                                         char ** outPassword,
                                         serf_request_t * inRequest,
                                         int inCode,
                                         const char *inAuthProtocol,
                                         const char *inRealm,
                                         apr_pool_t *inAprPool );
    apr_status_t verifySerfCertificateChain (
        int nFailures,
        const serf_ssl_certificate_t * const * pCertificateChainBase64Encoded,
        int nCertificateChainLength);
    serf_bucket_t* acceptSerfResponse( serf_request_t * inSerfRequest,
                                       serf_bucket_t * inSerfStreamBucket,
                                       apr_pool_t* inAprPool );
    // Serf-related data structures
    apr_pool_t* getAprPool();
    serf_bucket_alloc_t* getSerfBktAlloc();
    serf_context_t* getSerfContext();
    SerfConnection* getSerfConnection();
    // DAVSession methods
    virtual sal_Bool CanUse( const ::rtl::OUString & inUri );
    virtual sal_Bool UsesProxy();
    const DAVRequestEnvironment & getRequestEnvironment() const
    { return m_aEnv; }
    // allprop & named
    virtual void
    PROPFIND( const ::rtl::OUString & inPath,
              const Depth inDepth,
              const std::vector< ::rtl::OUString > & inPropNames,
              std::vector< DAVResource > & ioResources,
              const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    // propnames
    virtual void
    PROPFIND( const ::rtl::OUString & inPath,
              const Depth inDepth,
              std::vector< DAVResourceInfo >& ioResInfo,
              const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    PROPPATCH( const ::rtl::OUString & inPath,
               const std::vector< ProppatchValue > & inValues,
               const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    HEAD( const ::rtl::OUString & inPath,
          const std::vector< ::rtl::OUString > & inHeaderNames,
          DAVResource & ioResource,
          const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    bool isHeadRequestInProgress();
    virtual com::sun::star::uno::Reference< com::sun::star::io::XInputStream >
    GET( const ::rtl::OUString & inPath,
         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    GET( const ::rtl::OUString & inPath,
         com::sun::star::uno::Reference<
             com::sun::star::io::XOutputStream > & ioOutputStream,
         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual com::sun::star::uno::Reference< com::sun::star::io::XInputStream >
    GET( const ::rtl::OUString & inPath,
         const std::vector< ::rtl::OUString > & inHeaderNames,
         DAVResource & ioResource,
         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    GET( const ::rtl::OUString & inPath,
         com::sun::star::uno::Reference<
             com::sun::star::io::XOutputStream > & ioOutputStream,
         const std::vector< ::rtl::OUString > & inHeaderNames,
         DAVResource & ioResource,
         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    PUT( const ::rtl::OUString & inPath,
         const com::sun::star::uno::Reference<
             com::sun::star::io::XInputStream > & inInputStream,
         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual com::sun::star::uno::Reference< com::sun::star::io::XInputStream >
    POST( const rtl::OUString & inPath,
          const rtl::OUString & rContentType,
          const rtl::OUString & rReferer,
          const com::sun::star::uno::Reference<
              com::sun::star::io::XInputStream > & inInputStream,
          const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    POST( const rtl::OUString & inPath,
          const rtl::OUString & rContentType,
          const rtl::OUString & rReferer,
          const com::sun::star::uno::Reference<
              com::sun::star::io::XInputStream > & inInputStream,
          com::sun::star::uno::Reference<
              com::sun::star::io::XOutputStream > & oOutputStream,
          const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    MKCOL( const ::rtl::OUString & inPath,
           const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void
    COPY( const ::rtl::OUString & inSourceURL,
          const ::rtl::OUString & inDestinationURL,
          const DAVRequestEnvironment & rEnv,
          sal_Bool inOverWrite )
        throw ( DAVException );
    virtual void
    MOVE( const ::rtl::OUString & inSourceURL,
          const ::rtl::OUString & inDestinationURL,
          const DAVRequestEnvironment & rEnv,
          sal_Bool inOverWrite )
        throw ( DAVException );
    virtual void DESTROY( const ::rtl::OUString & inPath,
                          const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    // set new lock.
    virtual void LOCK( const ::rtl::OUString & inURL,
                       com::sun::star::ucb::Lock & inLock,
                       const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    // refresh existing lock.
    virtual sal_Int64 LOCK( const ::rtl::OUString & inURL,
                            sal_Int64 nTimeout,
                            const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    virtual void UNLOCK( const ::rtl::OUString & inURL,
                         const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    // helpers
    virtual void abort()
        throw ( DAVException );
    const rtl::OUString & getHostName() const { return m_aUri.GetHost(); }
    int getPort() const { return m_aUri.GetPort(); }
    // NOTE(review): relies on namespace aliases for ::uno / ::lang being
    // declared elsewhere (not visible in this header).
    const ::uno::Reference< ::lang::XMultiServiceFactory > getMSF()
    { return m_xFactory->getServiceFactory(); }
    sal_Bool isDomainMatch( rtl::OUString certHostName );
    const rtl::OUString & getServerHeaderField() { return m_aServerHeaderField; };
    void setServerHeaderField( rtl::OUString aServerHeaderField ) { m_aServerHeaderField = aServerHeaderField; };
private:
    friend class SerfLockStore;
    void Init( void )
        throw ( DAVException );
    void Init( const DAVRequestEnvironment & rEnv )
        throw ( DAVException );
    void HandleError( boost::shared_ptr<SerfRequestProcessor> rReqProc )
        throw ( DAVException );
    const ucbhelper::InternetProxyServer & getProxySettings() const;
    bool removeExpiredLocktoken( const rtl::OUString & inURL,
                                 const DAVRequestEnvironment & rEnv );
    // refresh lock, called by SerfLockStore::refreshLocks
    bool LOCK( SerfLock * pLock,
               sal_Int32 & rlastChanceToSendRefreshRequest );
    // unlock, called by SerfLockStore::~SerfLockStore
    bool UNLOCK( SerfLock * pLock );
    /*
    // low level GET implementation, used by public GET implementations
    static int GET( SerfConnection * sess,
                    const char * uri,
                    //ne_block_reader reader,
                    bool getheaders,
                    void * userdata );
    // Buffer-based PUT implementation. Serf only has file descriptor-
    // based API.
    static int PUT( SerfConnection * sess,
                    const char * uri,
                    const char * buffer,
                    size_t size );
    // Buffer-based POST implementation. Serf only has file descriptor-
    // based API.
    int POST( SerfConnection * sess,
              const char * uri,
              const char * buffer,
              //ne_block_reader reader,
              void * userdata,
              const rtl::OUString & rContentType,
              const rtl::OUString & rReferer );
    */
    // Helper: XInputStream -> Sequence< sal_Int8 >
    static bool getDataFromInputStream(
        const com::sun::star::uno::Reference<
            com::sun::star::io::XInputStream > & xStream,
        com::sun::star::uno::Sequence< sal_Int8 > & rData,
        bool bAppendTrailingZeroByte );
    /*
    rtl::OUString makeAbsoluteURL( rtl::OUString const & rURL ) const;
    */
};
} // namespace http_dav_ucp
#endif // INCLUDED_SERFSESSION_HXX
| 5,074 |
4,036 | int x;
x == 4; // most likely = was intended. Otherwise this statement has no effect.
...
| 25 |
925 | <gh_stars>100-1000
package org.openni;
/**
* The CoordinateConverter class converts points between the different coordinate systems.
*
* <b>Depth and World coordinate systems</b>
*
* OpenNI applications commonly use two different coordinate systems to represent depth. These two
* systems are referred to as Depth and World representation.
*
* Depth coordinates are the native data representation. In this system, the frame is a map (two
* dimensional array), and each pixel is assigned a depth value. This depth value represents the
* distance between the camera plane and whatever object is in the given pixel. The X and Y
* coordinates are simply the location in the map, where the origin is the top-left corner of the
* field of view.
*
* World coordinates superimpose a more familiar 3D Cartesian coordinate system on the world, with
* the camera lens at the origin. In this system, every point is specified by 3 points -- x, y and
* z. The x axis of this system is along a line that passes through the infrared projector and CMOS
* imager of the camera. The y axis is parallel to the front face of the camera, and perpendicular
* to the x axis (it will also be perpendicular to the ground if the camera is upright and level).
* The z axis runs into the scene, perpendicular to both the x and y axis. From the perspective of
* the camera, an object moving from left to right is moving along the increasing x axis. An object
* moving up is moving along the increasing y axis, and an object moving away from the camera is
* moving along the increasing z axis.
*
* Mathematically, the Depth coordinate system is the projection of the scene on the CMOS. If the
* sensor's angular field of view and resolution are known, then an angular size can be calculated
* for each pixel. This is how the conversion algorithms work. The dependence of this calculation on
* FoV and resolution is the reason that a {@link VideoStream} object must be provided to these
* functions. The {@link VideoStream} object is used to determine parameters for the specific points
* to be converted.
*
* Since Depth coordinates are a projective, the apparent size of objects in depth coordinates
* (measured in pixels) will increase as an object moves closer to the sensor. The size of objects
* in the World coordinate system is independent of distance from the sensor.
*
* Note that converting from Depth to World coordinates is relatively expensive computationally. It
* is generally not practical to convert the entire raw depth map to World coordinates. A better
* approach is to have your computer vision algorithm work in Depth coordinates for as long as
* possible, and only converting a few specific points to World coordinates right before output.
*
* Note that when converting from Depth to World or vice versa, the Z value remains the same.
*/
public class CoordinateConverter {

  /**
   * Converts a point from World coordinates (millimeters) to Depth
   * coordinates, truncating the native result to whole integers.
   *
   * @param depthStream stream whose format defines the Depth coordinate space
   * @param worldX X in millimeters, World coordinates
   * @param worldY Y in millimeters, World coordinates
   * @param worldZ Z in millimeters, World coordinates
   * @return the point in Depth coordinates (X/Y in pixels, Z in the
   *         {@link PixelFormat} of depthStream)
   */
  public static Point3D<Integer> convertWorldToDepthInt(final VideoStream depthStream,
      float worldX, float worldY, float worldZ) {
    OutArg<Float> outX = new OutArg<Float>();
    OutArg<Float> outY = new OutArg<Float>();
    OutArg<Float> outZ = new OutArg<Float>();
    NativeMethods.checkReturnStatus(NativeMethods.oniCoordinateConverterWorldToDepth(
        depthStream.getHandle(), worldX, worldY, worldZ, outX, outY, outZ));
    return new Point3D<Integer>(outX.mValue.intValue(), outY.mValue.intValue(),
        outZ.mValue.intValue());
  }

  /**
   * Converts a point from World coordinates (millimeters) to a floating
   * point representation of Depth coordinates.
   *
   * @param depthStream stream whose format defines the Depth coordinate space
   * @param worldX X in millimeters, World coordinates
   * @param worldY Y in millimeters, World coordinates
   * @param worldZ Z in millimeters, World coordinates
   * @return the point in Depth coordinates: X/Y in pixels (0.0 at the far
   *         left / top of the image), Z in the {@link PixelFormat} of
   *         depthStream
   */
  public static Point3D<Float> convertWorldToDepthFloat(final VideoStream depthStream,
      float worldX, float worldY, float worldZ) {
    OutArg<Float> outX = new OutArg<Float>();
    OutArg<Float> outY = new OutArg<Float>();
    OutArg<Float> outZ = new OutArg<Float>();
    NativeMethods.checkReturnStatus(NativeMethods.oniCoordinateConverterWorldToDepth(
        depthStream.getHandle(), worldX, worldY, worldZ, outX, outY, outZ));
    return new Point3D<Float>(outX.mValue, outY.mValue, outZ.mValue);
  }

  /**
   * Converts a point from Depth coordinates to World coordinates.
   *
   * @param depthStream stream whose format defines the Depth coordinate space
   * @param depthX X in pixels, 0 at the far left of the image
   * @param depthY Y in pixels, 0 at the top of the image
   * @param depthZ Z(depth) in the {@link PixelFormat} of depthStream
   * @return the point in World coordinates, measured in millimeters
   */
  public static Point3D<Float> convertDepthToWorld(final VideoStream depthStream, int depthX,
      int depthY, short depthZ) {
    OutArg<Float> outX = new OutArg<Float>();
    OutArg<Float> outY = new OutArg<Float>();
    OutArg<Float> outZ = new OutArg<Float>();
    NativeMethods.checkReturnStatus(NativeMethods.oniCoordinateConverterDepthToWorld(
        depthStream.getHandle(), depthX, depthY, depthZ, outX, outY, outZ));
    return new Point3D<Float>(outX.mValue, outY.mValue, outZ.mValue);
  }

  /**
   * Converts a point from a floating point representation of Depth
   * coordinates to World coordinates.
   *
   * @param depthStream stream whose format defines the Depth coordinate space
   * @param depthX X in pixels, 0.0 at the far left of the image
   * @param depthY Y in pixels, 0.0 at the top of the image
   * @param depthZ Z(depth) in the {@link PixelFormat} of depthStream
   * @return the point in World coordinates, measured in millimeters
   */
  public static Point3D<Float> convertDepthToWorld(final VideoStream depthStream, float depthX,
      float depthY, float depthZ) {
    OutArg<Float> outX = new OutArg<Float>();
    OutArg<Float> outY = new OutArg<Float>();
    OutArg<Float> outZ = new OutArg<Float>();
    NativeMethods.checkReturnStatus(NativeMethods.oniCoordinateConverterDepthToWorld(
        depthStream.getHandle(), depthX, depthY, depthZ, outX, outY, outZ));
    return new Point3D<Float>(outX.mValue, outY.mValue, outZ.mValue);
  }

  /**
   * For a given depth pixel, finds the color pixel that overlaps it —
   * useful for superimposing the depth and color images. Equivalent to
   * registration, but applied to a single pixel.
   *
   * @param depthStream stream that produced the depth value
   * @param colorStream stream in which to locate the matching color pixel
   * @param depthX X of the depth point, Depth coordinates, in pixels
   * @param depthY Y of the depth point, Depth coordinates, in pixels
   * @param depthZ depth value in the {@link PixelFormat} of depthStream
   * @return the overlapping color pixel's coordinates, in pixels
   */
  public static Point2D<Integer> convertDepthToColor(final VideoStream depthStream,
      final VideoStream colorStream, int depthX, int depthY, short depthZ) {
    OutArg<Integer> outX = new OutArg<Integer>();
    OutArg<Integer> outY = new OutArg<Integer>();
    NativeMethods.checkReturnStatus(NativeMethods.oniCoordinateConverterDepthToColor(
        depthStream.getHandle(), colorStream.getHandle(), depthX, depthY, depthZ, outX, outY));
    return new Point2D<Integer>(outX.mValue, outY.mValue);
  }
}
| 3,020 |
584 | <gh_stars>100-1000
package com.mercury.platform;
import com.mercury.platform.files.FileDescriptor;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.function.Consumer;
/**
 * Ordered, serializable collection of {@link FileDescriptor} entries making up one patch.
 *
 * @param <E> concrete descriptor type stored in this patch
 */
public class PatchDescriptor<E extends FileDescriptor> implements Iterable<E>, Serializable {
    // Explicit serial version: a Serializable class without one gets a compiler-computed
    // UID that silently changes across refactorings and breaks deserialization.
    private static final long serialVersionUID = 1L;
    // Backing storage; ArrayList is itself Serializable, so default serialization works.
    private List<E> descriptors = new ArrayList<>();
    /**
     * Appends a descriptor to this patch.
     *
     * @param object descriptor to add
     */
    public void add(E object) {
        descriptors.add(object);
    }
    @Override
    public Iterator<E> iterator() {
        return descriptors.iterator();
    }
    @Override
    public void forEach(Consumer<? super E> action) {
        descriptors.forEach(action);
    }
    @Override
    public Spliterator<E> spliterator() {
        return descriptors.spliterator();
    }
}
| 289 |
1,275 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.broker.routing.instanceselector;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.pinot.broker.routing.segmentpreselector.SegmentPreSelector;
import org.apache.pinot.common.request.BrokerRequest;
/**
* The instance selector selects server instances to serve the query based on the selected segments.
*/
public interface InstanceSelector {
  /**
   * Initializes the instance selector with the enabled instances, ideal state, external view, and online segments
   * (segments with ONLINE/CONSUMING instances in the ideal state and pre-selected by the {@link SegmentPreSelector}).
   * Should be called only once before calling other methods.
   */
  void init(Set<String> enabledInstances, IdealState idealState, ExternalView externalView, Set<String> onlineSegments);
  /**
   * Processes the instances change. Changed instances are pre-computed based on the current and previous enabled
   * instances only once on the caller side and passed to all the instance selectors.
   */
  void onInstancesChange(Set<String> enabledInstances, List<String> changedInstances);
  /**
   * Processes the segment assignment (ideal state or external view) change based on the given online segments (segments
   * with ONLINE/CONSUMING instances in the ideal state and pre-selected by the {@link SegmentPreSelector}).
   */
  void onAssignmentChange(IdealState idealState, ExternalView externalView, Set<String> onlineSegments);
  /**
   * Selects the server instances for the given segments queried by the given broker request, returns a map from segment
   * to selected server instance hosting the segment and a set of unavailable segments (no enabled instance or all
   * enabled instances are in ERROR state).
   */
  SelectionResult select(BrokerRequest brokerRequest, List<String> segments);
  /**
   * Result holder for {@link #select}: the chosen server instance per segment, plus the segments that cannot be served.
   */
  class SelectionResult {
    // Map from segment name to the selected server instance hosting it.
    private final Map<String, String> _segmentToInstanceMap;
    // Segments with no enabled instance, or whose enabled instances are all in ERROR state.
    private final List<String> _unavailableSegments;
    public SelectionResult(Map<String, String> segmentToInstanceMap, List<String> unavailableSegments) {
      _segmentToInstanceMap = segmentToInstanceMap;
      _unavailableSegments = unavailableSegments;
    }
    /**
     * Returns the map from segment to selected server instance hosting the segment.
     */
    public Map<String, String> getSegmentToInstanceMap() {
      return _segmentToInstanceMap;
    }
    /**
     * Returns the unavailable segments (no enabled instance or all enabled instances are in ERROR state).
     */
    public List<String> getUnavailableSegments() {
      return _unavailableSegments;
    }
  }
}
| 959 |
698 | <reponame>joschu/c
#include "execution.h"
#include <cstdio>
#include <queue>
#include <set>
#include <vector>
#include "util/ThreadPool.h"
#include "unistd.h"
namespace cgt {
using std::vector;
using std::atomic;
using std::mutex;
// ================================================================
// Sequential interpreter
// ================================================================
// Runs every instruction of the execution graph in order on the calling
// thread. Intermediate values live in storage_, indexed by MemLocation.
class SequentialInterpreter : public Interpreter {
public:
    SequentialInterpreter(ExecutionGraph* eg, const vector<MemLocation>& output_locs)
        : eg_(eg), output_locs_(output_locs), storage_(eg->n_locs()), args_(NULL) { }
    // Read/write the object stored at a memory location.
    cgtObject * get(const MemLocation& m) {
        return storage_[m.index()].get();
    }
    void set(const MemLocation& m, cgtObject * val) {
        storage_[m.index()] = val;
    }
    // Fetch one of the caller-supplied arguments; valid only while run() executes.
    cgtObject * getarg(int argind) {
        cgt_assert(argind < args_->len);
        return args_->members[argind].get();
    }
    // Fires all instructions sequentially, then returns a freshly allocated
    // tuple holding the objects at the requested output locations.
    cgtTuple * run(cgtTuple * newargs) {
        args_ = newargs;
        cgt_assert(newargs != NULL);
        cgt_assert(newargs->len == eg_->n_args());
        for (Instruction* instr : eg_->instrs()) {
            instr->fire(this);
        }
        args_ = NULL;
        long n_outputs = output_locs_.size();
        cgtTuple * out = new cgtTuple(n_outputs);
        for (int i=0; i < n_outputs; ++i) {
            out->setitem(i, get(output_locs_[i]));
        }
        return out;
    }
    ~SequentialInterpreter() {
    }
private:
    ExecutionGraph* eg_;              // not owned
    vector<MemLocation> output_locs_; // locations returned from run()
    vector<IRC<cgtObject>> storage_;  // one ref-counted slot per memory location
    cgtTuple * args_;                 // caller's arguments; non-NULL only during run()
};
// ================================================================
// Parallel interpreter
// ================================================================
/**
In constructor, we build up a DAG on the instructions, where each instruction
points to the instructions that depend on it.
Also, each instruction knows its "in degree".
Thus, when all the predecessors of an instruction fire, we can enqueue that instruction.
*/
// #define DBG_PAR
// Interpreter that fires independent instructions concurrently on a thread
// pool, using the dependency DAG built in the constructor.
class ParallelInterpreter: public Interpreter {
public:
    using InstrInd = long;
    ParallelInterpreter(ExecutionGraph* eg, const vector<MemLocation>& output_locs, int num_threads);
    // Read/write the object stored at a memory location.
    cgtObject * get(const MemLocation& m) {
        return storage_[m.index()].get();
    }
    void set(const MemLocation& m, cgtObject * val) {
        storage_[m.index()] = val;
    }
    // Fetch one of the caller-supplied arguments; valid only while run() executes.
    cgtObject * getarg(int argind) {
        cgt_assert(argind < args_->len);
        return args_->members[argind].get();
    }
    cgtTuple * run(cgtTuple * newargs);
    // Executes one instruction and propagates readiness to its successors.
    void fire_instr(InstrInd instr_ind);
    // Marks an instruction runnable: runs it inline if quick(), else enqueues it.
    void trigger_instr(InstrInd instr_ind);
    ~ParallelInterpreter() {
        // delete[] loc2mutex_;
        delete[] instr2mutex_;
    }
private:
    ExecutionGraph* eg_;
    vector<MemLocation> output_locs_;
    vector<IRC<cgtObject>> storage_;
    cgtTuple * args_;
    ThreadPool pool_;
    vector<vector<InstrInd>> instr2next_; // instr -> instrs that depend on it
    vector<int> instr2indegree_; // instr -> number of instrs that it depends on
    vector<InstrInd> no_prereqs_; // instructions that can be fired initially
    vector<int> instr2insofar_; // instr -> number of prerequisites completed so far this run
    atomic<int> n_pending_; // number of pending instrs
    atomic<int> n_total_; // total instructions fired (just for debugging purposes)
    // mutex* loc2mutex_; // lock while writing to loc during execution. using c array because vector requires move constructor
    // xxx i think we can remove this
    mutex* instr2mutex_; // lock while checking if this instruction can fire or firing it
};
// Builds the instruction dependency DAG once, up front:
//  - instr2next_[i] lists the instructions that read what instruction i wrote,
//  - instr2indegree_[i] counts how many predecessors i must wait for,
//  - no_prereqs_ lists instructions runnable as soon as run() starts.
// Dependencies are derived from "last writer" tracking per memory location.
ParallelInterpreter::ParallelInterpreter(ExecutionGraph* eg, const vector<MemLocation>& output_locs, int num_threads)
        : eg_(eg),
          output_locs_(output_locs),
          storage_(eg->n_locs()),
          args_(NULL),
          pool_(num_threads),
          instr2next_(eg->n_instrs()),
          instr2indegree_(eg->n_instrs()),
          no_prereqs_(),
          instr2insofar_(eg->n_instrs()),
          n_pending_(0),
          n_total_(0),
          // loc2mutex_(new mutex[eg->n_locs()]),
          instr2mutex_(new mutex[eg->n_instrs()])
{
    vector<InstrInd> loc2lastwriter(eg->n_locs()); // for each location, last instruction to write to it
    InstrInd instr_ind=0; // will loop over instr index
    for (auto& instr : eg_->instrs()) {
        auto write_ind = instr->get_writeloc().index();
        // Instructions that have no read locations can be fired initially, except for ReturnByRef instructions
        if (instr->get_readlocs().empty() && instr->kind() != ReturnByRefKind) {
            no_prereqs_.push_back(instr_ind);
        }
        else {
            // All instructions depend on last writer to each read location
            for (auto& readmemloc : instr->get_readlocs()) {
                InstrInd lastwriter = loc2lastwriter[readmemloc.index()];
                instr2next_[lastwriter].push_back(instr_ind);
                ++instr2indegree_[instr_ind];
            }
            // ReturnByRef instruction depends on last writer to write location
            // (it mutates existing storage, so the previous writer must finish first)
            if (instr->kind() == ReturnByRefKind) {
                InstrInd lastwriter = loc2lastwriter[write_ind];
                instr2next_[lastwriter].push_back(instr_ind);
                ++instr2indegree_[instr_ind];
            }
        }
        loc2lastwriter[write_ind] = instr_ind;
        ++instr_ind;
    }
#ifdef DBG_PAR
    for (int i=0; i < eg->n_instrs(); ++i) {
        printf("instrution %i triggers", i);
        for (InstrInd ii : instr2next_[i]) printf(" %i", ii);
        printf(", in degree = %i\n", instr2indegree_[i]);
    }
#endif
}
// Runs the whole graph: resets per-run counters, seeds the pool with the
// prerequisite-free instructions, then polls until every instruction has
// completed. Returns a new tuple with the objects at the output locations.
cgtTuple * ParallelInterpreter::run(cgtTuple * newargs) {
    args_ = newargs;
    cgt_assert(newargs != NULL);
    cgt_assert(newargs->len == eg_->n_args());
    // Reset per-run bookkeeping.
    n_pending_=0;
    n_total_=0;
    for (auto& n : instr2insofar_) { n = 0;}
    // Trigger instructions that are ready initially.
    for (InstrInd i : no_prereqs_) {
        trigger_instr(i);
    }
    // Busy-wait (with a short sleep) until all fired instructions have drained.
    while (n_pending_ > 0) {
        usleep(100);
    }
    args_ = NULL;
    long n_outputs = output_locs_.size();
    cgtTuple * out = new cgtTuple(n_outputs);
    for (int i=0; i < n_outputs; ++i) {
        out->setitem(i, get(output_locs_[i]));
    }
    // Sanity check: every instruction must have fired exactly once.
    cgt_assert(n_total_ == eg_->n_instrs());
    return out;
}
// Worker-side execution of one instruction: fire it, then bump each
// successor's completed-prerequisite count (under that successor's mutex)
// and trigger any successor whose prerequisites are now all satisfied.
void ParallelInterpreter::fire_instr(InstrInd instr_ind)
{
    Instruction* instr = eg_->instrs()[instr_ind];
    // XXX once we do in-place increments we'll have to start locking write location
    // for (auto& m : instr->get_readlocs()) instr2mutex_[m.index()].lock();
    // instr2mutex_[instr->get_writeloc().index()].lock();
    instr->fire(this);
    // for (auto& m : instr->get_readlocs()) instr2mutex_[m.index()].unlock();
    // instr2mutex_[instr->get_writeloc().index()].unlock();
    for (InstrInd& nextind : instr2next_[instr_ind]) {
        // Lock guards the check-then-trigger so two predecessors finishing
        // concurrently cannot both (or neither) trigger the successor.
        std::lock_guard<std::mutex> lock(instr2mutex_[nextind]);
        ++instr2insofar_[nextind];
        if (instr2insofar_[nextind] == instr2indegree_[nextind]) {
            trigger_instr(nextind);
        }
    }
    // Decrement last: run() polls n_pending_ to detect completion.
    --n_pending_;
    // if (!instr->quick()) printf("finished %i %s\n", instr_ind, instr->repr().c_str());
}
// Marks an instruction runnable. n_pending_ is incremented *before* the work
// is enqueued so run()'s completion poll cannot observe a false zero.
// quick() instructions are executed inline to avoid thread-pool overhead.
void ParallelInterpreter::trigger_instr(InstrInd instr_ind)
{
    ++n_pending_;
    ++n_total_;
    instr2insofar_[instr_ind] = 0;
    auto instr = eg_->instrs()[instr_ind];
    if (instr->quick()) {
        fire_instr(instr_ind);
    }
    else {
        // printf("triggered %i %s\n", instr_ind, instr->repr().c_str());
        pool_.enqueue(&ParallelInterpreter::fire_instr, this, instr_ind);
    }
}
// ================================================================
// Instructions
// ================================================================
// Copies the ind-th caller-supplied argument into this instruction's write location.
void LoadArgument::fire(Interpreter* interp) {
    interp->set(writeloc, interp->getarg(ind));
}
// Allocates (or reuses) an array at the write location whose shape is given
// by the scalar i8 arrays stored at the read locations.
void Alloc::fire(Interpreter* interp) {
    int rank = readlocs.size();
    vector<long> dims(rank);
    for (int d = 0; d < rank; ++d) {
        cgtArray * dimval = (cgtArray *)interp->get(readlocs[d]);
        cgt_assert(dimval->dtype() == cgt_i8);
        dims[d] = dimval->at<long>(0);
    }
    // Keep the existing array when it already matches the requested shape;
    // otherwise replace it with a freshly allocated one.
    cgtArray* existing = static_cast<cgtArray*>(interp->get(writeloc));
    bool reusable = existing && existing->ndim() == rank
        && std::equal(dims.begin(), dims.end(), existing->shape());
    if (!reusable) {
        interp->set(writeloc, new cgtArray(rank, dims.data(), dtype, writeloc.devtype()));
    }
}
// Packs the objects at the read locations into a freshly allocated tuple
// and stores it at the write location.
void BuildTup::fire(Interpreter* interp) {
    size_t n = readlocs.size();
    cgtTuple * tup = new cgtTuple(n);
    for (size_t i = 0; i < n; ++i) {
        tup->setitem(i, interp->get(readlocs[i]));
    }
    interp->set(writeloc, tup);
}
// Gathers the input objects and invokes the kernel, which writes its result
// in place into the preallocated object at the write location.
// Uses std::vector for the argument array (consistent with ReturnByVal::fire);
// the original used a variable-length array, which is a non-standard C++
// compiler extension.
void ReturnByRef::fire(Interpreter* interp) {
    size_t n_inputs = readlocs.size();
    vector<cgtObject *> reads(n_inputs);
    for (size_t i=0; i < n_inputs; ++i) {
        reads[i] = interp->get(readlocs[i]);
    }
    cgtObject * write = interp->get(writeloc);
    callable(reads.data(), write);
}
// TODO actually allocate tuple
// Collects the input objects, invokes the kernel, and stores the newly
// created result object at the write location.
void ReturnByVal::fire(Interpreter* interp) {
    size_t count = readlocs.size();
    vector<cgtObject *> inputs(count);
    for (size_t i = 0; i < count; ++i) {
        inputs[i] = interp->get(readlocs[i]);
    }
    interp->set(writeloc, callable(inputs.data()));
}
// ================================================================
// Misc
// ================================================================
// The graph owns its instructions; release them on teardown.
ExecutionGraph::~ExecutionGraph() {
    for (auto instr : instrs_) delete instr;
}
// Factory: a single worker needs no scheduling machinery, so fall back to the
// sequential interpreter; otherwise build the thread-pool-backed one.
// Caller owns the returned interpreter.
Interpreter* create_interpreter(ExecutionGraph* eg, vector<MemLocation> output_locs, int num_threads) {
    if (num_threads <= 1) {
        return new SequentialInterpreter(eg, output_locs);
    }
    return new ParallelInterpreter(eg, output_locs, num_threads);
}
} | 4,165 |
672 | /*
* Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or <EMAIL>
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* Compressor Pager.
* Memory Object Management.
*/
#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
memory_object_t mem_obj,
memory_object_control_t control,
memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t length,
__unused vm_prot_t protection_required,
memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t size,
__unused memory_object_offset_t *resid_offset,
__unused int *io_error,
__unused boolean_t dirty,
__unused boolean_t kernel_copy,
__unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
__unused memory_object_size_t size,
__unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_size_t length,
__unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
__unused memory_object_t mem_obj,
__unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
__unused memory_object_t mem_obj,
__unused boolean_t reclaim_backing_store);
const struct memory_object_pager_ops compressor_pager_ops = {
compressor_memory_object_reference,
compressor_memory_object_deallocate,
compressor_memory_object_init,
compressor_memory_object_terminate,
compressor_memory_object_data_request,
compressor_memory_object_data_return,
compressor_memory_object_data_initialize,
compressor_memory_object_data_unlock,
compressor_memory_object_synchronize,
compressor_memory_object_map,
compressor_memory_object_last_unmap,
compressor_memory_object_data_reclaim,
"compressor pager"
};
/* internal data structures */
struct {
uint64_t data_returns;
uint64_t data_requests;
uint64_t put;
uint64_t get;
uint64_t state_clr;
uint64_t state_get;
uint64_t transfer;
} compressor_pager_stats;
typedef int compressor_slot_t;
typedef struct compressor_pager {
/* mandatory generic header */
struct memory_object cpgr_hdr;
/* pager-specific data */
lck_mtx_t cpgr_lock;
unsigned int cpgr_references;
unsigned int cpgr_num_slots;
unsigned int cpgr_num_slots_occupied;
union {
compressor_slot_t cpgr_eslots[2]; /* embedded slots */
compressor_slot_t *cpgr_dslots; /* direct slots */
compressor_slot_t **cpgr_islots; /* indirect slots */
} cpgr_slots;
} *compressor_pager_t;
#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
MACRO_BEGIN \
if (_mem_obj_ == NULL || \
_mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
_cpgr_ = NULL; \
} else { \
_cpgr_ = (compressor_pager_t) _mem_obj_; \
} \
MACRO_END
zone_t compressor_pager_zone;
lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;
#define compressor_pager_lock(_cpgr_) \
lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
int num_slots,
int flags,
int *failures);
void compressor_pager_slot_lookup(
compressor_pager_t pager,
boolean_t do_alloc,
memory_object_offset_t offset,
compressor_slot_t **slot_pp);
/*
 * Kernel's first contact with this memory object: record the
 * memory_object_control under the pager lock. Initializing twice
 * (control already set) is a fatal protocol violation.
 */
kern_return_t
compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t pager;
	assert(pager_page_size == PAGE_SIZE);
	/* take our own reference on the control; dropped in terminate */
	memory_object_control_reference(control);
	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_hdr.mo_control = control;
	compressor_pager_unlock(pager);
	return KERN_SUCCESS;
}
/*
 * Unsupported operation: memory_object_synchronize was removed from the
 * pager interface, so reaching this entry point is a fatal error.
 */
kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}
/* Not expected for compressor-backed objects; fatal if reached. */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
/* Not expected for compressor-backed objects; fatal if reached. */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
/* Not expected for compressor-backed objects; fatal if reached. */
kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}
/*
 * Kernel is done with this memory object: detach and drop our reference
 * on the memory_object_control so a subsequent init can succeed.
 */
kern_return_t
compressor_memory_object_terminate(
	memory_object_t mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t pager;
	/*
	 * control port is a receive right, not a send right.
	 */
	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);
	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */
	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
	compressor_pager_unlock(pager);
	/*
	 * Now we deallocate our reference on the control.
	 * (done outside the pager lock)
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}
/* Takes one additional reference on the pager (no-op for non-compressor objects). */
void
compressor_memory_object_reference(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;
	compressor_pager_lock(pager);
	/* must already be referenced; 0 -> 1 resurrection is a bug */
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}
/*
 * Drops one reference on the pager; on the last reference, frees all
 * compressor slots (direct, embedded, or indirect chunk layout) and the
 * pager structure itself.
 */
void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_slots_freed;
	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;
	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}
	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");
	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);
	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks: free each chunk, then the array */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		/* single directly-allocated slot array */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		/* slots embedded directly in the pager structure */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
	}
	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
/*
 * Query whether the compressor holds the page at "offset".
 * Only length == 0 (existence query) is supported; an actual data request
 * is fatal. Returns KERN_SUCCESS iff the slot exists and is occupied.
 */
kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;
	compressor_pager_stats.data_requests++;
	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");
	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* page number must fit in 32 bits; panic does not return */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}
	compressor_pager_lookup(mem_obj, pager);
	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}
	/* find the compressor slot for that page (no allocation) */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
/*
* memory_object_data_initialize: check whether we already have each page, and
* write it if we do not. The implementation is far from optimized, and
* also assumes that the default_pager is single-threaded.
*/
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel. Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return. It is the kernel that should decide what pages */
/* to write back. As of the writing of this note, this is indeed the case */
/* the kernel writes back one page at a time through this interface */
/*
 * Walks each page of the [offset, offset+size) range under the pager lock.
 * The per-page work is not implemented: the loop body panics unconditionally
 * for any non-empty range (placeholder for the data_return path).
 */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t pager;
	memory_object_offset_t cur_offset;
	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);
	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}
	compressor_pager_unlock(pager);
	return KERN_SUCCESS;
}
/* Not expected for compressor-backed objects; fatal if reached. */
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}
/*ARGSUSED*/
/*
 * Not expected for compressor-backed objects: page-out goes through
 * vm_compressor_pager_put() instead. Fatal if reached.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
/*
* Routine: default_pager_memory_object_create
* Purpose:
* Handle requests for memory objects from the
* kernel.
* Notes:
* Because we only give out the default memory
* manager port to the kernel, we don't have to
* be so paranoid about the contents.
*/
/*
 * Allocates and initializes a compressor pager sized for "new_size" bytes.
 * Slot storage layout depends on the page count:
 *   <= 2 slots         -> embedded in the pager (cpgr_eslots),
 *   <= one chunk       -> one directly-allocated array (cpgr_dslots),
 *   otherwise          -> lazily-populated array of chunks (cpgr_islots).
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	int num_chunks;
	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}
	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	compressor_pager_lock_init(pager);
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* chunk pointers start NULL; chunks are allocated on first use */
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}
	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
/*
 * Frees every occupied compressor slot in "chunk" (num_slots entries).
 * Returns the number of slots actually freed; if "failures" is non-NULL
 * it receives the count of slots vm_compressor_free() could not release.
 * A -2 return from vm_compressor_free indicates a would-block condition,
 * only legal when C_DONT_BLOCK was requested.
 */
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t *chunk,
	int num_slots,
	int flags,
	int *failures)
{
	int i;
	int retval;
	unsigned int num_slots_freed;
	if (failures)
		*failures = 0;
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);
			if (retval == 0)
				num_slots_freed++;
			else {
				if (retval == -2)
					assert(flags & C_DONT_BLOCK);
				if (failures)
					*failures += 1;
			}
		}
	}
	return num_slots_freed;
}
/*
 * Resolves "offset" to a pointer to its compressor slot, handling all three
 * slot-storage layouts. With do_alloc, a missing indirect chunk is allocated
 * (zero-filled outside the lock, installed under the lock; the loser of a
 * racing install frees its chunk). *slot_pp is NULL when the offset is out
 * of range or the chunk does not exist and allocation was not requested.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp)
{
	int num_chunks;
	uint32_t page_num;
	int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;
	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL && do_alloc) {
			/* allocate and zero the candidate chunk before taking the lock */
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			compressor_pager_lock(pager);
			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				__c11_atomic_thread_fence(memory_order_release);
				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);
			/* another thread installed a chunk first: discard ours */
			if (t_chunk)
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
/*
 * One-time initialization: lock group/attrs, the pager zone, and the
 * compressor itself. Called during VM bootstrap.
 */
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);
	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
	    10000 * sizeof (struct compressor_pager),
	    8192, "compressor_pager");
	/* pager allocations are a kernel cost, not charged to the caller */
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);
	vm_compressor_init();
}
/*
 * Compress the page at physical page number "ppnum" and store it in the
 * slot for "offset" in this pager.  If the slot already held a compressed
 * copy, that copy is freed first.  *compressed_count_delta_p receives the
 * net change in occupied slots.  Returns KERN_RESOURCE_SHORTAGE if the
 * compressor could not accept the page.
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
#if __arm__ || __arm64__
	unsigned int prev_wimg = VM_WIMG_DEFAULT;
	boolean_t set_cache_attr = FALSE;
#endif
	compressor_pager_stats.put++;
	*compressed_count_delta_p = 0;
	/* This routine is called by the pageout thread. The pageout thread */
	/* cannot be blocked by read activities unless the read activities */
	/* Therefore the grant of vs lock must be done on a try versus a */
	/* blocking basis. The code below relies on the fact that the */
	/* interface is synchronous. Should this interface be again async */
	/* for some type of pager in the future the pages will have to be */
	/* returned through a separate, asynchronous path. */
	compressor_pager_lookup(mem_obj, pager);
	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow: page number does not fit in 32 bits */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}
	/* TRUE: allocate the slot chunk on demand if not present yet */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);
	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}
#if __arm__ || __arm64__
	/*
	 * cacheability should be set to the system default (usually writeback)
	 * during compressor operations, both for performance and correctness,
	 * e.g. to avoid compressor codec faults generated by an unexpected
	 * memory type.
	 */
	prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK;
	if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) {
		set_cache_attr = TRUE;
		pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT);
	}
	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		/* failed: restore the page's previous cache attributes */
		if (set_cache_attr)
			pmap_set_cache_attributes(ppnum, prev_wimg);
		return KERN_RESOURCE_SHORTAGE;
	}
#else
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		return KERN_RESOURCE_SHORTAGE;
	}
#endif
	*compressed_count_delta_p += 1;
	return KERN_SUCCESS;
}
/*
 * Decompress the page at "offset" from this pager into the physical page
 * "ppnum".  *my_fault_type reports whether the data came straight from the
 * compressor or required a swap-in.  *compressed_count_delta_p receives the
 * net change in occupied slots (0 if the compressed copy was kept, e.g. for
 * a copy-on-write fault).  Returns:
 *   KERN_SUCCESS        - page decompressed
 *   KERN_MEMORY_FAILURE - offset out of range or decompression failed
 *   KERN_MEMORY_ERROR   - no compressed copy exists for this offset
 *   KERN_FAILURE        - would have blocked and C_DONT_BLOCK was set
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	int flags,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;
	compressor_pager_stats.get++;
	*compressed_count_delta_p = 0;
	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow: page number does not fit in 32 bits */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}
	compressor_pager_lookup(mem_obj, pager);
	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;
	if (kr == KERN_SUCCESS) {
		int retval;
#if __arm__ || __arm64__
		unsigned int prev_wimg = VM_WIMG_DEFAULT;
		boolean_t set_cache_attr = FALSE;
		/*
		 * cacheability should be set to the system default (usually writeback)
		 * during compressor operations, both for performance and correctness,
		 * e.g. to avoid compressor codec faults generated by an unexpected
		 * memory type.
		 */
		prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK;
		if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) {
			set_cache_attr = TRUE;
			pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT);
		}
#endif
		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			/* data had to be swapped in first */
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			/* only possible when the caller asked not to block */
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
#if __arm__ || __arm64__
		/* restore the page's original cache attributes */
		if (set_cache_attr)
			pmap_set_cache_attributes(ppnum, prev_wimg);
#endif
	}
	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place. Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}
	return kr;
}
/*
 * Discard any compressed copy of the page at "offset" in this pager.
 * Returns the number of slots freed (0 or 1).
 */
unsigned int
vm_compressor_pager_state_clr(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	unsigned int num_slots_freed;
	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
	compressor_pager_stats.state_clr++;
	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}
	compressor_pager_lookup(mem_obj, pager);
	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		/* vm_compressor_free() must have cleared the slot */
		assert(*slot_p == 0);
	}
	return num_slots_freed;
}
/*
 * Report whether the compressor holds a copy of the page at "offset":
 * VM_EXTERNAL_STATE_EXISTS if a compressed copy exists,
 * VM_EXTERNAL_STATE_ABSENT otherwise (including out-of-range offsets).
 */
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
	compressor_pager_stats.state_get++;
	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}
	compressor_pager_lookup(mem_obj, pager);
	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}
/*
 * Free the compressed pages held by this pager ("flags" is forwarded to
 * compressor_pager_slots_chunk_free()).  For chunked pagers, any chunk
 * whose slots were all freed without failures is deallocated.  Returns the
 * total number of slots freed.
 */
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t mem_obj,
	int flags)
{
	compressor_pager_t pager;
	int num_chunks;
	int failures;
	int i;
	compressor_slot_t *chunk;
	unsigned int num_slots_freed;
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;
	compressor_pager_lock(pager);
	/* reap the compressor slots */
	num_slots_freed = 0;
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				/* only release the chunk if every slot in it
				 * could be freed */
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		/* single directly-allocated chunk */
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	} else {
		/* tiny pager: slots embedded in the pager structure */
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}
	compressor_pager_unlock(pager);
	return num_slots_freed;
}
/*
 * Move the compressed data for one page from (src_mem_obj, src_offset) to
 * (dst_mem_obj, dst_offset).  The destination slot must be empty and the
 * source slot occupied; occupancy counters of both pagers are adjusted.
 */
void
vm_compressor_pager_transfer(
	memory_object_t dst_mem_obj,
	memory_object_offset_t dst_offset,
	memory_object_t src_mem_obj,
	memory_object_offset_t src_offset)
{
	compressor_pager_t src_pager, dst_pager;
	compressor_slot_t *src_slot_p, *dst_slot_p;
	compressor_pager_stats.transfer++;
	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	/* TRUE: allocate the destination chunk if needed */
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);
	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);
	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
/*
 * Return the offset (at or after "offset") of the next page in this pager
 * that has a compressed copy, or (memory_object_offset_t)-1 if there is
 * none (or "offset" is out of range / overflows 32-bit page numbering).
 */
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	uint32_t num_chunks;
	uint32_t page_num;
	uint32_t chunk_idx;
	uint32_t slot_idx;
	compressor_slot_t *chunk;
	compressor_pager_lookup(mem_obj, pager);
	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}
	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);
	if (num_chunks == 1) {
		/* single chunk: direct or embedded slot array */
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
								 PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}
	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++,
	     slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;
				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
								 PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
/*
 * Return the number of slots currently holding compressed data in this
 * pager (0 if there is no pager).
 */
unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;
	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);
	return pager->cpgr_num_slots_occupied;
}
/*
 * Adjust this pager's occupied-slot count by "compressed_count_delta".
 * "shared_lock" tells how the caller holds the VM object lock: under a
 * shared lock other threads may update the counter concurrently, so an
 * atomic add is used; under an exclusive lock a plain add suffices.
 */
void
vm_compressor_pager_count(
	memory_object_t mem_obj,
	int compressed_count_delta,
	boolean_t shared_lock,
	vm_object_t object __unused)
{
	compressor_pager_t pager;
	if (compressed_count_delta == 0) {
		return;
	}
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;
	if (compressed_count_delta < 0) {
		/* the count must not go negative */
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}
	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}
#if CONFIG_FREEZE
/*
 * Freezer support: hand the compressor slot for "offset" in this pager to
 * vm_compressor_relocate() so its compressed data can be moved into the
 * compression chead referenced by *current_chead.  Returns KERN_FAILURE
 * if the memory object has no pager; otherwise whatever
 * vm_compressor_relocate() returns.
 */
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */
	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;
	assert(mem_obj);
	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL)
		return KERN_FAILURE;
	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return (vm_compressor_relocate(current_chead, slot_p));
}
#endif /* CONFIG_FREEZE */
| 13,840 |
3,102 | <filename>clang/test/CodeGen/transparent-union-redecl.c
// RUN: %clang_cc1 -Werror -triple i386-linux -emit-llvm -o - %s | FileCheck %s
// Test that different order of declarations is acceptable and that
// implementing different redeclarations is acceptable.
// rdar://problem/34949329
// A transparent union is, for calling-convention and redeclaration
// purposes, interchangeable with its first member's type (here: int).
typedef union {
  int i;
  float f;
} TU __attribute__((transparent_union));

// Whichever redeclaration provides the *definition* determines the IR
// parameter handling: a TU definition coerces through the union ...
// CHECK-LABEL: define void @f0(i32 %tu.coerce)
// CHECK: %tu = alloca %union.TU, align 4
// CHECK: %coerce.dive = getelementptr inbounds %union.TU, %union.TU* %tu, i32 0, i32 0
// CHECK: store i32 %tu.coerce, i32* %coerce.dive, align 4
void f0(TU tu) {}
void f0(int i);

// CHECK-LABEL: define void @f1(i32 %tu.coerce)
// CHECK: %tu = alloca %union.TU, align 4
// CHECK: %coerce.dive = getelementptr inbounds %union.TU, %union.TU* %tu, i32 0, i32 0
// CHECK: store i32 %tu.coerce, i32* %coerce.dive, align 4
void f1(int i);
void f1(TU tu) {}

// ... while an int definition stores the parameter directly, even when
// another declaration of the same function uses TU.
// CHECK-LABEL: define void @f2(i32 %i)
// CHECK: %i.addr = alloca i32, align 4
// CHECK: store i32 %i, i32* %i.addr, align 4
void f2(TU tu);
void f2(int i) {}

// CHECK-LABEL: define void @f3(i32 %i)
// CHECK: %i.addr = alloca i32, align 4
// CHECK: store i32 %i, i32* %i.addr, align 4
void f3(int i) {}
void f3(TU tu);

// Also test functions with parameters specified K&R style.
// CHECK-LABEL: define void @knrStyle(i32 %tu.coerce)
// CHECK: %tu = alloca %union.TU, align 4
// CHECK: %coerce.dive = getelementptr inbounds %union.TU, %union.TU* %tu, i32 0, i32 0
// CHECK: store i32 %tu.coerce, i32* %coerce.dive, align 4
void knrStyle(int i);
void knrStyle(tu) TU tu; {}
| 678 |
416 | /*
* Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#ifndef _STRING_H_
# error "Never use <secure/_string.h> directly; include <string.h> instead."
#endif
#ifndef _SECURE__STRING_H_
#define _SECURE__STRING_H_
#include <Availability.h>
#include <sys/cdefs.h>
#include <secure/_common.h>
/*
 * Fortified string/memory functions.  When _USE_FORTIFY_LEVEL > 0 each
 * supported function is remapped to its __builtin___*_chk counterpart,
 * which receives the compile-time-known size of the destination object
 * (via __darwin_obsz / __darwin_obsz0) and can trap buffer overflows.
 */
#if _USE_FORTIFY_LEVEL > 0

/* Fall back gracefully on compilers without __has_builtin. */
#ifndef __has_builtin
#define _undef__has_builtin
#define __has_builtin(x) 0
#endif

/* <rdar://problem/12622659> */
/*
 * Some *_chk builtins had incorrect prototypes in older clangs; only use
 * them on compiler versions known to have the fix.
 */
#if defined(__clang__) && \
    ((defined(__apple_build_version__) && __apple_build_version__ >= 4260006) || \
     (!defined(__apple_build_version__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 3))))
#define __HAS_FIXED_CHK_PROTOTYPES 1
#else
#define __HAS_FIXED_CHK_PROTOTYPES 0
#endif

/* memccpy, memcpy, mempcpy, memmove, memset, strcpy, strlcpy, stpcpy,
   strncpy, stpncpy, strcat, strlcat, and strncat */

#if __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000 || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
#if __has_builtin(__builtin___memccpy_chk) && __HAS_FIXED_CHK_PROTOTYPES
#undef memccpy
#define memccpy(dest, src, c, len) \
		__builtin___memccpy_chk (dest, src, c, len, __darwin_obsz0 (dest))
#endif
#endif

#if __has_builtin(__builtin___memcpy_chk) || defined(__GNUC__)
#undef memcpy
#define memcpy(dest, src, len) \
		__builtin___memcpy_chk (dest, src, len, __darwin_obsz0 (dest))
#endif

#if __has_builtin(__builtin___memmove_chk) || defined(__GNUC__)
#undef memmove
#define memmove(dest, src, len) \
		__builtin___memmove_chk (dest, src, len, __darwin_obsz0 (dest))
#endif

#if __has_builtin(__builtin___memset_chk) || defined(__GNUC__)
#undef memset
#define memset(dest, val, len) \
		__builtin___memset_chk (dest, val, len, __darwin_obsz0 (dest))
#endif

#if __has_builtin(__builtin___strcpy_chk) || defined(__GNUC__)
#undef strcpy
#define strcpy(dest, src) \
		__builtin___strcpy_chk (dest, src, __darwin_obsz (dest))
#endif

/* stpcpy/stpncpy are POSIX.1-2008 additions. */
#if __DARWIN_C_LEVEL >= 200809L
#if __has_builtin(__builtin___stpcpy_chk) || defined(__GNUC__)
#undef stpcpy
#define stpcpy(dest, src) \
		__builtin___stpcpy_chk (dest, src, __darwin_obsz (dest))
#endif

#if __has_builtin(__builtin___stpncpy_chk) || __APPLE_CC__ >= 5666 || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#undef stpncpy
#define stpncpy(dest, src, len) \
		__builtin___stpncpy_chk (dest, src, len, __darwin_obsz (dest))
#endif
#endif /* _DARWIN_C_LEVEL >= 200809L */

/* strlcpy/strlcat are BSD extensions, only exposed at full C level. */
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
#if __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000 || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
#if __has_builtin(__builtin___strlcpy_chk) && __HAS_FIXED_CHK_PROTOTYPES
#undef strlcpy
#define strlcpy(dest, src, len) \
		__builtin___strlcpy_chk (dest, src, len, __darwin_obsz (dest))
#endif

#if __has_builtin(__builtin___strlcat_chk) && __HAS_FIXED_CHK_PROTOTYPES
#undef strlcat
#define strlcat(dest, src, len) \
		__builtin___strlcat_chk (dest, src, len, __darwin_obsz (dest))
#endif
#endif /* __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000 || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 */
#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */

#if __has_builtin(__builtin___strncpy_chk) || defined(__GNUC__)
#undef strncpy
#define strncpy(dest, src, len) \
		__builtin___strncpy_chk (dest, src, len, __darwin_obsz (dest))
#endif

#if __has_builtin(__builtin___strcat_chk) || defined(__GNUC__)
#undef strcat
#define strcat(dest, src) \
		__builtin___strcat_chk (dest, src, __darwin_obsz (dest))
#endif

/* strncat checking is skipped on iOS deployment targets before 3.2. */
#if ! (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 32000)
#if __has_builtin(__builtin___strncat_chk) || defined(__GNUC__)
#undef strncat
#define strncat(dest, src, len) \
		__builtin___strncat_chk (dest, src, len, __darwin_obsz (dest))
#endif
#endif

/* Clean up the temporary __has_builtin fallback and helper macro. */
#ifdef _undef__has_builtin
#undef _undef__has_builtin
#undef __has_builtin
#endif

#undef __HAS_FIXED_CHK_PROTOTYPES

#endif /* _USE_FORTIFY_LEVEL > 0 */
#endif /* _SECURE__STRING_H_ */
| 2,109 |
2,323 | #ifndef CORSIX_TH_TH_STRINGS_H_
#define CORSIX_TH_TH_STRINGS_H_
#include "config.h"
#include <cwctype>
#include <stdexcept>
// U+FFFD REPLACEMENT CHARACTER, returned for malformed UTF-8 sequences.
constexpr unsigned int invalid_char_codepoint = 0xFFFD;
// U+3000 IDEOGRAPHIC SPACE, treated as whitespace in addition to iswspace().
constexpr unsigned int ideographic_space_codepoint = 0x3000;
//! Count the leading (most significant) set bits of a byte and strip them.
/*!
    The high bits of a UTF-8 lead byte encode the sequence length.  This
    shifts \p byte left once per leading 1-bit, then shifts it back right by
    the same amount, leaving the payload bits with the leading 1s cleared.
    \param byte (in/out) byte to examine; rewritten with its leading set
           bits removed.
    \return number of leading set bits (0..8).
*/
size_t discard_leading_set_bits(uint8_t& byte) {
  size_t leading = 0;
  for (; (byte & 0x80) != 0; ++leading) {
    byte = static_cast<uint8_t>(byte << 1);
  }
  byte = static_cast<uint8_t>(byte >> leading);
  return leading;
}
//! Decode the UTF-8 code point at \p sString and advance past it.
/*!
    \param sString (in/out) current position; advanced by the number of
           bytes consumed (at least one, even for malformed input).
    \param end one-past-the-end of the buffer.
    \return the decoded code point, or invalid_char_codepoint if the bytes
            do not form a valid UTF-8 sequence.
    \throws std::out_of_range if \p sString is already at or past \p end.
*/
unsigned int next_utf8_codepoint(const char*& sString, const char* end) {
  if (sString >= end) {
    throw std::out_of_range("pointer is outside of string");
  }
  uint8_t lead = *reinterpret_cast<const uint8_t*>(sString++);
  const size_t seq_length = discard_leading_set_bits(lead);
  // Exactly one leading set bit marks a continuation byte, which cannot
  // start a sequence; more than four leading bits is not legal UTF-8.
  if (seq_length == 1 || seq_length > 4) {
    return invalid_char_codepoint;
  }
  unsigned int result = lead;
  for (size_t byte_index = 1; byte_index < seq_length; ++byte_index) {
    if (sString == end) {
      // Sequence truncated by the end of the buffer.
      return invalid_char_codepoint;
    }
    uint8_t trail = *reinterpret_cast<const uint8_t*>(sString++);
    if (discard_leading_set_bits(trail) != 1) {
      // Expected a continuation byte (10xxxxxx) but found something else.
      return invalid_char_codepoint;
    }
    result = (result << 6) | trail;
  }
  return result;
}
//! Decode the UTF-8 code point at \p sString without advancing the
//! caller's pointer: \p sString is taken by value, so the increment done
//! inside next_utf8_codepoint() stays local to this call (a "peek").
unsigned int decode_utf8(const char* sString, const char* end) {
  return next_utf8_codepoint(sString, end);
}
//! Step backwards to the start of the code point preceding \p sString.
/*!
    Walks back over UTF-8 continuation bytes (bit pattern 10xxxxxx) until a
    lead or ASCII byte is reached.  The caller must guarantee there is at
    least one complete code point before \p sString.
    \return pointer to the first byte of the previous code point.
*/
const char* previous_utf8_codepoint(const char* sString) {
  --sString;
  while (((*sString) & 0xC0) == 0x80) {
    --sString;
  }
  return sString;
}
//! Advance \p sString past any leading whitespace code points.
/*!
    Whitespace is anything std::iswspace() accepts, plus U+3000
    IDEOGRAPHIC SPACE.  \p sString is left on the first non-whitespace
    code point, or at \p end if the rest of the string is whitespace.
*/
void skip_utf8_whitespace(const char*& sString, const char* end) {
  while (sString < end) {
    // Peek at the next code point without consuming it.
    const unsigned int code = decode_utf8(sString, end);
    if (!(std::iswspace(code) || code == ideographic_space_codepoint)) {
      return;
    }
    // It was whitespace: consume it and look at the next one.
    next_utf8_codepoint(sString, end);
  }
}
// Mapping for Unicode code points U+00A0..U+00FF to code page 437 bytes;
// 0x3F ('?') marks characters that have no CP437 representation.
constexpr uint16_t unicode_to_cp437_table[0x60] = {
    0xFF, 0xAD, 0x9B, 0x9C, 0x3F, 0x9D, 0x3F, 0x3F, 0x3F, 0x3F, 0xA6, 0xAE,
    0xAA, 0x3F, 0x3F, 0x3F, 0xF8, 0xF1, 0xFD, 0x3F, 0x3F, 0x3F, 0x3F, 0xFA,
    0x3F, 0x3F, 0xA7, 0xAF, 0xAC, 0xAB, 0x3F, 0xA8, 0x3F, 0x3F, 0x3F, 0x3F,
    0x8E, 0x8F, 0x3F, 0x80, 0x3F, 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
    0x3F, 0xA5, 0x3F, 0x3F, 0x3F, 0x3F, 0x99, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
    0x9A, 0x3F, 0x3F, 0xE1, 0x85, 0xA0, 0x83, 0x3F, 0x84, 0x86, 0x91, 0x87,
    0x8A, 0x82, 0x88, 0x89, 0x8D, 0xA1, 0x8C, 0x8B, 0x3F, 0xA4, 0x95, 0xA2,
    0x93, 0x3F, 0x94, 0xF6, 0x3F, 0x97, 0xA3, 0x96, 0x81, 0x3F, 0x3F, 0x98};

//! Translate a Unicode code point to its code page 437 equivalent.
/*!
    ASCII maps to itself, the Latin-1 range goes through the table above,
    and a handful of scattered higher code points (Greek letters, math
    symbols, a few glyphs) are matched individually.  Anything else maps
    to 0x3F ('?').
*/
unsigned int unicode_to_codepage_437(unsigned int iCodePoint) {
  if (iCodePoint < 0x80) return iCodePoint;  // ASCII: identity mapping.
  if (iCodePoint < 0xA0) return '?';         // C1 controls: no mapping.
  if (iCodePoint < 0x100) return unicode_to_cp437_table[iCodePoint - 0xA0];
  // Higher code points that CP437 happens to provide.
  struct Cp437Mapping {
    unsigned int unicode;
    unsigned int cp437;
  };
  static constexpr Cp437Mapping special_mappings[] = {
      {0x0192, 0x9F}, {0x0393, 0xE2}, {0x0398, 0xE9}, {0x03A3, 0xE4},
      {0x03A6, 0xE8}, {0x03A9, 0xEA}, {0x03B1, 0xE0}, {0x03B4, 0xEB},
      {0x03B5, 0xEE}, {0x03BC, 0xE6}, {0x03C0, 0xE3}, {0x03C3, 0xE5},
      {0x03C4, 0xE7}, {0x03C6, 0xED}, {0x207F, 0xFC}, {0x20A7, 0x9E},
      {0x2219, 0xF9}, {0x221A, 0xFB}, {0x221E, 0xEC}, {0x2229, 0xEF},
      {0x2248, 0xF7}, {0x2261, 0xF0}, {0x2264, 0xF3}, {0x2265, 0xF2},
      {0x2310, 0xA9}, {0x2320, 0xF4}, {0x2321, 0xF5}, {0x25A0, 0xFE},
  };
  for (const Cp437Mapping& mapping : special_mappings) {
    if (mapping.unicode == iCodePoint) {
      return mapping.cp437;
    }
  }
  return 0x3F;  // Everything else becomes '?'.
}
#endif
| 2,052 |
12,824 | package anns;
import java.lang.annotation.*;
/**
 * Repeatable, runtime-visible annotation carrying a name/value pair.
 * Repeated occurrences on the same element are wrapped in {@link Container}.
 */
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(Ann_0.Container.class)
public @interface Ann_0 {
  /** Key of this annotation occurrence. */
  String name();
  /** Value associated with {@link #name()}. */
  String value();
  /** Implicit container type holding repeated {@code @Ann_0} occurrences. */
  @Retention(RetentionPolicy.RUNTIME)
  public static @interface Container {
    public Ann_0[] value() default {};
  }
}
1,899 | <gh_stars>1000+
#pragma once
#include <math.h>
#include <float.h>
#include <opencv2/opencv.hpp>
#include "Types.h"
namespace bgslibrary
{
namespace algorithms
{
namespace lb
{
      // Abstract base class for the "lb" background-subtraction models.
      // Holds the source / background / foreground images and defines the
      // Init()/Update() hooks implemented by each concrete model.
      // NOTE(review): InitModel/UpdateModel presumably delegate to the
      // Init()/Update() hooks -- confirm in the implementation file.
      class BGModel
      {
      public:
        // width/height: frame dimensions in pixels for this model.
        BGModel(int width, int height);
        virtual ~BGModel();
        void InitModel(IplImage* image);
        void UpdateModel(IplImage* image);
        // Optional hook for model-specific tuning; meaning of (id, value)
        // is defined by each subclass. Default implementation is a no-op.
        virtual void setBGModelParameter(int id, int value) {};
        virtual IplImage* GetSrc();
        virtual IplImage* GetFG();
        virtual IplImage* GetBG();
      protected:
        IplImage* m_SrcImage;  // source image (see GetSrc())
        IplImage* m_BGImage;   // background image (see GetBG())
        IplImage* m_FGImage;   // foreground image (see GetFG())
        const unsigned int m_width;
        const unsigned int m_height;
        // Model-specific initialization and per-frame update, implemented
        // by concrete subclasses.
        virtual void Init() = 0;
        virtual void Update() = 0;
      };
}
}
}
| 382 |
1,337 | <gh_stars>1000+
/*
* Copyright (c) 2008-2016 Haulmont.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.haulmont.cuba.security.global;
import com.haulmont.cuba.security.entity.Constraint;
import com.haulmont.cuba.security.entity.ConstraintOperationType;
import com.haulmont.cuba.security.entity.ConstraintCheckType;
import java.io.Serializable;
import java.util.UUID;
/**
 * Immutable, serializable snapshot of a security {@link Constraint} entity.
 * Captures the constraint's clauses and type flags so they can be carried
 * around (e.g. cached) without holding on to the persistent entity itself.
 */
public class ConstraintData implements Serializable {
    // Identifier of the source Constraint entity.
    protected final UUID id;
    // Constraint code.
    protected final String code;
    // Entity operation the constraint applies to (read/create/update/...).
    protected final ConstraintOperationType operationType;
    // How the constraint is checked (database, in-memory, or both).
    protected final ConstraintCheckType checkType;
    // JPQL "join" fragment, if any.
    protected final String join;
    // JPQL "where" fragment, if any.
    protected final String whereClause;
    // Groovy script for in-memory checking, if any.
    protected final String groovyScript;
    /**
     * Copies all relevant fields from the given persistent constraint.
     */
    public ConstraintData(Constraint constraint) {
        this.id = constraint.getId();
        this.code = constraint.getCode();
        this.join = constraint.getJoinClause();
        this.whereClause = constraint.getWhereClause();
        this.groovyScript = constraint.getGroovyScript();
        this.operationType = constraint.getOperationType();
        this.checkType = constraint.getCheckType();
    }
    public String getCode() {
        return code;
    }
    public String getJoin() {
        return join;
    }
    public String getWhereClause() {
        return whereClause;
    }
    public String getGroovyScript() {
        return groovyScript;
    }
    public ConstraintOperationType getOperationType() {
        return operationType;
    }
    public ConstraintCheckType getCheckType() {
        return checkType;
    }
    public UUID getId() {
        return id;
    }
} | 710 |
34,359 | /*++
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
Module Name:
- SettingContainer
Abstract:
- This is a XAML container that wraps settings in the Settings UI.
It interacts with the inheritance logic from the TerminalSettingsModel
and represents it in the Settings UI.
Author(s):
- <NAME> - January 2021
--*/
#pragma once
#include "SettingContainer.g.h"
#include "Utils.h"
namespace winrt::Microsoft::Terminal::Settings::Editor::implementation
{
    // XAML content control that wraps a single setting in the Settings UI
    // and surfaces the setting's inheritance/override state (see the file
    // header comment).
    struct SettingContainer : SettingContainerT<SettingContainer>
    {
    public:
        SettingContainer();
        void OnApplyTemplate();
        // Dependency properties bound from XAML.
        DEPENDENCY_PROPERTY(Windows::Foundation::IInspectable, Header);
        DEPENDENCY_PROPERTY(hstring, HelpText);
        DEPENDENCY_PROPERTY(hstring, CurrentValue);
        DEPENDENCY_PROPERTY(bool, HasSettingValue);
        DEPENDENCY_PROPERTY(IInspectable, SettingOverrideSource);
        // Raised when the user asks to clear this setting's override value.
        TYPED_EVENT(ClearSettingValue, Editor::SettingContainer, Windows::Foundation::IInspectable);
    private:
        static void _InitializeProperties();
        static void _OnHasSettingValueChanged(const Windows::UI::Xaml::DependencyObject& d, const Windows::UI::Xaml::DependencyPropertyChangedEventArgs& e);
        static hstring _GenerateOverrideMessage(const IInspectable& settingOrigin);
        void _UpdateOverrideSystem();
    };
namespace winrt::Microsoft::Terminal::Settings::Editor::factory_implementation
{
BASIC_FACTORY(SettingContainer);
}
| 556 |
777 | <reponame>google-ar/chromium
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_GL_GL_BINDINGS_H_
#define UI_GL_GL_BINDINGS_H_
// Includes the platform independent and platform dependent GL headers.
// Only include this in cc files. It pulls in system headers, including
// the X11 headers on linux, which define all kinds of macros that are
// liable to cause conflicts.
// GL headers may include inttypes.h and so we need to ensure that
// __STDC_FORMAT_MACROS is defined in order for //base/format_macros.h to
// function correctly. See comment and #error message in //base/format_macros.h
// for details.
#if defined(OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
#define __STDC_FORMAT_MACROS
#endif
#include <GL/gl.h>
#include <GL/glext.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <stdint.h>
#include <string>
#include "base/logging.h"
#include "base/threading/thread_local.h"
#include "build/build_config.h"
#include "ui/gl/gl_export.h"
// The standard OpenGL native extension headers are also included.
#if defined(OS_WIN)
#include <GL/wglext.h>
#elif defined(OS_MACOSX)
#include <OpenGL/OpenGL.h>
#elif defined(USE_GLX)
#include <GL/glx.h>
#include <GL/glxext.h>
#endif
// Undefine some macros defined by X headers. This is why this file should only
// be included in .cc files.
#undef Bool
#undef None
#undef Status
// GLES2 defines not part of Desktop GL
// Shader Precision-Specified Types
#define GL_LOW_FLOAT 0x8DF0
#define GL_MEDIUM_FLOAT 0x8DF1
#define GL_HIGH_FLOAT 0x8DF2
#define GL_LOW_INT 0x8DF3
#define GL_MEDIUM_INT 0x8DF4
#define GL_HIGH_INT 0x8DF5
#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
#define GL_MAX_VARYING_VECTORS 0x8DFC
#define GL_SHADER_BINARY_FORMATS 0x8DF8
#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
#define GL_SHADER_COMPILER 0x8DFA
#define GL_RGB565 0x8D62
#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES 0x8B8B
#define GL_RGB8_OES 0x8051
#define GL_RGBA8_OES 0x8058
#define GL_HALF_FLOAT_OES 0x8D61
// GL_OES_EGL_image_external
#define GL_TEXTURE_EXTERNAL_OES 0x8D65
#define GL_SAMPLER_EXTERNAL_OES 0x8D66
#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67
#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68
// GL_ANGLE_translated_shader_source
#define GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE 0x93A0
#define GL_UNPACK_COLORSPACE_CONVERSION_CHROMIUM 0x9243
#define GL_BIND_GENERATES_RESOURCE_CHROMIUM 0x9244
// GL_ANGLE_texture_usage
#define GL_TEXTURE_USAGE_ANGLE 0x93A2
#define GL_FRAMEBUFFER_ATTACHMENT_ANGLE 0x93A3
// GL_EXT_texture_storage
#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F
#define GL_ALPHA8_EXT 0x803C
#define GL_LUMINANCE8_EXT 0x8040
#define GL_LUMINANCE8_ALPHA8_EXT 0x8045
#define GL_RGBA32F_EXT 0x8814
#define GL_RGB32F_EXT 0x8815
#define GL_ALPHA32F_EXT 0x8816
#define GL_LUMINANCE32F_EXT 0x8818
#define GL_LUMINANCE_ALPHA32F_EXT 0x8819
#define GL_RGBA16F_EXT 0x881A
#define GL_RGB16F_EXT 0x881B
#define GL_RG16F_EXT 0x822F
#define GL_R16F_EXT 0x822D
#define GL_ALPHA16F_EXT 0x881C
#define GL_LUMINANCE16F_EXT 0x881E
#define GL_LUMINANCE_ALPHA16F_EXT 0x881F
#define GL_R32F_EXT 0x822E
#define GL_RG32F_EXT 0x8230
#define GL_BGRA8_EXT 0x93A1
// GL_ANGLE_instanced_arrays
#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE 0x88FE
// GL_EXT_occlusion_query_boolean
#define GL_ANY_SAMPLES_PASSED_EXT 0x8C2F
#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT 0x8D6A
#define GL_CURRENT_QUERY_EXT 0x8865
#define GL_QUERY_RESULT_EXT 0x8866
#define GL_QUERY_RESULT_AVAILABLE_EXT 0x8867
// GL_CHROMIUM_command_buffer_query
#define GL_COMMANDS_ISSUED_CHROMIUM 0x6004
/* GL_CHROMIUM_get_error_query */
#define GL_GET_ERROR_QUERY_CHROMIUM 0x6003
/* GL_CHROMIUM_command_buffer_latency_query */
#define GL_LATENCY_QUERY_CHROMIUM 0x6007
/* GL_CHROMIUM_async_pixel_transfers */
#define GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM 0x6006
// GL_CHROMIUM_sync_query
#define GL_COMMANDS_COMPLETED_CHROMIUM 0x84F7
// GL_CHROMIUM_gpu_memory_buffer_image
#define GL_READ_WRITE_CHROMIUM 0x78F2
// GL_CHROMIUM_ycrcb_420_image
#define GL_RGB_YCRCB_420_CHROMIUM 0x78FA
// GL_CHROMIUM_ycbcr_422_image
#define GL_RGB_YCBCR_422_CHROMIUM 0x78FB
// GL_CHROMIUM_ycbcr_420v_image
#define GL_RGB_YCBCR_420V_CHROMIUM 0x78FC
// GL_CHROMIUM_schedule_overlay_plane
#define GL_OVERLAY_TRANSFORM_NONE_CHROMIUM 0x9245
#define GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM 0x9246
#define GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM 0x9247
#define GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM 0x9248
#define GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM 0x9249
#define GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM 0x924A
// GL_CHROMIUM_subscribe_uniforms
#define GL_SUBSCRIBED_VALUES_BUFFER_CHROMIUM 0x924B
#define GL_MOUSE_POSITION_CHROMIUM 0x924C
// GL_OES_texure_3D
#define GL_SAMPLER_3D_OES 0x8B5F
// GL_OES_depth24
#define GL_DEPTH_COMPONENT24_OES 0x81A6
// GL_OES_depth32
#define GL_DEPTH_COMPONENT32_OES 0x81A7
// GL_OES_packed_depth_stencil
#ifndef GL_DEPTH24_STENCIL8_OES
#define GL_DEPTH24_STENCIL8_OES 0x88F0
#endif
#ifndef GL_DEPTH24_STENCIL8
#define GL_DEPTH24_STENCIL8 0x88F0
#endif
// GL_OES_compressed_ETC1_RGB8_texture
#define GL_ETC1_RGB8_OES 0x8D64
// GL_AMD_compressed_ATC_texture
#define GL_ATC_RGB_AMD 0x8C92
#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93
#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE
// GL_IMG_texture_compression_pvrtc
#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
// GL_OES_vertex_array_object
#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5
// GL_CHROMIUM_pixel_transfer_buffer_object
#define GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM 0x78EC
#define GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM 0x78ED
#define GL_PIXEL_PACK_TRANSFER_BUFFER_BINDING_CHROMIUM 0x78EE
#define GL_PIXEL_UNPACK_TRANSFER_BUFFER_BINDING_CHROMIUM 0x78EF
/* GL_EXT_discard_framebuffer */
#ifndef GL_EXT_discard_framebuffer
#define GL_COLOR_EXT 0x1800
#define GL_DEPTH_EXT 0x1801
#define GL_STENCIL_EXT 0x1802
#endif
// GL_EXT_sRGB
#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210
// GL_ARB_get_program_binary
#define PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257
// GL_OES_get_program_binary
#define GL_PROGRAM_BINARY_LENGTH_OES 0x8741
#define GL_NUM_PROGRAM_BINARY_FORMATS_OES 0x87FE
#define GL_PROGRAM_BINARY_FORMATS_OES 0x87FF
#ifndef GL_EXT_multisampled_render_to_texture
#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56
#define GL_MAX_SAMPLES_EXT 0x8D57
#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C
#endif
#ifndef GL_IMG_multisampled_render_to_texture
#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133
#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134
#define GL_MAX_SAMPLES_IMG 0x9135
#define GL_TEXTURE_SAMPLES_IMG 0x9136
#endif
#ifndef GL_CHROMIUM_path_rendering
#define GL_CHROMIUM_path_rendering 1
// These match the corresponding values in NV_path_rendering
// extension, eg tokens with CHROMIUM replaced with NV.
#define GL_PATH_MODELVIEW_MATRIX_CHROMIUM 0x0BA6
#define GL_PATH_PROJECTION_MATRIX_CHROMIUM 0x0BA7
#define GL_PATH_MODELVIEW_CHROMIUM 0x1700
#define GL_PATH_PROJECTION_CHROMIUM 0x1701
#define GL_FLAT_CHROMIUM 0x1D00
#define GL_CLOSE_PATH_CHROMIUM 0x00
#define GL_MOVE_TO_CHROMIUM 0x02
#define GL_LINE_TO_CHROMIUM 0x04
#define GL_QUADRATIC_CURVE_TO_CHROMIUM 0x0A
#define GL_CUBIC_CURVE_TO_CHROMIUM 0x0C
#define GL_CONIC_CURVE_TO_CHROMIUM 0x1A
#define GL_EYE_LINEAR_CHROMIUM 0x2400
#define GL_OBJECT_LINEAR_CHROMIUM 0x2401
#define GL_CONSTANT_CHROMIUM 0x8576
#define GL_PATH_STROKE_WIDTH_CHROMIUM 0x9075
#define GL_PATH_END_CAPS_CHROMIUM 0x9076
#define GL_PATH_JOIN_STYLE_CHROMIUM 0x9079
#define GL_PATH_MITER_LIMIT_CHROMIUM 0x907a
#define GL_PATH_STROKE_BOUND_CHROMIUM 0x9086
#define GL_COUNT_UP_CHROMIUM 0x9088
#define GL_COUNT_DOWN_CHROMIUM 0x9089
#define GL_CONVEX_HULL_CHROMIUM 0x908B
#define GL_BOUNDING_BOX_CHROMIUM 0x908D
#define GL_TRANSLATE_X_CHROMIUM 0x908E
#define GL_TRANSLATE_Y_CHROMIUM 0x908F
#define GL_TRANSLATE_2D_CHROMIUM 0x9090
#define GL_TRANSLATE_3D_CHROMIUM 0x9091
#define GL_AFFINE_2D_CHROMIUM 0x9092
#define GL_AFFINE_3D_CHROMIUM 0x9094
#define GL_TRANSPOSE_AFFINE_2D_CHROMIUM 0x9096
#define GL_TRANSPOSE_AFFINE_3D_CHROMIUM 0x9098
#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM 0x909C
#define GL_SQUARE_CHROMIUM 0x90a3
#define GL_ROUND_CHROMIUM 0x90a4
#define GL_BEVEL_CHROMIUM 0x90a6
#define GL_MITER_REVERT_CHROMIUM 0x90a7
#define GL_PATH_STENCIL_FUNC_CHROMIUM 0x90B7
#define GL_PATH_STENCIL_REF_CHROMIUM 0x90B8
#define GL_PATH_STENCIL_VALUE_MASK_CHROMIUM 0x90B9
#endif
#ifndef GL_EXT_multisample_compatibility
#define GL_EXT_multisample_compatibility 1
#define GL_MULTISAMPLE_EXT 0x809D
#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F
#endif /* GL_EXT_multisample_compatibility */
#ifndef GL_CHROMIUM_framebuffer_mixed_samples
#define GL_CHROMIUM_framebuffer_mixed_samples 1
#define GL_COVERAGE_MODULATION_CHROMIUM 0x9332
#endif /* GL_CHROMIUM_framebuffer_mixed_samples */
#ifndef GL_KHR_blend_equation_advanced
#define GL_KHR_blend_equation_advanced 1
#define GL_COLORBURN_KHR 0x929A
#define GL_COLORDODGE_KHR 0x9299
#define GL_DARKEN_KHR 0x9297
#define GL_DIFFERENCE_KHR 0x929E
#define GL_EXCLUSION_KHR 0x92A0
#define GL_HARDLIGHT_KHR 0x929B
#define GL_HSL_COLOR_KHR 0x92AF
#define GL_HSL_HUE_KHR 0x92AD
#define GL_HSL_LUMINOSITY_KHR 0x92B0
#define GL_HSL_SATURATION_KHR 0x92AE
#define GL_LIGHTEN_KHR 0x9298
#define GL_MULTIPLY_KHR 0x9294
#define GL_OVERLAY_KHR 0x9296
#define GL_SCREEN_KHR 0x9295
#define GL_SOFTLIGHT_KHR 0x929C
#endif /* GL_KHR_blend_equation_advanced */
#ifndef GL_KHR_blend_equation_advanced_coherent
#define GL_KHR_blend_equation_advanced_coherent 1
#define GL_BLEND_ADVANCED_COHERENT_KHR 0x9285
#endif /* GL_KHR_blend_equation_advanced_coherent */
#ifndef GL_EXT_disjoint_timer_query
#define GL_EXT_disjoint_timer_query 1
#define GL_QUERY_COUNTER_BITS_EXT 0x8864
#define GL_TIME_ELAPSED_EXT 0x88BF
#define GL_TIMESTAMP_EXT 0x8E28
#define GL_GPU_DISJOINT_EXT 0x8FBB
#endif
#ifndef GL_KHR_robustness
#define GL_KHR_robustness 1
#define GL_CONTEXT_ROBUST_ACCESS_KHR 0x90F3
#define GL_LOSE_CONTEXT_ON_RESET_KHR 0x8252
#define GL_GUILTY_CONTEXT_RESET_KHR 0x8253
#define GL_INNOCENT_CONTEXT_RESET_KHR 0x8254
#define GL_UNKNOWN_CONTEXT_RESET_KHR 0x8255
#define GL_RESET_NOTIFICATION_STRATEGY_KHR 0x8256
#define GL_NO_RESET_NOTIFICATION_KHR 0x8261
#define GL_CONTEXT_LOST_KHR 0x0507
#endif /* GL_KHR_robustness */
#ifndef GL_EXT_texture_rg
#define GL_EXT_texture_rg 1
#define GL_RED_EXT 0x1903
#define GL_RG_EXT 0x8227
#define GL_R8_EXT 0x8229
#define GL_RG8_EXT 0x822B
#endif /* GL_EXT_texture_rg */
// This is from NV_path_rendering, but the Mesa GL header is not up to date with
// the most recent
// version of the extension. This definition could be removed once glext.h
// r27498 or later is
// imported.
#ifndef GL_FRAGMENT_INPUT_NV
#define GL_FRAGMENT_INPUT_NV 0x936D
#endif
#ifndef GL_EXT_blend_func_extended
#define GL_EXT_blend_func_extended 1
#define GL_SRC_ALPHA_SATURATE_EXT 0x0308
#define GL_SRC1_ALPHA_EXT 0x8589 // OpenGL 1.5 token value
#define GL_SRC1_COLOR_EXT 0x88F9
#define GL_ONE_MINUS_SRC1_COLOR_EXT 0x88FA
#define GL_ONE_MINUS_SRC1_ALPHA_EXT 0x88FB
#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT 0x88FC
#endif /* GL_EXT_blend_func_extended */
#define GL_GLEXT_PROTOTYPES 1
#if defined(OS_WIN)
#define GL_BINDING_CALL WINAPI
#else
#define GL_BINDING_CALL
#endif
#define GL_SERVICE_LOG(args) DLOG(INFO) << args;
#if defined(NDEBUG)
#define GL_SERVICE_LOG_CODE_BLOCK(code)
#else
#define GL_SERVICE_LOG_CODE_BLOCK(code) code
#endif
// Forward declare OSMesa types.
typedef struct osmesa_context *OSMesaContext;
typedef void (*OSMESAproc)();
// Forward declare EGL types.
typedef uint64_t EGLuint64CHROMIUM;
#include "gl_bindings_autogen_gl.h"
#include "gl_bindings_autogen_osmesa.h"
#if defined(USE_EGL)
#include "gl_bindings_autogen_egl.h"
#endif
#if defined(OS_WIN)
#include "gl_bindings_autogen_wgl.h"
#endif
#if defined(USE_GLX)
#include "gl_bindings_autogen_glx.h"
#endif
namespace gl {
// Binding table for the core GL entry points.  The Initialize*Bindings()
// methods populate the function-pointer structs at runtime; the autogenerated
// bindings (gl_bindings_autogen_gl.h) call through `fn`.
struct GL_EXPORT DriverGL {
  void InitializeStaticBindings();
  void InitializeCustomDynamicBindings(GLContext* context);
  void InitializeDebugBindings();
  void InitializeNullDrawBindings();
  // TODO(danakj): Remove this when all test suites are using null-draw.
  bool HasInitializedNullDrawBindings();
  bool SetNullDrawBindingsEnabled(bool enabled);
  void ClearBindings();
  ProcsGL fn;        // Active entry points.
  ProcsGL orig_fn;   // Saved entry points — presumably restored when a
                     // debug/null-draw wrapper is disabled; confirm in .cc.
  ProcsGL debug_fn;  // Wrappers installed by InitializeDebugBindings().
  ExtensionsGL ext;  // Per-extension availability flags.
  bool null_draw_bindings_enabled;
 private:
  void InitializeDynamicBindings(GLContext* context);
};
// Binding table for the software OSMesa entry points.
struct GL_EXPORT DriverOSMESA {
  void InitializeStaticBindings();
  void InitializeExtensionBindings();
  void InitializeDebugBindings();
  void ClearBindings();
  ProcsOSMESA fn;
  ProcsOSMESA debug_fn;
  ExtensionsOSMESA ext;
 private:
  static std::string GetPlatformExtensions();
};
#if defined(OS_WIN)
// Binding table for the Windows WGL entry points.
struct GL_EXPORT DriverWGL {
  void InitializeStaticBindings();
  void InitializeExtensionBindings();
  void InitializeDebugBindings();
  void ClearBindings();
  ProcsWGL fn;
  ProcsWGL debug_fn;
  ExtensionsWGL ext;
 private:
  static std::string GetPlatformExtensions();
};
#endif
#if defined(USE_EGL)
// Binding table for the EGL entry points.  EGL exposes both client-extension
// and platform-extension queries, hence the extra public helpers compared to
// the other drivers.
struct GL_EXPORT DriverEGL {
  void InitializeStaticBindings();
  void InitializeClientExtensionBindings();
  void InitializeExtensionBindings();
  void InitializeDebugBindings();
  void ClearBindings();
  ProcsEGL fn;
  ProcsEGL debug_fn;
  ExtensionsEGL ext;
  static std::string GetPlatformExtensions();
  static std::string GetClientExtensions();
};
#endif
#if defined(USE_GLX)
// Binding table for the X11 GLX entry points.
struct GL_EXPORT DriverGLX {
  void InitializeStaticBindings();
  void InitializeExtensionBindings();
  void InitializeDebugBindings();
  void ClearBindings();
  ProcsGLX fn;
  ProcsGLX debug_fn;
  ExtensionsGLX ext;
 private:
  static std::string GetPlatformExtensions();
};
#endif
// This #define is here to support autogenerated code.
#define g_current_gl_context g_current_gl_context_tls->Get()
// Global driver/API instances shared by the autogenerated bindings.  The
// current GL API pointer is tracked per thread (ThreadLocalPointer); the
// other "current" pointers and the driver tables are process-wide globals.
GL_EXPORT extern base::ThreadLocalPointer<GLApi>* g_current_gl_context_tls;
GL_EXPORT extern OSMESAApi* g_current_osmesa_context;
GL_EXPORT extern DriverGL g_driver_gl;
GL_EXPORT extern DriverOSMESA g_driver_osmesa;
#if defined(USE_EGL)
GL_EXPORT extern EGLApi* g_current_egl_context;
GL_EXPORT extern DriverEGL g_driver_egl;
#endif
#if defined(OS_WIN)
GL_EXPORT extern WGLApi* g_current_wgl_context;
GL_EXPORT extern DriverWGL g_driver_wgl;
#endif
#if defined(USE_GLX)
GL_EXPORT extern GLXApi* g_current_glx_context;
GL_EXPORT extern DriverGLX g_driver_glx;
#endif
} // namespace gl
#endif // UI_GL_GL_BINDINGS_H_
| 8,959 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Bonneuil-sur-Marne","circ":"1ère circonscription","dpt":"Val-de-Marne","inscrits":8972,"abs":5484,"votants":3488,"blancs":83,"nuls":22,"exp":3383,"res":[{"nuance":"COM","nom":"M. <NAME>","voix":1112},{"nuance":"REM","nom":"<NAME>","voix":825},{"nuance":"FI","nom":"M. <NAME>","voix":375},{"nuance":"FN","nom":"M. <NAME>","voix":350},{"nuance":"LR","nom":"M. <NAME>","voix":255},{"nuance":"ECO","nom":"Mme <NAME>","voix":140},{"nuance":"SOC","nom":"M. <NAME>","voix":131},{"nuance":"DLF","nom":"Mme <NAME>","voix":51},{"nuance":"DIV","nom":"<NAME>","voix":39},{"nuance":"DVD","nom":"M. <NAME>","voix":29},{"nuance":"EXG","nom":"M. <NAME>","voix":25},{"nuance":"DIV","nom":"M. <NAME>","voix":18},{"nuance":"DIV","nom":"Mme <NAME>","voix":15},{"nuance":"EXG","nom":"M. <NAME>","voix":9},{"nuance":"ECO","nom":"Mme <NAME>","voix":9}]} | 369 |
1,162 | package io.digdag.server.rs;
import com.google.common.base.Optional;
public class QueryParamValidator
{
    /**
     * Validates an optional {@code page_size} query parameter.
     *
     * @param pageSize the requested page size, if present
     * @param maxPageSize the largest page size the endpoint allows
     * @param defaultPageSize the value to use when {@code pageSize} is absent
     * @return the validated page size
     * @throws IllegalArgumentException if the requested page size exceeds
     *         {@code maxPageSize} (mapped by the caller to a 400 response)
     */
    public static int validatePageSize(Optional<Integer> pageSize, int maxPageSize, int defaultPageSize)
            throws IllegalArgumentException
    {
        if (!pageSize.isPresent()) { return defaultPageSize; }

        int pageSizeValue = pageSize.get().intValue();
        if (pageSizeValue > maxPageSize) {
            // Concatenate the unwrapped int, not the Optional itself —
            // the Optional would render as "Optional.of(N)" in the message.
            String message = "Your specified page_size is " + pageSizeValue + ", " +
                    "but it is larger than MAX_PAGE_SIZE: " + maxPageSize + ". " +
                    "You must specify page_size with a number which is smaller than " + maxPageSize + ".";
            // This error results 400 response
            throw new IllegalArgumentException(message);
        }
        else {
            return pageSizeValue;
        }
    }
}
| 354 |
880 | <filename>logback-android/src/main/java/ch/qos/logback/classic/joran/action/FindIncludeAction.java
/**
* Copyright 2019 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.qos.logback.classic.joran.action;
import java.io.InputStream;
import java.net.URL;
import org.xml.sax.Attributes;
import ch.qos.logback.core.joran.action.IncludeAction;
import ch.qos.logback.core.joran.event.SaxEventRecorder;
import ch.qos.logback.core.joran.spi.ActionException;
import ch.qos.logback.core.joran.spi.InterpretationContext;
import ch.qos.logback.core.joran.spi.JoranException;
/**
* Action that searches child includes until the first found
* path is returned.
*
* @author <NAME>
*/
public class FindIncludeAction extends IncludeAction {

  private static final int EVENT_OFFSET = 1;

  public FindIncludeAction() {
    setEventOffset(EVENT_OFFSET);
  }

  @Override
  public void begin(InterpretationContext ec, String name, Attributes attributes)
      throws ActionException {
    // intentionally empty: the search result is consumed in end()
  }

  @Override
  public void end(InterpretationContext ic, String name) throws ActionException {
    // Only act when a child ConditionalIncludeAction left its state on top
    // of the interpretation stack.
    if (ic.isEmpty() || !(ic.peekObject() instanceof ConditionalIncludeAction.State)) {
      return;
    }
    ConditionalIncludeAction.State includeState = (ConditionalIncludeAction.State) ic.popObject();
    URL foundUrl = includeState.getUrl();
    if (foundUrl == null) {
      addInfo("No paths found from includes");
      return;
    }
    addInfo("Path found [" + foundUrl.toString() + "]");
    try {
      processInclude(ic, foundUrl);
    } catch (JoranException e) {
      addError("Failed to process include [" + foundUrl.toString() + "]", e);
    }
  }

  /**
   * Creates a {@link SaxEventRecorder} for the given input stream.
   *
   * @return the newly created recorder
   */
  @Override
  protected SaxEventRecorder createRecorder(InputStream in, URL url) {
    return new SaxEventRecorder(getContext());
  }
}
| 811 |
8,828 | <filename>codelabs/gke-source-to-prod/front50/pipelines/205a774a-2869-452a-9050-5fb95ae6624a/specification.json
{"expectedArtifacts":[{"defaultArtifact":{"kind":"default.gcs","name":"{%BUCKET_URI%}/manifests/frontend.yml","reference":"{%BUCKET_URI%}/manifests/frontend.yml","type":"gcs/object"},"id":"dfc04116-f241-4af7-ad5f-b4855545d585","matchArtifact":{"kind":"gcs","name":"{%BUCKET_URI%}/manifests/frontend.yml","type":"gcs/object"},"useDefaultArtifact":true,"usePriorExecution":false},{"defaultArtifact":{"kind":"default.gcs","name":"{%BUCKET_URI%}/manifests/backend.yml","reference":"{%BUCKET_URI%}/manifests/backend.yml","type":"gcs/object"},"id":"68b3d1d0-8ea1-426f-879d-53e40687e88a","matchArtifact":{"kind":"gcs","name":"{%BUCKET_URI%}/manifests/backend.yml","type":"gcs/object"},"useDefaultArtifact":true,"usePriorExecution":false},{"defaultArtifact":{"kind":"default.docker","name":"gcr.io/{%PROJECT_ID%}/frontend","reference":"gcr.io/{%PROJECT_ID%}/frontend","type":"docker/image"},"id":"17205002-333a-4d5d-aaec-abb77b73b977","matchArtifact":{"kind":"docker","name":"gcr.io/{%PROJECT_ID%}/frontend","type":"docker/image"},"useDefaultArtifact":true,"usePriorArtifact":true,"usePriorExecution":false},{"defaultArtifact":{"kind":"default.docker","name":"gcr.io/{%PROJECT_ID%}/backend","reference":"gcr.io/{%PROJECT_ID%}/backend","type":"docker/image"},"id":"09eda939-611e-4695-9b52-518379718eaf","matchArtifact":{"kind":"docker","name":"gcr.io/{%PROJECT_ID%}/backend","type":"docker/image"},"useDefaultArtifact":true,"usePriorArtifact":true,"usePriorExecution":false}],"keepWaitingPipelines":false,"limitConcurrent":true,"application":"demo","parameterConfig":[],"lastModifiedBy":"anonymous","name":"Deploy Simple Canary to Production","stages":[{"account":"my-kubernetes-account","canary":true,"cloudProvider":"kubernetes","manifestArtifactAccount":"my-gcs-account","manifestArtifactId":"dfc04116-f241-4af7-ad5f-b4855545d585","moniker":{"app":"demo","cluster":"frontend-canary"},"name":"Simple Frontend 
Canary","namespace":"production","refId":"1","relationships":{"loadBalancers":[],"securityGroups":[]},"requiredArtifactIds":["17205002-333a-4d5d-aaec-abb77b73b977"],"requisiteStageRefIds":[],"source":"artifact","type":"deployManifest"},{"account":"my-kubernetes-account","canary":true,"cloudProvider":"kubernetes","manifestArtifactAccount":"my-gcs-account","manifestArtifactId":"68b3d1d0-8ea1-426f-879d-53e40687e88a","moniker":{"app":"demo","cluster":"backend-canary"},"name":"Simple Backend Canary","namespace":"production","refId":"2","relationships":{"loadBalancers":[],"securityGroups":[]},"requiredArtifactIds":["09eda939-611e-4695-9b52-518379718eaf"],"requisiteStageRefIds":[],"source":"artifact","type":"deployManifest"},{"failPipeline":true,"isNew":true,"judgmentInputs":[],"name":"Wait For Canary Results","notifications":[],"refId":"3","requisiteStageRefIds":["1","2"],"type":"wait","waitTime":30},{"failPipeline":true,"isNew":true,"judgmentInputs":[],"name":"Manually Validate Canary Results","notifications":[],"refId":"4","requisiteStageRefIds":["3"],"type":"manualJudgment"}],"index":1,"id":"205a774a-2869-452a-9050-5fb95ae6624a","triggers":[{"application":"demo","enabled":true,"expectedArtifactIds":["dfc04116-f241-4af7-ad5f-b4855545d585","17205002-333a-4d5d-aaec-abb77b73b977","68b3d1d0-8ea1-426f-879d-53e40687e88a","09eda939-611e-4695-9b52-518379718eaf"],"pipeline":"f1d724be-7f75-43fc-b0f5-d7efa4b173af","status":["successful"],"type":"pipeline"}],"updateTs":"1522418070917"} | 1,278 |
515 | <filename>docs/source/conf.py
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# Make the package importable so sphinx.ext.autodoc can resolve modules
# (path is relative to this docs/source directory).
sys.path.insert(0, os.path.abspath("../compressai/"))

# -- Project information -----------------------------------------------------

project = "compressai"
copyright = "2021, InterDigital Communications, Inc."
author = "InterDigital Communications, Inc."

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]

# Document instance attributes with ":ivar:" fields (napoleon extension).
napoleon_use_ivar = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = "sphinx_rtd_theme"
html_theme = "furo"
html_title = "CompressAI"
html_logo = "_static/logo.png"
html_show_sphinx = False
html_theme_options = {
    "sidebar_hide_name": True,
    "light_css_variables": {
        "color-brand-primary": "#00aaee",
        "color-brand-content": "#00aaee",
    },
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 1,157 |
364 | // The MIT License (MIT)
// Copyright (c) 2016, Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include <iostream> // TODO: remove.
#include "BitFunnel/Configuration/IShardDefinition.h"
#include "BitFunnel/Exceptions.h"
#include "BitFunnel/IFileManager.h"
#include "BitFunnel/Index/Factories.h"
#include "BitFunnel/Index/IDocument.h"
#include "BitFunnel/Index/IRecycler.h"
#include "BitFunnel/Index/ISliceBufferAllocator.h"
#include "BitFunnel/Index/ITermTableCollection.h"
#include "BitFunnel/Utilities/Factories.h"
#include "BitFunnel/Utilities/StreamUtilities.h"
#include "DocumentHandleInternal.h"
#include "Ingestor.h"
#include "LoggerInterfaces/Logging.h"
#include "TermToText.h"
namespace BitFunnel
{
std::unique_ptr<IIngestor>
Factories::CreateIngestor(IDocumentDataSchema const & docDataSchema,
IRecycler& recycler,
ITermTableCollection const & termTables,
IShardDefinition const & shardDefinition,
ISliceBufferAllocator& sliceBufferAllocator)
{
return std::unique_ptr<IIngestor>(new Ingestor(docDataSchema,
recycler,
termTables,
shardDefinition,
sliceBufferAllocator));
}
    // Constructs the ingestor and eagerly creates one Shard per entry in the
    // shard definition.  Each shard gets its own term table and draws slice
    // buffers from the shared allocator.
    Ingestor::Ingestor(IDocumentDataSchema const & docDataSchema,
                       IRecycler& recycler,
                       ITermTableCollection const & termTables,
                       IShardDefinition const & shardDefinition,
                       ISliceBufferAllocator& sliceBufferAllocator)
      : m_recycler(recycler),
        m_shardDefinition(shardDefinition),
        // TODO: This member is now redundant (with m_documentMap).
        // But see issue 389. Because of that issue, m_documentCount is not
        // always equal to m_documentMap.size().
        m_documentCount(0),
        m_totalSourceByteSize(0),
        m_documentMap(new DocumentMap()),
        m_documentCache(new DocumentCache()),
        m_tokenManager(Factories::CreateTokenManager()),
        m_sliceBufferAllocator(sliceBufferAllocator)
    {
        // Create shards based on shard definition in m_shardDefinition..
        // Diagnostic logging to stdout; the last shard has no posting-count
        // upper bound, hence "(larger)".
        for (ShardId shardId = 0; shardId < m_shardDefinition.GetShardCount(); ++shardId)
        {
            std::cout
                << "Creating shard: " << shardId;
            if (shardId + 1 < m_shardDefinition.GetShardCount())
            {
                std::cout << " (" << m_shardDefinition.GetMaxPostingCount(shardId) << " postings)" << std::endl;
            }
            else
            {
                std::cout << " (larger)" << std::endl;
            }
            m_shards.push_back(
                std::unique_ptr<Shard>(
                    new Shard(shardId,
                              GetRecycler(),
                              GetTokenManager(),
                              termTables.GetTermTable(shardId),
                              docDataSchema,
                              m_sliceBufferAllocator,
                              m_sliceBufferAllocator.GetSliceBufferSize())));
        }
    }


    // Destructor: shuts down the token manager (see Shutdown()).
    Ingestor::~Ingestor()
    {
        Shutdown();
    }
void Ingestor::PrintStatistics(std::ostream& out,
double time) const
{
out << "Shard count:" << m_shards.size() << std::endl
<< "Document count: " << m_documentCount << std::endl
<< "Bytes/Document: "
<< static_cast<double>(m_totalSourceByteSize) / m_documentCount
<< std::endl
<< "Total bytes read: " << m_totalSourceByteSize << std::endl
<< "Posting count: " << m_histogram.GetPostingCount() << std::endl;
if (time > 0)
{
out << "Total ingestion time: " << time << std::endl;
out << "Bytes/second: " << m_totalSourceByteSize / time << std::endl;
}
out << std::endl;
// TODO: print out term count? Not sure how to do this since they are spread across shards.
}
    // Writes index statistics via fileManager: the term-to-text mapping (only
    // when termToText is supplied), the document histogram, and per-shard
    // cumulative term counts and document-frequency tables.
    void Ingestor::WriteStatistics(IFileManager & fileManager,
                                   ITermToText const * termToText) const
    {
        if (termToText != nullptr)
        {
            auto out = fileManager.TermToText().OpenForWrite();
            termToText->Write(*out);
        }

        {
            auto out = fileManager.DocumentHistogram().OpenForWrite();
            m_histogram.Write(*out);
        }

        for (size_t shard = 0; shard < m_shards.size(); ++shard)
        {
            {
                auto out = fileManager.CumulativeTermCounts(shard).OpenForWrite();
                m_shards[shard]->TemporaryWriteCumulativeTermCounts(*out);
            }
            {
                auto out = fileManager.DocFreqTable(shard).OpenForWrite();
                m_shards[shard]->TemporaryWriteDocumentFrequencyTable(*out, termToText);
            }
        }
    }


    // Restores the index from saved slices.  The field order here must mirror
    // TemporaryWriteAllSlices() exactly.
    void Ingestor::TemporaryReadAllSlices(IFileManager& fileManager)
    {
        // Recover ingestor-wide values from IndexSliceMain file
        // and make sure saved slices are formatted consistently with current index
        // Note: PostingCount is neither saved nor recovered
        auto sliceFileMain = fileManager.IndexSliceMain();
        auto input = sliceFileMain.OpenForRead();
        m_documentCount = StreamUtilities::ReadField<size_t>(*input);
        m_totalSourceByteSize = StreamUtilities::ReadField<size_t>(*input);
        auto shardSize = StreamUtilities::ReadField<size_t>(*input);
        auto sliceBufferSize = StreamUtilities::ReadField<size_t>(*input);
        if (shardSize != m_shards.size() || sliceBufferSize != m_sliceBufferAllocator.GetSliceBufferSize())
        {
            RecoverableError error("Ingestor::TemporaryReadAllSlices(): Saved slices don't match index format.");
            throw error;
        }

        // Load each shard's slices
        for (size_t i = 0; i < m_shards.size(); ++i)
        {
            auto nbrSlices = StreamUtilities::ReadField<size_t>(*input);
            m_shards[i]->TemporaryReadAllSlices(fileManager, nbrSlices);
        }
    }


    // Persists the index slices.  The field order here must mirror
    // TemporaryReadAllSlices() exactly (PostingCount is intentionally not
    // saved; see the reader).
    void Ingestor::TemporaryWriteAllSlices(IFileManager& fileManager) const
    {
        // Write out IndexSliceMain file with ingestor-wide values
        auto sliceFileMain = fileManager.IndexSliceMain();
        auto output = sliceFileMain.OpenForWrite();
        StreamUtilities::WriteField<size_t>(*output, m_documentCount);
        StreamUtilities::WriteField<size_t>(*output, m_totalSourceByteSize);
        StreamUtilities::WriteField<size_t>(*output, m_shards.size());
        StreamUtilities::WriteField<size_t>(*output, m_sliceBufferAllocator.GetSliceBufferSize());

        // Save each shard's slices
        for (size_t i = 0; i < m_shards.size(); ++i)
        {
            StreamUtilities::WriteField<size_t>(*output, m_shards[i]->GetSliceBuffers().size());
            m_shards[i]->TemporaryWriteAllSlices(fileManager);
        }
    }


    // Returns the cache of ingested documents.
    IDocumentCache & Ingestor::GetDocumentCache() const
    {
        return *m_documentCache;
    }
    // Ingests one document: updates counters and the posting histogram,
    // routes the document to a shard by its posting count, ingests its terms,
    // commits it, and registers it in the document map.  If registration
    // throws, the freshly committed document is expired (rolled back) before
    // the exception is rethrown.
    void Ingestor::Add(DocId id, IDocument const & document)
    {
        ++m_documentCount;
        m_totalSourceByteSize += document.GetSourceByteSize();

        // Add postingCount to the DocumentHistogramBuilder
        m_histogram.AddDocument(document.GetPostingCount());

        // Choose correct shard and then allocate handle.
        ShardId shardId = m_shardDefinition.GetShard(document.GetPostingCount());
        DocumentHandleInternal handle = m_shards[shardId]->AllocateDocument(id);

        // std::cout
        //     << "IIngestor::Add("
        //     << id << "):"
        //     << " postingCount: " << document.GetPostingCount()
        //     << " shardId: " << shardId
        //     << std::endl;

        document.Ingest(handle);

        // TODO: REVIEW: Why are Activate() and CommitDocument() separate operations?
        handle.Activate();
        handle.GetSlice().CommitDocument();

        // TODO: schedule for backup if Slice is full.
        // Consider if Slice::CommitDocument itself may schedule a backup when full.

        try
        {
            m_documentMap->Add(handle);

            // TODO: Remove this debugging code. Related to issue 389.
            //if (m_documentMap->size() != m_documentCount)
            //{
            //    std::cout
            //        << "  Mismatch on docId=" << id
            //        << ": m_documentMap->size()=" << m_documentMap->size()
            //        << ", m_documentCount = " << m_documentCount
            //        << std::endl;
            //}
        }
        catch (...)
        {
            try
            {
                handle.Expire();
            }
            catch (...)
            {
                LogB(Logging::Error,
                     "Ingestor::Add",
                     "Error while cleaning up after AddDocument operation failed.",
                     "");
            }

            // Re-throw the original exception back to the caller.
            throw;
        }
    }
    // Returns the recycler shared by all shards.
    IRecycler& Ingestor::GetRecycler() const
    {
        return m_recycler;
    }


    // Returns the number of shards created from the shard definition.
    size_t Ingestor::GetShardCount() const
    {
        return m_shards.size();
    }


    // Returns the shard at the given index (no bounds check).
    IShard& Ingestor::GetShard(size_t shard) const
    {
        return *(m_shards[shard]);
    }


    IShardDefinition const & Ingestor::GetShardDefinition() const
    {
        return m_shardDefinition;
    }


    ITokenManager& Ingestor::GetTokenManager() const
    {
        return *m_tokenManager;
    }


    // Soft-deletes a document.  Returns true if the DocId was found and
    // expired, false if it was absent (absence is not an error; see below).
    bool Ingestor::Delete(DocId id)
    {
        // Token held via RAII for the duration of the call — presumably
        // coordinates with token-based readers; confirm in TokenManager docs.
        const Token token = m_tokenManager->RequestToken();

        // Protecting from concurrent Delete operations. Even though individual
        // function calls here are thread-safe, Delete on the same value of DocId
        // is not, since it modifies the counters of the expired documents in the
        // Slice.
        std::lock_guard<std::mutex> lock(m_deleteDocumentLock);

        bool isFound;
        DocumentHandleInternal location = m_documentMap->Find(id, isFound);
        if (isFound)
        {
            m_documentMap->Delete(id);
            location.Expire();
        }

        // In a case of documents deletes, a missing entry should not be treated
        // as an error. This is to accommodate soft-deleting a large number of
        // documents where only the range of IDs is known, but not the exact
        // values.
        return isFound;
    }


    // Fact assertion is not yet implemented.
    void Ingestor::AssertFact(DocId /*id*/, FactHandle /*fact*/, bool /*value*/)
    {
        throw NotImplemented();
    }


    // Returns true if the document map currently contains the DocId.
    bool Ingestor::Contains(DocId id) const
    {
        bool isFound;
        m_documentMap->Find(id, isFound);
        return isFound;
    }


    // Looks up the handle for a DocId; throws RecoverableError if absent.
    DocumentHandle Ingestor::GetHandle(DocId id) const
    {
        bool isFound;
        auto handle = m_documentMap->Find(id, isFound);
        if (!isFound)
        {
            RecoverableError error("Ingestor::GetHandle(): DocId not found.");
            throw error;
        }
        return handle;
    }


    // Returns the running document counter (may diverge from
    // m_documentMap->size(); see issue 389 note in the constructor).
    size_t Ingestor::GetDocumentCount() const
    {
        return m_documentCount;
        // return m_documentMap->size();
    }


    size_t Ingestor::GetUsedCapacityInBytes() const
    {
        throw NotImplemented();
    }


    // NOTE(review): the misspelling "Souce" is part of the public IIngestor
    // interface and must be preserved for callers.
    size_t Ingestor::GetTotalSouceBytesIngested() const
    {
        return m_totalSourceByteSize;
    }


    size_t Ingestor::GetPostingCount() const
    {
        return m_histogram.GetPostingCount();
    }
    // Shuts down the token manager; called from the destructor.
    void Ingestor::Shutdown()
    {
        m_tokenManager->Shutdown();
    }


    // Group management (open/close/expire) is not yet implemented.
    void Ingestor::OpenGroup(GroupId /*groupId*/)
    {
        throw NotImplemented();
    }


    void Ingestor::CloseGroup()
    {
        throw NotImplemented();
    }


    void Ingestor::ExpireGroup(GroupId /*groupId*/)
    {
        throw NotImplemented();
    }
}
| 6,003 |
357 | <filename>PsychSourceGL/Source/Common/PsychHID/PsychHIDGetCollections.c
/*
PsychToolbox3/Source/Common/PsychHID/PsychHIDGetCollections.c
PROJECTS: PsychHID
PLATFORMS: OSX
AUTHORS:
<EMAIL> awi
<EMAIL> mk
HISTORY:
5/11/03 awi Created.
TO DO:
*/
#include "PsychHID.h"
#if PSYCH_SYSTEM == PSYCH_OSX
// ---------------------------------
// convert an element type to a mask
// Maps an IOKit HID element type to the corresponding HID Utilities
// element-type mask.  Unrecognized types map to kHIDElementTypeAll.
HIDElementTypeMask HIDConvertElementTypeToMask (const long type)
{
    switch (type)
    {
        case kIOHIDElementTypeInput_Misc:
        case kIOHIDElementTypeInput_Button:
        case kIOHIDElementTypeInput_Axis:
        case kIOHIDElementTypeInput_ScanCodes:
            return kHIDElementTypeInput;

        case kIOHIDElementTypeOutput:
            return kHIDElementTypeOutput;

        case kIOHIDElementTypeFeature:
            return kHIDElementTypeFeature;

        case kIOHIDElementTypeCollection:
            return kHIDElementTypeCollection;

        default:
            return kHIDElementTypeAll;
    }
}
// Help text reported for PsychHID('Collections'): usage line, synopsis, and
// see-also cross references.
static char useString[]= "collections=PsychHID('Collections', deviceNumber)";
static char synopsisString[] =
        "Return a flat list of all collections on the specified USB HID device.\n"
        "A collection is a grouping of elements.\n"
        "Collections are hierarchical. A collection can contain other collections. "
        "Use the \"memberCollectionIndices\" field of the returned structures, "
        "which indexes the member collections, to expose the hierarchy.";
static char seeAlsoString[] = "";
// Implements PsychHID('Collections', deviceNumber):
// Returns one struct per HID collection element of the given device, including
// its type/usage info and the indices of its member collections and IO elements.
PsychError PSYCHHIDGetCollections(void)
{
    pRecDevice specDevice=NULL;
    UInt32 numDeviceElements;
    const char *elementFieldNames[]={"typeMaskName", "name", "deviceIndex", "collectionIndex", "typeValue", "typeName", "usagePageValue", "usageValue", "usageName", "memberCollectionIndices", "memberElementIndices"};
    int i, numElementStructElements, numElementStructFieldNames=11, elementIndex, deviceIndex;
    PsychGenericScriptType *elementStruct, *memberCollectionIndicesMat, *memberIOElementIndicesMat;
    pRecElement currentElement, lastElement = NULL;
    char elementTypeName[PSYCH_HID_MAX_DEVICE_ELEMENT_TYPE_NAME_LENGTH];
    char usageName[PSYCH_HID_MAX_DEVICE_ELEMENT_USAGE_NAME_LENGTH];
    char *typeMaskName;
    HIDElementTypeMask typeMask;
    char tmpName[1024];
    pRecElement *memberCollectionRecords, *memberIOElementRecords;
    double *memberCollectionIndices, *memberIOElementIndices;
    int numSubCollections, numSubIOElements;

    PsychPushHelp(useString, synopsisString, seeAlsoString);
    if(PsychIsGiveHelp()){PsychGiveHelp();return(PsychError_none);};

    PsychErrorExit(PsychCapNumOutputArgs(1));
    PsychErrorExit(PsychCapNumInputArgs(1));

    // Resolve and validate the device addressed by the 1-based deviceIndex argument.
    PsychCopyInIntegerArg(1, TRUE, &deviceIndex);
    PsychHIDVerifyInit();
    specDevice= PsychHIDGetDeviceRecordPtrFromIndex(deviceIndex);
    PsychHIDVerifyOpenDeviceInterfaceFromDeviceIndex(deviceIndex);

    // Allocate one output struct per collection element of the device.
    numDeviceElements= HIDCountDeviceElements(specDevice, kHIDElementTypeCollection);
    numElementStructElements = (int)numDeviceElements;
    PsychAllocOutStructArray(1, FALSE, numElementStructElements, numElementStructFieldNames, elementFieldNames, &elementStruct);
    elementIndex=0;
    // Iterate over all collection elements. 'lastElement' guards against the HID
    // utilities handing back the same element twice, which would loop forever.
    for(currentElement=HIDGetFirstDeviceElement(specDevice,kHIDElementTypeCollection);
        (currentElement != NULL) && (currentElement != lastElement);
        currentElement=HIDGetNextDeviceElement(currentElement, kHIDElementTypeCollection)) {
        lastElement = currentElement;

        // Needs HIDUtilities V2.0, available since OSX 10.5:
        IOHIDElementType type = IOHIDElementGetType(currentElement);
        typeMask = HIDConvertElementTypeToMask(type);

        tmpName[0] = 0;
        CFStringRef cfString = IOHIDElementGetName(currentElement);
        if (cfString) {
            CFStringGetCString(cfString, tmpName, sizeof(tmpName), kCFStringEncodingASCII);
            // BUGFIX: IOHIDElementGetName() follows the CoreFoundation "Get Rule":
            // the caller does NOT own the returned CFStringRef and must not release
            // it. The previous CFRelease(cfString) here was an over-release that
            // could corrupt the element's name object / crash on a later access.
        }
        PsychSetStructArrayStringElement("name", elementIndex, tmpName, elementStruct);
        PsychSetStructArrayDoubleElement("typeValue", elementIndex, (double) type, elementStruct);
        HIDGetTypeName(type, elementTypeName);
        PsychSetStructArrayDoubleElement("usagePageValue", elementIndex, (double) IOHIDElementGetUsagePage(currentElement), elementStruct);
        PsychSetStructArrayDoubleElement("usageValue", elementIndex, (double) IOHIDElementGetUsage(currentElement), elementStruct);
        HIDGetUsageName(IOHIDElementGetUsagePage(currentElement), IOHIDElementGetUsage(currentElement), usageName);
        PsychSetStructArrayStringElement("usageName", elementIndex, usageName, elementStruct);
        PsychHIDGetTypeMaskStringFromTypeMask(typeMask, &typeMaskName);
        PsychSetStructArrayStringElement("typeMaskName", elementIndex, typeMaskName, elementStruct);
        PsychSetStructArrayDoubleElement("deviceIndex", elementIndex, (double)deviceIndex, elementStruct);
        // collectionIndex is 1-based for script-side consumption.
        PsychSetStructArrayDoubleElement("collectionIndex", elementIndex, (double)elementIndex+1, elementStruct);
        PsychSetStructArrayStringElement("typeName", elementIndex, elementTypeName, elementStruct);

        // Find and return the indices of this collection's member collections and elements.
        numSubCollections=PsychHIDCountCollectionElements(currentElement, kHIDElementTypeCollection);
        numSubIOElements=PsychHIDCountCollectionElements(currentElement, kHIDElementTypeIO);
        memberCollectionRecords=(pRecElement*)PsychMallocTemp(sizeof(pRecElement) * numSubCollections);
        memberIOElementRecords=(pRecElement*)PsychMallocTemp(sizeof(pRecElement) * numSubIOElements);
        PsychHIDFindCollectionElements(currentElement, kHIDElementTypeCollection, memberCollectionRecords, numSubCollections);
        PsychHIDFindCollectionElements(currentElement, kHIDElementTypeIO, memberIOElementRecords, numSubIOElements);
        memberCollectionIndices=NULL;
        PsychAllocateNativeDoubleMat(1, numSubCollections, 1, &memberCollectionIndices, &memberCollectionIndicesMat);
        memberIOElementIndices=NULL;
        PsychAllocateNativeDoubleMat(1, numSubIOElements, 1, &memberIOElementIndices, &memberIOElementIndicesMat);
        // Translate element records into device-relative indices.
        for(i=0;i<numSubCollections;i++)
            memberCollectionIndices[i]=PsychHIDGetIndexFromRecord(specDevice, memberCollectionRecords[i], kHIDElementTypeCollection);
        for(i=0;i<numSubIOElements;i++)
            memberIOElementIndices[i]=PsychHIDGetIndexFromRecord(specDevice, memberIOElementRecords[i], kHIDElementTypeIO);
        PsychFreeTemp(memberCollectionRecords);
        PsychFreeTemp(memberIOElementRecords);
        PsychSetStructArrayNativeElement("memberCollectionIndices", elementIndex, memberCollectionIndicesMat, elementStruct);
        PsychSetStructArrayNativeElement("memberElementIndices", elementIndex, memberIOElementIndicesMat, elementStruct);

        ++elementIndex;
    }
    return(PsychError_none);
}
#endif
| 2,798 |
399 | <reponame>KirmesBude/REGoth-bs<gh_stars>100-1000
#pragma once
#include "RTTIUtil.hpp"
#include <components/UIFocusText.hpp>
namespace REGoth
{
  // RTTI descriptor for the UIFocusText component, required by the bs::
  // framework's reflection machinery. The member list is intentionally
  // empty: this component is runtime-only and must not be serialized.
  class RTTI_UIFocusText : public bs::RTTIType<UIFocusText, UIElement, RTTI_UIFocusText>
  {
    BS_BEGIN_RTTI_MEMBERS
    // This class should not be serialized
    BS_END_RTTI_MEMBERS
  public:
    RTTI_UIFocusText()
    {
    }
    REGOTH_IMPLEMENT_RTTI_CLASS_FOR_COMPONENT(UIFocusText)
  };
} // namespace REGoth
| 211 |
504 | package org.dayatang.cache.redis;
import redis.clients.jedis.Jedis;
/**
* Created by yyang on 15/9/8.
*/
/**
 * Callback contract executed with a live {@link Jedis} connection.
 * Implementations perform their Redis operations inside
 * {@link #doInRedis(Jedis)}; connection acquisition and release is the
 * caller's responsibility.
 */
@FunctionalInterface
public interface JedisAction {

    /**
     * Performs work against Redis using the supplied connection.
     *
     * @param jedis an open Jedis connection provided by the caller
     */
    void doInRedis(Jedis jedis);
}
| 74 |
399 | <filename>library/java/io/envoyproxy/envoymobile/engine/types/EnvoyStringAccessor.java
package io.envoyproxy.envoymobile.engine.types;
/**
 * Functional interface through which the application supplies a string
 * value to the Envoy engine on demand.
 */
@FunctionalInterface
public interface EnvoyStringAccessor {

  /**
   * Called to retrieve a string from the Application.
   *
   * @return the string value to hand back to the engine.
   */
  String getEnvoyString();
}
| 84 |
348 | {"nom":"Longuyon","circ":"3ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":4027,"abs":2669,"votants":1358,"blancs":19,"nuls":10,"exp":1329,"res":[{"nuance":"REM","nom":"<NAME>","voix":399},{"nuance":"FN","nom":"Mme <NAME>","voix":272},{"nuance":"LR","nom":"<NAME>","voix":179},{"nuance":"FI","nom":"M. <NAME>","voix":151},{"nuance":"SOC","nom":"<NAME>","voix":137},{"nuance":"COM","nom":"<NAME>","voix":47},{"nuance":"ECO","nom":"M. <NAME>","voix":40},{"nuance":"DVD","nom":"M. <NAME>","voix":28},{"nuance":"DLF","nom":"Mme <NAME>","voix":22},{"nuance":"DVD","nom":"Mme <NAME>","voix":21},{"nuance":"DIV","nom":"Mme <NAME>","voix":9},{"nuance":"EXG","nom":"M. <NAME>","voix":9},{"nuance":"DIV","nom":"M. <NAME>","voix":8},{"nuance":"EXG","nom":"Mme <NAME>","voix":7},{"nuance":"DVD","nom":"M. <NAME>","voix":0}]} | 335 |
931 | <filename>bindgen/thread_db_wrapper.h
/* The feature-test macro must be defined before any system header is pulled
 * in so that GNU-specific declarations in the headers below are exposed. */
#define _GNU_SOURCE
#include <thread_db.h>
#include <proc_service.h>
| 43 |
1,433 | //******************************************************************
//
// Copyright 2015 Samsung Electronics All Rights Reserved.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#include "TestBundleActivator.h"
TestBundleActivator *bundle;
// Constructs the activator with no container or test resource attached yet.
TestBundleActivator::TestBundleActivator()
    : m_pResourceContainer(nullptr),
      m_pTestResource(nullptr)
{
}
// Drops the references held by this activator on destruction.
TestBundleActivator::~TestBundleActivator()
{
    m_pResourceContainer = nullptr;
    m_pTestResource = nullptr;
}
// Called by the resource container when the bundle is activated; remembers
// the container handle and this bundle's id for later resource registration.
void TestBundleActivator::activateBundle(ResourceContainerBundleAPI *resourceContainer,
        std::string bundleId)
{
    std::cout << "TestBundleActivator::activateBundle .. " << std::endl;
    m_pResourceContainer = resourceContainer;
    m_bundleId = bundleId;
}
// Called when the bundle is deactivated; releases the container handle.
void TestBundleActivator::deactivateBundle()
{
    std::cout << "TestBundleActivator::deactivateBundle .. " << std::endl;
    m_pResourceContainer = nullptr;
}
// Creates a TestBundleResource configured from the given resourceInfo and
// registers it with the container stored during activateBundle().
void TestBundleActivator::createResource(resourceInfo resourceInfo)
{
    std::cout << "TestBundleActivator::createResource .. " << std::endl;
    m_pTestResource = std::make_shared< TestBundleResource >();
    m_pTestResource->m_bundleId = m_bundleId;
    m_pTestResource->m_uri = resourceInfo.uri;
    m_pTestResource->m_resourceType = resourceInfo.resourceType;
    m_pResourceContainer->registerResource(m_pTestResource);
}
// Unregisters the given resource from the container.
void TestBundleActivator::destroyResource(BundleResource::Ptr pBundleResource)
{
    std::cout << "TestBundleActivator::destroyResource .. " << std::endl;
    m_pResourceContainer->unregisterResource(pBundleResource);
}
// C entry point: allocates the global activator and activates the bundle.
extern "C" void test_externalActivateBundle(ResourceContainerBundleAPI *resourceContainer,
        std::string bundleId)
{
    bundle = new TestBundleActivator();
    bundle->activateBundle(resourceContainer, bundleId);
}
// C entry point: deactivates the bundle and destroys the global activator.
extern "C" void test_externalDeactivateBundle()
{
    bundle->deactivateBundle();
    delete bundle;
    // Reset the global so it does not dangle after deletion; without this,
    // any later call through these entry points would touch freed memory.
    bundle = nullptr;
}
// C entry point: forwards resource creation to the global activator.
extern "C" void test_externalCreateResource(resourceInfo resourceInfo)
{
    bundle->createResource(resourceInfo);
}
// C entry point: forwards resource destruction to the global activator.
extern "C" void test_externalDestroyResource(BundleResource::Ptr pBundleResource)
{
    bundle->destroyResource(pBundleResource);
}
| 857 |
542 | <filename>flink-redis/src/test/java/com/intsmaze/test/ShardedJedisPoolTemplate.java
package com.intsmaze.test;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import redis.clients.jedis.*;
/**
 * Demo of using a Spring-configured ShardedJedisPool: writes and reads ten
 * keys, printing which shard host served each key.
 */
public class ShardedJedisPoolTemplate {
    public static void main(String[] args) {
        ApplicationContext ct = new ClassPathXmlApplicationContext("spring-redis.xml");
        ShardedJedisPool shardedJedisPool = (ShardedJedisPool) ct.getBean("shardedJedisPool");
        for (int i = 0; i < 10; i++) {
            ShardedJedis shardedJedis = shardedJedisPool.getResource();// Note: the Sharded constructor effectively runs inside jedisPool.getResource(); it is re-initialized on every getResource() call, which is what enables dynamically adding/removing shard nodes.
            String key = "shard" + i;
            shardedJedis.set(key, "v-" + i);
            System.out.println(shardedJedis.get(key));
            JedisShardInfo shardInfo = shardedJedis.getShardInfo(key);
            System.out.println("getHost:" + shardInfo.getHost());
            shardedJedis.close();
        }
        shardedJedisPool.close();
        shardedJedisPool.destroy();
    }
}
| 578 |
13,648 | <filename>tests/micropython/kbd_intr.py
# test the micropython.kbd_intr() function
import micropython
# Skip the whole test on ports that do not provide kbd_intr.
try:
    micropython.kbd_intr
except AttributeError:
    print("SKIP")
    raise SystemExit
# just check we can actually call it (3 is ASCII code for Ctrl-C)
micropython.kbd_intr(3)
| 100 |
16,989 | // Copyright 2015 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.shell;
import static com.google.common.truth.Truth.assertThat;
import static com.google.devtools.build.lib.shell.ShellUtils.prettyPrintArgv;
import static com.google.devtools.build.lib.shell.ShellUtils.tokenize;
import static org.junit.Assert.fail;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for ShellUtils that call out to Bash. */
@RunWith(JUnit4.class)
public class ShellUtilsWithBashTest {
  /**
   * Verifies that prettyPrintArgv(args) produces a shell command line that
   * /bin/sh tokenizes back into exactly {@code args}, and that tokenize()
   * inverts prettyPrintArgv() as well.
   */
  private void assertTokenizeIsDualToPrettyPrint(String... args) throws Exception {
    List<String> in = Arrays.asList(args);
    String shellCommand = prettyPrintArgv(in);
    // Assert that pretty-print is correct, i.e. dual to the actual /bin/sh
    // tokenization. This test assumes no newlines in the input:
    String[] execArgs = {
      "/bin/sh",
      "-c",
      "for i in " + shellCommand + "; do echo \"$i\"; done" // tokenize, one word per line
    };
    String stdout = null;
    try {
      stdout = new String(new Command(execArgs).execute().getStdout());
    } catch (Exception e) {
      fail("/bin/sh failed:\n" + in + "\n" + shellCommand + "\n" + e.getMessage());
    }
    // We can't use stdout.split("\n") here,
    // because String.split() ignores trailing empty strings.
    ArrayList<String> words = Lists.newArrayList();
    int index;
    while ((index = stdout.indexOf('\n')) >= 0) {
      words.add(stdout.substring(0, index));
      stdout = stdout.substring(index + 1);
    }
    assertThat(words).isEqualTo(in);
    // Assert that tokenize is dual to pretty-print:
    List<String> out = new ArrayList<>();
    try {
      tokenize(out, shellCommand);
    } finally {
      if (out.isEmpty()) { // i.e. an exception
        System.err.println(in);
      }
    }
    assertThat(out).isEqualTo(in);
  }
  @Test
  public void testTokenizeIsDualToPrettyPrint() throws Exception {
    // tokenize() is the inverse of prettyPrintArgv(). (However, the reverse
    // is not true, since there are many ways to escape the same string,
    // e.g. "foo" and 'foo'.)
    assertTokenizeIsDualToPrettyPrint("foo");
    assertTokenizeIsDualToPrettyPrint("foo bar");
    assertTokenizeIsDualToPrettyPrint("foo bar", "wiz");
    assertTokenizeIsDualToPrettyPrint("'foo'");
    assertTokenizeIsDualToPrettyPrint("\\'foo\\'");
    assertTokenizeIsDualToPrettyPrint("${filename%.c}.o");
    assertTokenizeIsDualToPrettyPrint("<html!>");
    assertTokenizeIsDualToPrettyPrint("");
    assertTokenizeIsDualToPrettyPrint("!@#$%^&*()");
    assertTokenizeIsDualToPrettyPrint("x'y\" z");
  }
}
| 1,150 |
348 | {"nom":"<NAME>","circ":"3ème circonscription","dpt":"Morbihan","inscrits":1707,"abs":846,"votants":861,"blancs":37,"nuls":35,"exp":789,"res":[{"nuance":"REM","nom":"<NAME>","voix":504},{"nuance":"FI","nom":"Mme <NAME>","voix":285}]} | 95 |
3,096 | /*
* Copyright 1999-2012 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.cobar.manager.util;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import com.alibaba.cobar.manager.dataobject.cobarnode.ConnectionStatus;
import com.alibaba.cobar.manager.dataobject.cobarnode.DataNodesStatus;
import com.alibaba.cobar.manager.dataobject.xml.ClusterDO;
import com.alibaba.cobar.manager.dataobject.xml.CobarDO;
/**
 * In-place sorting helpers for the cobar-manager view layer.
 */
public class ListSortUtil {

    /** Sorts clusters alphabetically by name. */
    public static void sortClusterByName(List<ClusterDO> list) {
        Collections.sort(list, new Comparator<ClusterDO>() {
            @Override
            public int compare(ClusterDO left, ClusterDO right) {
                return left.getName().compareTo(right.getName());
            }
        });
    }

    /** Sorts clusters by sort id, breaking ties alphabetically by name. */
    public static void sortClusterBySortId(List<ClusterDO> list) {
        Collections.sort(list, new Comparator<ClusterDO>() {
            @Override
            public int compare(ClusterDO left, ClusterDO right) {
                if (left.getSortId() == right.getSortId()) {
                    return left.getName().compareTo(right.getName());
                }
                return left.getSortId() - right.getSortId();
            }
        });
    }

    /** Sorts cobar nodes alphabetically by name. */
    public static void sortCobarByName(List<CobarDO> list) {
        Collections.sort(list, new Comparator<CobarDO>() {
            @Override
            public int compare(CobarDO left, CobarDO right) {
                return left.getName().compareTo(right.getName());
            }
        });
    }

    /** Sorts data-node status rows by pool name (see {@link #comparePoolName}). */
    public static void sortDataNodesByPoolName(List<DataNodesStatus> list) {
        Collections.sort(list, new Comparator<DataNodesStatus>() {
            @Override
            public int compare(DataNodesStatus left, DataNodesStatus right) {
                return comparePoolName(left.getPoolName(), right.getPoolName());
            }
        });
    }

    /** Same ordering as {@link #sortDataNodesByPoolName(List)}, for map-shaped rows. */
    public static void sortDataNodesMapByPoolName(List<Map<String, Object>> list) {
        Collections.sort(list, new Comparator<Map<String, Object>>() {
            @Override
            public int compare(Map<String, Object> left, Map<String, Object> right) {
                return comparePoolName((String) left.get("poolName"), (String) right.get("poolName"));
            }
        });
    }

    /**
     * Compares pool names of the form "name[index]": the textual name is the
     * primary key, the bracketed numeric index is the tie-breaker.
     */
    public static int comparePoolName(String s1, String s2) {
        Pair<String, Integer> first = CobarStringUtil.splitIndex(s1, '[', ']');
        Pair<String, Integer> second = CobarStringUtil.splitIndex(s2, '[', ']');
        int byName = first.getFirst().compareTo(second.getFirst());
        if (byName != 0) {
            return byName;
        }
        return first.getSecond() - second.getSecond();
    }

    /** Sorts connections by host; within one host, by descending net-out volume. */
    public static void sortConnections(List<ConnectionStatus> list) {
        Collections.sort(list, new Comparator<ConnectionStatus>() {
            @Override
            public int compare(ConnectionStatus left, ConnectionStatus right) {
                if (left.getHost().equals(right.getHost())) {
                    return (int) (right.getNetOut() - left.getNetOut());
                }
                return left.getHost().compareTo(right.getHost());
            }
        });
    }
}
| 1,611 |
416 | import org.simpleflatmapper.converter.ContextualConverterFactoryProducer;
// Module descriptor for the protobuf converter integration: exports the
// converter package and registers its factory producer as a service.
module org.simpleflatmapper.converter.protobuf {
        requires org.simpleflatmapper.util;
        requires org.simpleflatmapper.converter;
        requires protobuf.java;
        exports org.simpleflatmapper.converter.protobuf;
        provides ContextualConverterFactoryProducer
                with org.simpleflatmapper.converter.protobuf.ProtobufConverterFactoryProducer;
}
368 | <filename>src/com/oltpbenchmark/benchmarks/auctionmark/procedures/NewItem.java<gh_stars>100-1000
/******************************************************************************
* Copyright 2015 by OLTPBenchmark Project *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
******************************************************************************/
package com.oltpbenchmark.benchmarks.auctionmark.procedures;
import java.sql.Connection;
import java.sql.Timestamp;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import com.oltpbenchmark.api.Procedure;
import com.oltpbenchmark.api.SQLStmt;
import com.oltpbenchmark.benchmarks.auctionmark.AuctionMarkConstants;
import com.oltpbenchmark.benchmarks.auctionmark.exceptions.DuplicateItemIdException;
import com.oltpbenchmark.benchmarks.auctionmark.util.AuctionMarkUtil;
import com.oltpbenchmark.benchmarks.auctionmark.util.ItemStatus;
import com.oltpbenchmark.util.SQLUtil;
/**
* NewItem
* @author pavlo
* @author visawee
*/
public class NewItem extends Procedure {
private static final Logger LOG = Logger.getLogger(NewItem.class);
// -----------------------------------------------------------------
// STATEMENTS
// -----------------------------------------------------------------
public final SQLStmt insertItem = new SQLStmt(
"INSERT INTO " + AuctionMarkConstants.TABLENAME_ITEM + "(" +
"i_id," +
"i_u_id," +
"i_c_id," +
"i_name," +
"i_description," +
"i_user_attributes," +
"i_initial_price," +
"i_current_price," +
"i_num_bids," +
"i_num_images," +
"i_num_global_attrs," +
"i_start_date," +
"i_end_date," +
"i_status, " +
"i_created," +
"i_updated," +
"i_iattr0" +
") VALUES (" +
"?," + // i_id
"?," + // i_u_id
"?," + // i_c_id
"?," + // i_name
"?," + // i_description
"?," + // i_user_attributes
"?," + // i_initial_price
"?," + // i_current_price
"?," + // i_num_bids
"?," + // i_num_images
"?," + // i_num_global_attrs
"?," + // i_start_date
"?," + // i_end_date
"?," + // i_status
"?," + // i_created
"?," + // i_updated
"1" + // i_attr0
")"
);
public final SQLStmt getSellerItemCount = new SQLStmt(
"SELECT COUNT(*) FROM " + AuctionMarkConstants.TABLENAME_ITEM +
" WHERE i_u_id = ?"
);
public final SQLStmt getCategory = new SQLStmt(
"SELECT * FROM " + AuctionMarkConstants.TABLENAME_CATEGORY + " WHERE c_id = ? "
);
public final SQLStmt getCategoryParent = new SQLStmt(
"SELECT * FROM " + AuctionMarkConstants.TABLENAME_CATEGORY + " WHERE c_parent_id = ? "
);
public final SQLStmt getGlobalAttribute = new SQLStmt(
"SELECT gag_name, gav_name, gag_c_id " +
"FROM " + AuctionMarkConstants.TABLENAME_GLOBAL_ATTRIBUTE_GROUP + ", " +
AuctionMarkConstants.TABLENAME_GLOBAL_ATTRIBUTE_VALUE +
" WHERE gav_id = ? AND gav_gag_id = ? " +
"AND gav_gag_id = gag_id"
);
public final SQLStmt insertItemAttribute = new SQLStmt(
"INSERT INTO " + AuctionMarkConstants.TABLENAME_ITEM_ATTRIBUTE + "(" +
"ia_id," +
"ia_i_id," +
"ia_u_id," +
"ia_gav_id," +
"ia_gag_id" +
") VALUES(?, ?, ?, ?, ?)"
);
public final SQLStmt insertImage = new SQLStmt(
"INSERT INTO " + AuctionMarkConstants.TABLENAME_ITEM_IMAGE + "(" +
"ii_id," +
"ii_i_id," +
"ii_u_id," +
"ii_sattr0" +
") VALUES(?, ?, ?, ?)"
);
public final SQLStmt updateUserBalance = new SQLStmt(
"UPDATE " + AuctionMarkConstants.TABLENAME_USERACCT + " " +
"SET u_balance = u_balance - 1, " +
" u_updated = ? " +
" WHERE u_id = ?"
);
// -----------------------------------------------------------------
// RUN METHOD
// -----------------------------------------------------------------
/**
* Insert a new ITEM record for a user.
* The benchmark client provides all of the preliminary information
* required for the new item, as well as optional information to create
* derivative image and attribute records. After inserting the new ITEM
* record, the transaction then inserts any GLOBAL ATTRIBUTE VALUE and
* ITEM IMAGE. The unique identifer for each of these records is a
* composite 64-bit key where the lower 60-bits are the i id parameter and the
* upper 4-bits are used to represent the index of the image/attribute.
* For example, if the i id is 100 and there are four items, then the
* composite key will be 0 100 for the first image, 1 100 for the second,
* and so on. After these records are inserted, the transaction then updates
* the USER record to add the listing fee to the seller's balance.
*/
public Object[] run(Connection conn, Timestamp benchmarkTimes[],
long item_id, long seller_id, long category_id,
String name, String description, long duration, double initial_price, String attributes,
long gag_ids[], long gav_ids[], String images[]) throws SQLException {
final Timestamp currentTime = AuctionMarkUtil.getProcTimestamp(benchmarkTimes);
final boolean debug = LOG.isDebugEnabled();
// Calculate endDate
Timestamp end_date = new Timestamp(currentTime.getTime() + (duration * AuctionMarkConstants.MILLISECONDS_IN_A_DAY));
if (debug) {
LOG.debug("NewItem :: run ");
LOG.debug(">> item_id = " + item_id + " , seller_id = " + seller_id + ", category_id = " + category_id);
LOG.debug(">> name = " + name + " , description length = " + description.length());
LOG.debug(">> initial_price = " + initial_price + " , attributes length = " + attributes.length());
LOG.debug(">> gag_ids[].length = " + gag_ids.length + " , gav_ids[] length = " + gav_ids.length);
LOG.debug(">> image length = " + images.length + " ");
LOG.debug(">> start = " + currentTime + ", end = " + end_date);
}
// Get attribute names and category path and append
// them to the item description
PreparedStatement stmt = null;
ResultSet results = null;
int updated = -1;
// ATTRIBUTES
description += "\nATTRIBUTES: ";
stmt = this.getPreparedStatement(conn, getGlobalAttribute);
for (int i = 0; i < gag_ids.length; i++) {
int col = 1;
stmt.setLong(col++, gav_ids[i]);
stmt.setLong(col++, gag_ids[i]);
results = stmt.executeQuery();
if (results.next()) {
col = 1;
description += String.format(" * %s -> %s\n", results.getString(col++), results.getString(col++));
}
results.close();
} // FOR
// CATEGORY
stmt = this.getPreparedStatement(conn, getCategory, category_id);
results = stmt.executeQuery();
boolean adv = results.next();
assert(adv);
String category_name = String.format("%s[%d]", results.getString(2), results.getInt(1));
results.close();
// CATEGORY PARENT
stmt = this.getPreparedStatement(conn, getCategoryParent, category_id);
results = stmt.executeQuery();
String category_parent = null;
if (results.next()) {
category_parent = String.format("%s[%d]", results.getString(2), results.getInt(1));
} else {
category_parent = "<ROOT>";
}
description += String.format("\nCATEGORY: %s >> %s", category_parent, category_name);
results.close();
// Insert new ITEM tuple
stmt = this.getPreparedStatement(conn, insertItem,
item_id, // i_id
seller_id, // i_u_id
category_id, // i_c_id
name, // i_name
description, // i_description
attributes, // i_user_attributes
initial_price, // i_initial_proce
initial_price, // i_current_price
0, // i_num_bids
images.length, // i_num_images
gav_ids.length, // i_num_global_attrs
currentTime, // i_start_date
end_date, // i_end_date
ItemStatus.OPEN.ordinal(), // i_status
currentTime, // i_created
currentTime // i_updated
);
// NOTE: This may fail with a duplicate entry exception because
// the client's internal count of the number of items that this seller
// already has is wrong. That's ok. We'll just abort and ignore the problem
// Eventually the client's internal cache will catch up with what's in the database
try {
updated = stmt.executeUpdate();
} catch (SQLException ex) {
if (SQLUtil.isDuplicateKeyException(ex)) {
conn.rollback();
results = this.getPreparedStatement(conn, getSellerItemCount, seller_id).executeQuery();
adv = results.next();
assert(adv);
int item_count = results.getInt(1);
results.close();
throw new DuplicateItemIdException(item_id, seller_id, item_count, ex);
} else throw ex;
}
assert(updated == 1);
// Insert ITEM_ATTRIBUTE tuples
stmt = this.getPreparedStatement(conn, insertItemAttribute);
for (int i = 0; i < gav_ids.length; i++) {
int param = 1;
stmt.setLong(param++, AuctionMarkUtil.getUniqueElementId(item_id, i));
stmt.setLong(param++, item_id);
stmt.setLong(param++, seller_id);
stmt.setLong(param++, gag_ids[i]);
stmt.setLong(param++, gag_ids[i]);
updated = stmt.executeUpdate();
assert(updated == 1);
} // FOR
// Insert ITEM_IMAGE tuples
stmt = this.getPreparedStatement(conn, insertImage);
for (int i = 0; i < images.length; i++) {
int param = 1;
stmt.setLong(param++, AuctionMarkUtil.getUniqueElementId(item_id, i));
stmt.setLong(param++, item_id);
stmt.setLong(param++, seller_id);
stmt.setString(param++, images[i]);
updated = stmt.executeUpdate();
assert(updated == 1);
} // FOR
// Update listing fee
updated = this.getPreparedStatement(conn, updateUserBalance, currentTime, seller_id).executeUpdate();
assert(updated == 1);
// Return new item_id and user_id
return new Object[] {
// ITEM ID
item_id,
// SELLER ID
seller_id,
// ITEM_NAME
name,
// CURRENT PRICE
initial_price,
// NUM BIDS
0l,
// END DATE
end_date,
// STATUS
ItemStatus.OPEN.ordinal()
};
}
} | 6,292 |
3,269 | # Time: O(m + n)
# Space: O(m + n)
class Solution(object):
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
diff = (sum(A)-sum(B))//2
setA = set(A)
for b in set(B):
if diff+b in setA:
return [diff+b, b]
return []
| 216 |
import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the Onia central skim ("makeSD" process):
# selects events with at least one di-muon pair of mass > 2 GeV.
process = cms.Process("makeSD")
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.1 $'),
    annotation = cms.untracked.string('Onia central skim'),
    name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Skimming/test/CSmaker_Onia_PDMu_1e29_reprocess370patch2_cfg.py,v $')
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10000)
)
# Standard sequences: magnetic field, geometry and conditions database.
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.GeometryExtended_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.EventContent.EventContent_cff')
process.GlobalTag.globaltag = "GR_R_36X_V12A::All"
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/data/Commissioning10/MinimumBias/RAW-RECO/v8/000/132/601/F85204EE-EB40-DF11-8F71-001A64789D1C.root'
    ),
    secondaryFileNames = cms.untracked.vstring(
        '/store/data/Commissioning09/Cosmics/RAW/v3/000/105/755/F6887FD0-9371-DE11-B69E-00304879FBB2.root'
    )
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
import HLTrigger.HLTfilters.hltHighLevelDev_cfi
### Onia skim CS
# Select global muons, or tracker muons with at least one arbitrated segment match.
process.goodMuons = cms.EDFilter("MuonRefSelector",
    src = cms.InputTag("muons"),
    cut = cms.string("isGlobalMuon || (isTrackerMuon && numberOfMatches('SegmentAndTrackArbitration')>0)"),
)
# Build all muon pairs (any charge combination) with invariant mass above 2 GeV.
process.diMuons = cms.EDProducer("CandViewShallowCloneCombiner",
    decay = cms.string("goodMuons goodMuons"),
    checkCharge = cms.bool(False),
    cut = cms.string("mass > 2"),
)
# Keep the event only if at least one such pair exists.
process.diMuonFilter = cms.EDFilter("CandViewCountFilter",
    src = cms.InputTag("diMuons"),
    minNumber = cms.uint32(1),
)
process.Skim_diMuons = cms.Path(
    process.goodMuons *
    process.diMuons *
    process.diMuonFilter
)
# Write selected events in FEVT content to the skim output file.
process.outputCsOnia = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RAW-RECO'),
        filterName = cms.untracked.string('CS_Onia')),
    SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('Skim_diMuons')),
    outputCommands = process.FEVTEventContent.outputCommands,
    fileName = cms.untracked.string('CS_Onia_1e29.root')
)
process.this_is_the_end = cms.EndPath(process.outputCsOnia)
| 1,269 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.openfile;
import java.awt.Component;
import java.awt.Frame;
import java.awt.Image;
import java.beans.PropertyChangeListener;
import java.io.File;
import java.util.Set;
import javax.swing.Action;
import org.netbeans.api.sendopts.CommandLine;
import org.netbeans.junit.MockServices;
import org.netbeans.junit.NbTestCase;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.nodes.Node;
import org.openide.nodes.NodeAcceptor;
import org.openide.nodes.NodeOperation;
import org.openide.util.UserCancelException;
import org.openide.windows.*;
/**
*
* @author <NAME>
*/
public class OpenCLITest extends NbTestCase {
File dir;
    /** JUnit3-style constructor required by NbTestCase. */
    public OpenCLITest(String testName) {
        super(testName);
    }
    // Run the test body on the AWT event queue (NbTestCase hook).
    @Override
    protected boolean runInEQ() {
        return true;
    }
    // Creates the scratch directory and installs the mock window manager and
    // node operation so the --open handler runs against test doubles.
    @Override
    protected void setUp() throws Exception {
        dir = new File(getWorkDir(), "tstdir");
        dir.mkdirs();
        MockServices.setServices(MockWindowManager.class,
            MockNodeOperation.class);
        MockNodeOperation.explored = null;
    }
    // No per-test cleanup needed; mocks are replaced in the next setUp().
    @Override
    protected void tearDown() throws Exception {
    }
    // Processes "--open <dir>" through the command line and verifies the mock
    // NodeOperation was asked to explore a node whose lookup holds that folder.
    public void testOpenFolder() throws Exception {
        CommandLine.create(Handler.class).process(
            new String[] { "--open", dir.getPath()});
        assertNotNull("A node has been explored", MockNodeOperation.explored);
        FileObject root = MockNodeOperation.explored.getLookup().lookup(FileObject.class);
        assertNotNull("There is a file object in lookup", root);
        assertEquals("It is our dir", dir, FileUtil.toFile(root));
    }
    /**
     * NodeOperation test double: records the node passed to explore() in the
     * static {@link #explored} field and fails the test if any other operation
     * is invoked.
     */
    public static final class MockNodeOperation extends NodeOperation {
        // Last node passed to explore(); reset to null in setUp().
        public static Node explored;
        @Override
        public boolean customize(Node n) {
            fail("No customize");
            return false;
        }
        @Override
        public void explore(Node n) {
            assertNull("No explore before", explored);
            explored = n;
        }
        @Override
        public void showProperties(Node n) {
            fail("no props");
        }
        @Override
        public void showProperties(Node[] n) {
            fail("no props");
        }
        @Override
        public Node[] select(String title, String rootTitle, Node root, NodeAcceptor acceptor, Component top) throws UserCancelException {
            fail("no select");
            return null;
        }
        }
public static final class MockWindowManager extends WindowManager {
@Override
public void invokeWhenUIReady(Runnable runnable) {
// run immediatelly
runnable.run();
}
@Override
public Mode findMode(String name) {
throw unsupp();
}
@Override
public Mode findMode(TopComponent tc) {
throw unsupp();
}
@Override
public Set<? extends Mode> getModes() {
throw unsupp();
}
@Override
public Frame getMainWindow() {
throw unsupp();
}
@Override
public void updateUI() {
throw unsupp();
}
@Override
protected Component createTopComponentManager(TopComponent c) {
throw unsupp();
}
@Override
public Workspace createWorkspace(String name, String displayName) {
throw unsupp();
}
@Override
public Workspace findWorkspace(String name) {
throw unsupp();
}
@Override
public Workspace[] getWorkspaces() {
throw unsupp();
}
@Override
public void setWorkspaces(Workspace[] workspaces) {
throw unsupp();
}
@Override
public Workspace getCurrentWorkspace() {
throw unsupp();
}
@Override
public TopComponentGroup findTopComponentGroup(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void addPropertyChangeListener(PropertyChangeListener l) {
throw unsupp();
}
@Override
public void removePropertyChangeListener(PropertyChangeListener l) {
throw unsupp();
}
@Override
protected void topComponentOpen(TopComponent tc) {
throw unsupp();
}
@Override
protected void topComponentClose(TopComponent tc) {
throw unsupp();
}
@Override
protected void topComponentRequestActive(TopComponent tc) {
throw unsupp();
}
@Override
protected void topComponentRequestVisible(TopComponent tc) {
throw unsupp();
}
@Override
protected void topComponentDisplayNameChanged(TopComponent tc, String displayName) {
throw unsupp();
}
@Override
protected void topComponentHtmlDisplayNameChanged(TopComponent tc, String htmlDisplayName) {
throw unsupp();
}
@Override
protected void topComponentToolTipChanged(TopComponent tc, String toolTip) {
throw unsupp();
}
@Override
protected void topComponentIconChanged(TopComponent tc, Image icon) {
throw unsupp();
}
@Override
protected void topComponentActivatedNodesChanged(TopComponent tc,
Node[] activatedNodes) {
throw unsupp();
}
@Override
protected boolean topComponentIsOpened(TopComponent tc) {
throw unsupp();
}
@Override
protected Action[] topComponentDefaultActions(TopComponent tc) {
throw unsupp();
}
@Override
protected String topComponentID(TopComponent tc, String preferredID) {
throw unsupp();
}
@Override
public TopComponent findTopComponent(String tcID) {
throw unsupp();
}
private UnsupportedOperationException unsupp() {
return new UnsupportedOperationException("Not supported yet.");
}
}
} | 2,943 |
1,382 | <filename>examples/firpfb_rrrf_example.c
// firpf_rrrf_example.c - demonstrate poly-phase filter-bank as interpolator
#include <stdlib.h>
#include <stdio.h>
#include "liquid.h"
#define OUTPUT_FILENAME "firpfb_rrrf_example.m"
int main(int argc, char*argv[]) {
// options
unsigned int M = 16; // interpolation factor
unsigned int m = 4; // filter delay (input samples)
unsigned int num_samples = 40; // number of input samples to generate
// create object
firpfb_rrrf pfb = firpfb_rrrf_create_default(M, m);
// generate and interpolate signal (windowed sinc pulse)
float buf_0[ num_samples];
float buf_1[M*num_samples];
unsigned int i, j;
for (i=0; i<num_samples; i++) {
// generate input random +1/-1 sequence
buf_0[i] = rand() & 1 ? 1. : -1.;
// push sample into filter bank
firpfb_rrrf_push(pfb, buf_0[i]);
// interpolate result (one output per branch)
for (j=0; j<M; j++)
firpfb_rrrf_execute(pfb, j, buf_1 + i*M + j);
}
// clean up objects
firpfb_rrrf_destroy(pfb);
// output to file
FILE*fid = fopen(OUTPUT_FILENAME,"w");
fprintf(fid,"%% %s: auto-generated file\n\n", OUTPUT_FILENAME);
fprintf(fid,"clear all; close all;\n\n");
fprintf(fid,"M = %u; m = %u; num_samples = %u\n", M, m, num_samples);
fprintf(fid,"x = zeros(1, num_samples);\n");
fprintf(fid,"y = zeros(1,M*num_samples);\n");
for (i=0; i< num_samples; i++) { fprintf(fid,"x(%3u) = %12.4e;\n", i+1, buf_0[i]); }
for (i=0; i<M*num_samples; i++) { fprintf(fid,"y(%3u) = %12.4e;\n", i+1, buf_1[i]); }
fprintf(fid,"tx = [0:( num_samples-1)];\n");
fprintf(fid,"ty = [0:(M*num_samples-1)]/M - m;\n");
fprintf(fid,"figure;\n");
fprintf(fid,"plot(ty,y,'-k',tx,x,'ob','MarkerSize',2);\n");
fprintf(fid,"grid on;\n");
fprintf(fid,"xlabel('Input Sample Index');\n");
fprintf(fid,"ylabel('Signal');\n");
fprintf(fid,"legend('Output','Input');\n");
fclose(fid);
printf("results written to %s\n", OUTPUT_FILENAME);
return 0;
}
| 990 |
335 | package moze_intel.projecte.capability;
import java.util.List;
import java.util.function.Supplier;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.CompoundNBT;
import net.minecraft.util.Direction;
import net.minecraftforge.common.capabilities.Capability;
import net.minecraftforge.common.capabilities.ICapabilitySerializable;
import net.minecraftforge.common.util.LazyOptional;
public class ItemCapabilityWrapper implements ICapabilitySerializable<CompoundNBT> {
private final ItemCapability<?>[] capabilities;
private final ItemStack itemStack;
public ItemCapabilityWrapper(ItemStack stack, List<Supplier<ItemCapability<?>>> capabilities) {
itemStack = stack;
this.capabilities = new ItemCapability<?>[capabilities.size()];
for (int i = 0; i < capabilities.size(); i++) {
ItemCapability<?> cap = capabilities.get(i).get();
this.capabilities[i] = cap;
cap.setWrapper(this);
}
}
public ItemCapabilityWrapper(ItemStack stack, ItemCapability<?>... capabilities) {
itemStack = stack;
this.capabilities = capabilities;
for (ItemCapability<?> cap : this.capabilities) {
cap.setWrapper(this);
}
}
protected ItemStack getItemStack() {
return itemStack;
}
@Nonnull
@Override
public <T> LazyOptional<T> getCapability(@Nonnull Capability<T> capability, @Nullable Direction side) {
for (ItemCapability<?> cap : capabilities) {
if (capability == cap.getCapability()) {
return cap.getLazyCapability().cast();
}
}
return LazyOptional.empty();
}
@Override
public CompoundNBT serializeNBT() {
CompoundNBT serializedNBT = new CompoundNBT();
for (ItemCapability<?> cap : capabilities) {
if (cap instanceof IItemCapabilitySerializable) {
IItemCapabilitySerializable serializableCap = (IItemCapabilitySerializable) cap;
serializedNBT.put(serializableCap.getStorageKey(), serializableCap.serializeNBT());
}
}
return serializedNBT;
}
@Override
public void deserializeNBT(CompoundNBT nbt) {
for (ItemCapability<?> cap : capabilities) {
if (cap instanceof IItemCapabilitySerializable) {
IItemCapabilitySerializable serializableCap = (IItemCapabilitySerializable) cap;
if (nbt.contains(serializableCap.getStorageKey())) {
serializableCap.deserializeNBT(nbt.get(serializableCap.getStorageKey()));
}
}
}
}
} | 810 |
540 | // File generated from our OpenAPI spec
package com.stripe.model;
public class SubscriptionScheduleCollection extends StripeCollection<SubscriptionSchedule> {}
| 38 |
3,372 | <reponame>rbalamohan/aws-sdk-java
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.mq;
import javax.annotation.Generated;
import com.amazonaws.services.mq.model.*;
import com.amazonaws.*;
/**
* Abstract implementation of {@code AmazonMQ}. Convenient method forms pass through to the corresponding overload that
* takes a request object, which throws an {@code UnsupportedOperationException}.
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AbstractAmazonMQ implements AmazonMQ {
protected AbstractAmazonMQ() {
}
@Override
public CreateBrokerResult createBroker(CreateBrokerRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public CreateConfigurationResult createConfiguration(CreateConfigurationRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public CreateTagsResult createTags(CreateTagsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public CreateUserResult createUser(CreateUserRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DeleteBrokerResult deleteBroker(DeleteBrokerRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DeleteTagsResult deleteTags(DeleteTagsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DeleteUserResult deleteUser(DeleteUserRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeBrokerResult describeBroker(DescribeBrokerRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeBrokerEngineTypesResult describeBrokerEngineTypes(DescribeBrokerEngineTypesRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeBrokerInstanceOptionsResult describeBrokerInstanceOptions(DescribeBrokerInstanceOptionsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeConfigurationResult describeConfiguration(DescribeConfigurationRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeConfigurationRevisionResult describeConfigurationRevision(DescribeConfigurationRevisionRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public DescribeUserResult describeUser(DescribeUserRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public ListBrokersResult listBrokers(ListBrokersRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public ListConfigurationRevisionsResult listConfigurationRevisions(ListConfigurationRevisionsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public ListConfigurationsResult listConfigurations(ListConfigurationsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public ListTagsResult listTags(ListTagsRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public ListUsersResult listUsers(ListUsersRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public RebootBrokerResult rebootBroker(RebootBrokerRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public UpdateBrokerResult updateBroker(UpdateBrokerRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public UpdateConfigurationResult updateConfiguration(UpdateConfigurationRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public UpdateUserResult updateUser(UpdateUserRequest request) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public void shutdown() {
throw new java.lang.UnsupportedOperationException();
}
@Override
public com.amazonaws.ResponseMetadata getCachedResponseMetadata(com.amazonaws.AmazonWebServiceRequest request) {
throw new java.lang.UnsupportedOperationException();
}
}
| 1,510 |
521 | <reponame>Fimbure/icebox-1
/* $Id: VBoxNetCfg.cpp $ */
/** @file
* VBoxNetCfg.cpp - Network Configuration API.
*/
/*
* Copyright (C) 2011-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#include "VBox/VBoxNetCfg-win.h"
#include "VBox/VBoxDrvCfg-win.h"
#define _WIN32_DCOM
#include <devguid.h>
#include <stdio.h>
#include <regstr.h>
#include <iprt/win/shlobj.h>
#include <cfgmgr32.h>
#include <tchar.h>
#include <iprt/win/objbase.h>
#include <crtdbg.h>
#include <stdlib.h>
#include <string.h>
#include <Wbemidl.h>
#include <comdef.h>
#include <iprt/win/winsock2.h>
#include <iprt/win/ws2tcpip.h>
#include <ws2ipdef.h>
#include <iprt/win/netioapi.h>
#include <iprt/win/iphlpapi.h>
#ifndef Assert /** @todo r=bird: where would this be defined? */
//# ifdef DEBUG
//# define Assert(_expr) assert(_expr)
//# else
//# define Assert(_expr) do{ }while (0)
//# endif
# define Assert _ASSERT
# define AssertMsg(expr, msg) do{}while (0)
#endif
static LOG_ROUTINE g_Logger = NULL;
static VOID DoLogging(LPCSTR szString, ...);
#define NonStandardLog DoLogging
#define NonStandardLogFlow(x) DoLogging x
#define DbgLog /** @todo r=bird: What does this do? */
#define VBOX_NETCFG_LOCK_TIME_OUT 5000 /** @todo r=bird: What does this do? */
#define VBOXNETCFGWIN_NETADP_ID L"sun_VBoxNetAdp"
/*
* Wrappers for HelpAPI functions
*/
typedef void FNINITIALIZEIPINTERFACEENTRY( _Inout_ PMIB_IPINTERFACE_ROW row);
typedef FNINITIALIZEIPINTERFACEENTRY *PFNINITIALIZEIPINTERFACEENTRY;
typedef NETIOAPI_API FNGETIPINTERFACEENTRY( _Inout_ PMIB_IPINTERFACE_ROW row);
typedef FNGETIPINTERFACEENTRY *PFNGETIPINTERFACEENTRY;
typedef NETIOAPI_API FNSETIPINTERFACEENTRY( _Inout_ PMIB_IPINTERFACE_ROW row);
typedef FNSETIPINTERFACEENTRY *PFNSETIPINTERFACEENTRY;
static PFNINITIALIZEIPINTERFACEENTRY g_pfnInitializeIpInterfaceEntry = NULL;
static PFNGETIPINTERFACEENTRY g_pfnGetIpInterfaceEntry = NULL;
static PFNSETIPINTERFACEENTRY g_pfnSetIpInterfaceEntry = NULL;
/*
* Forward declaration for using vboxNetCfgWinSetupMetric()
*/
HRESULT vboxNetCfgWinSetupMetric(IN NET_LUID* pLuid);
HRESULT vboxNetCfgWinGetInterfaceLUID(IN HKEY hKey, OUT NET_LUID* pLUID);
/*
* For some weird reason we do not want to use IPRT here, hence the following
* function provides a replacement for BstrFmt.
*/
static bstr_t bstr_printf(const char *cszFmt, ...)
{
char szBuffer[4096];
szBuffer[sizeof(szBuffer) - 1] = 0; /* Make sure the string will be null-terminated */
va_list va;
va_start(va, cszFmt);
_vsnprintf(szBuffer, sizeof(szBuffer) - 1, cszFmt, va);
va_end(va);
return bstr_t(szBuffer);
}
static HRESULT vboxNetCfgWinINetCfgLock(IN INetCfg *pNetCfg,
IN LPCWSTR pszwClientDescription,
IN DWORD cmsTimeout,
OUT LPWSTR *ppszwClientDescription)
{
INetCfgLock *pLock;
HRESULT hr = pNetCfg->QueryInterface(IID_INetCfgLock, (PVOID*)&pLock);
if (FAILED(hr))
{
NonStandardLogFlow(("QueryInterface failed, hr (0x%x)\n", hr));
return hr;
}
hr = pLock->AcquireWriteLock(cmsTimeout, pszwClientDescription, ppszwClientDescription);
if (hr == S_FALSE)
{
NonStandardLogFlow(("Write lock busy\n"));
}
else if (FAILED(hr))
{
NonStandardLogFlow(("AcquireWriteLock failed, hr (0x%x)\n", hr));
}
pLock->Release();
return hr;
}
static HRESULT vboxNetCfgWinINetCfgUnlock(IN INetCfg *pNetCfg)
{
INetCfgLock *pLock;
HRESULT hr = pNetCfg->QueryInterface(IID_INetCfgLock, (PVOID*)&pLock);
if (FAILED(hr))
{
NonStandardLogFlow(("QueryInterface failed, hr (0x%x)\n", hr));
return hr;
}
hr = pLock->ReleaseWriteLock();
if (FAILED(hr))
NonStandardLogFlow(("ReleaseWriteLock failed, hr (0x%x)\n", hr));
pLock->Release();
return hr;
}
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinQueryINetCfg(OUT INetCfg **ppNetCfg,
IN BOOL fGetWriteLock,
IN LPCWSTR pszwClientDescription,
IN DWORD cmsTimeout,
OUT LPWSTR *ppszwClientDescription)
{
INetCfg *pNetCfg;
HRESULT hr = CoCreateInstance(CLSID_CNetCfg, NULL, CLSCTX_INPROC_SERVER, IID_INetCfg, (PVOID*)&pNetCfg);
if (FAILED(hr))
{
NonStandardLogFlow(("CoCreateInstance failed, hr (0x%x)\n", hr));
return hr;
}
if (fGetWriteLock)
{
hr = vboxNetCfgWinINetCfgLock(pNetCfg, pszwClientDescription, cmsTimeout, ppszwClientDescription);
if (hr == S_FALSE)
{
NonStandardLogFlow(("Write lock is busy\n", hr));
hr = NETCFG_E_NO_WRITE_LOCK;
}
}
if (SUCCEEDED(hr))
{
hr = pNetCfg->Initialize(NULL);
if (SUCCEEDED(hr))
{
*ppNetCfg = pNetCfg;
return S_OK;
}
else
NonStandardLogFlow(("Initialize failed, hr (0x%x)\n", hr));
}
pNetCfg->Release();
return hr;
}
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinReleaseINetCfg(IN INetCfg *pNetCfg, IN BOOL fHasWriteLock)
{
if (!pNetCfg) /* If network config has been released already, just bail out. */
{
NonStandardLogFlow(("Warning: No network config given but write lock is set to TRUE\n"));
return S_OK;
}
HRESULT hr = pNetCfg->Uninitialize();
if (FAILED(hr))
{
NonStandardLogFlow(("Uninitialize failed, hr (0x%x)\n", hr));
/* Try to release the write lock below. */
}
if (fHasWriteLock)
{
HRESULT hr2 = vboxNetCfgWinINetCfgUnlock(pNetCfg);
if (FAILED(hr2))
NonStandardLogFlow(("vboxNetCfgWinINetCfgUnlock failed, hr (0x%x)\n", hr2));
if (SUCCEEDED(hr))
hr = hr2;
}
pNetCfg->Release();
return hr;
}
static HRESULT vboxNetCfgWinGetComponentByGuidEnum(IEnumNetCfgComponent *pEnumNcc,
IN const GUID *pGuid,
OUT INetCfgComponent **ppNcc)
{
HRESULT hr = pEnumNcc->Reset();
if (FAILED(hr))
{
NonStandardLogFlow(("Reset failed, hr (0x%x)\n", hr));
return hr;
}
INetCfgComponent *pNcc;
while ((hr = pEnumNcc->Next(1, &pNcc, NULL)) == S_OK)
{
ULONG uComponentStatus;
hr = pNcc->GetDeviceStatus(&uComponentStatus);
if (SUCCEEDED(hr))
{
if (uComponentStatus == 0)
{
GUID NccGuid;
hr = pNcc->GetInstanceGuid(&NccGuid);
if (SUCCEEDED(hr))
{
if (NccGuid == *pGuid)
{
/* found the needed device */
*ppNcc = pNcc;
break;
}
}
else
NonStandardLogFlow(("GetInstanceGuid failed, hr (0x%x)\n", hr));
}
}
pNcc->Release();
}
return hr;
}
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinGetComponentByGuid(IN INetCfg *pNc,
IN const GUID *pguidClass,
IN const GUID * pComponentGuid,
OUT INetCfgComponent **ppncc)
{
IEnumNetCfgComponent *pEnumNcc;
HRESULT hr = pNc->EnumComponents(pguidClass, &pEnumNcc);
if (SUCCEEDED(hr))
{
hr = vboxNetCfgWinGetComponentByGuidEnum(pEnumNcc, pComponentGuid, ppncc);
if (hr == S_FALSE)
{
NonStandardLogFlow(("Component not found\n"));
}
else if (FAILED(hr))
{
NonStandardLogFlow(("vboxNetCfgWinGetComponentByGuidEnum failed, hr (0x%x)\n", hr));
}
pEnumNcc->Release();
}
else
NonStandardLogFlow(("EnumComponents failed, hr (0x%x)\n", hr));
return hr;
}
static HRESULT vboxNetCfgWinQueryInstaller(IN INetCfg *pNetCfg, IN const GUID *pguidClass, INetCfgClassSetup **ppSetup)
{
HRESULT hr = pNetCfg->QueryNetCfgClass(pguidClass, IID_INetCfgClassSetup, (void**)ppSetup);
if (FAILED(hr))
NonStandardLogFlow(("QueryNetCfgClass failed, hr (0x%x)\n", hr));
return hr;
}
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinInstallComponent(IN INetCfg *pNetCfg, IN LPCWSTR pszwComponentId, IN const GUID *pguidClass,
OUT INetCfgComponent **ppComponent)
{
INetCfgClassSetup *pSetup;
HRESULT hr = vboxNetCfgWinQueryInstaller(pNetCfg, pguidClass, &pSetup);
if (FAILED(hr))
{
NonStandardLogFlow(("vboxNetCfgWinQueryInstaller failed, hr (0x%x)\n", hr));
return hr;
}
OBO_TOKEN Token;
ZeroMemory(&Token, sizeof (Token));
Token.Type = OBO_USER;
INetCfgComponent* pTempComponent = NULL;
hr = pSetup->Install(pszwComponentId, &Token,
0, /* IN DWORD dwSetupFlags */
0, /* IN DWORD dwUpgradeFromBuildNo */
NULL, /* IN LPCWSTR pszwAnswerFile */
NULL, /* IN LPCWSTR pszwAnswerSections */
&pTempComponent);
if (SUCCEEDED(hr))
{
if (pTempComponent != NULL)
{
HKEY hkey = (HKEY)INVALID_HANDLE_VALUE;
HRESULT res;
/*
* Set default metric value of interface to fix multicast issue
* See @bugref{6379} for details.
*/
res = pTempComponent->OpenParamKey(&hkey);
/* Set default metric value for host-only interface only */
if ( SUCCEEDED(res)
&& hkey != INVALID_HANDLE_VALUE
&& wcsnicmp(pszwComponentId, VBOXNETCFGWIN_NETADP_ID, 256) == 0)
{
NET_LUID luid;
res = vboxNetCfgWinGetInterfaceLUID(hkey, &luid);
/* Close the key as soon as possible. See @bugref{7973}. */
RegCloseKey (hkey);
hkey = (HKEY)INVALID_HANDLE_VALUE;
if (FAILED(res))
{
/*
* The setting of Metric is not very important functionality,
* So we will not break installation process due to this error.
*/
NonStandardLogFlow(("VBoxNetCfgWinInstallComponent Warning! "
"vboxNetCfgWinGetInterfaceLUID failed, default metric "
"for new interface will not be set, hr (0x%x)\n", res));
}
else
{
res = vboxNetCfgWinSetupMetric(&luid);
if (FAILED(res))
{
/*
* The setting of Metric is not very important functionality,
* So we will not break installation process due to this error.
*/
NonStandardLogFlow(("VBoxNetCfgWinInstallComponent Warning! "
"vboxNetCfgWinSetupMetric failed, default metric "
"for new interface will not be set, hr (0x%x)\n", res));
}
}
}
if (hkey != INVALID_HANDLE_VALUE)
{
RegCloseKey (hkey);
hkey = (HKEY)INVALID_HANDLE_VALUE;
}
if (ppComponent != NULL)
*ppComponent = pTempComponent;
else
pTempComponent->Release();
}
/* ignore the apply failure */
HRESULT tmpHr = pNetCfg->Apply();
Assert(tmpHr == S_OK);
if (tmpHr != S_OK)
NonStandardLogFlow(("Apply failed, hr (0x%x)\n", tmpHr));
}
else
NonStandardLogFlow(("Install failed, hr (0x%x)\n", hr));
pSetup->Release();
return hr;
}
static HRESULT vboxNetCfgWinInstallInfAndComponent(IN INetCfg *pNetCfg, IN LPCWSTR pszwComponentId, IN const GUID *pguidClass,
IN LPCWSTR const *apInfPaths, IN UINT cInfPaths,
OUT INetCfgComponent **ppComponent)
{
HRESULT hr = S_OK;
UINT cFilesProcessed = 0;
NonStandardLogFlow(("Installing %u INF files ...\n", cInfPaths));
for (; cFilesProcessed < cInfPaths; cFilesProcessed++)
{
NonStandardLogFlow(("Installing INF file \"%ws\" ...\n", apInfPaths[cFilesProcessed]));
hr = VBoxDrvCfgInfInstall(apInfPaths[cFilesProcessed]);
if (FAILED(hr))
{
NonStandardLogFlow(("VBoxNetCfgWinInfInstall failed, hr (0x%x)\n", hr));
break;
}
}
if (SUCCEEDED(hr))
{
hr = VBoxNetCfgWinInstallComponent(pNetCfg, pszwComponentId, pguidClass, ppComponent);
if (FAILED(hr))
NonStandardLogFlow(("VBoxNetCfgWinInstallComponent failed, hr (0x%x)\n", hr));
}
if (FAILED(hr))
{
NonStandardLogFlow(("Installation failed, rolling back installation set ...\n"));
do
{
HRESULT hr2 = VBoxDrvCfgInfUninstall(apInfPaths[cFilesProcessed], 0);
if (FAILED(hr2))
NonStandardLogFlow(("VBoxDrvCfgInfUninstall failed, hr (0x%x)\n", hr2));
/* Keep going. */
if (!cFilesProcessed)
break;
} while (cFilesProcessed--);
NonStandardLogFlow(("Rollback complete\n"));
}
return hr;
}
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinUninstallComponent(IN INetCfg *pNetCfg, IN INetCfgComponent *pComponent)
{
GUID GuidClass;
HRESULT hr = pComponent->GetClassGuid(&GuidClass);
if (FAILED(hr))
{
NonStandardLogFlow(("GetClassGuid failed, hr (0x%x)\n", hr));
return hr;
}
INetCfgClassSetup *pSetup = NULL;
hr = vboxNetCfgWinQueryInstaller(pNetCfg, &GuidClass, &pSetup);
if (FAILED(hr))
{
NonStandardLogFlow(("vboxNetCfgWinQueryInstaller failed, hr (0x%x)\n", hr));
return hr;
}
OBO_TOKEN Token;
ZeroMemory(&Token, sizeof(Token));
Token.Type = OBO_USER;
hr = pSetup->DeInstall(pComponent, &Token, NULL /* OUT LPWSTR *pmszwRefs */);
if (SUCCEEDED(hr))
{
hr = pNetCfg->Apply();
if (FAILED(hr))
NonStandardLogFlow(("Apply failed, hr (0x%x)\n", hr));
}
else
NonStandardLogFlow(("DeInstall failed, hr (0x%x)\n", hr));
if (pSetup)
pSetup->Release();
return hr;
}
typedef BOOL (*VBOXNETCFGWIN_NETCFGENUM_CALLBACK) (IN INetCfg *pNetCfg, IN INetCfgComponent *pNetCfgComponent, PVOID pContext);
static HRESULT vboxNetCfgWinEnumNetCfgComponents(IN INetCfg *pNetCfg,
IN const GUID *pguidClass,
VBOXNETCFGWIN_NETCFGENUM_CALLBACK callback,
PVOID pContext)
{
IEnumNetCfgComponent *pEnumComponent;
HRESULT hr = pNetCfg->EnumComponents(pguidClass, &pEnumComponent);
if (SUCCEEDED(hr))
{
INetCfgComponent *pNetCfgComponent;
hr = pEnumComponent->Reset();
do
{
hr = pEnumComponent->Next(1, &pNetCfgComponent, NULL);
if (hr == S_OK)
{
// ULONG uComponentStatus;
// hr = pNcc->GetDeviceStatus(&uComponentStatus);
// if (SUCCEEDED(hr))
BOOL fResult = FALSE;
if (pNetCfgComponent)
{
if (pContext)
fResult = callback(pNetCfg, pNetCfgComponent, pContext);
pNetCfgComponent->Release();
}
if (!fResult)
break;
}
else
{
if (hr == S_FALSE)
{
hr = S_OK;
}
else
NonStandardLogFlow(("Next failed, hr (0x%x)\n", hr));
break;
}
} while (true);
pEnumComponent->Release();
}
return hr;
}
/*
* Forward declarations of functions used in vboxNetCfgWinRemoveAllNetDevicesOfIdCallback.
*/
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinGenHostonlyConnectionName(PCWSTR DevName, WCHAR *pBuf, PULONG pcbBuf);
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinRenameConnection(LPWSTR pGuid, PCWSTR NewName);
static BOOL vboxNetCfgWinRemoveAllNetDevicesOfIdCallback(HDEVINFO hDevInfo, PSP_DEVINFO_DATA pDev, PVOID pvContext)
{
RT_NOREF1(pvContext);
SP_REMOVEDEVICE_PARAMS rmdParams;
memset(&rmdParams, 0, sizeof(SP_REMOVEDEVICE_PARAMS));
rmdParams.ClassInstallHeader.cbSize = sizeof(SP_CLASSINSTALL_HEADER);
rmdParams.ClassInstallHeader.InstallFunction = DIF_REMOVE;
rmdParams.Scope = DI_REMOVEDEVICE_GLOBAL;
if (SetupDiSetClassInstallParams(hDevInfo,pDev,
&rmdParams.ClassInstallHeader, sizeof(rmdParams)))
{
if (SetupDiSetSelectedDevice(hDevInfo, pDev))
{
#ifndef VBOXNETCFG_DELAYEDRENAME
/* Figure out NetCfgInstanceId. */
HKEY hKey = SetupDiOpenDevRegKey(hDevInfo,
pDev,
DICS_FLAG_GLOBAL,
0,
DIREG_DRV,
KEY_READ);
if (hKey == INVALID_HANDLE_VALUE)
{
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: SetupDiOpenDevRegKey failed with error %ld\n",
GetLastError()));
}
else
{
WCHAR wszCfgGuidString[50] = { L'' };
DWORD cbSize = sizeof(wszCfgGuidString);
DWORD dwValueType;
DWORD ret = RegQueryValueExW(hKey, L"NetCfgInstanceId", NULL,
&dwValueType, (LPBYTE)wszCfgGuidString, &cbSize);
if (ret == ERROR_SUCCESS)
{
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: Processing device ID \"%S\"\n",
wszCfgGuidString));
/* Figure out device name. */
WCHAR wszDevName[256], wszTempName[256];
ULONG cbName = sizeof(wszTempName);
if (SetupDiGetDeviceRegistryPropertyW(hDevInfo, pDev,
SPDRP_FRIENDLYNAME, /* IN DWORD Property,*/
NULL, /* OUT PDWORD PropertyRegDataType, OPTIONAL*/
(PBYTE)wszDevName, /* OUT PBYTE PropertyBuffer,*/
sizeof(wszDevName), /* IN DWORD PropertyBufferSize,*/
NULL /* OUT PDWORD RequiredSize OPTIONAL*/))
{
/*
* Rename the connection before removing the device. This will
* hopefully prevent an error when we will be attempting
* to rename a newly created connection (see @bugref{6740}).
*/
HRESULT hr = VBoxNetCfgWinGenHostonlyConnectionName(wszDevName, wszTempName, &cbName);
wcscat_s(wszTempName, sizeof(wszTempName), L" removed");
if (SUCCEEDED(hr))
hr = VBoxNetCfgWinRenameConnection(wszCfgGuidString, wszTempName);
//NonStandardLogFlow(("VBoxNetCfgWinRenameConnection(%S,%S) => 0x%x\n", pWCfgGuidString, TempName, hr_tmp));
}
else
{
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: Failed to get friendly name for device \"%S\"\n",
wszCfgGuidString));
}
}
else
{
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: Querying instance ID failed with %d\n",
ret));
}
RegCloseKey(hKey);
}
#endif /* VBOXNETCFG_DELAYEDRENAME */
if (SetupDiCallClassInstaller(DIF_REMOVE, hDevInfo, pDev))
{
SP_DEVINSTALL_PARAMS devParams;
memset(&devParams, 0, sizeof(SP_DEVINSTALL_PARAMS));
devParams.cbSize = sizeof(devParams);
if (SetupDiGetDeviceInstallParams(hDevInfo, pDev, &devParams))
{
if ( (devParams.Flags & DI_NEEDRESTART)
|| (devParams.Flags & DI_NEEDREBOOT))
{
NonStandardLog(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: A reboot is required\n"));
}
}
else
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: SetupDiGetDeviceInstallParams failed with %ld\n",
GetLastError()));
}
else
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: SetupDiCallClassInstaller failed with %ld\n",
GetLastError()));
}
else
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: SetupDiSetSelectedDevice failed with %ld\n",
GetLastError()));
}
else
NonStandardLogFlow(("vboxNetCfgWinRemoveAllNetDevicesOfIdCallback: SetupDiSetClassInstallParams failed with %ld\n",
GetLastError()));
/* Continue enumeration. */
return TRUE;
}
typedef struct VBOXNECTFGWINPROPCHANGE
{
VBOXNECTFGWINPROPCHANGE_TYPE enmPcType;
HRESULT hr;
} VBOXNECTFGWINPROPCHANGE ,*PVBOXNECTFGWINPROPCHANGE;
/**
 * VBoxNetCfgWinEnumNetDevices callback which applies the property change
 * (enable/disable) requested in the VBOXNECTFGWINPROPCHANGE context to one
 * matching device.
 *
 * @param hDevInfo  Device information set being enumerated.
 * @param pDev      Current device element.
 * @param pContext  PVBOXNECTFGWINPROPCHANGE with the requested change type.
 * @return TRUE to continue enumeration (per-device API failures are only
 *         logged); FALSE for an invalid change type, with pContext->hr set.
 */
static BOOL vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback(HDEVINFO hDevInfo, PSP_DEVINFO_DATA pDev, PVOID pContext)
{
    PVBOXNECTFGWINPROPCHANGE pPc = (PVBOXNECTFGWINPROPCHANGE)pContext;
    SP_PROPCHANGE_PARAMS PcParams;
    memset(&PcParams, 0, sizeof(PcParams));
    PcParams.ClassInstallHeader.cbSize = sizeof(SP_CLASSINSTALL_HEADER);
    PcParams.ClassInstallHeader.InstallFunction = DIF_PROPERTYCHANGE;
    PcParams.Scope = DICS_FLAG_GLOBAL;
    switch(pPc->enmPcType)
    {
        case VBOXNECTFGWINPROPCHANGE_TYPE_DISABLE:
            PcParams.StateChange = DICS_DISABLE;
            NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: Change type (DICS_DISABLE): %d\n", pPc->enmPcType));
            break;
        case VBOXNECTFGWINPROPCHANGE_TYPE_ENABLE:
            PcParams.StateChange = DICS_ENABLE;
            NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: Change type (DICS_ENABLE): %d\n", pPc->enmPcType));
            break;
        default:
            NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: Unexpected prop change type: %d\n", pPc->enmPcType));
            pPc->hr = E_INVALIDARG;
            /* The request itself is bogus; stop the whole enumeration. */
            return FALSE;
    }
    if (SetupDiSetClassInstallParams(hDevInfo, pDev, &PcParams.ClassInstallHeader, sizeof(PcParams)))
    {
        if (SetupDiSetSelectedDevice(hDevInfo, pDev))
        {
            if (SetupDiCallClassInstaller(DIF_PROPERTYCHANGE, hDevInfo, pDev))
            {
                SP_DEVINSTALL_PARAMS devParams;
                /* Zero the whole structure before use, matching the removal
                 * callback; previously only cbSize was initialized. */
                memset(&devParams, 0, sizeof(devParams));
                devParams.cbSize = sizeof(devParams);
                if (SetupDiGetDeviceInstallParams(hDevInfo, pDev, &devParams))
                {
                    /* Only log the reboot requirement; the caller decides what to do. */
                    if (   (devParams.Flags & DI_NEEDRESTART)
                        || (devParams.Flags & DI_NEEDREBOOT))
                    {
                        NonStandardLog(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: A reboot is required\n"));
                    }
                }
                else
                    NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: SetupDiGetDeviceInstallParams failed with %ld\n",
                                        GetLastError()));
            }
            else
                NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback: SetupDiCallClassInstaller failed with %ld\n",
                                    GetLastError()));
        }
        else
            NonStandardLogFlow(("SetupDiSetSelectedDevice failed with %ld\n", GetLastError()));
    }
    else
        NonStandardLogFlow(("SetupDiSetClassInstallParams failed with %ld\n", GetLastError()));
    /* Continue enumeration. */
    return TRUE;
}
/** Per-device callback for VBoxNetCfgWinEnumNetDevices; return FALSE to stop the enumeration. */
typedef BOOL (*PFNVBOXNETCFGWINNETENUMCALLBACK)(HDEVINFO hDevInfo, PSP_DEVINFO_DATA pDev, PVOID pContext);
/**
 * Enumerates all present network-class devices whose hardware id ends with
 * @a pwszPnPId (case-insensitive suffix match) and invokes @a pfnCallback for
 * each match.
 *
 * @param pwszPnPId    PnP hardware id suffix to look for.
 * @param pfnCallback  Called per matching device; returning FALSE stops the walk.
 * @param pvContext    Opaque user pointer handed to the callback.
 * @return HRESULT derived from the last Win32 error; S_OK when the whole
 *         enumeration ran to completion.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinEnumNetDevices(LPCWSTR pwszPnPId,
                                                        PFNVBOXNETCFGWINNETENUMCALLBACK pfnCallback, PVOID pvContext)
{
    NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Searching for: %S\n", pwszPnPId));
    HRESULT hr;
    HDEVINFO hDevInfo = SetupDiGetClassDevsExW(&GUID_DEVCLASS_NET,
                                               NULL, /* IN PCTSTR Enumerator, OPTIONAL */
                                               NULL, /* IN HWND hwndParent, OPTIONAL */
                                               DIGCF_PRESENT, /* IN DWORD Flags,*/
                                               NULL, /* IN HDEVINFO DeviceInfoSet, OPTIONAL */
                                               NULL, /* IN PCTSTR MachineName, OPTIONAL */
                                               NULL /* IN PVOID Reserved */);
    if (hDevInfo != INVALID_HANDLE_VALUE)
    {
        DWORD winEr = NO_ERROR;
        DWORD dwDevId = 0;
        size_t cPnPId = wcslen(pwszPnPId);
        /* Reusable buffer for the (variable-sized) SPDRP_HARDWAREID property. */
        PBYTE pBuffer = NULL;
        for (;;)
        {
            SP_DEVINFO_DATA Dev;
            memset(&Dev, 0, sizeof(SP_DEVINFO_DATA));
            Dev.cbSize = sizeof(SP_DEVINFO_DATA);
            if (!SetupDiEnumDeviceInfo(hDevInfo, dwDevId, &Dev))
            {
                /* ERROR_NO_MORE_ITEMS simply means the set is exhausted. */
                winEr = GetLastError();
                if (winEr == ERROR_NO_MORE_ITEMS)
                    winEr = ERROR_SUCCESS;
                break;
            }
            NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Enumerating device %ld ... \n", dwDevId));
            dwDevId++;
            if (pBuffer)
                free(pBuffer);
            pBuffer = NULL;
            DWORD cbBuffer = 0;
            DWORD cbRequired = 0;
            /* First call with a NULL buffer just queries the required size. */
            if (!SetupDiGetDeviceRegistryPropertyW(hDevInfo, &Dev,
                                                   SPDRP_HARDWAREID, /* IN DWORD Property */
                                                   NULL,             /* OUT PDWORD PropertyRegDataType OPTIONAL */
                                                   pBuffer,          /* OUT PBYTE PropertyBuffer */
                                                   cbBuffer,         /* IN DWORD PropertyBufferSize */
                                                   &cbRequired       /* OUT PDWORD RequiredSize OPTIONAL */))
            {
                winEr = GetLastError();
                if (winEr != ERROR_INSUFFICIENT_BUFFER)
                {
                    NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: SetupDiGetDeviceRegistryPropertyW (1) failed with %ld\n", winEr));
                    break;
                }
                pBuffer = (PBYTE)malloc(cbRequired);
                if (!pBuffer)
                {
                    NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Out of memory allocating %ld bytes\n",
                                        cbRequired));
                    winEr = ERROR_OUTOFMEMORY;
                    break;
                }
                cbBuffer = cbRequired;
                /* Second call fetches the actual hardware id data. */
                if (!SetupDiGetDeviceRegistryPropertyW(hDevInfo,&Dev,
                                                       SPDRP_HARDWAREID, /* IN DWORD Property */
                                                       NULL,             /* OUT PDWORD PropertyRegDataType, OPTIONAL */
                                                       pBuffer,          /* OUT PBYTE PropertyBuffer */
                                                       cbBuffer,         /* IN DWORD PropertyBufferSize */
                                                       &cbRequired       /* OUT PDWORD RequiredSize OPTIONAL */))
                {
                    winEr = GetLastError();
                    NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: SetupDiGetDeviceRegistryPropertyW (2) failed with %ld\n",
                                        winEr));
                    break;
                }
            }
            /* NOTE(review): only the first string of the (multi-sz) hardware id
             * list is examined here -- presumably sufficient for the ids we
             * create; confirm against the callers. */
            PWSTR pCurId = (PWSTR)pBuffer;
            size_t cCurId = wcslen(pCurId);
            NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Device %ld: %S\n", dwDevId, pCurId));
            if (cCurId >= cPnPId)
            {
                NonStandardLogFlow(("!wcsnicmp(pCurId = (%S), pwszPnPId = (%S), cPnPId = (%d))\n", pCurId, pwszPnPId, cPnPId));
                /* Compare the trailing cPnPId characters only (suffix match). */
                pCurId += cCurId - cPnPId;
                if (!wcsnicmp(pCurId, pwszPnPId, cPnPId))
                {
                    if (!pfnCallback(hDevInfo, &Dev, pvContext))
                        break;
                }
            }
        }
        NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Found %ld devices total\n", dwDevId));
        if (pBuffer)
            free(pBuffer);
        hr = HRESULT_FROM_WIN32(winEr);
        SetupDiDestroyDeviceInfoList(hDevInfo);
    }
    else
    {
        DWORD winEr = GetLastError();
        NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: SetupDiGetClassDevsExW failed with %ld\n", winEr));
        hr = HRESULT_FROM_WIN32(winEr);
    }
    NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices: Ended with hr (0x%x)\n", hr));
    return hr;
}
/**
 * Removes every present network device whose PnP hardware id ends with
 * @a lpszPnPId by running the removal callback over the enumeration.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinRemoveAllNetDevicesOfId(IN LPCWSTR lpszPnPId)
{
    HRESULT hrEnum = VBoxNetCfgWinEnumNetDevices(lpszPnPId,
                                                 vboxNetCfgWinRemoveAllNetDevicesOfIdCallback,
                                                 NULL /* pvContext - not needed */);
    return hrEnum;
}
/**
 * Applies the given property change (enable/disable) to every present network
 * device whose PnP hardware id ends with @a lpszPnPId.
 *
 * @return Failure from the enumeration itself, or the error the callback
 *         recorded in its context; S_OK otherwise.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinPropChangeAllNetDevicesOfId(IN LPCWSTR lpszPnPId, VBOXNECTFGWINPROPCHANGE_TYPE enmPcType)
{
    VBOXNECTFGWINPROPCHANGE Ctx;
    Ctx.enmPcType = enmPcType;
    Ctx.hr = S_OK;
    NonStandardLogFlow(("Calling VBoxNetCfgWinEnumNetDevices with lpszPnPId =(%S) and vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback\n", lpszPnPId));
    HRESULT hr = VBoxNetCfgWinEnumNetDevices(lpszPnPId, vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback, &Ctx);
    if (FAILED(hr))
    {
        NonStandardLogFlow(("VBoxNetCfgWinEnumNetDevices failed 0x%x\n", hr));
        return hr;
    }
    if (FAILED(Ctx.hr))
    {
        NonStandardLogFlow(("vboxNetCfgWinPropChangeAllNetDevicesOfIdCallback failed 0x%x\n", Ctx.hr));
        return Ctx.hr;
    }
    return S_OK;
}
/*
* logging
*/
/**
 * Internal printf-style logging helper: formats the message and forwards it to
 * the logger installed via VBoxNetCfgWinSetLogging. No-op when no logger is set.
 */
static VOID DoLogging(LPCSTR szString, ...)
{
    /* Volatile read so a logger installed/cleared concurrently is observed. */
    LOG_ROUTINE pfnRoutine = (LOG_ROUTINE)(*((void * volatile *)&g_Logger));
    if (pfnRoutine)
    {
        char szBuffer[4096] = {0};
        va_list va;
        va_start(va, szString);
        /* Format at most sizeof - 1 characters: _vsnprintf does NOT null
         * terminate on truncation, so the final zero byte from the
         * initializer must be left untouched. */
        _vsnprintf(szBuffer, RT_ELEMENTS(szBuffer) - 1, szString, va);
        va_end(va);
        pfnRoutine(szBuffer);
    }
}
/**
 * Installs (or clears, when NULL) the logging callback used by DoLogging.
 */
VBOXNETCFGWIN_DECL(VOID) VBoxNetCfgWinSetLogging(IN LOG_ROUTINE pfnLog)
{
    /* Publish through a volatile pointer slot, mirroring the read side. */
    void * volatile *ppvSlot = (void * volatile *)&g_Logger;
    *ppvSlot = pfnLog;
}
/*
* IP configuration API
*/
/* network settings config */
/**
 * Strong referencing operators. Used as a second argument to ComPtr<>/ComObjPtr<>.
 */
template <class C>
class ComStrongRef
{
protected:
    static void addref (C *p) { p->AddRef(); }   /* take a COM reference */
    static void release (C *p) { p->Release(); } /* drop a COM reference */
};
/**
 * Base template for smart COM pointers. Not intended to be used directly.
 *
 * Holds a single raw interface pointer @c p and routes all reference counting
 * through the RefOps policy class (strong referencing by default).
 */
template <class C, template <class> class RefOps = ComStrongRef>
class ComPtrBase : protected RefOps <C>
{
public:
    /* special template to disable AddRef()/Release() */
    /* Declaring them pure and private means code reached through operator->
       cannot bypass the smart pointer's reference management. */
    template <class I>
    class NoAddRefRelease : public I
    {
    private:
#if !defined (VBOX_WITH_XPCOM)
        STDMETHOD_(ULONG, AddRef)() = 0;
        STDMETHOD_(ULONG, Release)() = 0;
#else /* !defined (VBOX_WITH_XPCOM) */
        NS_IMETHOD_(nsrefcnt) AddRef(void) = 0;
        NS_IMETHOD_(nsrefcnt) Release(void) = 0;
#endif /* !defined (VBOX_WITH_XPCOM) */
    };
protected:
    ComPtrBase () : p (NULL) {}
    ComPtrBase (const ComPtrBase &that) : p (that.p) { addref(); }
    ComPtrBase (C *that_p) : p (that_p) { addref(); }
    ~ComPtrBase() { release(); }
    ComPtrBase &operator= (const ComPtrBase &that)
    {
        safe_assign (that.p);
        return *this;
    }
    ComPtrBase &operator= (C *that_p)
    {
        safe_assign (that_p);
        return *this;
    }
public:
    /* Releases the managed pointer (if any) and resets it to NULL. */
    void setNull()
    {
        release();
        p = NULL;
    }
    bool isNull() const
    {
        return (p == NULL);
    }
    bool operator! () const { return isNull(); }
    bool operator< (C* that_p) const { return p < that_p; }
    bool operator== (C* that_p) const { return p == that_p; }
    /* Identity comparison delegated to ComPtrEquals. */
    template <class I>
    bool equalsTo (I *aThat) const
    {
        return ComPtrEquals (p, aThat);
    }
    template <class OC>
    bool equalsTo (const ComPtrBase <OC> &oc) const
    {
        return equalsTo ((OC *) oc);
    }
    /** Intended to pass instances as in parameters to interface methods */
    operator C* () const { return p; }
    /**
     * Dereferences the instance (redirects the -> operator to the managed
     * pointer).
     */
    NoAddRefRelease <C> *operator-> () const
    {
        AssertMsg (p, ("Managed pointer must not be null\n"));
        return (NoAddRefRelease <C> *) p;
    }
    /* QueryInterface into *pp; a NULL managed pointer yields *pp = NULL, S_OK. */
    template <class I>
    HRESULT queryInterfaceTo (I **pp) const
    {
        if (pp)
        {
            if (p)
            {
                return p->QueryInterface (COM_IIDOF (I), (void **) pp);
            }
            else
            {
                *pp = NULL;
                return S_OK;
            }
        }
        return E_INVALIDARG;
    }
    /** Intended to pass instances as out parameters to interface methods */
    C **asOutParam()
    {
        setNull();
        return &p;
    }
private:
    void addref()
    {
        if (p)
            RefOps <C>::addref (p);
    }
    void release()
    {
        if (p)
            RefOps <C>::release (p);
    }
    /* AddRef the incoming pointer before releasing the old one so that
       self-assignment cannot destroy the object prematurely. */
    void safe_assign (C *that_p)
    {
        /* be aware of self-assignment */
        if (that_p)
            RefOps <C>::addref (that_p);
        release();
        p = that_p;
    }
    C *p;
};
/**
 * Smart COM pointer wrapper that automatically manages refcounting of
 * interface pointers.
 *
 * Assignment from a different interface type performs a QueryInterface to I;
 * assignment from I itself just AddRef's.
 *
 * @param I COM interface class
 */
template <class I, template <class> class RefOps = ComStrongRef>
class ComPtr : public ComPtrBase <I, RefOps>
{
    typedef ComPtrBase <I, RefOps> Base;
public:
    ComPtr () : Base() {}
    ComPtr (const ComPtr &that) : Base(that) {}
    ComPtr &operator= (const ComPtr &that)
    {
        Base::operator= (that);
        return *this;
    }
    /* Construction from another interface type goes through operator= below,
       i.e. via QueryInterface. */
    template <class OI>
    ComPtr (OI *that_p) : Base () { operator= (that_p); }
    /* specialization for I */
    ComPtr (I *that_p) : Base (that_p) {}
    template <class OC>
    ComPtr (const ComPtr <OC, RefOps> &oc) : Base () { operator= ((OC *) oc); }
    template <class OI>
    ComPtr &operator= (OI *that_p)
    {
        if (that_p)
            that_p->QueryInterface (COM_IIDOF (I), (void **) Base::asOutParam());
        else
            Base::setNull();
        return *this;
    }
    /* specialization for I */
    ComPtr &operator=(I *that_p)
    {
        Base::operator= (that_p);
        return *this;
    }
    template <class OC>
    ComPtr &operator= (const ComPtr <OC, RefOps> &oc)
    {
        return operator= ((OC *) oc);
    }
};
/**
 * Looks up the Win32_NetworkAdapterConfiguration WMI object whose SettingID
 * equals the given interface GUID.
 *
 * @param pSvc            Connected IWbemServices (ROOT\CIMV2).
 * @param pGuid           Interface GUID to search for.
 * @param pAdapterConfig  Receives the object on success; caller releases it.
 *                        Not touched on failure.
 */
static HRESULT netIfWinFindAdapterClassById(IWbemServices * pSvc, const GUID * pGuid, IWbemClassObject **pAdapterConfig)
{
    HRESULT hr;
    WCHAR wszQuery[256];
    WCHAR wszGuid[50];
    int length = StringFromGUID2(*pGuid, wszGuid, RT_ELEMENTS(wszGuid));
    if (length)
    {
        /* Use the bounded formatter; the legacy two-argument swprintf has no
         * size check (the GUID string is short, but don't rely on it). */
        swprintf_s(wszQuery, RT_ELEMENTS(wszQuery),
                   L"SELECT * FROM Win32_NetworkAdapterConfiguration WHERE SettingID = \"%s\"", wszGuid);
        IEnumWbemClassObject* pEnumerator = NULL;
        hr = pSvc->ExecQuery(bstr_t("WQL"), bstr_t(wszQuery), WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY,
                             NULL, &pEnumerator);
        if (SUCCEEDED(hr))
        {
            if (pEnumerator)
            {
                /* Initialize so the pointer is never read indeterminate below. */
                IWbemClassObject *pclsObj = NULL;
                ULONG uReturn = 0;
                hr = pEnumerator->Next(WBEM_INFINITE, 1, &pclsObj, &uReturn);
                NonStandardLogFlow(("netIfWinFindAdapterClassById: IEnumWbemClassObject::Next -> hr=0x%x pclsObj=%p uReturn=%u\n",
                                    hr, (void *)pclsObj, uReturn));
                if (SUCCEEDED(hr))
                {
                    if (uReturn && pclsObj != NULL)
                    {
                        *pAdapterConfig = pclsObj;
                        pEnumerator->Release();
                        NonStandardLogFlow(("netIfWinFindAdapterClassById: S_OK and %p\n", *pAdapterConfig));
                        return S_OK;
                    }
                    /* Query succeeded but no adapter matched the GUID. */
                    hr = E_FAIL;
                }
                pEnumerator->Release();
            }
            else
            {
                NonStandardLogFlow(("ExecQuery returned no enumerator\n"));
                hr = E_FAIL;
            }
        }
        else
            NonStandardLogFlow(("ExecQuery failed (0x%x)\n", hr));
    }
    else
    {
        DWORD winEr = GetLastError();
        hr = HRESULT_FROM_WIN32( winEr );
        if (SUCCEEDED(hr))
            hr = E_FAIL;
        NonStandardLogFlow(("StringFromGUID2 failed winEr=%u, hr=0x%x\n", winEr, hr));
    }
    /* Note: *pAdapterConfig is only meaningful here on the success path above. */
    NonStandardLogFlow(("netIfWinFindAdapterClassById: 0x%x and %p\n", hr, *pAdapterConfig));
    return hr;
}
/**
 * Checks whether the adapter is one of ours, i.e. whether its WMI
 * "ServiceName" property is "VBoxNetAdp" (the host-only adapter driver).
 *
 * @param pbIsHostOnly  Set on success; untouched when the Get fails.
 */
static HRESULT netIfWinIsHostOnly(IWbemClassObject * pAdapterConfig, BOOL * pbIsHostOnly)
{
    VARIANT vtServiceName;
    VariantInit(&vtServiceName);
    HRESULT hr = pAdapterConfig->Get(L"ServiceName", 0 /*lFlags*/, &vtServiceName, NULL /*pvtType*/, NULL /*plFlavor*/);
    if (FAILED(hr))
        return hr;
    *pbIsHostOnly = (bstr_t(vtServiceName.bstrVal) == bstr_t("VBoxNetAdp"));
    VariantClear(&vtServiceName);
    return hr;
}
/**
 * Extracts the first IPv4 address/netmask pair from the adapter's WMI
 * "IPAddress"/"IPSubnet" string arrays.
 *
 * @param pIpv4    Receives the address in network byte order, 0 if none found.
 * @param pMaskv4  Receives the matching netmask, 0 if none found.
 */
static HRESULT netIfWinGetIpSettings(IWbemClassObject * pAdapterConfig, ULONG *pIpv4, ULONG *pMaskv4)
{
    VARIANT vtIp;
    HRESULT hr;
    VariantInit(&vtIp);
    *pIpv4 = 0;
    *pMaskv4 = 0;
    hr = pAdapterConfig->Get(L"IPAddress", 0, &vtIp, 0, 0);
    if (SUCCEEDED(hr))
    {
        if (vtIp.vt == (VT_ARRAY | VT_BSTR))
        {
            VARIANT vtMask;
            VariantInit(&vtMask);
            hr = pAdapterConfig->Get(L"IPSubnet", 0, &vtMask, 0, 0);
            if (SUCCEEDED(hr))
            {
                if (vtMask.vt == (VT_ARRAY | VT_BSTR))
                {
                    SAFEARRAY * pIpArray = vtIp.parray;
                    SAFEARRAY * pMaskArray = vtMask.parray;
                    if (pIpArray && pMaskArray)
                    {
                        /* Walk both arrays in lockstep and stop at the first
                           entry that parses as an IPv4 address (IPv6 entries
                           make inet_addr return INADDR_NONE and are skipped). */
                        BSTR pCurIp;
                        BSTR pCurMask;
                        for (LONG i = 0;
                             SafeArrayGetElement(pIpArray, &i, (PVOID)&pCurIp) == S_OK
                             && SafeArrayGetElement(pMaskArray, &i, (PVOID)&pCurMask) == S_OK;
                             i++)
                        {
                            bstr_t ip(pCurIp);
                            ULONG Ipv4 = inet_addr((char*)(ip));
                            if (Ipv4 != INADDR_NONE)
                            {
                                *pIpv4 = Ipv4;
                                bstr_t mask(pCurMask);
                                *pMaskv4 = inet_addr((char*)(mask));
                                break;
                            }
                        }
                    }
                }
                else
                {
                    *pIpv4 = 0;
                    *pMaskv4 = 0;
                }
                VariantClear(&vtMask);
            }
        }
        else
        {
            *pIpv4 = 0;
            *pMaskv4 = 0;
        }
        VariantClear(&vtIp);
    }
    return hr;
}
#if 0 /* unused */
/* Compiled-out helpers kept for reference: check whether a given set of
 * IP/mask pairs is already applied to an adapter, and poll until Windows
 * reports the new address (Vista applied static addresses asynchronously). */
static HRESULT netIfWinHasIpSettings(IWbemClassObject * pAdapterConfig, SAFEARRAY * pCheckIp, SAFEARRAY * pCheckMask, bool *pFound)
{
    VARIANT vtIp;
    HRESULT hr;
    VariantInit(&vtIp);
    *pFound = false;
    hr = pAdapterConfig->Get(L"IPAddress", 0, &vtIp, 0, 0);
    if (SUCCEEDED(hr))
    {
        VARIANT vtMask;
        VariantInit(&vtMask);
        hr = pAdapterConfig->Get(L"IPSubnet", 0, &vtMask, 0, 0);
        if (SUCCEEDED(hr))
        {
            SAFEARRAY * pIpArray = vtIp.parray;
            SAFEARRAY * pMaskArray = vtMask.parray;
            if (pIpArray && pMaskArray)
            {
                /* For each requested pair, scan the adapter's current pairs. */
                BSTR pIp, pMask;
                for (LONG k = 0;
                     SafeArrayGetElement(pCheckIp, &k, (PVOID)&pIp) == S_OK
                     && SafeArrayGetElement(pCheckMask, &k, (PVOID)&pMask) == S_OK;
                     k++)
                {
                    BSTR pCurIp;
                    BSTR pCurMask;
                    for (LONG i = 0;
                         SafeArrayGetElement(pIpArray, &i, (PVOID)&pCurIp) == S_OK
                         && SafeArrayGetElement(pMaskArray, &i, (PVOID)&pCurMask) == S_OK;
                         i++)
                    {
                        if (!wcsicmp(pCurIp, pIp))
                        {
                            if (!wcsicmp(pCurMask, pMask))
                                *pFound = true;
                            break;
                        }
                    }
                }
            }
            VariantClear(&vtMask);
        }
        VariantClear(&vtIp);
    }
    return hr;
}
static HRESULT netIfWinWaitIpSettings(IWbemServices *pSvc, const GUID * pGuid, SAFEARRAY * pCheckIp, SAFEARRAY * pCheckMask, ULONG sec2Wait, bool *pFound)
{
    /* on Vista we need to wait for the address to get applied */
    /* wait for the address to appear in the list */
    HRESULT hr = S_OK;
    ULONG i;
    *pFound = false;
    ComPtr <IWbemClassObject> pAdapterConfig;
    /* Re-query and re-check every 6 seconds, up to sec2Wait seconds total. */
    for (i = 0;
            (hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam())) == S_OK
         && (hr = netIfWinHasIpSettings(pAdapterConfig, pCheckIp, pCheckMask, pFound)) == S_OK
         && !(*pFound)
         && i < sec2Wait/6;
         i++)
    {
        Sleep(6000);
    }
    return hr;
}
#endif /* unused */
/**
 * Connects to the ROOT\CIMV2 WMI namespace and configures the proxy security
 * blanket for impersonation.
 *
 * @param ppSvc  Receives the connected services interface on success; the
 *               caller releases it. The locator is released internally.
 */
static HRESULT netIfWinCreateIWbemServices(IWbemServices ** ppSvc)
{
    IWbemLocator *pLoc = NULL;
    HRESULT hr = CoCreateInstance(CLSID_WbemLocator, 0, CLSCTX_INPROC_SERVER, IID_IWbemLocator, (LPVOID *) &pLoc);
    if (SUCCEEDED(hr))
    {
        IWbemServices *pSvc = NULL;
        hr = pLoc->ConnectServer(bstr_t(L"ROOT\\CIMV2"), /* [in] const BSTR strNetworkResource */
                                 NULL, /* [in] const BSTR strUser */
                                 NULL, /* [in] const BSTR strPassword */
                                 0,    /* [in] const BSTR strLocale */
                                 NULL, /* [in] LONG lSecurityFlags */
                                 0,    /* [in] const BSTR strAuthority */
                                 0,    /* [in] IWbemContext* pCtx */
                                 &pSvc /* [out] IWbemServices** ppNamespace */);
        if (SUCCEEDED(hr))
        {
            /* WMI calls are made on behalf of the current user. */
            hr = CoSetProxyBlanket(pSvc, /* IUnknown * pProxy */
                                   RPC_C_AUTHN_WINNT, /* DWORD dwAuthnSvc */
                                   RPC_C_AUTHZ_NONE, /* DWORD dwAuthzSvc */
                                   NULL, /* WCHAR * pServerPrincName */
                                   RPC_C_AUTHN_LEVEL_CALL, /* DWORD dwAuthnLevel */
                                   RPC_C_IMP_LEVEL_IMPERSONATE, /* DWORD dwImpLevel */
                                   NULL, /* RPC_AUTH_IDENTITY_HANDLE pAuthInfo */
                                   EOAC_NONE /* DWORD dwCapabilities */
                                   );
            if (SUCCEEDED(hr))
            {
                *ppSvc = pSvc;
                /* do not need it any more */
                pLoc->Release();
                return hr;
            }
            else
                NonStandardLogFlow(("CoSetProxyBlanket failed, hr (0x%x)\n", hr));
            pSvc->Release();
        }
        else
            NonStandardLogFlow(("ConnectServer failed, hr (0x%x)\n", hr));
        pLoc->Release();
    }
    else
        NonStandardLogFlow(("CoCreateInstance failed, hr (0x%x)\n", hr));
    return hr;
}
/**
 * Builds the WMI object path "Win32_NetworkAdapterConfiguration.Index='N'"
 * for the given adapter configuration object.
 *
 * @param pObj  Adapter configuration to read the "Index" property from.
 * @param pStr  Receives a newly allocated BSTR; caller frees with SysFreeString.
 */
static HRESULT netIfWinAdapterConfigPath(IWbemClassObject *pObj, BSTR * pStr)
{
    VARIANT index;
    HRESULT hr = pObj->Get(L"Index", 0, &index, 0, 0);
    if (SUCCEEDED(hr))
    {
        /* 16 characters comfortably hold any unsigned 32-bit decimal (up to
         * 10 digits) plus the terminator; the previous 8-char buffer could
         * overflow for large interface indexes. Use the bounded formatter. */
        WCHAR strIndex[16];
        swprintf_s(strIndex, RT_ELEMENTS(strIndex), L"%u", index.uintVal);
        *pStr = (bstr_t(L"Win32_NetworkAdapterConfiguration.Index='") + strIndex + "'").copy();
    }
    else
        NonStandardLogFlow(("Get failed, hr (0x%x)\n", hr));
    return hr;
}
/**
 * Executes a WMI method on the object identified by @a ObjPath.
 *
 * When @a cArgs is non-zero, an input-parameter instance is spawned from the
 * method definition on @a pClass and filled from @a pArgNames / @a pArgs;
 * otherwise the method runs without input parameters (pClassInstance stays
 * NULL, which ExecMethod accepts).
 *
 * @param ppOutParams  Receives the output-parameter object on success; the
 *                     caller releases it.
 */
static HRESULT netIfExecMethod(IWbemServices * pSvc, IWbemClassObject *pClass, BSTR ObjPath,
                               BSTR MethodName, LPWSTR *pArgNames, LPVARIANT *pArgs, UINT cArgs,
                               IWbemClassObject** ppOutParams
                               )
{
    HRESULT hr = S_OK;
    ComPtr<IWbemClassObject> pInParamsDefinition;
    ComPtr<IWbemClassObject> pClassInstance;
    if (cArgs)
    {
        hr = pClass->GetMethod(MethodName, 0, pInParamsDefinition.asOutParam(), NULL);
        if (SUCCEEDED(hr))
        {
            hr = pInParamsDefinition->SpawnInstance(0, pClassInstance.asOutParam());
            if (SUCCEEDED(hr))
            {
                /* Copy each named argument into the input-parameter instance. */
                for (UINT i = 0; i < cArgs; i++)
                {
                    hr = pClassInstance->Put(pArgNames[i], 0,
                                             pArgs[i], 0);
                    if (FAILED(hr))
                        break;
                }
            }
        }
    }
    if (SUCCEEDED(hr))
    {
        IWbemClassObject* pOutParams = NULL;
        hr = pSvc->ExecMethod(ObjPath, MethodName, 0, NULL, pClassInstance, &pOutParams, NULL);
        if (SUCCEEDED(hr))
        {
            *ppOutParams = pOutParams;
        }
    }
    return hr;
}
/**
 * Builds a SAFEARRAY of BSTR dotted-decimal strings from an array of IPv4
 * addresses, as the WMI EnableStatic/SetGateways methods expect.
 *
 * @param ppArray  Receives the array on success; caller destroys it.
 * @param aIp      IPv4 addresses (network byte order).
 * @param cIp      Number of entries in @a aIp.
 */
static HRESULT netIfWinCreateIpArray(SAFEARRAY **ppArray, in_addr* aIp, UINT cIp)
{
    HRESULT hr = S_OK; /* MSC maybe used uninitialized */
    SAFEARRAY * pIpArray = SafeArrayCreateVector(VT_BSTR, 0, cIp);
    if (pIpArray)
    {
        for (UINT i = 0; i < cIp; i++)
        {
            char* addr = inet_ntoa(aIp[i]);
            BSTR val = bstr_t(addr).copy();
            long aIndex[1];
            aIndex[0] = i;
            hr = SafeArrayPutElement(pIpArray, aIndex, val);
            /* SafeArrayPutElement stores a *copy* of the BSTR, so our copy
             * must always be freed -- the old code leaked it for every
             * successfully inserted element. */
            SysFreeString(val);
            if (FAILED(hr))
            {
                SafeArrayDestroy(pIpArray);
                break;
            }
        }
        if (SUCCEEDED(hr))
        {
            *ppArray = pIpArray;
        }
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
#if 0 /* unused */
/* Compiled-out variant building a one-element BSTR SAFEARRAY from a single
 * (IPv4 or IPv6) address string; kept for reference. Note it shares the
 * ownership quirk of netIfWinCreateIpArray: SafeArrayPutElement copies the
 * BSTR, so val would need to be freed on success, too. */
static HRESULT netIfWinCreateIpArrayV4V6(SAFEARRAY **ppArray, BSTR Ip)
{
    HRESULT hr;
    SAFEARRAY *pIpArray = SafeArrayCreateVector(VT_BSTR, 0, 1);
    if (pIpArray)
    {
        BSTR val = bstr_t(Ip, false).copy();
        long aIndex[1];
        aIndex[0] = 0;
        hr = SafeArrayPutElement(pIpArray, aIndex, val);
        if (FAILED(hr))
        {
            SysFreeString(val);
            SafeArrayDestroy(pIpArray);
        }
        if (SUCCEEDED(hr))
        {
            *ppArray = pIpArray;
        }
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
#endif
/**
 * Wraps an IPv4 address list into a VARIANT holding a BSTR SAFEARRAY
 * (VT_ARRAY | VT_BSTR), ready to pass to a WMI method.
 */
static HRESULT netIfWinCreateIpArrayVariantV4(VARIANT * pIpAddresses, in_addr* aIp, UINT cIp)
{
    VariantInit(pIpAddresses);
    pIpAddresses->vt = VT_ARRAY | VT_BSTR;
    SAFEARRAY *pArr = NULL;
    const HRESULT hr = netIfWinCreateIpArray(&pArr, aIp, cIp);
    if (SUCCEEDED(hr))
        pIpAddresses->parray = pArr;
    /* On failure parray stays NULL (zeroed by VariantInit), which is safe to
       VariantClear. */
    return hr;
}
#if 0 /* unused */
/* Compiled-out VARIANT wrapper around netIfWinCreateIpArrayV4V6 above. */
static HRESULT netIfWinCreateIpArrayVariantV4V6(VARIANT * pIpAddresses, BSTR Ip)
{
    HRESULT hr;
    VariantInit(pIpAddresses);
    pIpAddresses->vt = VT_ARRAY | VT_BSTR;
    SAFEARRAY *pIpArray;
    hr = netIfWinCreateIpArrayV4V6(&pIpArray, Ip);
    if (SUCCEEDED(hr))
    {
        pIpAddresses->parray = pIpArray;
    }
    return hr;
}
#endif
/**
 * Invokes Win32_NetworkAdapterConfiguration::EnableStatic on the adapter
 * identified by @a ObjPath, setting the given address/mask arrays.
 *
 * The WMI method's numeric ReturnValue (a Win32 error code, 0 on success) is
 * translated into an HRESULT.
 */
static HRESULT netIfWinEnableStatic(IWbemServices *pSvc, const GUID *pGuid, BSTR ObjPath, VARIANT *pIp, VARIANT *pMask)
{
    ComPtr<IWbemClassObject> pClass;
    BSTR ClassName = SysAllocString(L"Win32_NetworkAdapterConfiguration");
    HRESULT hr;
    if (ClassName)
    {
        hr = pSvc->GetObject(ClassName, 0, NULL, pClass.asOutParam(), NULL);
        if (SUCCEEDED(hr))
        {
            LPWSTR argNames[] = {L"IPAddress", L"SubnetMask"};
            LPVARIANT args[] = {pIp, pMask};
            ComPtr<IWbemClassObject> pOutParams;
            hr = netIfExecMethod(pSvc, pClass, ObjPath, bstr_t(L"EnableStatic"), argNames, args, 2, pOutParams.asOutParam());
            if (SUCCEEDED(hr))
            {
                VARIANT varReturnValue;
                hr = pOutParams->Get(bstr_t(L"ReturnValue"), 0,
                                     &varReturnValue, NULL, 0);
                Assert(SUCCEEDED(hr));
                if (SUCCEEDED(hr))
                {
                    //Assert(varReturnValue.vt == VT_UINT);
                    int winEr = varReturnValue.uintVal;
                    switch (winEr)
                    {
                        case 0:
                        {
                            hr = S_OK;
                            /* The wait-for-address-to-apply step is disabled;
                               pGuid is kept in the signature for it. */
                            //bool bFound;
                            //HRESULT tmpHr = netIfWinWaitIpSettings(pSvc, pGuid, pIp->parray, pMask->parray, 180, &bFound);
                            NOREF(pGuid);
                            break;
                        }
                        default:
                            hr = HRESULT_FROM_WIN32( winEr );
                            break;
                    }
                }
            }
        }
        SysFreeString(ClassName);
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
/**
 * Convenience wrapper: packs raw IPv4 address/mask arrays into VARIANTs and
 * calls netIfWinEnableStatic with them.
 */
static HRESULT netIfWinEnableStaticV4(IWbemServices * pSvc, const GUID * pGuid, BSTR ObjPath, in_addr* aIp, in_addr * aMask, UINT cIp)
{
    VARIANT vtAddresses;
    HRESULT hr = netIfWinCreateIpArrayVariantV4(&vtAddresses, aIp, cIp);
    if (FAILED(hr))
        return hr;
    VARIANT vtMasks;
    hr = netIfWinCreateIpArrayVariantV4(&vtMasks, aMask, cIp);
    if (SUCCEEDED(hr))
    {
        hr = netIfWinEnableStatic(pSvc, pGuid, ObjPath, &vtAddresses, &vtMasks);
        VariantClear(&vtMasks);
    }
    VariantClear(&vtAddresses);
    return hr;
}
#if 0 /* unused */
/* Compiled-out gateway helpers kept for reference: IPv4/IPv6 EnableStatic
 * variant plus SetGateways wrappers. Gateway metrics are deliberately not
 * set (Windows would allow it). */
static HRESULT netIfWinEnableStaticV4V6(IWbemServices * pSvc, const GUID * pGuid, BSTR ObjPath, BSTR Ip, BSTR Mask)
{
    VARIANT ipAddresses;
    HRESULT hr = netIfWinCreateIpArrayVariantV4V6(&ipAddresses, Ip);
    if (SUCCEEDED(hr))
    {
        VARIANT ipMasks;
        hr = netIfWinCreateIpArrayVariantV4V6(&ipMasks, Mask);
        if (SUCCEEDED(hr))
        {
            hr = netIfWinEnableStatic(pSvc, pGuid, ObjPath, &ipAddresses, &ipMasks);
            VariantClear(&ipMasks);
        }
        VariantClear(&ipAddresses);
    }
    return hr;
}
/* win API allows to set gw metrics as well, we are not setting them */
static HRESULT netIfWinSetGateways(IWbemServices * pSvc, BSTR ObjPath, VARIANT * pGw)
{
    ComPtr<IWbemClassObject> pClass;
    BSTR ClassName = SysAllocString(L"Win32_NetworkAdapterConfiguration");
    HRESULT hr;
    if (ClassName)
    {
        hr = pSvc->GetObject(ClassName, 0, NULL, pClass.asOutParam(), NULL);
        if (SUCCEEDED(hr))
        {
            LPWSTR argNames[] = {L"DefaultIPGateway"};
            LPVARIANT args[] = {pGw};
            ComPtr<IWbemClassObject> pOutParams;
            hr = netIfExecMethod(pSvc, pClass, ObjPath, bstr_t(L"SetGateways"), argNames, args, 1, pOutParams.asOutParam());
            if (SUCCEEDED(hr))
            {
                VARIANT varReturnValue;
                hr = pOutParams->Get(bstr_t(L"ReturnValue"), 0, &varReturnValue, NULL, 0);
                Assert(SUCCEEDED(hr));
                if (SUCCEEDED(hr))
                {
                    /* The WMI ReturnValue is a Win32 error code, 0 == success. */
                    //Assert(varReturnValue.vt == VT_UINT);
                    int winEr = varReturnValue.uintVal;
                    switch (winEr)
                    {
                        case 0:
                            hr = S_OK;
                            break;
                        default:
                            hr = HRESULT_FROM_WIN32( winEr );
                            break;
                    }
                }
            }
        }
        SysFreeString(ClassName);
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
/* win API allows to set gw metrics as well, we are not setting them */
static HRESULT netIfWinSetGatewaysV4(IWbemServices * pSvc, BSTR ObjPath, in_addr* aGw, UINT cGw)
{
    VARIANT gwais;
    HRESULT hr = netIfWinCreateIpArrayVariantV4(&gwais, aGw, cGw);
    if (SUCCEEDED(hr))
    {
        /* NOTE(review): the SetGateways result is discarded here and the
           array-creation status returned instead -- confirm intent. */
        netIfWinSetGateways(pSvc, ObjPath, &gwais);
        VariantClear(&gwais);
    }
    return hr;
}
/* win API allows to set gw metrics as well, we are not setting them */
static HRESULT netIfWinSetGatewaysV4V6(IWbemServices * pSvc, BSTR ObjPath, BSTR Gw)
{
    VARIANT vGw;
    HRESULT hr = netIfWinCreateIpArrayVariantV4V6(&vGw, Gw);
    if (SUCCEEDED(hr))
    {
        netIfWinSetGateways(pSvc, ObjPath, &vGw);
        VariantClear(&vGw);
    }
    return hr;
}
#endif /* unused */
/**
 * Invokes Win32_NetworkAdapterConfiguration::EnableDHCP on the adapter
 * identified by @a ObjPath, switching it to dynamic address configuration.
 *
 * The method's numeric ReturnValue (Win32 error code, 0 on success) is
 * translated into an HRESULT.
 */
static HRESULT netIfWinEnableDHCP(IWbemServices * pSvc, BSTR ObjPath)
{
    ComPtr<IWbemClassObject> pClass;
    BSTR ClassName = SysAllocString(L"Win32_NetworkAdapterConfiguration");
    HRESULT hr;
    if (ClassName)
    {
        hr = pSvc->GetObject(ClassName, 0, NULL, pClass.asOutParam(), NULL);
        if (SUCCEEDED(hr))
        {
            ComPtr<IWbemClassObject> pOutParams;
            hr = netIfExecMethod(pSvc, pClass, ObjPath, bstr_t(L"EnableDHCP"), NULL, NULL, 0, pOutParams.asOutParam());
            if (SUCCEEDED(hr))
            {
                VARIANT varReturnValue;
                hr = pOutParams->Get(bstr_t(L"ReturnValue"), 0,
                                     &varReturnValue, NULL, 0);
                Assert(SUCCEEDED(hr));
                if (SUCCEEDED(hr))
                {
                    //Assert(varReturnValue.vt == VT_UINT);
                    int winEr = varReturnValue.uintVal;
                    switch (winEr)
                    {
                        case 0:
                            hr = S_OK;
                            break;
                        default:
                            hr = HRESULT_FROM_WIN32( winEr );
                            break;
                    }
                }
            }
        }
        SysFreeString(ClassName);
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
/**
 * Forces a DHCP rediscover on the adapter identified by @a ObjPath by calling
 * ReleaseDHCPLease followed (only when the release succeeded) by
 * RenewDHCPLease via WMI.
 *
 * Each method's numeric ReturnValue (Win32 error code, 0 on success) is
 * translated into an HRESULT.
 */
static HRESULT netIfWinDhcpRediscover(IWbemServices * pSvc, BSTR ObjPath)
{
    ComPtr<IWbemClassObject> pClass;
    BSTR ClassName = SysAllocString(L"Win32_NetworkAdapterConfiguration");
    HRESULT hr;
    if (ClassName)
    {
        hr = pSvc->GetObject(ClassName, 0, NULL, pClass.asOutParam(), NULL);
        if (SUCCEEDED(hr))
        {
            ComPtr<IWbemClassObject> pOutParams;
            hr = netIfExecMethod(pSvc, pClass, ObjPath, bstr_t(L"ReleaseDHCPLease"), NULL, NULL, 0, pOutParams.asOutParam());
            if (SUCCEEDED(hr))
            {
                VARIANT varReturnValue;
                hr = pOutParams->Get(bstr_t(L"ReturnValue"), 0, &varReturnValue, NULL, 0);
                Assert(SUCCEEDED(hr));
                if (SUCCEEDED(hr))
                {
                    //Assert(varReturnValue.vt == VT_UINT);
                    int winEr = varReturnValue.uintVal;
                    if (winEr == 0)
                    {
                        /* Release succeeded; reuse pOutParams for the renew call. */
                        hr = netIfExecMethod(pSvc, pClass, ObjPath, bstr_t(L"RenewDHCPLease"), NULL, NULL, 0, pOutParams.asOutParam());
                        if (SUCCEEDED(hr))
                        {
                            /* Inner declarations intentionally shadow the outer
                               ones; both just hold a method return code. */
                            VARIANT varReturnValue;
                            hr = pOutParams->Get(bstr_t(L"ReturnValue"), 0, &varReturnValue, NULL, 0);
                            Assert(SUCCEEDED(hr));
                            if (SUCCEEDED(hr))
                            {
                                //Assert(varReturnValue.vt == VT_UINT);
                                int winEr = varReturnValue.uintVal;
                                if (winEr == 0)
                                    hr = S_OK;
                                else
                                    hr = HRESULT_FROM_WIN32( winEr );
                            }
                        }
                    }
                    else
                        hr = HRESULT_FROM_WIN32( winEr );
                }
            }
        }
        SysFreeString(ClassName);
    }
    else
        hr = HRESULT_FROM_WIN32(GetLastError());
    return hr;
}
/**
 * Reads the adapter's WMI "DHCPEnabled" property.
 *
 * @param pEnabled  Set on success; untouched when the Get fails.
 */
static HRESULT vboxNetCfgWinIsDhcpEnabled(IWbemClassObject * pAdapterConfig, BOOL *pEnabled)
{
    VARIANT vtEnabled;
    const HRESULT hr = pAdapterConfig->Get(L"DHCPEnabled", 0, &vtEnabled, 0, 0);
    if (FAILED(hr))
        return hr;
    *pEnabled = vtEnabled.boolVal;
    return hr;
}
/**
 * Retrieves the DHCP flag plus the first IPv4 address/mask pair of the
 * adapter identified by @a pGuid into @a pSettings.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinGetAdapterSettings(IN const GUID * pGuid, OUT PADAPTER_SETTINGS pSettings)
{
    ComPtr<IWbemServices> pSvc;
    HRESULT hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (FAILED(hr))
        return hr;
    ComPtr<IWbemClassObject> pAdapterConfig;
    hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
    if (FAILED(hr))
        return hr;
    hr = vboxNetCfgWinIsDhcpEnabled(pAdapterConfig, &pSettings->bDhcp);
    if (SUCCEEDED(hr))
        hr = netIfWinGetIpSettings(pAdapterConfig, &pSettings->ip, &pSettings->mask);
    return hr;
}
/**
 * Public variant of the DHCP query: resolves the adapter by GUID and reports
 * whether it is configured for DHCP.
 *
 * @param pEnabled  Set on success; untouched on failure.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinIsDhcpEnabled(const GUID * pGuid, BOOL *pEnabled)
{
    HRESULT hr;
    ComPtr <IWbemServices> pSvc;
    hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (SUCCEEDED(hr))
    {
        ComPtr<IWbemClassObject> pAdapterConfig;
        hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
        if (SUCCEEDED(hr))
        {
            /* Delegate to the shared helper instead of duplicating the
             * "DHCPEnabled" WMI query inline. */
            hr = vboxNetCfgWinIsDhcpEnabled(pAdapterConfig, pEnabled);
        }
    }
    return hr;
}
/**
 * Assigns a static IPv4 configuration to the host-only adapter identified by
 * @a pGuid. Refuses (E_FAIL) to touch adapters that are not VirtualBox
 * host-only interfaces.
 *
 * @param ip    IPv4 address, network byte order; 0 clears the configuration.
 * @param mask  IPv4 netmask, network byte order.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinEnableStaticIpConfig(IN const GUID *pGuid, IN ULONG ip, IN ULONG mask)
{
    NonStandardLogFlow(("VBoxNetCfgWinEnableStaticIpConfig: ip=0x%x mask=0x%x\n", ip, mask));
    ComPtr<IWbemServices> pSvc;
    HRESULT hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (SUCCEEDED(hr))
    {
        ComPtr<IWbemClassObject> pAdapterConfig;
        hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
        if (SUCCEEDED(hr))
        {
            BOOL bIsHostOnly;
            hr = netIfWinIsHostOnly(pAdapterConfig, &bIsHostOnly);
            if (SUCCEEDED(hr))
            {
                if (bIsHostOnly)
                {
                    in_addr aIp[1];
                    in_addr aMask[1];
                    aIp[0].S_un.S_addr = ip;
                    aMask[0].S_un.S_addr = mask;
                    BSTR ObjPath;
                    hr = netIfWinAdapterConfigPath(pAdapterConfig, &ObjPath);
                    if (SUCCEEDED(hr))
                    {
                        /* ip == 0 passes an empty array, clearing the config. */
                        hr = netIfWinEnableStaticV4(pSvc, pGuid, ObjPath, aIp, aMask, ip != 0 ? 1 : 0);
                        if (SUCCEEDED(hr))
                        {
                            /* Gateway setup is disabled; host-only nets have none. */
#if 0
                            in_addr aGw[1];
                            aGw[0].S_un.S_addr = gw;
                            hr = netIfWinSetGatewaysV4(pSvc, ObjPath, aGw, 1);
                            if (SUCCEEDED(hr))
#endif
                            {
                            }
                        }
                        SysFreeString(ObjPath);
                    }
                }
                else
                {
                    hr = E_FAIL;
                }
            }
        }
    }
    NonStandardLogFlow(("VBoxNetCfgWinEnableStaticIpConfig: returns 0x%x\n", hr));
    return hr;
}
#if 0
/* Compiled-out IPv6 static-configuration helpers kept for reference. */
static HRESULT netIfEnableStaticIpConfigV6(const GUID *pGuid, IN_BSTR aIPV6Address, IN_BSTR aIPV6Mask, IN_BSTR aIPV6DefaultGateway)
{
    HRESULT hr;
    ComPtr <IWbemServices> pSvc;
    hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (SUCCEEDED(hr))
    {
        ComPtr<IWbemClassObject> pAdapterConfig;
        hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
        if (SUCCEEDED(hr))
        {
            BSTR ObjPath;
            hr = netIfWinAdapterConfigPath(pAdapterConfig, &ObjPath);
            if (SUCCEEDED(hr))
            {
                hr = netIfWinEnableStaticV4V6(pSvc, pAdapterConfig, ObjPath, aIPV6Address, aIPV6Mask);
                if (SUCCEEDED(hr))
                {
                    /* Gateway is optional for IPv6. */
                    if (aIPV6DefaultGateway)
                    {
                        hr = netIfWinSetGatewaysV4V6(pSvc, ObjPath, aIPV6DefaultGateway);
                    }
                    if (SUCCEEDED(hr))
                    {
                        //hr = netIfWinUpdateConfig(pIf);
                    }
                }
                SysFreeString(ObjPath);
            }
        }
    }
    return SUCCEEDED(hr) ? VINF_SUCCESS : VERR_GENERAL_FAILURE;
}
/* Overload taking a prefix length instead of a ready-made mask string. */
static HRESULT netIfEnableStaticIpConfigV6(const GUID *pGuid, IN_BSTR aIPV6Address, ULONG aIPV6MaskPrefixLength)
{
    RTNETADDRIPV6 Mask;
    int rc = RTNetPrefixToMaskIPv6(aIPV6MaskPrefixLength, &Mask);
    if (RT_SUCCESS(rc))
    {
        Bstr maskStr = composeIPv6Address(&Mask);
        rc = netIfEnableStaticIpConfigV6(pGuid, aIPV6Address, maskStr, NULL);
    }
    return rc;
}
#endif
/**
 * Switches the host-only adapter identified by @a pGuid to DHCP address
 * configuration. Fails with E_FAIL for adapters that are not VirtualBox
 * host-only interfaces.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinEnableDynamicIpConfig(IN const GUID *pGuid)
{
    ComPtr<IWbemServices> pSvc;
    HRESULT hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (FAILED(hr))
        return hr;
    ComPtr<IWbemClassObject> pAdapterConfig;
    hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
    if (FAILED(hr))
        return hr;
    BOOL fHostOnly = FALSE;
    hr = netIfWinIsHostOnly(pAdapterConfig, &fHostOnly);
    if (FAILED(hr))
        return hr;
    if (!fHostOnly)
        return E_FAIL; /* refuse to reconfigure adapters we do not own */
    BSTR ObjPath;
    hr = netIfWinAdapterConfigPath(pAdapterConfig, &ObjPath);
    if (FAILED(hr))
        return hr;
    hr = netIfWinEnableDHCP(pSvc, ObjPath);
    SysFreeString(ObjPath);
    return hr;
}
/**
 * Releases and renews the DHCP lease of the host-only adapter identified by
 * @a pGuid. Fails with E_FAIL for adapters that are not VirtualBox host-only
 * interfaces.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinDhcpRediscover(IN const GUID *pGuid)
{
    ComPtr<IWbemServices> pSvc;
    HRESULT hr = netIfWinCreateIWbemServices(pSvc.asOutParam());
    if (FAILED(hr))
        return hr;
    ComPtr<IWbemClassObject> pAdapterConfig;
    hr = netIfWinFindAdapterClassById(pSvc, pGuid, pAdapterConfig.asOutParam());
    if (FAILED(hr))
        return hr;
    BOOL fHostOnly = FALSE;
    hr = netIfWinIsHostOnly(pAdapterConfig, &fHostOnly);
    if (FAILED(hr))
        return hr;
    if (!fHostOnly)
        return E_FAIL; /* refuse to reconfigure adapters we do not own */
    BSTR ObjPath;
    hr = netIfWinAdapterConfigPath(pAdapterConfig, &ObjPath);
    if (FAILED(hr))
        return hr;
    hr = netIfWinDhcpRediscover(pSvc, ObjPath);
    SysFreeString(ObjPath);
    return hr;
}
static const char *vboxNetCfgWinAddrToStr(char *pszBuf, LPSOCKADDR pAddr)
{
switch (pAddr->sa_family)
{
case AF_INET:
sprintf(pszBuf, "%d.%d.%d.%d",
((PSOCKADDR_IN)pAddr)->sin_addr.S_un.S_un_b.s_b1,
((PSOCKADDR_IN)pAddr)->sin_addr.S_un.S_un_b.s_b2,
((PSOCKADDR_IN)pAddr)->sin_addr.S_un.S_un_b.s_b3,
((PSOCKADDR_IN)pAddr)->sin_addr.S_un.S_un_b.s_b4);
break;
case AF_INET6:
sprintf(pszBuf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x",
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[0], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[1],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[2], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[3],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[4], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[5],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[6], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[7],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[8], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[9],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[10], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[11],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[12], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[13],
((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[14], ((PSOCKADDR_IN6)pAddr)->sin6_addr.s6_addr[15]);
break;
default:
strcpy(pszBuf, "unknown");
break;
}
return pszBuf;
}
/** Per-prefix callback for vboxNetCfgWinEnumIpConfig; return false to stop. */
typedef bool (*PFNVBOXNETCFG_IPSETTINGS_CALLBACK) (ULONG ip, ULONG mask, PVOID pContext);

/**
 * Walks every adapter prefix in @a pAddresses and invokes @a pfnCallback for
 * each usable IPv4 network prefix (skipping non-IPv4 entries, host/broadcast
 * prefixes, and multicast-or-higher ranges). Stops as soon as the callback
 * returns false.
 */
static void vboxNetCfgWinEnumIpConfig(PIP_ADAPTER_ADDRESSES pAddresses, PFNVBOXNETCFG_IPSETTINGS_CALLBACK pfnCallback, PVOID pContext)
{
    PIP_ADAPTER_ADDRESSES pAdapter;
    for (pAdapter = pAddresses; pAdapter; pAdapter = pAdapter->Next)
    {
        char szBuf[80];
        NonStandardLogFlow(("+- Enumerating adapter '%ls' %s\n", pAdapter->FriendlyName, pAdapter->AdapterName));
        for (PIP_ADAPTER_PREFIX pPrefix = pAdapter->FirstPrefix; pPrefix; pPrefix = pPrefix->Next)
        {
            const char *pcszAddress = vboxNetCfgWinAddrToStr(szBuf, pPrefix->Address.lpSockaddr);
            /* We are concerned with IPv4 only, ignore the rest. */
            if (pPrefix->Address.lpSockaddr->sa_family != AF_INET)
            {
                NonStandardLogFlow(("| +- %s %d: not IPv4, ignoring\n", pcszAddress, pPrefix->PrefixLength));
                continue;
            }
            /* Ignore invalid prefixes as well as host addresses. */
            if (pPrefix->PrefixLength < 1 || pPrefix->PrefixLength > 31)
            {
                NonStandardLogFlow(("| +- %s %d: host or broadcast, ignoring\n", pcszAddress, pPrefix->PrefixLength));
                continue;
            }
            /* Ignore multicast and beyond: first octet >= 224, i.e. its high
             * nibble >= 0xE0 (ip is network byte order, so the first octet is
             * the low byte). The previous '> 224' comparison wrongly let the
             * 224-239 multicast range through. */
            ULONG ip = ((struct sockaddr_in *)pPrefix->Address.lpSockaddr)->sin_addr.s_addr;
            if ((ip & 0xF0) >= 224)
            {
                NonStandardLogFlow(("| +- %s %d: multicast, ignoring\n", pcszAddress, pPrefix->PrefixLength));
                continue;
            }
            /* Derive the netmask from the prefix length (network byte order). */
            ULONG mask = htonl((~(((ULONG)~0) >> pPrefix->PrefixLength)));
            bool fContinue = pfnCallback(ip, mask, pContext);
            if (!fContinue)
            {
                NonStandardLogFlow(("| +- %s %d: CONFLICT!\n", pcszAddress, pPrefix->PrefixLength));
                return;
            }
            else
                NonStandardLogFlow(("| +- %s %d: no conflict, moving on\n", pcszAddress, pPrefix->PrefixLength));
        }
    }
}
/** Context for vboxNetCfgWinIpProbeCallback: the network prefix being probed
 *  and whether a conflicting host subnet was found. */
typedef struct _IPPROBE_CONTEXT
{
    /** Probed network address in network byte order. */
    ULONG Prefix;
    /** Set to true by the callback when an existing subnet collides. */
    bool bConflict;
}IPPROBE_CONTEXT, *PIPPROBE_CONTEXT;

/** Initializes an IPPROBE_CONTEXT with the given (network byte order) address. */
#define IPPROBE_INIT(_pContext, _addr) \
    ((_pContext)->bConflict = false, \
     (_pContext)->Prefix = _addr)

/* Convenience variant taking a dotted-quad string.  Fixed to reference the
   correctly spelled IPPROBE_INIT macro (the old expansion used the undefined
   name 'IPROBE_INIT' and could not compile if ever used). */
#define IPPROBE_INIT_STR(_pContext, _straddr) \
    IPPROBE_INIT(_pContext, inet_addr(_straddr))
/**
 * IP settings enumeration callback: flags a conflict when the probed prefix
 * lies in the same subnet as an existing host address.
 *
 * @returns true to keep enumerating, false once a conflict is recorded.
 * @param   ip        Existing host address (network byte order).
 * @param   mask      Corresponding netmask (network byte order).
 * @param   pContext  Points to the IPPROBE_CONTEXT being filled in.
 */
static bool vboxNetCfgWinIpProbeCallback (ULONG ip, ULONG mask, PVOID pContext)
{
    PIPPROBE_CONTEXT pProbe = (PIPPROBE_CONTEXT)pContext;
    const bool fSameSubnet = (ip & mask) == (pProbe->Prefix & mask);
    if (!fSameSubnet)
        return true;  /* no collision, keep going */
    pProbe->bConflict = true;
    return false;     /* conflict found, stop the enumeration */
}
/**
 * Picks an unused 192.168.x.0/24 network for a host-only interface.
 *
 * Enumerates the host's current IPv4 configuration and randomly probes
 * 192.168.<x>.0 candidates until one is found that does not collide with an
 * already configured subnet.
 *
 * @returns S_OK on success; HRESULT_FROM_WIN32(ERROR_DHCP_ADDRESS_CONFLICT)
 *          when every probe collided; other failure HRESULTs otherwise.
 * @param   pNetIp    Where to return the chosen network address in network
 *                    byte order (0 when none was found).
 * @param   pNetMask  Where to return the netmask (255.255.255.0 on success,
 *                    0 otherwise).
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinGenHostOnlyNetworkNetworkIp(OUT PULONG pNetIp, OUT PULONG pNetMask)
{
    DWORD dwRc;
    HRESULT hr = S_OK;
    /*
     * MSDN recommends to pre-allocate a 15KB buffer.
     */
    ULONG uBufLen = 15 * 1024;
    PIP_ADAPTER_ADDRESSES pAddresses = (PIP_ADAPTER_ADDRESSES)malloc(uBufLen);
    if (!pAddresses)
        return HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY);
    /* On ERROR_BUFFER_OVERFLOW the API updates uBufLen with the needed size. */
    dwRc = GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX, NULL, pAddresses, &uBufLen);
    if (dwRc == ERROR_BUFFER_OVERFLOW)
    {
        /* Impressive! More than 10 adapters! Get more memory and try again. */
        free(pAddresses);
        pAddresses = (PIP_ADAPTER_ADDRESSES)malloc(uBufLen);
        if (!pAddresses)
            return HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY);
        dwRc = GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX, NULL, pAddresses, &uBufLen);
    }
    if (dwRc == NO_ERROR)
    {
        IPPROBE_CONTEXT Context;
        const ULONG ip192168 = inet_addr("192.168.0.0");
        srand(GetTickCount());
        *pNetIp = 0;
        *pNetMask = 0;
        /* Up to 255 random probes; candidates may repeat, so not every
           possible third octet is necessarily tried. */
        for (int i = 0; i < 255; i++)
        {
            /* Random value in [0..255] placed into the third octet of the
               network-byte-order address (the '<< 16' shift). */
            ULONG ipProbe = rand()*255/RAND_MAX;
            ipProbe = ip192168 | (ipProbe << 16);
            unsigned char *a = (unsigned char *)&ipProbe;
            NonStandardLogFlow(("probing %d.%d.%d.%d\n", a[0], a[1], a[2], a[3]));
            IPPROBE_INIT(&Context, ipProbe);
            vboxNetCfgWinEnumIpConfig(pAddresses, vboxNetCfgWinIpProbeCallback, &Context);
            if (!Context.bConflict)
            {
                NonStandardLogFlow(("found unused net %d.%d.%d.%d\n", a[0], a[1], a[2], a[3]));
                *pNetIp = ipProbe;
                *pNetMask = inet_addr("255.255.255.0");
                break;
            }
        }
        /* No free subnet found: report it as an address conflict. */
        if (*pNetIp == 0)
            dwRc = ERROR_DHCP_ADDRESS_CONFLICT;
    }
    else
        NonStandardLogFlow(("GetAdaptersAddresses err (%d)\n", dwRc));
    if (pAddresses)
        free(pAddresses);
    if (dwRc != NO_ERROR)
    {
        hr = HRESULT_FROM_WIN32(dwRc);
    }
    return hr;
}
/*
* convenience functions to perform netflt/adp manipulations
*/
#define VBOXNETCFGWIN_NETFLT_ID L"sun_VBoxNetFlt"
#define VBOXNETCFGWIN_NETFLT_MP_ID L"sun_VBoxNetFltmp"
/**
 * Worker for VBoxNetCfgWinNetFltUninstall: unbinds the NetFlt network service
 * component (if present) and then removes its driver packages.
 *
 * @returns Result of the component uninstall, or the FindComponent status.
 * @param   pNc         Network configuration interface.
 * @param   InfRmFlags  Flags forwarded to the INF removal (e.g. SUOI_FORCEDELETE).
 */
static HRESULT vboxNetCfgWinNetFltUninstall(IN INetCfg *pNc, DWORD InfRmFlags)
{
    INetCfgComponent *pComponent = NULL;
    HRESULT hr = pNc->FindComponent(VBOXNETCFGWIN_NETFLT_ID, &pComponent);
    switch (hr)
    {
        case S_OK:
            NonStandardLog("NetFlt is installed currently, uninstalling ...\n");
            hr = VBoxNetCfgWinUninstallComponent(pNc, pComponent);
            NonStandardLogFlow(("NetFlt component uninstallation ended with hr (0x%x)\n", hr));
            pComponent->Release();
            break;

        case S_FALSE:
            NonStandardLog("NetFlt is not installed currently\n");
            break;

        default:
            NonStandardLogFlow(("FindComponent failed, hr (0x%x)\n", hr));
            break;
    }

    /* Always try to wipe the driver packages, bound or not. */
    VBoxDrvCfgInfUninstallAllF(L"NetService", VBOXNETCFGWIN_NETFLT_ID, InfRmFlags);
    VBoxDrvCfgInfUninstallAllF(L"Net", VBOXNETCFGWIN_NETFLT_MP_ID, InfRmFlags);

    return hr;
}
/**
 * Public wrapper: uninstalls the NetFlt component without forcing removal of
 * its INF files (InfRmFlags = 0).
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetFltUninstall(IN INetCfg *pNc)
{
    return vboxNetCfgWinNetFltUninstall(pNc, 0);
}
/**
 * Installs the NetFlt network service component from the given INF files,
 * first force-removing any leftover NetFlt bits so the install starts from a
 * clean slate.
 *
 * @param   pNc             Network configuration interface.
 * @param   apInfFullPaths  Array of full INF paths to install from.
 * @param   cInfFullPaths   Number of entries in @a apInfFullPaths.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetFltInstall(IN INetCfg *pNc,
                                                       IN LPCWSTR const *apInfFullPaths, IN UINT cInfFullPaths)
{
    HRESULT hr = vboxNetCfgWinNetFltUninstall(pNc, SUOI_FORCEDELETE);
    if (FAILED(hr))
        return hr;

    NonStandardLog("NetFlt will be installed ...\n");
    return vboxNetCfgWinInstallInfAndComponent(pNc, VBOXNETCFGWIN_NETFLT_ID,
                                               &GUID_DEVCLASS_NETSERVICE,
                                               apInfFullPaths,
                                               cInfFullPaths,
                                               NULL);
}
/**
 * Worker for VBoxNetCfgWinNetAdpUninstall: removes the "Net" class driver
 * package(s) matching @a pwszId from the driver store.
 *
 * @returns Always S_OK (the removal result is not propagated).
 * @param   pNc         Unused; kept for signature symmetry with the other workers.
 * @param   pwszId      Hardware id of the adapter driver to remove.
 * @param   InfRmFlags  INF removal flags (e.g. SUOI_FORCEDELETE).
 */
static HRESULT vboxNetCfgWinNetAdpUninstall(IN INetCfg *pNc, LPCWSTR pwszId, DWORD InfRmFlags)
{
    NOREF(pNc);
    NonStandardLog("Finding NetAdp driver package and trying to uninstall it ...\n");
    VBoxDrvCfgInfUninstallAllF(L"Net", pwszId, InfRmFlags);
    /* NOTE(review): logged unconditionally — the result of the removal above
       is not checked, so this line does not prove the package is gone. */
    NonStandardLog("NetAdp is not installed currently\n");
    return S_OK;
}
/**
 * Public wrapper: uninstalls the VBoxNetAdp driver package for the given
 * hardware id, force-deleting the INF files (SUOI_FORCEDELETE).
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetAdpUninstall(IN INetCfg *pNc, IN LPCWSTR pwszId)
{
    return vboxNetCfgWinNetAdpUninstall(pNc, pwszId, SUOI_FORCEDELETE);
}
/**
 * Installs the VBoxNetAdp (host-only) adapter component from the given INF.
 *
 * @param   pNc           Network configuration interface.
 * @param   pInfFullPath  Full path of the VBoxNetAdp INF file.
 * @note    VBOXNETCFGWIN_NETADP_ID is defined elsewhere in this file.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetAdpInstall(IN INetCfg *pNc,
                                                       IN LPCWSTR const pInfFullPath)
{
    NonStandardLog("NetAdp will be installed ...\n");
    HRESULT hr = vboxNetCfgWinInstallInfAndComponent(pNc, VBOXNETCFGWIN_NETADP_ID,
                                                     &GUID_DEVCLASS_NET,
                                                     &pInfFullPath,
                                                     1,
                                                     NULL);
    return hr;
}
#define VBOXNETCFGWIN_NETLWF_ID L"oracle_VBoxNetLwf"

/**
 * Worker for VBoxNetCfgWinNetLwfUninstall: unbinds the NetLwf filter
 * component (if present) and then removes its driver packages.
 *
 * @returns The component uninstall result when one was bound, S_OK otherwise
 *          ("not installed" and FindComponent failures are not treated as
 *          errors).
 * @param   pNc         Network configuration interface.
 * @param   InfRmFlags  Flags forwarded to the INF removal (e.g. SUOI_FORCEDELETE).
 */
static HRESULT vboxNetCfgWinNetLwfUninstall(IN INetCfg *pNc, DWORD InfRmFlags)
{
    INetCfgComponent *pComponent = NULL;
    HRESULT hr = pNc->FindComponent(VBOXNETCFGWIN_NETLWF_ID, &pComponent);
    switch (hr)
    {
        case S_OK:
            NonStandardLog("NetLwf is installed currently, uninstalling ...\n");
            hr = VBoxNetCfgWinUninstallComponent(pNc, pComponent);
            pComponent->Release();
            break;

        case S_FALSE:
            NonStandardLog("NetLwf is not installed currently\n");
            hr = S_OK;
            break;

        default:
            NonStandardLogFlow(("FindComponent failed, hr (0x%x)\n", hr));
            hr = S_OK;
            break;
    }

    /* Wipe the driver package regardless of the binding state. */
    VBoxDrvCfgInfUninstallAllF(L"NetService", VBOXNETCFGWIN_NETLWF_ID, InfRmFlags);

    return hr;
}
/**
 * Public wrapper: uninstalls the NetLwf filter component without forcing
 * removal of its INF files (InfRmFlags = 0).
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetLwfUninstall(IN INetCfg *pNc)
{
    return vboxNetCfgWinNetLwfUninstall(pNc, 0);
}
static void VBoxNetCfgWinFilterLimitWorkaround(void)
{
/*
* Need to check if the system has a limit of installed filter drivers. If it
* has, bump the limit to 14, which the maximum value supported by Windows 7.
* Note that we only touch the limit if it is set to the default value (8).
* See @bugref{7899}.
*/
HKEY hNetKey;
DWORD dwMaxNumFilters = 0;
DWORD cbMaxNumFilters = sizeof(dwMaxNumFilters);
LONG hr = RegOpenKeyEx(HKEY_LOCAL_MACHINE,
_T("SYSTEM\\CurrentControlSet\\Control\\Network"),
0, KEY_QUERY_VALUE | KEY_SET_VALUE, &hNetKey);
if (SUCCEEDED(hr))
{
hr = RegQueryValueEx(hNetKey, _T("MaxNumFilters"), NULL, NULL,
(LPBYTE)&dwMaxNumFilters, &cbMaxNumFilters);
if (SUCCEEDED(hr) && cbMaxNumFilters == sizeof(dwMaxNumFilters) && dwMaxNumFilters == 8)
{
dwMaxNumFilters = 14;
hr = RegSetValueEx(hNetKey, _T("MaxNumFilters"), 0, REG_DWORD,
(LPBYTE)&dwMaxNumFilters, sizeof(dwMaxNumFilters));
if (SUCCEEDED(hr))
NonStandardLog("Adjusted the installed filter limit to 14...\n");
else
NonStandardLog("Failed to set MaxNumFilters, error code 0x%x\n", hr);
}
RegCloseKey(hNetKey);
}
else
{
NonStandardLog("Failed to open network key, error code 0x%x\n", hr);
}
}
/**
 * Installs the NetLwf filter component from the given INF, after force-
 * removing any previous installation and applying the filter-limit
 * workaround.
 *
 * @param   pNc           Network configuration interface.
 * @param   pInfFullPath  Full path of the VBoxNetLwf INF file.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinNetLwfInstall(IN INetCfg *pNc,
                                                       IN LPCWSTR const pInfFullPath)
{
    HRESULT hr = vboxNetCfgWinNetLwfUninstall(pNc, SUOI_FORCEDELETE);
    if (FAILED(hr))
        return hr;

    VBoxNetCfgWinFilterLimitWorkaround();
    NonStandardLog("NetLwf will be installed ...\n");
    return vboxNetCfgWinInstallInfAndComponent(pNc, VBOXNETCFGWIN_NETLWF_ID,
                                               &GUID_DEVCLASS_NETSERVICE,
                                               &pInfFullPath,
                                               1,
                                               NULL);
}
#define VBOX_CONNECTION_NAME L"VirtualBox Host-Only Network"

/**
 * Composes the connection name for a host-only adapter from the base name and
 * the "#<n>" suffix of the device name (present for the 2nd, 3rd, ... adapter).
 *
 * @returns S_OK on success, E_FAIL when the buffer is too small (in which
 *          case *pcbBuf receives the required size in bytes).
 * @param   DevName  Device (friendly) name, possibly ending in "#<n>".
 * @param   pBuf     Output buffer for the connection name.
 * @param   pcbBuf   In: buffer size in bytes.  Out (on E_FAIL): required size.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinGenHostonlyConnectionName(PCWSTR DevName, WCHAR *pBuf, PULONG pcbBuf)
{
    const WCHAR *pwszSuffix = wcsrchr(DevName, L'#');

    /* Required size in bytes: base name (incl. terminator), plus suffix and a
       separating space when a suffix is present. */
    ULONG cbNeeded = sizeof(VBOX_CONNECTION_NAME);
    if (pwszSuffix)
        cbNeeded += (ULONG)wcslen(pwszSuffix) * 2 + 2 /* space */;

    if (*pcbBuf < cbNeeded)
    {
        *pcbBuf = cbNeeded;
        return E_FAIL;
    }

    wcscpy(pBuf, VBOX_CONNECTION_NAME);
    if (pwszSuffix)
    {
        wcscat(pBuf, L" ");
        wcscat(pBuf, pwszSuffix);
    }

    return S_OK;
}
/**
 * INetCfg component enumeration callback: for the host-only adapter whose
 * instance GUID equals *(GUID *)pContext, moves the binding path leading to
 * it to the end of @a pNcc's binding order and re-enables binding paths of
 * the bridged filter (VBoxNetLwf) bound to it (see @bugref{8140}).
 *
 * @returns TRUE (always continue enumerating components).
 * @param   pNc       Unused.
 * @param   pNcc      Component whose bindings are examined.
 * @param   pContext  Points to the instance GUID of the host-only interface.
 */
static BOOL vboxNetCfgWinAdjustHostOnlyNetworkInterfacePriority(IN INetCfg *pNc, IN INetCfgComponent *pNcc, PVOID pContext)
{
    RT_NOREF1(pNc);
    INetCfgComponentBindings *pNetCfgBindings;
    GUID *pGuid = (GUID*)pContext;
    /* Get component's binding. */
    HRESULT hr = pNcc->QueryInterface(IID_INetCfgComponentBindings, (PVOID*)&pNetCfgBindings);
    if (SUCCEEDED(hr))
    {
        /* Get binding path enumerator reference. */
        IEnumNetCfgBindingPath *pEnumNetCfgBindPath;
        hr = pNetCfgBindings->EnumBindingPaths(EBP_BELOW, &pEnumNetCfgBindPath);
        if (SUCCEEDED(hr))
        {
            bool bFoundIface = false;
            hr = pEnumNetCfgBindPath->Reset();
            /* Walk every binding path below this component until the target
               interface is found or the paths are exhausted. */
            do
            {
                INetCfgBindingPath *pNetCfgBindPath;
                hr = pEnumNetCfgBindPath->Next(1, &pNetCfgBindPath, NULL);
                if (hr == S_OK)
                {
                    IEnumNetCfgBindingInterface *pEnumNetCfgBindIface;
                    hr = pNetCfgBindPath->EnumBindingInterfaces(&pEnumNetCfgBindIface);
                    if (hr == S_OK)
                    {
                        pEnumNetCfgBindIface->Reset();
                        do
                        {
                            INetCfgBindingInterface *pNetCfgBindIfce;
                            hr = pEnumNetCfgBindIface->Next(1, &pNetCfgBindIfce, NULL);
                            if (hr == S_OK)
                            {
                                INetCfgComponent *pNetCfgCompo;
                                hr = pNetCfgBindIfce->GetLowerComponent(&pNetCfgCompo);
                                if (hr == S_OK)
                                {
                                    ULONG uComponentStatus;
                                    hr = pNetCfgCompo->GetDeviceStatus(&uComponentStatus);
                                    if (hr == S_OK)
                                    {
                                        GUID guid;
                                        hr = pNetCfgCompo->GetInstanceGuid(&guid);
                                        if (   hr == S_OK
                                            && guid == *pGuid)
                                        {
                                            /* Demote this path to the end of the binding order. */
                                            hr = pNetCfgBindings->MoveAfter(pNetCfgBindPath, NULL);
                                            if (FAILED(hr))
                                                NonStandardLogFlow(("Unable to move interface, hr (0x%x)\n", hr));
                                            bFoundIface = true;
                                            /*
                                             * Enable binding paths for host-only adapters bound to bridged filter
                                             * (see @bugref{8140}).
                                             */
                                            HRESULT hr2;
                                            LPWSTR pwszHwId = NULL;
                                            if ((hr2 = pNcc->GetId(&pwszHwId)) != S_OK)
                                                NonStandardLogFlow(("Failed to get HW ID, hr (0x%x)\n", hr2));
                                            else if (_wcsnicmp(pwszHwId, VBOXNETCFGWIN_NETLWF_ID,
                                                               sizeof(VBOXNETCFGWIN_NETLWF_ID)/2))
                                                NonStandardLogFlow(("Ignoring component %ls\n", pwszHwId));
                                            else if ((hr2 = pNetCfgBindPath->IsEnabled()) != S_FALSE)
                                                NonStandardLogFlow(("Already enabled binding path, hr (0x%x)\n", hr2))
;
                                            else if ((hr2 = pNetCfgBindPath->Enable(TRUE)) != S_OK)
                                                NonStandardLogFlow(("Failed to enable binding path, hr (0x%x)\n", hr2));
                                            else
                                                NonStandardLogFlow(("Enabled binding path\n"));
                                            if (pwszHwId)
                                                CoTaskMemFree(pwszHwId);
                                        }
                                    }
                                    pNetCfgCompo->Release();
                                }
                                else
                                    NonStandardLogFlow(("GetLowerComponent failed, hr (0x%x)\n", hr));
                                pNetCfgBindIfce->Release();
                            }
                            else
                            {
                                if (hr == S_FALSE) /* No more binding interfaces? */
                                    hr = S_OK;
                                else
                                    NonStandardLogFlow(("Next binding interface failed, hr (0x%x)\n", hr));
                                break;
                            }
                        } while (!bFoundIface);
                        pEnumNetCfgBindIface->Release();
                    }
                    else
                        NonStandardLogFlow(("EnumBindingInterfaces failed, hr (0x%x)\n", hr));
                    pNetCfgBindPath->Release();
                }
                else
                {
                    if (hr == S_FALSE) /* No more binding paths? */
                        hr = S_OK;
                    else
                        NonStandardLogFlow(("Next bind path failed, hr (0x%x)\n", hr));
                    break;
                }
            } while (!bFoundIface);
            pEnumNetCfgBindPath->Release();
        }
        else
            NonStandardLogFlow(("EnumBindingPaths failed, hr (0x%x)\n", hr));
        pNetCfgBindings->Release();
    }
    else
        NonStandardLogFlow(("QueryInterface for IID_INetCfgComponentBindings failed, hr (0x%x)\n", hr));
    return TRUE;
}
/**
 * Setup API file-queue callback: answers "target exists" / "target newer"
 * prompts with TRUE (i.e. overwrite) and defers everything else to the
 * default queue callback.
 */
static UINT WINAPI vboxNetCfgWinPspFileCallback(
            PVOID Context,
            UINT Notification,
            UINT_PTR Param1,
            UINT_PTR Param2
            )
{
    if (   Notification == SPFILENOTIFY_TARGETNEWER
        || Notification == SPFILENOTIFY_TARGETEXISTS)
        return TRUE;
    return SetupDefaultQueueCallback(Context, Notification, Param1, Param2);
}
/* The original source of the VBoxNetAdp adapter creation/destruction code has the following copyright */
/*
Copyright 2004 by the Massachusetts Institute of Technology
All rights reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of the Massachusetts
Institute of Technology (M.I.T.) not be used in advertising or publicity
pertaining to distribution of the software without specific, written
prior permission.
M.I.T. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
M.I.T. BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
*/
/**
 * Use the IShellFolder API to rename the connection.
 *
 * Opens the Network Connections shell folder, parses "::{GUID}" into an item
 * id list and renames that item via IShellFolder::SetNameOf.
 *
 * @returns COM status (E_NOTIMPL when the shell folder does not implement the
 *          operation, E_INVALIDARG when the GUID string is too long).
 * @param   wGuid     The connection GUID as a string (without "::" prefix).
 * @param   wNewName  The new connection name.
 */
static HRESULT rename_shellfolder (PCWSTR wGuid, PCWSTR wNewName)
{
    /* This is the GUID for the network connections folder. It is constant.
     * {7007ACC7-3202-11D1-AAD2-00805FC1270E} */
    const GUID CLSID_NetworkConnections = {
        0x7007ACC7, 0x3202, 0x11D1, {
            0xAA, 0xD2, 0x00, 0x80, 0x5F, 0xC1, 0x27, 0x0E
        }
    };
    LPITEMIDLIST pidl = NULL;
    IShellFolder *pShellFolder = NULL;
    HRESULT hr;
    /* Build the display name in the form "::{GUID}". */
    if (wcslen(wGuid) >= MAX_PATH)
        return E_INVALIDARG;
    WCHAR szAdapterGuid[MAX_PATH + 2] = {0};
    swprintf(szAdapterGuid, L"::%ls", wGuid);
    /* Create an instance of the network connections folder. */
    hr = CoCreateInstance(CLSID_NetworkConnections, NULL,
                          CLSCTX_INPROC_SERVER, IID_IShellFolder,
                          reinterpret_cast<LPVOID *>(&pShellFolder));
    /* Parse the display name. */
    if (SUCCEEDED (hr))
    {
        hr = pShellFolder->ParseDisplayName (NULL, NULL, szAdapterGuid, NULL,
                                             &pidl, NULL);
    }
    if (SUCCEEDED (hr))
    {
        /* SetNameOf re-uses 'pidl' as its out parameter; the final value is
           freed below. */
        hr = pShellFolder->SetNameOf (NULL, pidl, wNewName, SHGDN_NORMAL,
                                      &pidl);
    }
    CoTaskMemFree (pidl);
    if (pShellFolder)
        pShellFolder->Release();
    return hr;
}
/**
 * Loads a DLL strictly from the Windows system directory.
 *
 * @returns Module handle or NULL (on path construction failure, system
 *          directory lookup failure, or load failure).
 * @param   pszName The DLL file name (no path).
 */
static HMODULE loadSystemDll(const char *pszName)
{
    char   szPath[MAX_PATH];
    UINT   cchPath = GetSystemDirectoryA(szPath, sizeof(szPath));
    /* GetSystemDirectoryA returns 0 on failure; without this guard we would
       build a root-relative path ("\<name>") and load the DLL from an
       unintended - potentially attacker-controlled - location. */
    if (cchPath == 0)
        return NULL;
    size_t cbName  = strlen(pszName) + 1;
    if (cchPath + 1 + cbName > sizeof(szPath))
        return NULL; /* "<sysdir>\<name>" does not fit the buffer. */
    szPath[cchPath] = '\\';
    memcpy(&szPath[cchPath + 1], pszName, cbName);
    return LoadLibraryA(szPath);
}
/**
 * Renames a network connection identified by its GUID string.
 *
 * Tries the IShellFolder-based rename first; when that reports E_NOTIMPL
 * (pre-XP platforms), falls back to the undocumented HrRenameConnection
 * export of netshell.dll.
 *
 * @returns S_OK on success, E_FAIL or the underlying failure status otherwise.
 * @param   pGuid    Connection GUID as a string (e.g. "{...}").
 * @param   NewName  The new connection name.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinRenameConnection (LPWSTR pGuid, PCWSTR NewName)
{
    typedef HRESULT (WINAPI *lpHrRenameConnection) (const GUID *, PCWSTR);
    lpHrRenameConnection RenameConnectionFunc = NULL;
    HRESULT status;
    /* First try the IShellFolder interface, which was unimplemented
     * for the network connections folder before XP. */
    status = rename_shellfolder (pGuid, NewName);
    if (status == E_NOTIMPL)
    {
/** @todo that code doesn't seem to work! */
        /* The IShellFolder interface is not implemented on this platform.
         * Try the (undocumented) HrRenameConnection API in the netshell
         * library. */
        CLSID clsid;
        HINSTANCE hNetShell;
        status = CLSIDFromString ((LPOLESTR) pGuid, &clsid);
        if (FAILED(status))
            return E_FAIL;
        hNetShell = loadSystemDll("netshell.dll");
        if (hNetShell == NULL)
            return E_FAIL;
        RenameConnectionFunc =
          (lpHrRenameConnection) GetProcAddress (hNetShell,
                                                 "HrRenameConnection");
        if (RenameConnectionFunc == NULL)
        {
            FreeLibrary (hNetShell);
            return E_FAIL;
        }
        status = RenameConnectionFunc (&clsid, NewName);
        FreeLibrary (hNetShell);
    }
    if (FAILED (status))
        return status;
    return S_OK;
}
/* Hardware id of the VBoxNetAdp (host-only) adapter as registered by its
   driver INF. */
#define DRIVERHWID _T("sun_VBoxNetAdp")

/* Error helper for use inside do { ... } while (0) scopes: records E_FAIL in
   the local 'hrc', logs and captures the formatted message into the local
   'bstrError', then breaks out of the enclosing loop.  Must be used as a full
   statement (expands to an if/else construct). */
#define SetErrBreak(strAndArgs) \
    if (1) { \
        hrc = E_FAIL; \
        NonStandardLog strAndArgs; \
        bstrError = bstr_printf strAndArgs; \
        break; \
    } else do {} while (0)
/**
 * Removes the host-only network interface whose NetCfg instance GUID is
 * @a pGUID.
 *
 * Works in two stages: (1) resolves the adapter's PnP device instance id from
 * the registry (Network class key -> Connection -> PnPInstanceID), then
 * (2) enumerates all present Net-class devices carrying the VBoxNetAdp
 * hardware id until the instance id matches and removes that device via the
 * class installer (DIF_REMOVE).
 *
 * @returns S_OK on success, E_FAIL on error (formatted message in *pErrMsg).
 * @param   pGUID    Instance GUID of the interface to remove.
 * @param   pErrMsg  Optional; receives an error message BSTR on failure.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinRemoveHostOnlyNetworkInterface(IN const GUID *pGUID, OUT BSTR *pErrMsg)
{
    HRESULT hrc = S_OK;
    bstr_t bstrError;
    do
    {
        TCHAR lszPnPInstanceId [512] = {0};
        /* We have to find the device instance ID through a registry search */
        HKEY hkeyNetwork = 0;
        HKEY hkeyConnection = 0;
        do
        {
            WCHAR strRegLocation [256];
            WCHAR wszGuid[50];
            int length = StringFromGUID2(*pGUID, wszGuid, RT_ELEMENTS(wszGuid));
            if (!length)
                SetErrBreak(("Failed to create a Guid string"));
            /* Network class key for this interface GUID. */
            swprintf (strRegLocation,
                      L"SYSTEM\\CurrentControlSet\\Control\\Network\\"
                      L"{4D36E972-E325-11CE-BFC1-08002BE10318}\\%s",
                      wszGuid);
            LONG status;
            status = RegOpenKeyExW (HKEY_LOCAL_MACHINE, strRegLocation, 0,
                                    KEY_READ, &hkeyNetwork);
            if ((status != ERROR_SUCCESS) || !hkeyNetwork)
                SetErrBreak (("Host interface network is not found in registry (%S) [1]",
                              strRegLocation));
            status = RegOpenKeyExW (hkeyNetwork, L"Connection", 0,
                                    KEY_READ, &hkeyConnection);
            if ((status != ERROR_SUCCESS) || !hkeyConnection)
                SetErrBreak (("Host interface network is not found in registry (%S) [2]",
                              strRegLocation));
            DWORD len = sizeof (lszPnPInstanceId);
            DWORD dwKeyType;
            status = RegQueryValueExW (hkeyConnection, L"PnPInstanceID", NULL,
                                       &dwKeyType, (LPBYTE) lszPnPInstanceId, &len);
            if ((status != ERROR_SUCCESS) || (dwKeyType != REG_SZ))
                SetErrBreak (("Host interface network is not found in registry (%S) [3]",
                              strRegLocation));
        }
        while (0);
        if (hkeyConnection)
            RegCloseKey (hkeyConnection);
        if (hkeyNetwork)
            RegCloseKey (hkeyNetwork);
        if (FAILED (hrc))
            break;
        /*
         * Now we are going to enumerate all network devices and
         * wait until we encounter the right device instance ID
         */
        HDEVINFO hDeviceInfo = INVALID_HANDLE_VALUE;
        do
        {
            BOOL ok;
            GUID netGuid;
            SP_DEVINFO_DATA DeviceInfoData;
            DWORD index = 0;
            BOOL found = FALSE;
            DWORD size = 0;
            /* initialize the structure size */
            DeviceInfoData.cbSize = sizeof (SP_DEVINFO_DATA);
            /* copy the net class GUID */
            memcpy(&netGuid, &GUID_DEVCLASS_NET, sizeof (GUID_DEVCLASS_NET));
            /* return a device info set contains all installed devices of the Net class */
            hDeviceInfo = SetupDiGetClassDevs(&netGuid, NULL, NULL, DIGCF_PRESENT);
            if (hDeviceInfo == INVALID_HANDLE_VALUE)
                SetErrBreak(("SetupDiGetClassDevs failed (0x%08X)", GetLastError()));
            /* enumerate the driver info list */
            while (TRUE)
            {
                TCHAR *deviceHwid;
                ok = SetupDiEnumDeviceInfo(hDeviceInfo, index, &DeviceInfoData);
                if (!ok)
                {
                    if (GetLastError() == ERROR_NO_MORE_ITEMS)
                        break;
                    else
                    {
                        index++;
                        continue;
                    }
                }
                /* try to get the hardware ID registry property */
                /* First call with a NULL buffer to learn the required size;
                   it is expected to fail with ERROR_INSUFFICIENT_BUFFER. */
                ok = SetupDiGetDeviceRegistryProperty(hDeviceInfo,
                                                      &DeviceInfoData,
                                                      SPDRP_HARDWAREID,
                                                      NULL,
                                                      NULL,
                                                      0,
                                                      &size);
                if (!ok)
                {
                    if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
                    {
                        index++;
                        continue;
                    }
                    /* NOTE(review): the malloc result is not checked; a NULL
                       buffer makes the call below fail and the device is
                       skipped, so this is safe but silent. */
                    deviceHwid = (TCHAR *) malloc(size);
                    ok = SetupDiGetDeviceRegistryProperty(hDeviceInfo,
                                                          &DeviceInfoData,
                                                          SPDRP_HARDWAREID,
                                                          NULL,
                                                          (PBYTE)deviceHwid,
                                                          size,
                                                          NULL);
                    if (!ok)
                    {
                        free(deviceHwid);
                        deviceHwid = NULL;
                        index++;
                        continue;
                    }
                }
                else
                {
                    /* something is wrong. This shouldn't have worked with a NULL buffer */
                    index++;
                    continue;
                }
                /* The hardware id property is a REG_MULTI_SZ list; scan every
                   entry for the VBoxNetAdp id. */
                for (TCHAR *t = deviceHwid;
                     t && *t && t < &deviceHwid[size / sizeof(TCHAR)];
                     t += _tcslen(t) + 1)
                {
                    if (!_tcsicmp(DRIVERHWID, t))
                    {
                        /* get the device instance ID */
                        TCHAR devId[MAX_DEVICE_ID_LEN];
                        if (CM_Get_Device_ID(DeviceInfoData.DevInst,
                                             devId, MAX_DEVICE_ID_LEN, 0) == CR_SUCCESS)
                        {
                            /* compare to what we determined before */
                            if (wcscmp(devId, lszPnPInstanceId) == 0)
                            {
                                found = TRUE;
                                break;
                            }
                        }
                    }
                }
                if (deviceHwid)
                {
                    free (deviceHwid);
                    deviceHwid = NULL;
                }
                if (found)
                    break;
                index++;
            }
            if (found == FALSE)
                SetErrBreak (("Host Interface Network driver not found (0x%08X)",
                              GetLastError()));
            ok = SetupDiSetSelectedDevice (hDeviceInfo, &DeviceInfoData);
            if (!ok)
                SetErrBreak (("SetupDiSetSelectedDevice failed (0x%08X)",
                              GetLastError()));
            /* Remove the device through the class installer. */
            ok = SetupDiCallClassInstaller (DIF_REMOVE, hDeviceInfo, &DeviceInfoData);
            if (!ok)
                SetErrBreak (("SetupDiCallClassInstaller (DIF_REMOVE) failed (0x%08X)",
                              GetLastError()));
        }
        while (0);
        /* clean up the device info set */
        if (hDeviceInfo != INVALID_HANDLE_VALUE)
            SetupDiDestroyDeviceInfoList (hDeviceInfo);
        if (FAILED (hrc))
            break;
    }
    while (0);
    if (pErrMsg && bstrError.length())
        *pErrMsg = bstrError.Detach();
    return hrc;
}
/**
 * Updates the host-only interface driver identified by @a pcsxwId from the
 * INF @a pcsxwInf; thin wrapper around VBoxDrvCfgDrvUpdate.
 *
 * @param   pbRebootRequired  Set when a reboot is needed to complete the update.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinUpdateHostOnlyNetworkInterface(LPCWSTR pcsxwInf, BOOL *pbRebootRequired, LPCWSTR pcsxwId)
{
    return VBoxDrvCfgDrvUpdate(pcsxwId, pcsxwInf, pbRebootRequired);
}
/**
 * Translates an SCM service state value into the human-readable phrase used
 * in the NetSetupSvc log messages.
 */
static const char *vboxNetCfgWinGetStateText(DWORD dwState)
{
    static const struct { DWORD dwState; const char *pszText; } s_aStates[] =
    {
        { SERVICE_STOPPED,          "is not running" },
        { SERVICE_STOP_PENDING,     "is stopping" },
        { SERVICE_CONTINUE_PENDING, "continue is pending" },
        { SERVICE_PAUSE_PENDING,    "pause is pending" },
        { SERVICE_PAUSED,           "is paused" },
        { SERVICE_RUNNING,          "is running" },
        { SERVICE_START_PENDING,    "is starting" }
    };
    for (unsigned i = 0; i < sizeof(s_aStates) / sizeof(s_aStates[0]); i++)
        if (s_aStates[i].dwState == dwState)
            return s_aStates[i].pszText;
    return "state is invalid";
}
/**
 * Queries (and logs) the current state of the given service.
 *
 * Defaults to SERVICE_RUNNING when no handle is supplied or the query fails,
 * so callers err on the side of assuming the service is active.
 */
static DWORD vboxNetCfgWinGetNetSetupState(SC_HANDLE hService)
{
    SERVICE_STATUS status;
    status.dwCurrentState = SERVICE_RUNNING; /* optimistic default */
    if (!hService)
        return status.dwCurrentState;
    if (!QueryServiceStatus(hService, &status))
        NonStandardLogFlow(("QueryServiceStatus failed (0x%x)\n", GetLastError()));
    else
        NonStandardLogFlow(("NetSetupSvc %s\n", vboxNetCfgWinGetStateText(status.dwCurrentState)));
    return status.dwCurrentState;
}
/** Returns true when the service is in the SERVICE_RUNNING state (also the
 *  default answer when the state cannot be queried). */
DECLINLINE(bool) vboxNetCfgWinIsNetSetupRunning(SC_HANDLE hService)
{
    return vboxNetCfgWinGetNetSetupState(hService) == SERVICE_RUNNING;
}
/** Returns true when the service is in the SERVICE_STOPPED state. */
DECLINLINE(bool) vboxNetCfgWinIsNetSetupStopped(SC_HANDLE hService)
{
    return vboxNetCfgWinGetNetSetupState(hService) == SERVICE_STOPPED;
}
static HRESULT vboxNetCfgWinCreateHostOnlyNetworkInterface(IN LPCWSTR pInfPath, IN bool bIsInfPathFile,
OUT GUID *pGuid, OUT BSTR *lppszName, OUT BSTR *pErrMsg)
{
HRESULT hrc = S_OK;
HDEVINFO hDeviceInfo = INVALID_HANDLE_VALUE;
SP_DEVINFO_DATA DeviceInfoData;
PVOID pQueueCallbackContext = NULL;
DWORD ret = 0;
BOOL registered = FALSE;
BOOL destroyList = FALSE;
WCHAR pWCfgGuidString [50];
WCHAR DevName[256];
HKEY hkey = (HKEY)INVALID_HANDLE_VALUE;
bstr_t bstrError;
do
{
BOOL found = FALSE;
GUID netGuid;
SP_DRVINFO_DATA DriverInfoData;
SP_DEVINSTALL_PARAMS DeviceInstallParams;
TCHAR className [MAX_PATH];
DWORD index = 0;
PSP_DRVINFO_DETAIL_DATA pDriverInfoDetail;
/* for our purposes, 2k buffer is more
* than enough to obtain the hardware ID
* of the VBoxNetAdp driver. */
DWORD detailBuf [2048];
DWORD cbSize;
DWORD dwValueType;
/* initialize the structure size */
DeviceInfoData.cbSize = sizeof (SP_DEVINFO_DATA);
DriverInfoData.cbSize = sizeof (SP_DRVINFO_DATA);
/* copy the net class GUID */
memcpy(&netGuid, &GUID_DEVCLASS_NET, sizeof(GUID_DEVCLASS_NET));
/* create an empty device info set associated with the net class GUID */
hDeviceInfo = SetupDiCreateDeviceInfoList(&netGuid, NULL);
if (hDeviceInfo == INVALID_HANDLE_VALUE)
SetErrBreak (("SetupDiCreateDeviceInfoList failed (0x%08X)",
GetLastError()));
/* get the class name from GUID */
BOOL fResult = SetupDiClassNameFromGuid (&netGuid, className, MAX_PATH, NULL);
if (!fResult)
SetErrBreak (("SetupDiClassNameFromGuid failed (0x%08X)",
GetLastError()));
/* create a device info element and add the new device instance
* key to registry */
fResult = SetupDiCreateDeviceInfo (hDeviceInfo, className, &netGuid, NULL, NULL,
DICD_GENERATE_ID, &DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiCreateDeviceInfo failed (0x%08X)",
GetLastError()));
/* select the newly created device info to be the currently
selected member */
fResult = SetupDiSetSelectedDevice (hDeviceInfo, &DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiSetSelectedDevice failed (0x%08X)",
GetLastError()));
if (pInfPath)
{
/* get the device install parameters and disable filecopy */
DeviceInstallParams.cbSize = sizeof(SP_DEVINSTALL_PARAMS);
fResult = SetupDiGetDeviceInstallParams (hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (fResult)
{
memset(DeviceInstallParams.DriverPath, 0, sizeof(DeviceInstallParams.DriverPath));
size_t pathLenght = wcslen(pInfPath) + 1/* null terminator */;
if (pathLenght < sizeof(DeviceInstallParams.DriverPath)/sizeof(DeviceInstallParams.DriverPath[0]))
{
memcpy(DeviceInstallParams.DriverPath, pInfPath, pathLenght*sizeof(DeviceInstallParams.DriverPath[0]));
if (bIsInfPathFile)
{
DeviceInstallParams.Flags |= DI_ENUMSINGLEINF;
}
fResult = SetupDiSetDeviceInstallParams(hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (!fResult)
{
DWORD winEr = GetLastError();
NonStandardLogFlow(("SetupDiSetDeviceInstallParams failed, winEr (%d)\n", winEr));
break;
}
}
else
{
NonStandardLogFlow(("SetupDiSetDeviceInstallParams faileed: INF path is too long\n"));
break;
}
}
else
{
DWORD winEr = GetLastError();
NonStandardLogFlow(("SetupDiGetDeviceInstallParams failed, winEr (%d)\n", winEr));
}
}
/* build a list of class drivers */
fResult = SetupDiBuildDriverInfoList (hDeviceInfo, &DeviceInfoData,
SPDIT_CLASSDRIVER);
if (!fResult)
SetErrBreak (("SetupDiBuildDriverInfoList failed (0x%08X)",
GetLastError()));
destroyList = TRUE;
/* enumerate the driver info list */
while (TRUE)
{
BOOL ret;
ret = SetupDiEnumDriverInfo (hDeviceInfo, &DeviceInfoData,
SPDIT_CLASSDRIVER, index, &DriverInfoData);
/* if the function failed and GetLastError() returned
* ERROR_NO_MORE_ITEMS, then we have reached the end of the
* list. Otherwise there was something wrong with this
* particular driver. */
if (!ret)
{
if (GetLastError() == ERROR_NO_MORE_ITEMS)
break;
else
{
index++;
continue;
}
}
pDriverInfoDetail = (PSP_DRVINFO_DETAIL_DATA) detailBuf;
pDriverInfoDetail->cbSize = sizeof(SP_DRVINFO_DETAIL_DATA);
/* if we successfully find the hardware ID and it turns out to
* be the one for the loopback driver, then we are done. */
if (SetupDiGetDriverInfoDetail (hDeviceInfo,
&DeviceInfoData,
&DriverInfoData,
pDriverInfoDetail,
sizeof (detailBuf),
NULL))
{
TCHAR * t;
/* pDriverInfoDetail->HardwareID is a MULTISZ string. Go through the
* whole list and see if there is a match somewhere. */
t = pDriverInfoDetail->HardwareID;
while (t && *t && t < (TCHAR *) &detailBuf [RT_ELEMENTS(detailBuf)])
{
if (!_tcsicmp(t, DRIVERHWID))
break;
t += _tcslen(t) + 1;
}
if (t && *t && t < (TCHAR *) &detailBuf [RT_ELEMENTS(detailBuf)])
{
found = TRUE;
break;
}
}
index ++;
}
if (!found)
SetErrBreak(("Could not find Host Interface Networking driver! Please reinstall"));
/* set the loopback driver to be the currently selected */
fResult = SetupDiSetSelectedDriver (hDeviceInfo, &DeviceInfoData,
&DriverInfoData);
if (!fResult)
SetErrBreak(("SetupDiSetSelectedDriver failed (0x%08X)",
GetLastError()));
/* register the phantom device to prepare for install */
fResult = SetupDiCallClassInstaller (DIF_REGISTERDEVICE, hDeviceInfo,
&DeviceInfoData);
if (!fResult)
{
DWORD err = GetLastError();
SetErrBreak (("SetupDiCallClassInstaller failed (0x%08X)",
err));
}
/* registered, but remove if errors occur in the following code */
registered = TRUE;
/* ask the installer if we can install the device */
fResult = SetupDiCallClassInstaller (DIF_ALLOW_INSTALL, hDeviceInfo,
&DeviceInfoData);
if (!fResult)
{
if (GetLastError() != ERROR_DI_DO_DEFAULT)
SetErrBreak (("SetupDiCallClassInstaller (DIF_ALLOW_INSTALL) failed (0x%08X)",
GetLastError()));
/* that's fine */
}
/* get the device install parameters and disable filecopy */
DeviceInstallParams.cbSize = sizeof(SP_DEVINSTALL_PARAMS);
fResult = SetupDiGetDeviceInstallParams (hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (fResult)
{
pQueueCallbackContext = SetupInitDefaultQueueCallback(NULL);
if (pQueueCallbackContext)
{
DeviceInstallParams.InstallMsgHandlerContext = pQueueCallbackContext;
DeviceInstallParams.InstallMsgHandler = (PSP_FILE_CALLBACK)vboxNetCfgWinPspFileCallback;
fResult = SetupDiSetDeviceInstallParams (hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (!fResult)
{
DWORD winEr = GetLastError();
NonStandardLogFlow(("SetupDiSetDeviceInstallParams failed, winEr (%d)\n", winEr));
}
Assert(fResult);
}
else
{
DWORD winEr = GetLastError();
NonStandardLogFlow(("SetupInitDefaultQueueCallback failed, winEr (%d)\n", winEr));
}
}
else
{
DWORD winEr = GetLastError();
NonStandardLogFlow(("SetupDiGetDeviceInstallParams failed, winEr (%d)\n", winEr));
}
/* install the files first */
fResult = SetupDiCallClassInstaller (DIF_INSTALLDEVICEFILES, hDeviceInfo,
&DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiCallClassInstaller (DIF_INSTALLDEVICEFILES) failed (0x%08X)",
GetLastError()));
/* get the device install parameters and disable filecopy */
DeviceInstallParams.cbSize = sizeof(SP_DEVINSTALL_PARAMS);
fResult = SetupDiGetDeviceInstallParams (hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (fResult)
{
DeviceInstallParams.Flags |= DI_NOFILECOPY;
fResult = SetupDiSetDeviceInstallParams(hDeviceInfo, &DeviceInfoData,
&DeviceInstallParams);
if (!fResult)
SetErrBreak (("SetupDiSetDeviceInstallParams failed (0x%08X)",
GetLastError()));
}
/*
* Register any device-specific co-installers for this device,
*/
fResult = SetupDiCallClassInstaller(DIF_REGISTER_COINSTALLERS,
hDeviceInfo,
&DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiCallClassInstaller (DIF_REGISTER_COINSTALLERS) failed (0x%08X)",
GetLastError()));
/*
* install any installer-specified interfaces.
* and then do the real install
*/
fResult = SetupDiCallClassInstaller(DIF_INSTALLINTERFACES,
hDeviceInfo,
&DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiCallClassInstaller (DIF_INSTALLINTERFACES) failed (0x%08X)",
GetLastError()));
fResult = SetupDiCallClassInstaller(DIF_INSTALLDEVICE,
hDeviceInfo,
&DeviceInfoData);
if (!fResult)
SetErrBreak (("SetupDiCallClassInstaller (DIF_INSTALLDEVICE) failed (0x%08X)",
GetLastError()));
/* Query the instance ID; on Windows 10, the registry key may take a short
* while to appear. Microsoft recommends waiting for up to 5 seconds, but
* we want to be on the safe side, so let's wait for 20 seconds. Waiting
* longer is harmful as network setup service will shut down after a period
* of inactivity.
*/
for (int retries = 0; retries < 2 * 20; ++retries)
{
Sleep(500); /* half second */
/* Figure out NetCfgInstanceId */
hkey = SetupDiOpenDevRegKey(hDeviceInfo,
&DeviceInfoData,
DICS_FLAG_GLOBAL,
0,
DIREG_DRV,
KEY_READ);
if (hkey == INVALID_HANDLE_VALUE)
break;
cbSize = sizeof(pWCfgGuidString);
ret = RegQueryValueExW (hkey, L"NetCfgInstanceId", NULL,
&dwValueType, (LPBYTE) pWCfgGuidString, &cbSize);
/* As long as the return code is FILE_NOT_FOUND, sleep and retry. */
if (ret != ERROR_FILE_NOT_FOUND)
break;
RegCloseKey (hkey);
hkey = (HKEY)INVALID_HANDLE_VALUE;
}
if (ret == ERROR_FILE_NOT_FOUND)
{
hrc = E_ABORT;
break;
}
/*
* We need to check 'hkey' after we check 'ret' to distinguish the case
* of failed SetupDiOpenDevRegKey from the case when we timed out.
*/
if (hkey == INVALID_HANDLE_VALUE)
SetErrBreak(("SetupDiOpenDevRegKey failed (0x%08X)", GetLastError()));
if (ret != ERROR_SUCCESS)
SetErrBreak(("Querying NetCfgInstanceId failed (0x%08X)", ret));
NET_LUID luid;
HRESULT hSMRes = vboxNetCfgWinGetInterfaceLUID(hkey, &luid);
/* Close the key as soon as possible. See @bugref{7973}. */
RegCloseKey (hkey);
hkey = (HKEY)INVALID_HANDLE_VALUE;
if (FAILED(hSMRes))
{
/*
* The setting of Metric is not very important functionality,
* So we will not break installation process due to this error.
*/
NonStandardLogFlow(("vboxNetCfgWinCreateHostOnlyNetworkInterface Warning! "
"vboxNetCfgWinGetInterfaceLUID failed, default metric "
"for new interface will not be set, hr (0x%x)\n", hSMRes));
}
else
{
/*
* Set default metric value of interface to fix multicast issue
* See @bugref{6379} for details.
*/
hSMRes = vboxNetCfgWinSetupMetric(&luid);
if (FAILED(hSMRes))
{
/*
* The setting of Metric is not very important functionality,
* So we will not break installation process due to this error.
*/
NonStandardLogFlow(("vboxNetCfgWinCreateHostOnlyNetworkInterface Warning! "
"vboxNetCfgWinSetupMetric failed, default metric "
"for new interface will not be set, hr (0x%x)\n", hSMRes));
}
}
#ifndef VBOXNETCFG_DELAYEDRENAME
/*
* We need to query the device name after we have succeeded in querying its
* instance ID to avoid similar waiting-and-retrying loop (see @bugref{7973}).
*/
if (!SetupDiGetDeviceRegistryPropertyW(hDeviceInfo, &DeviceInfoData,
SPDRP_FRIENDLYNAME , /* IN DWORD Property,*/
NULL, /*OUT PDWORD PropertyRegDataType, OPTIONAL*/
(PBYTE)DevName, /*OUT PBYTE PropertyBuffer,*/
sizeof(DevName), /* IN DWORD PropertyBufferSize,*/
NULL /*OUT PDWORD RequiredSize OPTIONAL*/))
{
int err = GetLastError();
if (err != ERROR_INVALID_DATA)
{
SetErrBreak (("SetupDiGetDeviceRegistryProperty failed (0x%08X)",
err));
}
if (!SetupDiGetDeviceRegistryPropertyW(hDeviceInfo, &DeviceInfoData,
SPDRP_DEVICEDESC, /* IN DWORD Property,*/
NULL, /*OUT PDWORD PropertyRegDataType, OPTIONAL*/
(PBYTE)DevName, /*OUT PBYTE PropertyBuffer,*/
sizeof(DevName), /* IN DWORD PropertyBufferSize,*/
NULL /*OUT PDWORD RequiredSize OPTIONAL*/
))
{
err = GetLastError();
SetErrBreak (("SetupDiGetDeviceRegistryProperty failed (0x%08X)",
err));
}
}
#else /* !VBOXNETCFG_DELAYEDRENAME */
/* Re-use DevName for device instance id retrieval. */
if (!SetupDiGetDeviceInstanceId(hDeviceInfo, &DeviceInfoData, DevName, RT_ELEMENTS(DevName), &cbSize))
SetErrBreak (("SetupDiGetDeviceInstanceId failed (0x%08X)",
GetLastError()));
#endif /* !VBOXNETCFG_DELAYEDRENAME */
}
while (0);
/*
* cleanup
*/
if (hkey != INVALID_HANDLE_VALUE)
RegCloseKey (hkey);
if (pQueueCallbackContext)
SetupTermDefaultQueueCallback(pQueueCallbackContext);
if (hDeviceInfo != INVALID_HANDLE_VALUE)
{
/* an error has occurred, but the device is registered, we must remove it */
if (ret != 0 && registered)
SetupDiCallClassInstaller(DIF_REMOVE, hDeviceInfo, &DeviceInfoData);
SetupDiDeleteDeviceInfo(hDeviceInfo, &DeviceInfoData);
/* destroy the driver info list */
if (destroyList)
SetupDiDestroyDriverInfoList(hDeviceInfo, &DeviceInfoData,
SPDIT_CLASSDRIVER);
/* clean up the device info set */
SetupDiDestroyDeviceInfoList (hDeviceInfo);
}
/* return the network connection GUID on success */
if (SUCCEEDED(hrc))
{
HRESULT hr;
INetCfg *pNetCfg = NULL;
LPWSTR lpszApp = NULL;
#ifndef VBOXNETCFG_DELAYEDRENAME
WCHAR ConnectionName[128];
ULONG cbName = sizeof(ConnectionName);
hr = VBoxNetCfgWinGenHostonlyConnectionName(DevName, ConnectionName, &cbName);
if (SUCCEEDED(hr))
hr = VBoxNetCfgWinRenameConnection(pWCfgGuidString, ConnectionName);
#endif
if (lppszName)
{
*lppszName = SysAllocString((const OLECHAR *) DevName);
if (!*lppszName)
{
NonStandardLogFlow(("SysAllocString failed\n"));
hrc = HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY);
}
}
if (pGuid)
{
hrc = CLSIDFromString(pWCfgGuidString, (LPCLSID)pGuid);
if (FAILED(hrc))
NonStandardLogFlow(("CLSIDFromString failed, hrc (0x%x)\n", hrc));
}
hr = VBoxNetCfgWinQueryINetCfg(&pNetCfg, TRUE, L"VirtualBox Host-Only Creation",
30 * 1000, /* on Vista we often get 6to4svc.dll holding the lock, wait for 30 sec. */
/** @todo special handling for 6to4svc.dll ???, i.e. several retrieves */
&lpszApp);
if (hr == S_OK)
{
hr = vboxNetCfgWinEnumNetCfgComponents(pNetCfg,
&GUID_DEVCLASS_NETSERVICE,
vboxNetCfgWinAdjustHostOnlyNetworkInterfacePriority,
pGuid);
if (SUCCEEDED(hr))
{
hr = vboxNetCfgWinEnumNetCfgComponents(pNetCfg,
&GUID_DEVCLASS_NETTRANS,
vboxNetCfgWinAdjustHostOnlyNetworkInterfacePriority,
pGuid);
if (SUCCEEDED(hr))
hr = vboxNetCfgWinEnumNetCfgComponents(pNetCfg,
&GUID_DEVCLASS_NETCLIENT,
vboxNetCfgWinAdjustHostOnlyNetworkInterfacePriority,
pGuid);
}
if (SUCCEEDED(hr))
{
hr = pNetCfg->Apply();
}
else
NonStandardLogFlow(("Enumeration failed, hr 0x%x\n", hr));
VBoxNetCfgWinReleaseINetCfg(pNetCfg, TRUE);
}
else if (hr == NETCFG_E_NO_WRITE_LOCK && lpszApp)
{
NonStandardLogFlow(("Application %ws is holding the lock, failed\n", lpszApp));
CoTaskMemFree(lpszApp);
}
else
NonStandardLogFlow(("VBoxNetCfgWinQueryINetCfg failed, hr 0x%x\n", hr));
}
if (pErrMsg && bstrError.length())
*pErrMsg = bstrError.Detach();
return hrc;
}
/**
 * Public entry point for creating a host-only network interface.
 *
 * Wraps the internal worker and retries when it reports E_ABORT, which the
 * worker uses to signal a timeout while waiting for NetCfgInstanceId
 * (see @bugref{7973}):
 *   1) retry once immediately,
 *   2) retry once more after waiting (up to 60s) for the "NetSetupSvc"
 *      service to stop,
 *   3) give up, report the failure via pErrMsg and return E_FAIL.
 *
 * @param pInfPath       path of (or to) the driver INF.
 * @param bIsInfPathFile whether pInfPath names the INF file itself.
 * @param pGuid          [out] GUID of the created network connection.
 * @param lppszName      [out] friendly device name (BSTR, caller frees).
 * @param pErrMsg        [out, optional] human readable error message.
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinCreateHostOnlyNetworkInterface(IN LPCWSTR pInfPath, IN bool bIsInfPathFile,
                                                                        OUT GUID *pGuid, OUT BSTR *lppszName, OUT BSTR *pErrMsg)
{
    HRESULT hrc = vboxNetCfgWinCreateHostOnlyNetworkInterface(pInfPath, bIsInfPathFile, pGuid, lppszName, pErrMsg);
    if (hrc == E_ABORT)
    {
        NonStandardLogFlow(("Timed out while waiting for NetCfgInstanceId, try again immediately...\n"));
        /*
         * This is the first time we fail to obtain NetCfgInstanceId, let us
         * retry it once. It is needed to handle the situation when network
         * setup fails to recognize the arrival of our device node while it
         * is busy removing another host-only interface, and it gets stuck
         * with no matching network interface created for our device node.
         * See @bugref{7973} for details.
         */
        hrc = vboxNetCfgWinCreateHostOnlyNetworkInterface(pInfPath, bIsInfPathFile, pGuid, lppszName, pErrMsg);
        if (hrc == E_ABORT)
        {
            NonStandardLogFlow(("Timed out again while waiting for NetCfgInstanceId, try again after a while...\n"));
            /*
             * This is the second time we fail to obtain NetCfgInstanceId, let us
             * retry it once more. This time we wait to network setup service
             * to go down before retrying. Hopefully it will resolve all error
             * conditions. See @bugref{7973} for details.
             */
            SC_HANDLE hSCM = NULL;
            SC_HANDLE hService = NULL;
            hSCM = OpenSCManager(NULL, NULL, GENERIC_READ);
            if (hSCM)
            {
                hService = OpenService(hSCM, _T("NetSetupSvc"), GENERIC_READ);
                if (hService)
                {
                    /* Poll once a second, for at most a minute, until the
                     * network setup service has stopped. */
                    for (int retries = 0; retries < 60 && !vboxNetCfgWinIsNetSetupStopped(hService); ++retries)
                        Sleep(1000);
                    CloseServiceHandle(hService);
                    hrc = vboxNetCfgWinCreateHostOnlyNetworkInterface(pInfPath, bIsInfPathFile, pGuid, lppszName, pErrMsg);
                }
                else
                    NonStandardLogFlow(("OpenService failed (0x%x)\n", GetLastError()));
                CloseServiceHandle(hSCM);
            }
            else
                NonStandardLogFlow(("OpenSCManager failed (0x%x)", GetLastError()));
            /* Give up and report the error. */
            if (hrc == E_ABORT)
            {
                if (pErrMsg)
                {
                    bstr_t bstrError = bstr_printf("Querying NetCfgInstanceId failed (0x%08X)", ERROR_FILE_NOT_FOUND);
                    *pErrMsg = bstrError.Detach();
                }
                hrc = E_FAIL;
            }
        }
    }
    return hrc;
}
/**
 * Loads Iphlpapi.dll and resolves the three IP-interface entry points used
 * for metric manipulation (Initialize/Get/SetIpInterfaceEntry).
 *
 * @param pIpHlpInstance  [in,out] must be NULL on entry; receives the module
 *                        handle on success, reset to NULL on failure.
 * @return S_OK if the DLL and all three entry points were resolved,
 *         E_FAIL otherwise (all global function pointers cleared).
 */
HRESULT vboxLoadIpHelpFunctions(HINSTANCE& pIpHlpInstance)
{
    /* The out parameter must not already hold a module handle; the original
     * asserted the opposite (!= NULL), which fired for every caller that
     * correctly passed a NULL-initialized handle (see vboxNetCfgWinSetupMetric). */
    Assert(pIpHlpInstance == NULL);
    pIpHlpInstance = loadSystemDll("Iphlpapi.dll");
    if (pIpHlpInstance == NULL)
        return E_FAIL;
    g_pfnInitializeIpInterfaceEntry =
        (PFNINITIALIZEIPINTERFACEENTRY)GetProcAddress(pIpHlpInstance, "InitializeIpInterfaceEntry");
    Assert(g_pfnInitializeIpInterfaceEntry);
    if (g_pfnInitializeIpInterfaceEntry)
    {
        g_pfnGetIpInterfaceEntry =
            (PFNGETIPINTERFACEENTRY)GetProcAddress(pIpHlpInstance, "GetIpInterfaceEntry");
        Assert(g_pfnGetIpInterfaceEntry);
    }
    if (g_pfnGetIpInterfaceEntry)
    {
        g_pfnSetIpInterfaceEntry =
            (PFNSETIPINTERFACEENTRY)GetProcAddress(pIpHlpInstance, "SetIpInterfaceEntry");
        Assert(g_pfnSetIpInterfaceEntry);
    }
    /* Fail if ANY entry point is missing so callers never invoke a NULL
     * g_pfnGet/SetIpInterfaceEntry later; the original only checked the
     * first pointer and could report success with the others unresolved. */
    if (   g_pfnInitializeIpInterfaceEntry == NULL
        || g_pfnGetIpInterfaceEntry == NULL
        || g_pfnSetIpInterfaceEntry == NULL)
    {
        g_pfnInitializeIpInterfaceEntry = NULL;
        g_pfnGetIpInterfaceEntry = NULL;
        g_pfnSetIpInterfaceEntry = NULL;
        FreeLibrary(pIpHlpInstance);
        pIpHlpInstance = NULL;
        return E_FAIL;
    }
    return S_OK;
}
/**
 * Queries the interface metric of the IPv4 software loopback interface.
 *
 * Requires vboxLoadIpHelpFunctions() to have resolved the entry points.
 *
 * @param Metric  [out] receives the loopback metric on success.
 * @return S_OK on success, an HRESULT converted from the error otherwise.
 */
HRESULT vboxNetCfgWinGetLoopbackMetric(OUT int* Metric)
{
    Assert(g_pfnInitializeIpInterfaceEntry != NULL);
    Assert(g_pfnGetIpInterfaceEntry != NULL);
    /* Identify the loopback interface: IPv4 family + software loopback type. */
    MIB_IPINTERFACE_ROW loopbackRow;
    g_pfnInitializeIpInterfaceEntry(&loopbackRow);
    loopbackRow.Family = AF_INET;
    loopbackRow.InterfaceLuid.Info.IfType = IF_TYPE_SOFTWARE_LOOPBACK;
    HRESULT status = g_pfnGetIpInterfaceEntry(&loopbackRow);
    if (status != NO_ERROR)
        return HRESULT_FROM_WIN32(status);
    *Metric = loopbackRow.Metric;
    return status;
}
/**
 * Disables automatic metric selection on the given interface and assigns a
 * fixed metric to its IPv4 entry.
 *
 * Requires vboxLoadIpHelpFunctions() to have resolved the entry points.
 *
 * @param pInterfaceLuid  LUID identifying the interface to modify.
 * @param metric          new fixed metric value.
 * @return S_OK on success, an HRESULT converted from the error otherwise.
 */
HRESULT vboxNetCfgWinSetInterfaceMetric(
    IN NET_LUID* pInterfaceLuid,
    IN DWORD metric)
{
    Assert(g_pfnInitializeIpInterfaceEntry != NULL);
    Assert(g_pfnSetIpInterfaceEntry != NULL);
    MIB_IPINTERFACE_ROW row;
    g_pfnInitializeIpInterfaceEntry(&row);
    /* Select the IPv4 entry of the target interface. */
    row.InterfaceLuid = *pInterfaceLuid;
    row.Family = AF_INET;
    /* Switch from automatic to the caller-provided fixed metric. */
    row.UseAutomaticMetric = false;
    row.Metric = metric;
    return HRESULT_FROM_WIN32(g_pfnSetIpInterfaceEntry(&row));
}
/**
 * Composes the NET_LUID of a network interface from its driver registry key
 * by reading the "NetLuidIndex" and "*IfType" DWORD values.
 *
 * @param hKey   open registry key of the adapter (DIREG_DRV).
 * @param pLUID  [out] receives the composed LUID.
 * @return S_OK on success, E_INVALIDARG for a NULL pLUID, or an HRESULT
 *         converted from the registry error.
 */
HRESULT vboxNetCfgWinGetInterfaceLUID(IN HKEY hKey, OUT NET_LUID* pLUID)
{
    if (pLUID == NULL)
        return E_INVALIDARG;
    DWORD dwValueType = REG_DWORD;
    DWORD luidIndex = 0;
    DWORD cbSize = sizeof(luidIndex);
    HRESULT res = RegQueryValueExW(hKey, L"NetLuidIndex", NULL,
                                   &dwValueType, (LPBYTE)&luidIndex, &cbSize);
    if (res != 0)
        return HRESULT_FROM_WIN32(res);
    DWORD ifType = 0;
    cbSize = sizeof(ifType);
    dwValueType = REG_DWORD;
    res = RegQueryValueExW(hKey, L"*IfType", NULL,
                           &dwValueType, (LPBYTE)&ifType, &cbSize);
    if (res != 0)
        return HRESULT_FROM_WIN32(res);
    /* Both values read fine: build the LUID from type + index. */
    ZeroMemory(pLUID, sizeof(NET_LUID));
    pLUID->Info.IfType = ifType;
    pLUID->Info.NetLuidIndex = luidIndex;
    return res;
}
/**
 * Sets the metric of the given interface to (loopback metric - 1) to fix the
 * multicast issue described in @bugref{6379}.
 *
 * Loads Iphlpapi.dll on demand and releases it (and the resolved global
 * function pointers) before returning.
 *
 * @param pLuid  LUID of the interface to adjust.
 * @return S_OK on success, a failure HRESULT otherwise.
 */
HRESULT vboxNetCfgWinSetupMetric(IN NET_LUID* pLuid)
{
    HINSTANCE hModule = NULL;
    HRESULT rc = vboxLoadIpHelpFunctions(hModule);
    if (SUCCEEDED(rc))
    {
        int loopbackMetric;
        rc = vboxNetCfgWinGetLoopbackMetric(&loopbackMetric);
        if (SUCCEEDED(rc))
            rc = vboxNetCfgWinSetInterfaceMetric(pLuid, loopbackMetric - 1);
    }
    g_pfnInitializeIpInterfaceEntry = NULL;
    g_pfnSetIpInterfaceEntry = NULL;
    g_pfnGetIpInterfaceEntry = NULL;
    /* Only release the module if it was actually loaded; the original
     * unconditionally called FreeLibrary(NULL) when loading failed, which
     * is an invalid parameter per the Win32 API contract. */
    if (hModule != NULL)
        FreeLibrary(hModule);
    return rc;
}
#ifdef VBOXNETCFG_DELAYEDRENAME
/**
 * Renames the network connection of a host-only interface after creation
 * (delayed-rename variant, see VBOXNETCFG_DELAYEDRENAME).
 *
 * Looks up the device by instance id, reads its friendly name (falling back
 * to the device description), generates the host-only connection name from it
 * and renames the connection identified by the GUID.
 *
 * @param pGuid    GUID of the network connection to rename.
 * @param pwszId   device instance id of the adapter.
 * @param pDevName [out, optional] receives the device name (BSTR, caller frees).
 */
VBOXNETCFGWIN_DECL(HRESULT) VBoxNetCfgWinRenameHostOnlyConnection(IN const GUID *pGuid, IN LPCWSTR pwszId, OUT BSTR *pDevName)
{
    HRESULT hr = S_OK;
    WCHAR wszDevName[256];
    WCHAR wszConnectionNewName[128];
    ULONG cbName = sizeof(wszConnectionNewName);
    HDEVINFO hDevInfo = SetupDiCreateDeviceInfoList(&GUID_DEVCLASS_NET, NULL);
    if (hDevInfo != INVALID_HANDLE_VALUE)
    {
        SP_DEVINFO_DATA DevInfoData;
        DevInfoData.cbSize = sizeof(SP_DEVINFO_DATA);
        if (SetupDiOpenDeviceInfo(hDevInfo, pwszId, NULL, 0, &DevInfoData))
        {
            DWORD err = ERROR_SUCCESS;
            /* Prefer the friendly name; fall back to the device description
             * when the friendly name is not set (ERROR_INVALID_DATA). */
            if (!SetupDiGetDeviceRegistryPropertyW(hDevInfo, &DevInfoData,
                                                   SPDRP_FRIENDLYNAME, NULL,
                                                   (PBYTE)wszDevName, RT_ELEMENTS(wszDevName), NULL))
            {
                err = GetLastError();
                if (err == ERROR_INVALID_DATA)
                {
                    err = SetupDiGetDeviceRegistryPropertyW(hDevInfo, &DevInfoData,
                                                            SPDRP_DEVICEDESC, NULL,
                                                            (PBYTE)wszDevName, RT_ELEMENTS(wszDevName), NULL)
                        ? ERROR_SUCCESS
                        : GetLastError();
                }
            }
            if (err == ERROR_SUCCESS)
            {
                hr = VBoxNetCfgWinGenHostonlyConnectionName(wszDevName, wszConnectionNewName, &cbName);
                if (SUCCEEDED(hr))
                {
                    WCHAR wszGuid[50];
                    int cbWGuid = StringFromGUID2(*pGuid, wszGuid, RT_ELEMENTS(wszGuid));
                    if (cbWGuid)
                    {
                        hr = VBoxNetCfgWinRenameConnection(wszGuid, wszConnectionNewName);
                        if (FAILED(hr))
                            NonStandardLogFlow(("VBoxNetCfgWinRenameHostOnlyConnection: VBoxNetCfgWinRenameConnection failed (0x%x)\n", hr));
                    }
                    else
                    {
                        /* Make sure a failed StringFromGUID2 yields a failure HRESULT. */
                        err = GetLastError();
                        hr = HRESULT_FROM_WIN32(err);
                        if (SUCCEEDED(hr))
                            hr = E_FAIL;
                        NonStandardLogFlow(("StringFromGUID2 failed err=%u, hr=0x%x\n", err, hr));
                    }
                }
                else
                    NonStandardLogFlow(("VBoxNetCfgWinRenameHostOnlyConnection: VBoxNetCfgWinGenHostonlyConnectionName failed (0x%x)\n", hr));
                /* On success, optionally hand the device name back to the caller. */
                if (SUCCEEDED(hr) && pDevName)
                {
                    *pDevName = SysAllocString((const OLECHAR *)wszDevName);
                    if (!*pDevName)
                    {
                        NonStandardLogFlow(("SysAllocString failed\n"));
                        hr = HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY);
                    }
                }
            }
            else
            {
                hr = HRESULT_FROM_WIN32(err);
                NonStandardLogFlow(("VBoxNetCfgWinRenameHostOnlyConnection: SetupDiGetDeviceRegistryPropertyW failed (0x%x)\n", err));
            }
        }
        else
        {
            DWORD err = GetLastError();
            hr = HRESULT_FROM_WIN32(err);
            NonStandardLogFlow(("VBoxNetCfgWinRenameHostOnlyConnection: SetupDiOpenDeviceInfo failed (0x%x)\n", err));
        }
        SetupDiDestroyDeviceInfoList(hDevInfo);
    }
    return hr;
}
#endif /* VBOXNETCFG_DELAYEDRENAME */
#undef SetErrBreak
| 72,398 |
538 | <gh_stars>100-1000
from vint.ast.node_type import NodeType
from typing import Dict, Any, Optional # noqa: F401
import re
# Matches (and strips) the leading "vint:" marker of an inline config comment.
CONFIG_COMMENT_PATTERN = re.compile(r'^\s*vint:\s*')
# Matches policy switches such as "+PolicyName" (enable) or "-PolicyName" (disable).
POLICY_SWITCH_PATTERN = re.compile(r'(?:^|\s)[-+]\S+')
# Flag restricting a config comment's effect to the following line only.
NEXT_LINE_FLAG_PATTERN = 'next-line'
class ConfigComment:
    """Value object holding a parsed inline "vint:" configuration comment."""

    def __init__(self, config_dict, is_only_next_line):
        # type: (Dict[str, Any], bool) -> None
        # Whether the comment applies only to the next line ("next-line" flag).
        self.is_only_next_line = is_only_next_line
        # Parsed config, e.g. {'policies': {'Name': {'enabled': True}}}.
        self.config_dict = config_dict
def parse_config_comment_node_if_exists(node):
    # type: (Dict[str, Any]) -> Optional[ConfigComment]
    """Return the parsed config comment of a COMMENT node, or None.

    None is returned both for non-comment nodes and for comments that do
    not carry the "vint:" marker.
    """
    if NodeType(node['type']) is not NodeType.COMMENT:
        return None
    content = node['str']
    return parse_config_comment(content) if is_config_comment(content) else None
def parse_config_comment(comment_content):
    # type: (str) -> Optional[ConfigComment]
    """Parse a "vint:" comment body into a ConfigComment, or return None."""
    if not is_config_comment(comment_content):
        return None

    body = CONFIG_COMMENT_PATTERN.sub('', comment_content)

    policies = {}
    for raw_switch in POLICY_SWITCH_PATTERN.findall(body):
        switch = raw_switch.strip()
        # "+Name" enables the policy, "-Name" disables it.
        policies[switch[1:]] = {'enabled': switch[0] == '+'}

    return ConfigComment(
        config_dict={'policies': policies},
        is_only_next_line=NEXT_LINE_FLAG_PATTERN in body
    )
def is_config_comment(comment_content):
    # type: (str) -> bool
    """Return True when the comment text starts with a "vint:" marker."""
    match = CONFIG_COMMENT_PATTERN.match(comment_content)
    return match is not None
| 710 |
1,056 | <filename>ide/xml.text/src/org/netbeans/modules/xml/text/completion/ElementResultItem.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.xml.text.completion;
import java.awt.Color;
import java.beans.BeanInfo;
import java.net.URL;
import javax.swing.Action;
import org.netbeans.modules.xml.api.model.*;
import javax.swing.text.JTextComponent;
import javax.swing.text.Caret;
import org.netbeans.spi.editor.completion.CompletionDocumentation;
import org.netbeans.spi.editor.completion.CompletionResultSet;
import org.netbeans.spi.editor.completion.CompletionTask;
import org.netbeans.spi.editor.completion.support.AsyncCompletionTask;
import org.netbeans.swing.plaf.LFCustoms;
/**
 * Completion result item representing an element name (or only part of it
 * when a namespace prefix is being completed).
 * <p>
 * For start elements the inserted text may additionally include the closing
 * bracket or an empty-tag ending, depending on the modifiers used when the
 * item is accepted (see {@link #getReplacementText(int)}).
 *
 * @author sands
 * @author <NAME>
 */
class ElementResultItem extends XMLResultItem {
    /** Base foreground color of element items; shifted to match the L&F. */
    private static final Color COLOR = new Color(64, 64, 255);
    // does it represent start element name?
    // then there is more possibilities how to complete it
    private final boolean startElement;
    /** True when the grammar declares the element as empty (completed as &lt;x/&gt;). */
    private final boolean empty;
    /** Grammar result backing this item; remains null for end-element items. */
    private GrammarResult res;
    /**
     * Create a start element result item.
     */
    public ElementResultItem(int position, GrammarResult res){
        super(position, res.getNodeName());
        this.res = res;
        foreground = LFCustoms.shiftColor(COLOR);
        startElement = true;
        empty = res.isEmptyElement();
        icon = res.getIcon(BeanInfo.ICON_COLOR_16x16);
    }
    /**
     * Create an end element result item.
     */
    public ElementResultItem(int position, String name) {
        super(position, name);
        foreground = LFCustoms.shiftColor(COLOR);
        startElement = false;
        empty = false;
    }
    /**
     * Replacement text can be customized to return pairs, an empty tag or
     * just the name of the element.
     * <p>
     * With SHIFT pressed a start element is completed including its closing
     * bracket ("/&gt;" for empty elements, "&gt;" otherwise); end elements
     * always get the closing "&gt;".
     */
    public String getReplacementText(int modifiers) {
        boolean shift = (modifiers & java.awt.event.InputEvent.SHIFT_MASK) != 0;
        if (shift && startElement) {
            if (empty) {
                return displayText + "/>";
            } else {
                return displayText + ">";
            }
        } else if (startElement) {
            return displayText;
        } else {
            return displayText + '>';
        }
    }
    @Override
    public CompletionTask createDocumentationTask() {
        return doCreateDocumentationTask(res);
    }
    /**
     * If called with <code>SHIFT_MASK</code> modifier it creates a start tag
     * and end tag pair and places the caret between them.
     */
    public boolean substituteText( JTextComponent c, int offset, int len, int modifiers ){
        String replacementText = getReplacementText(modifiers);
        replaceText(c, replacementText, offset, len);
        boolean shift = (modifiers & java.awt.event.InputEvent.SHIFT_MASK) != 0;
        if (shift && startElement) {
            Caret caret = c.getCaret(); // it is at the end of replacement
            int dot = caret.getDot();
            int rlen = replacementText.length();
            if (empty) {
                // move the caret back just before the "/>" of the empty tag
                caret.setDot((dot - rlen) + replacementText.indexOf('/'));
            }
        }
        return false;
    }
    /**
     * @deprecated we use startElement flag
     */
//    static class EndTag extends ElementResultItem {
//    }
    Color getPaintColor() { return LFCustoms.shiftColor(COLOR); }
}
| 1,596 |
1,484 | {"data": [{"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.13-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14330", "Metadata": {"NVD": {"CVSSv2": {"Score": 2.1, "Vectors": "AV:L/AC:L/Au:N/C:P/I:N/A:N"}}}, "Name": "CVE-2020-14330", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.13-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14332", "Metadata": {"NVD": {"CVSSv2": {"Score": 2.1, "Vectors": "AV:L/AC:L/Au:N/C:P/I:N/A:N"}}}, "Name": "CVE-2020-14332", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.13-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14365", "Metadata": {"NVD": {"CVSSv2": {"Score": 6.6, "Vectors": "AV:L/AC:L/Au:N/C:N/I:C/A:C"}}}, "Name": "CVE-2020-14365", "NamespaceName": "alpine:3.13", "Severity": "Medium"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-3828", "Metadata": {"NVD": {"CVSSv2": {"Score": 3.3, "Vectors": "AV:L/AC:M/Au:N/C:P/I:P/A:N"}}}, "Name": "CVE-2019-3828", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-1733", "Metadata": {"NVD": {"CVSSv2": {"Score": 3.7, "Vectors": "AV:L/AC:H/Au:N/C:P/I:P/A:P"}}}, "Name": 
"CVE-2020-1733", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-1739", "Metadata": {"NVD": {"CVSSv2": {"Score": 3.3, "Vectors": "AV:L/AC:M/Au:N/C:P/I:P/A:N"}}}, "Name": "CVE-2020-1739", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-1740", "Metadata": {"NVD": {"CVSSv2": {"Score": 1.9, "Vectors": "AV:L/AC:M/Au:N/C:P/I:N/A:N"}}}, "Name": "CVE-2020-1740", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-1746", "Metadata": {"NVD": {"CVSSv2": {"Score": 1.9, "Vectors": "AV:L/AC:M/Au:N/C:P/I:N/A:N"}}}, "Name": "CVE-2020-1746", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.7-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10691", "Metadata": {"NVD": {"CVSSv2": {"Score": 3.6, "Vectors": "AV:L/AC:L/Au:N/C:N/I:P/A:P"}}}, "Name": "CVE-2020-10691", "NamespaceName": "alpine:3.13", "Severity": "Low"}}, {"Vulnerability": {"CVSS": [], "Description": "", "FixedIn": [{"Name": "ansible-base", "NamespaceName": "alpine:3.13", "Version": "2.9.6-r0", "VersionFormat": "apk"}], "Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-1737", "Metadata": {"NVD": {"CVSSv2": {"Score": 4.6, "Vectors": 
"AV:L/AC:L/Au:N/C:P/I:P/A:P"}}}, "Name": "CVE-2020-1737", "NamespaceName": "alpine:3.13", "Severity": "Medium"}}], "next_token": ""} | 1,852 |
852 | #ifndef SurveyInputTextReader_h
#define SurveyInputTextReader_h
//
// Class to read in survey data from text file
//
// The format of the file is assumed to be:
//
// DetId AlignableObjectId dx sigma_x dy sigma_y dz sigma_z angle_x sigma(angle_x) ...
// angle_y sigma(angle_y) angle_z sigma(angle_z)
// Where all numbers are floats, except DetId which is an unsigned integer
//
// The result is a map of UniqueIds and corresponding SurveyInfo
//
#include "Alignment/CommonAlignment/interface/StructureType.h"
#include "Alignment/CommonAlignment/interface/Utilities.h"
/// Reads survey data from a text file (format described in the file header)
/// and exposes it as a map keyed by (DetId, structure type).
class SurveyInputTextReader {
public:
  /// Unique key of a structure: raw DetId plus its structure type.
  typedef std::pair<align::ID, align::StructureType> UniqueId;
  /// Map from UniqueId to the measured survey scalars.
  typedef std::map<UniqueId, align::Scalars> MapType;
  /// Single (key, scalars) entry of MapType.
  typedef std::pair<UniqueId, align::Scalars> PairType;
  /// Read given text file
  void readFile(const std::string& textFileName);
  // Returns the Map
  const MapType& UniqueIdMap() const { return theMap; }
private:
  MapType theMap;  // accumulated survey entries, filled by readFile()
  static const int NINPUTS = 27;  // Number of float fields per line, not including DetId
};
#endif
| 334 |
939 | /* Copyright 2021 Google LLC. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "research/carls/gradient_descent/gradient_descent_optimizer.h"
#include "research/carls/embedding.pb.h" // proto to pb
namespace carls {
namespace {
// Returns a 1-D tensor proto of the given dimension with every element set
// to `init_value` (the previous comment wrongly called it a zero tensor).
// `dimension` must be positive.
EmbeddingVectorProto InitTensor(const int dimension, const float init_value) {
  CHECK_GT(dimension, 0);
  EmbeddingVectorProto tensor;
  auto* values = tensor.mutable_value();
  values->Reserve(dimension);
  for (int i = 0; i < dimension; ++i) {
    values->Add(init_value);
  }
  return tensor;
}
} // namespace
// Static.
// Factory that validates `config` before constructing the optimizer.
// Returns nullptr (after logging the reason) when:
//  - no optimizer is selected in the config,
//  - embedding_dimension or learning_rate is not positive,
//  - ADAGRAD is selected with a non-positive init_accumulator_value.
std::unique_ptr<GradientDescentOptimizer> GradientDescentOptimizer::Create(
    const int embedding_dimension, const GradientDescentConfig& config) {
  if (config.optimizer_case() == GradientDescentConfig::OPTIMIZER_NOT_SET) {
    LOG(ERROR) << "Optimizer is not set.";
    return nullptr;
  }
  if (embedding_dimension <= 0) {
    LOG(ERROR) << "Invalid embedding_dimension: " << embedding_dimension;
    return nullptr;
  }
  // Checks learning rate.
  if (config.learning_rate() <= 0) {
    LOG(ERROR) << "Invalid learning rate: " << config.learning_rate();
    return nullptr;
  }
  // Checks params for AdaGrad optimizer.
  if (config.optimizer_case() == GradientDescentConfig::kAdagrad) {
    if (config.adagrad().init_accumulator_value() <= 0) {
      LOG(ERROR)
          << "init_accumulator_value must be positive for ADAGRAD optimizer.";
      return nullptr;
    }
  }
  return absl::make_unique<GradientDescentOptimizer>(embedding_dimension,
                                                     config);
}
// Constructor; assumes Create() already validated the inputs. Stores the
// embedding dimension, the learning rate and a copy of the full config.
GradientDescentOptimizer::GradientDescentOptimizer(
    const int embedding_dimension, const GradientDescentConfig& config)
    : embedding_dimension_(embedding_dimension),
      learning_rate_(config.learning_rate()),
      config_(config) {}
// Applies one optimizer step to each (variable, gradient) pair.
//
// Validates that the lists are non-empty, equally sized, and that every
// vector has exactly embedding_dimension_ values; on any violation
// *error_msg is filled and an empty vector is returned. The tag and weight
// of each input variable are copied onto the corresponding result.
std::vector<EmbeddingVectorProto> GradientDescentOptimizer::Apply(
    const std::vector<EmbeddingVectorProto>& variables,
    const std::vector<const EmbeddingVectorProto*>& gradients,
    std::string* error_msg) {
  CHECK(error_msg != nullptr);
  if (variables.empty()) {
    *error_msg = "Empty variables.";
    return {};
  }
  if (variables.size() != gradients.size()) {
    *error_msg = absl::StrCat("Inconsistent (variables, gradients) sizes: (",
                              variables.size(), ", ", gradients.size(), ")");
    return {};
  }
  std::vector<EmbeddingVectorProto> results(variables.size());
  for (size_t i = 0; i < variables.size(); ++i) {
    // Each variable and its gradient must match the optimizer's dimension.
    if (variables[i].value_size() != embedding_dimension_ ||
        gradients[i]->value_size() != embedding_dimension_) {
      *error_msg =
          absl::StrCat("Inconsistent variable and gradient value size: ",
                       variables[i].value_size(), " v.s. ",
                       gradients[i]->value_size(), " for input ", i);
      return {};
    }
    // Dispatch to the configured update rule.
    switch (config_.optimizer_case()) {
      case GradientDescentConfig::kSgd:
        results[i] = ApplyGradientDescent(variables[i], *gradients[i]);
        break;
      case GradientDescentConfig::kAdagrad:
        results[i] = ApplyAdagrad(variables[i], *gradients[i]);
        break;
      default:
        LOG(FATAL) << "Unsupported optimizer: " << config_.optimizer_case();
    }
    results[i].set_tag(variables[i].tag());
    results[i].set_weight(variables[i].weight());
  }
  return results;
}
// Vanilla SGD step, element-wise: w' = w - learning_rate * g.
// Tag/weight of the result are filled in by the caller (Apply).
EmbeddingVectorProto GradientDescentOptimizer::ApplyGradientDescent(
    const EmbeddingVectorProto& var, const EmbeddingVectorProto& grad) {
  EmbeddingVectorProto updated;
  auto* values = updated.mutable_value();
  values->Reserve(var.value_size());
  for (int i = 0; i < var.value_size(); ++i) {
    values->Add(var.value(i) - learning_rate_ * grad.value(i));
  }
  return updated;
}
// AdaGrad step. A per-tag accumulator of squared gradients is kept in
// params_[kAccum] (guarded by params_mu_) and lazily initialized with the
// configured init_accumulator_value. Update rule per element:
//   accum += g^2;  w' = w - learning_rate * g / sqrt(accum)
EmbeddingVectorProto GradientDescentOptimizer::ApplyAdagrad(
    const EmbeddingVectorProto& var, const EmbeddingVectorProto& grad) {
  EmbeddingVectorProto result;
  result.mutable_value()->Reserve(embedding_dimension_);
  const auto& key = var.tag();
  // Serializes access to the shared accumulator map.
  absl::MutexLock l(&params_mu_);
  if (!params_[kAccum].contains(key)) {
    params_[kAccum][key] = InitTensor(
        embedding_dimension_, config_.adagrad().init_accumulator_value());
  }
  auto* accum = params_[kAccum][key].mutable_value();
  for (int i = 0; i < embedding_dimension_; ++i) {
    *accum->Mutable(i) += grad.value(i) * grad.value(i);
    result.add_value(var.value(i) -
                     grad.value(i) * learning_rate_ / std::sqrt(accum->Get(i)));
  }
  return result;
}
} // namespace carls
| 1,927 |
575 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_PUBLIC_BROWSER_GPU_SERVICE_REGISTRY_H_
#define CONTENT_PUBLIC_BROWSER_GPU_SERVICE_REGISTRY_H_
#include <string>
#include <utility>
#include "content/common/content_export.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/system/message_pipe.h"
namespace content {
// Binds |interface_pipe| to the interface named |interface_name| exposed by
// the GPU process (untyped variant used by the template below).
CONTENT_EXPORT void BindInterfaceInGpuProcess(
    const std::string& interface_name,
    mojo::ScopedMessagePipeHandle interface_pipe);
// Bind to an interface exposed by the GPU process.
// Typed convenience wrapper: forwards the receiver's pipe together with the
// interface's generated name constant.
template <typename Interface>
void BindInterfaceInGpuProcess(mojo::PendingReceiver<Interface> receiver) {
  BindInterfaceInGpuProcess(Interface::Name_, receiver.PassPipe());
}
} // namespace content
#endif // CONTENT_PUBLIC_BROWSER_GPU_SERVICE_REGISTRY_H_
| 312 |
1,662 | <gh_stars>1000+
"""
Test reactions more wrt dynamism.
"""
import gc
import sys
import weakref
from flexx.util.testing import run_tests_if_main, skipif, skip, raises
from flexx.event.both_tester import run_in_both, this_is_js
from flexx.util.logging import capture_log
from flexx import event
# Module-level shortcuts to the flexx event loop and logger used below.
loop = event.loop
logger = event.logger
class Node(event.Component):
    # Test component with a value, an optional parent and a tuple of children.
    # The two reactions print every received val event; these exact strings
    # are matched against the test functions' docstrings by run_in_both().
    val = event.IntProp(settable=True)
    parent = event.ComponentProp(settable=True)
    children = event.TupleProp(settable=True)
    @event.reaction('parent.val')
    def handle_parent_val(self, *events):
        # Record the parent's current val per event (None when no parent).
        xx = []
        for ev in events:
            if self.parent:
                xx.append(self.parent.val)
            else:
                xx.append(None)
        print('parent.val ' + ', '.join([str(x) for x in xx]))
    @event.reaction('children*.val')
    def handle_children_val(self, *events):
        # Record each event's new value (None for non-numeric values).
        xx = []
        for ev in events:
            if isinstance(ev.new_value, (int, float)):
                xx.append(ev.new_value)
            else:
                xx.append(None)
        print('children.val ' + ', '.join([str(x) for x in xx]))
@run_in_both(Node)
def test_dynamism1():
    """
    parent.val 17
    parent.val 18
    parent.val 29
    done
    """
    # NOTE(review): run_in_both appears to compare the printed output with the
    # docstring above - keep both in sync.
    n = Node()
    n1 = Node()
    n2 = Node()
    loop.iter()
    with loop: # does not get trigger, because n1.val was not set
        n.set_parent(n1)
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # does not trigger
        n.set_parent(n2)
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_parent(None)
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print('done')
@run_in_both(Node)
def test_dynamism2a():
    """
    parent.val 17
    parent.val 18
    parent.val 29
    [17, 18, 29]
    """
    # Same scenario as test_dynamism1, but with an extra dynamically
    # connected reaction that collects the parent's val into res.
    n = Node()
    n1 = Node()
    n2 = Node()
    res = []
    def func(*events):
        for ev in events:
            if n.parent:
                res.append(n.parent.val)
            else:
                res.append(None)
    n.reaction(func, 'parent.val')
    loop.iter()
    with loop: # does not get trigger, because n1.val was not set
        n.set_parent(n1)
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # does not trigger
        n.set_parent(n2)
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_parent(None)
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print(res)
@run_in_both(Node)
def test_dynamism2b():
    """
    parent.val 17
    parent.val 18
    parent.val 29
    [None, None, 17, 18, None, 29, None]
    """
    # Like test_dynamism2a, but the reaction also connects to 'parent'
    # itself, so parent-change events show up as None entries in res.
    n = Node()
    n1 = Node()
    n2 = Node()
    res = []
    def func(*events):
        for ev in events:
            if ev.type == 'val':
                res.append(n.parent.val)
            else:
                res.append(None)
    handler = n.reaction(func, 'parent', 'parent.val') # also connect to parent
    loop.iter()
    with loop: # does not get trigger, because n1.val was not set
        n.set_parent(n1)
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # does not trigger
        n.set_parent(n2)
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_parent(None)
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print(res)
@run_in_both(Node)
def test_dynamism3():
    """
    children.val 17, 27
    children.val 18, 28
    children.val 29
    done
    """
    # Star-connection: Node's handle_children_val reacts to val changes of
    # whatever Nodes are currently in the children tuple.
    n = Node()
    n1 = Node()
    n2 = Node()
    loop.iter()
    with loop: # no trigger
        n.set_children((n1, n2))
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # no trigger
        n.set_children((n2, ))
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_children(())
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print('done')
@run_in_both(Node)
def test_dynamism4a():
    """
    children.val 17, 27
    children.val 18, 28
    children.val 29
    [17, 27, 18, 28, 29]
    """
    # Like test_dynamism3, plus a dynamically connected star reaction that
    # collects the numeric new values into res.
    n = Node()
    n1 = Node()
    n2 = Node()
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                res.append(ev.new_value)
            else:
                res.append(None)
    handler = n.reaction(func, 'children*.val')
    loop.iter()
    with loop: # no trigger
        n.set_children((n1, n2))
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # no trigger
        n.set_children((n2, ))
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_children(())
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print(res)
@run_in_both(Node)
def test_dynamism4b():
    """
    children.val 17, 27
    children.val 18, 28
    children.val 29
    [None, None, 17, 27, 18, 28, None, 29, None]
    """
    # Like test_dynamism4a, but the reaction also connects to 'children'
    # itself, so children-change events show up as None entries in res.
    n = Node()
    n1 = Node()
    n2 = Node()
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                res.append(ev.new_value)
            else:
                res.append(None)
    handler = n.reaction(func, 'children', 'children*.val') # also connect children
    loop.iter()
    with loop: # no trigger
        n.set_children((n1, n2))
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n2.set_val(27)
    with loop:
        n1.set_val(18)
        n2.set_val(28)
    with loop: # no trigger
        n.set_children((n2, ))
    with loop:
        n1.set_val(19)
        n2.set_val(29)
    with loop:
        n.set_children(())
    with loop:
        n1.set_val(11)
        n2.set_val(21)
    print(res)
@run_in_both(Node)
def test_dynamism5a():
    """
    [0, 17, 18, 19]
    """
    # connection strings with static attributes - no reconnect
    n = Node()
    n1 = Node()
    n.foo = n1
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                res.append(ev.new_value)
            else:
                res.append(None)
    # because the connection is fully resolved upon connecting, and at that time
    # the object is still in its init stage, the handler does get the init event
    # with value 0.
    handler = n.reaction(func, 'foo.val')
    loop.iter()
    with loop:
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n1.set_val(18)
    with loop:
        n.foo = None # no reconnect in this case
    with loop:
        n1.set_val(19)
    print(res)
@run_in_both(Node)
def test_dynamism5b():
    # Variation of test_dynamism5a: an extra loop.iter() before connecting
    # means the components are past their init stage, so the handler does NOT
    # receive the initial 0-valued event.
    """
    [17, 18, 19]
    """
    # connection strings with static attributes - no reconnect
    n = Node()
    n1 = Node()
    n.foo = n1
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                res.append(ev.new_value)
            else:
                res.append(None)
    # But not now
    loop.iter() # <-- only change
    handler = n.reaction(func, 'foo.val')
    loop.iter()
    with loop:
        n.set_val(42)
    with loop:
        n1.set_val(17)
        n1.set_val(18)
    with loop:
        n.foo = None # no reconnect in this case
    with loop:
        n1.set_val(19)
    print(res)
@run_in_both(Node)
def test_deep1():
    # Deep ('**') connection strings: 'children**.val' reaches val events of
    # any descendant, at any depth, as long as it is still in the tree.
    """
    children.val 7
    children.val 8
    children.val 17
    [7, 8, 17]
    """
    # deep connectors
    n = Node()
    n1 = Node()
    n2 = Node()
    n.set_children((Node(), n1))
    loop.iter()
    n.children[0].set_children((Node(), n2))  # n2 sits two levels deep
    loop.iter()
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                # Skip zeros (the initial-value events at connection time)
                if ev.new_value:
                    res.append(ev.new_value)
            else:
                res.append(None)
    handler = n.reaction(func, 'children**.val')
    loop.iter()
    # We want these
    with loop:
        n1.set_val(7)
    with loop:
        n2.set_val(8)
    # But not these
    with loop:
        n.set_val(42)  # n itself is not one of its own descendants
    with loop:
        n1.set_children((Node(), Node()))
        n.children[0].set_children([])  # detaches n2's subtree
    # again ...
    with loop:
        n1.set_val(17)
    with loop:
        n2.set_val(18) # n2 is no longer in the tree
    print(res)
@run_in_both(Node)
def test_deep2():
    # Deep connection string that ENDS in the deep connector: 'children**'
    # delivers children-change events of all nodes in the tree. Each such
    # event is recorded as 'id<val>' of its source node.
    """
    children.val 11
    children.val 12
    ['id12', 'id11', 'id10', 'id11']
    """
    # deep connectors - string ends in deep connector
    n = Node()
    n1 = Node()
    n2 = Node()
    n.set_children((Node(), n1))
    loop.iter()
    n.children[0].set_children((Node(), n2))
    loop.iter()
    res = []
    def func(*events):
        for ev in events:
            if isinstance(ev.new_value, (float, int)):
                res.append(ev.new_value)
            elif ev.type == 'children':
                # Record the source node by the val we gave it below
                if ev.source.val:
                    res.append('id%i' % ev.source.val)
            else:
                res.append(None)
    handler = n.reaction(func, 'children**')
    loop.iter()
    # Give val to id by - these should have no effect on res though
    with loop:
        n.set_val(10)
    with loop:
        n1.set_val(11)
    with loop:
        n2.set_val(12)
    # Change children
    with loop:
        n2.set_children((Node(), Node(), Node()))
        n1.set_children((Node(), Node()))
        n.set_children((Node(), n1, Node()))
    with loop:
        n2.set_children([]) # no longer in the tree
        n1.set_children([])
    print(res)
class TestOb(event.Component):
    # Simple tree node used by the issue-460 tests below:
    # 'children' holds nested TestOb instances, 'foo' is a settable label.
    children = event.TupleProp(settable=True)
    foo = event.StringProp(settable=True)
class Tester(event.Component):
    # Root component for the issue-460 tests: tracks 'foo' changes anywhere
    # in its (two-level) tree of TestOb children and provides actions that
    # mutate the tree in the ways that used to break reconnection.
    children = event.TupleProp(settable=True)

    @event.reaction('children**.foo')
    def track_deep(self, *events):
        # Print every non-empty foo value set anywhere in the tree
        for ev in events:
            if ev.new_value:
                print(ev.new_value)

    @event.action
    def set_foos(self, prefix):
        # Label every child and grandchild with its position, e.g. 'A0', 'A01'
        for i, child in enumerate(self.children):
            child.set_foo(prefix + str(i))
            for j, subchild in enumerate(child.children):
                subchild.set_foo(prefix + str(i) + str(j))

    @event.action
    def make_children1(self):
        # Build the initial tree: two children, each with one grandchild
        t1 = TestOb()
        t2 = TestOb()
        t1.set_children((TestOb(), ))
        t2.set_children((TestOb(), ))
        self.set_children(t1, t2)

    @event.action
    def make_children2(self):
        # Append one extra grandchild to every child
        for i, child in enumerate(self.children):
            child.set_children(child.children + (TestOb(), ))

    @event.action
    def make_children3(self):
        # See issue #460
        # Temporarily add t at two levels, then restore the original children;
        # the net effect is one extra grandchild per child.
        t = TestOb()
        my_children = self.children
        self.set_children(my_children + (t, ))
        for i, child in enumerate(my_children):
            child.set_children(child.children + (t, ))
        self.set_children(my_children)
@run_in_both(TestOb, Tester)
def test_issue_460_and_more():
    # Exercises deep-reconnection after the tree mutations in Tester's
    # make_children* actions. The docstring is the expected printed output:
    # each set_foos() pass must reach every node currently in the tree.
    """
    A0
    A00
    A1
    A10
    -
    B0
    B00
    B01
    B1
    B10
    B11
    -
    C0
    C00
    C01
    C02
    C1
    C10
    C11
    C12
    """
    tester = Tester()
    loop.iter()
    tester.make_children1()
    loop.iter()
    tester.set_foos('A')
    loop.iter()
    print('-')
    tester.make_children2()
    loop.iter()
    tester.set_foos('B')
    loop.iter()
    print('-')
    tester.make_children3()
    loop.iter()
    tester.set_foos('C')
    loop.iter()
## Python only
class MyComponent(event.Component):
    # Minimal component for the connector-string tests below:
    # one scalar prop and one tuple prop (to test '*' / '**' connectors).
    a = event.AnyProp()
    aa = event.TupleProp()
def test_connectors1():
    """ test connectors """
    # Checks parsing of connection strings: labels, the '!' suppress-warning
    # marker, and the warnings emitted for unknown event names.
    x = MyComponent()
    def foo(*events):
        pass
    # Can haz any char in label
    with capture_log('warning') as log:
        h = x.reaction(foo, 'a:+asdkjb&^*!')
    type = h.get_connection_info()[0][1][0]
    assert type.startswith('a:')
    assert not log
    # Warn if no known event
    with capture_log('warning') as log:
        h = x.reaction(foo, 'b')
    assert log
    # Pop the implicitly-registered handler so the next case starts clean
    x._Component__handlers.pop('b')
    # Supress warn
    with capture_log('warning') as log:
        h = x.reaction(foo, '!b')
    assert not log
    x._Component__handlers.pop('b')
    # Supress warn, with label
    with capture_log('warning') as log:
        h = x.reaction(foo, '!b:meh')
    assert not log
    x._Component__handlers.pop('b')
    # Supress warn, with label - not like this
    with capture_log('warning') as log:
        h = x.reaction(foo, 'b:meh!')
    assert log
    assert 'does not exist' in log[0]
    x._Component__handlers.pop('b')
    # Invalid syntax - but fix and warn
    with capture_log('warning') as log:
        h = x.reaction(foo, 'b!:meh')
    assert log
    assert 'Exclamation mark' in log[0]
def test_connectors2():
    """ test connectors with sub """
    # Same as test_connectors1, but for connection strings that traverse a
    # sub-object ('sub*.', 'aa*', 'aa**'), including the required position
    # of the '*' marker and the errors for non-list attributes.
    x = MyComponent()
    y = MyComponent()
    x.sub = [y]  # plain attribute holding a list of components
    def foo(*events):
        pass
    # Warn if no known event
    with capture_log('warning') as log:
        h = x.reaction(foo, 'sub*.b')
    assert log
    y._Component__handlers.pop('b')
    # Supress warn
    with capture_log('warning') as log:
        h = x.reaction(foo, '!sub*.b')
    assert not log
    y._Component__handlers.pop('b')
    # Supress warn, with label
    with capture_log('warning') as log:
        h = x.reaction(foo, '!sub*.b:meh')
    assert not log
    y._Component__handlers.pop('b')
    # Invalid syntax - but fix and warn
    with capture_log('warning') as log:
        h = x.reaction(foo, 'sub*.!b:meh')
    assert log
    assert 'Exclamation mark' in log[0]
    y._Component__handlers.pop('b')
    # Position of *
    with capture_log('warning') as log:
        h = x.reaction(foo, 'sub*.a')
    assert not log
    with capture_log('warning') as log:
        h = x.reaction(foo, 'sub.*.a')
    assert log
    with raises(ValueError):
        h = x.reaction(foo, 'sub.*a') # fail
    # No star, no connection, fail!
    with raises(RuntimeError):
        h = x.reaction(foo, 'sub.b')
    # y.a is not a list, fail!
    with raises(RuntimeError):
        h = y.reaction(foo, 'a*.b')
    # Mix it
    with capture_log('warning') as log:
        h = x.reaction(foo, '!aa**')
    with capture_log('warning') as log:
        h = x.reaction(foo, '!aa*')
    assert not log
    with capture_log('warning') as log:
        h = y.reaction(foo, '!aa*')
    assert not log
    with capture_log('warning') as log:
        h = x.reaction(foo, '!aa**')
    assert not log
    with capture_log('warning') as log:
        h = x.reaction(foo, '!aa**:meh') # why not
    assert not log
def test_dynamism_and_handler_reconnecting():
    # Flexx' event system tries to be smart about reusing connections when
    # reconnections are made. This tests checks that this works, and when
    # it does not.
    # The 'disconnects' list below records every component whose disconnect()
    # was called, so we can count how many connections were actually dropped
    # for each kind of list mutation (append/prepend/drop at either end).

    class Foo(event.Component):
        def __init__(self):
            super().__init__()
        bars = event.ListProp(settable=True)
        def disconnect(self, *args): # Detect disconnections
            super().disconnect(*args)
            disconnects.append(self)

    class Bar(event.Component):
        def __init__(self):
            super().__init__()
        spam = event.AnyProp(0, settable=True)
        def disconnect(self, *args): # Detect disconnections
            super().disconnect(*args)
            disconnects.append(self)

    f = Foo()
    triggers = []
    disconnects = []

    @f.reaction('!bars*.spam')
    def handle_foo(*events):
        triggers.append(len(events))

    assert len(triggers) == 0
    assert len(disconnects) == 0

    # Assign new bar objects
    with event.loop:
        f.set_bars([Bar(), Bar()])
    #
    assert len(triggers) == 0
    assert len(disconnects) == 0

    # Change values of bar.spam
    with event.loop:
        f.bars[0].set_spam(7)
        f.bars[1].set_spam(42)
    #
    assert sum(triggers) == 2
    assert len(disconnects) == 0

    # Assign 3 new bar objects - old ones are disconnected
    with event.loop:
        f.set_bars([Bar(), Bar(), Bar()])
    #
    assert sum(triggers) == 2
    assert len(disconnects) == 2
    #

    # Append to bars property
    disconnects = []
    with event.loop:
        f.set_bars(f.bars + [Bar(), Bar()])
    assert len(disconnects) == 0

    # Append to bars property, drop one
    disconnects = []
    with event.loop:
        f.set_bars(f.bars[:-1] + [Bar(), Bar()])
    assert len(disconnects) == 1

    # Append to bars property, drop one at the wrong end: Flexx can't optimize
    disconnects = []
    with event.loop:
        f.set_bars(f.bars[1:] + [Bar(), Bar()])
    assert len(disconnects) == len(f.bars) - 1

    # Prepend to bars property
    disconnects = []
    with event.loop:
        f.set_bars([Bar(), Bar()] + f.bars)
    assert len(disconnects) == 0

    # Prepend to bars property, drop one
    disconnects = []
    with event.loop:
        f.set_bars([Bar(), Bar()] + f.bars[1:])
    assert len(disconnects) == 1

    # Prepend to bars property, drop one at the wrong end: Flexx can't optimize
    disconnects = []
    with event.loop:
        f.set_bars([Bar(), Bar()] + f.bars[:-1])
    assert len(disconnects) == len(f.bars) - 1
# Run this module's tests when executed as a script.
run_tests_if_main()
| 8,580 |
340 | <reponame>nkumar04/concord-bft
// Concord
//
// Copyright (c) 2019-2021 VMware, Inc. All Rights Reserved.
//
// This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in
// compliance with the Apache 2.0 License.
//
// This product may include a number of subcomponents with separate copyright notices and license terms. Your use of
// these subcomponents is subject to the terms and conditions of the sub-component's license, as noted in the LICENSE
// file.
#include "PreProcessRequestMsg.hpp"
#include "assertUtils.hpp"
#include "SigManager.hpp"
#include <cstring>
namespace preprocessor {
// Builds a PreProcessRequest message. The body is laid out as:
//   Header | span context | request payload | correlation id (cid) | signature
// The signature section is present only when requestSignatureLength > 0.
// NOTE(review): the size passed to MessageBase excludes spanContextSize while
// the local msgLength (used only for logging) includes it - presumably
// MessageBase accounts for the span context itself; confirm against
// MessageBase's constructor.
PreProcessRequestMsg::PreProcessRequestMsg(RequestType reqType,
                                           NodeIdType senderId,
                                           uint16_t clientId,
                                           uint16_t reqOffsetInBatch,
                                           uint64_t reqSeqNum,
                                           uint64_t reqRetryId,
                                           uint32_t reqLength,
                                           const char* request,
                                           const std::string& cid,
                                           const char* requestSignature,
                                           uint16_t requestSignatureLength,
                                           uint64_t blockId,
                                           const concordUtils::SpanContext& span_context)
    : MessageBase(senderId,
                  MsgCode::PreProcessRequest,
                  span_context.data().size(),
                  sizeof(Header) + reqLength + cid.size() + requestSignatureLength) {
  // A signature pointer and a non-zero length must be provided together
  ConcordAssert((requestSignatureLength > 0) == (nullptr != requestSignature));
  setParams(reqType,
            senderId,
            clientId,
            reqOffsetInBatch,
            reqSeqNum,
            cid.size(),
            span_context.data().size(),
            reqRetryId,
            reqLength,
            requestSignatureLength,
            blockId);
  // Serialize the variable-length sections in their wire order
  auto position = body() + sizeof(Header);
  memcpy(position, span_context.data().data(), span_context.data().size());
  position += span_context.data().size();
  memcpy(position, request, reqLength);
  position += reqLength;
  memcpy(position, cid.c_str(), cid.size());
  uint64_t msgLength = sizeof(Header) + span_context.data().size() + reqLength + cid.size();
  if (requestSignatureLength) {
    position += cid.size();
    memcpy(position, requestSignature, requestSignatureLength);
    msgLength += requestSignatureLength;
  }
  SCOPED_MDC_CID(cid);
  LOG_DEBUG(logger(),
            KVLOG(reqType,
                  senderId,
                  clientId,
                  reqSeqNum,
                  reqRetryId,
                  reqLength,
                  cid.size(),
                  span_context.data().size(),
                  requestSignatureLength,
                  msgLength,
                  blockId));
}
// Validates an incoming PreProcessRequest message:
//  - the raw size matches the sum of the header and all variable-length sections;
//  - the message type is PreProcessRequest;
//  - the sender is not this replica itself;
//  - when a client signature is attached, it verifies against the request payload.
// Throws std::runtime_error on any validation failure.
void PreProcessRequestMsg::validate(const ReplicasInfo& repInfo) const {
  if (size() < (sizeof(Header))) throw std::runtime_error(__PRETTY_FUNCTION__);

  auto* header = msgBody();
  auto* requestSignature = this->requestSignature();
  auto* sigManager = SigManager::instance();
  // Expected layout: Header | span context | request | cid | signature
  auto expectedMsgSize = (sizeof(Header) + header->spanContextSize + header->requestLength + header->cidLength +
                          header->reqSignatureLength);
  if (size() != expectedMsgSize) throw std::runtime_error(__PRETTY_FUNCTION__);

  if (type() != MsgCode::PreProcessRequest) {
    LOG_WARN(logger(), "Message type is incorrect" << KVLOG(type()));
    throw std::runtime_error(__PRETTY_FUNCTION__);
  }
  if (senderId() == repInfo.myId()) {
    // A replica should never receive its own pre-process request back
    LOG_WARN(logger(), "Message sender is invalid" << KVLOG(senderId(), repInfo.myId()));
    throw std::runtime_error(__PRETTY_FUNCTION__);
  }
  if (requestSignature) {
    // A signature can only be present when client transaction signing is on
    ConcordAssert(sigManager->isClientTransactionSigningEnabled());
    if (!sigManager->verifySig(
            header->clientId, requestBuf(), header->requestLength, requestSignature, header->reqSignatureLength)) {
      std::stringstream msg;
      LOG_WARN(logger(),
               "Signature verification failed for " << KVLOG(header->reqSeqNum, header->clientId, this->senderId()));
      msg << "Signature verification failed for: "
          << KVLOG(header->clientId, header->reqSeqNum, header->requestLength, header->reqSignatureLength);
      throw std::runtime_error(msg.str());
    }
    LOG_TRACE(GL, "Signature verified for " << KVLOG(header->reqSeqNum, this->senderId(), header->clientId));
  }
}
// Fills in every field of the message's wire header in one place, so the
// constructor cannot forget one. All values are stored verbatim.
void PreProcessRequestMsg::setParams(RequestType reqType,
                                     NodeIdType senderId,
                                     uint16_t clientId,
                                     uint16_t reqOffsetInBatch,
                                     ReqId reqSeqNum,
                                     uint32_t cidLength,
                                     uint32_t spanContextSize,
                                     uint64_t reqRetryId,
                                     uint32_t reqLength,
                                     uint16_t reqSignatureLength,
                                     uint64_t blockId) {
  auto* const hdr = msgBody();
  // Request identity
  hdr->reqType = reqType;
  hdr->senderId = senderId;
  hdr->clientId = clientId;
  hdr->reqOffsetInBatch = reqOffsetInBatch;
  hdr->reqSeqNum = reqSeqNum;
  hdr->reqRetryId = reqRetryId;
  // Sizes of the variable-length sections that follow the header
  hdr->cidLength = cidLength;
  hdr->spanContextSize = spanContextSize;
  hdr->requestLength = reqLength;
  hdr->reqSignatureLength = reqSignatureLength;
  hdr->primaryBlockId = blockId;
}
// Returns the correlation id (cid), which is stored in the body right after
// the fixed header, the span context and the request payload.
std::string PreProcessRequestMsg::getCid() const {
  const auto* hdr = msgBody();
  const char* cidStart = body() + sizeof(Header) + hdr->spanContextSize + hdr->requestLength;
  return std::string(cidStart, hdr->cidLength);
}
} // namespace preprocessor
| 2,848 |
1,375 | <gh_stars>1000+
package com.vise.baseble.exception;
import com.vise.baseble.common.BleExceptionCode;
/**
 * @Description: Exception raised when BLE initialization fails.
 * @author: <a href="http://www.xiaoyaoyou1212.com">DAWI</a>
 * @date: 16/8/14 10:30.
 */
public class InitiatedException extends BleException {
    public InitiatedException() {
        // Fixed code/description pair; callers distinguish failures by BleExceptionCode
        super(BleExceptionCode.INITIATED_ERR, "Initiated Exception Occurred! ");
    }
}
| 159 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.