max_stars_count (int64, 301–224k) | text (string, lengths 6–1.05M) | token_count (int64, 3–727k) |
---|---|---|
879 | <gh_stars>100-1000
package org.zstack.network.service.flat;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;
import org.zstack.core.db.Q;
import org.zstack.core.db.SQL;
import org.zstack.header.apimediator.ApiMessageInterceptionException;
import org.zstack.header.apimediator.GlobalApiMessageInterceptor;
import org.zstack.header.message.APIMessage;
import org.zstack.header.network.service.NetworkServiceProviderType;
import org.zstack.header.vm.VmNicVO;
import org.zstack.header.vm.VmNicVO_;
import org.zstack.network.service.NetworkServiceManager;
import org.zstack.network.service.eip.*;
import org.zstack.network.service.vip.Vip;
import org.zstack.network.service.vip.VipVO;
import org.zstack.network.service.vip.VipVO_;
import org.zstack.utils.network.IPv6NetworkUtils;
import org.zstack.utils.network.NetworkUtils;
import java.util.Arrays;
import java.util.List;
import static org.zstack.core.Platform.argerr;
/**
* Created by MaJin on 2017/12/21.
*/
public class FlatEipApiInterceptor implements GlobalApiMessageInterceptor {
@Autowired
private NetworkServiceManager nwServiceMgr;
@Override
public List<Class> getMessageClassToIntercept() {
return Arrays.asList(APICreateEipMsg.class, APIAttachEipMsg.class);
}
@Override
public InterceptorPosition getPosition() {
return InterceptorPosition.FRONT;
}
@Override
public APIMessage intercept(APIMessage msg) throws ApiMessageInterceptionException {
if (msg instanceof APICreateEipMsg) {
validate((APICreateEipMsg) msg);
} else if (msg instanceof APIAttachEipMsg) {
validate((APIAttachEipMsg) msg);
}
return msg;
}
@Transactional(readOnly = true)
protected void validate(APICreateEipMsg msg) {
if (msg.getVmNicUuid() == null) {
return;
}
String privateL3Uuid = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).eq(VmNicVO_.uuid, msg.getVmNicUuid()).findValue();
NetworkServiceProviderType providerType = nwServiceMgr.getTypeOfNetworkServiceProviderForService(privateL3Uuid, EipConstant.EIP_TYPE);
if (!providerType.toString().equals(FlatNetworkServiceConstant.FLAT_NETWORK_SERVICE_TYPE_STRING)) {
return;
}
String pubL3Uuid = Q.New(VipVO.class).select(VipVO_.l3NetworkUuid).eq(VipVO_.uuid, msg.getVipUuid()).findValue();
checkVipPublicL3Network(msg.getVmNicUuid(), pubL3Uuid);
checkFlatVmNicAlreadyHasEip(msg.getVmNicUuid(), null, msg.getVipUuid());
}
@Transactional(readOnly = true)
protected void validate(APIAttachEipMsg msg) {
String privateL3Uuid = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).eq(VmNicVO_.uuid, msg.getVmNicUuid()).findValue();
NetworkServiceProviderType providerType = nwServiceMgr.getTypeOfNetworkServiceProviderForService(privateL3Uuid, EipConstant.EIP_TYPE);
/* TODO: this is a temporary limitation; an IPv6 EIP can only be attached on a flat network */
EipVO eip = Q.New(EipVO.class).eq(EipVO_.uuid, msg.getEipUuid()).find();
if (IPv6NetworkUtils.isIpv6Address(eip.getVipIp()) && !providerType.toString().equals(FlatNetworkServiceConstant.FLAT_NETWORK_SERVICE_TYPE_STRING)) {
throw new ApiMessageInterceptionException(argerr("could not attach the EIP because an IPv6 EIP can only be attached on a flat network"));
}
if (!providerType.toString().equals(FlatNetworkServiceConstant.FLAT_NETWORK_SERVICE_TYPE_STRING)) {
return;
}
String pubL3Uuid = SQL.New("select vip.l3NetworkUuid from EipVO eip, VipVO vip" +
" where eip.uuid = :eipUuid" +
" and eip.vipUuid = vip.uuid", String.class)
.param("eipUuid", msg.getEipUuid())
.find();
checkVipPublicL3Network(msg.getVmNicUuid(), pubL3Uuid);
checkFlatVmNicAlreadyHasEip(msg.getVmNicUuid(), msg.getEipUuid(), null);
}
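/* The VIP's public L3 network must be based on an L2 network that is attached to the cluster
 * where the VM owning the nic runs; otherwise the request is rejected. */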
@Transactional(readOnly = true)
private void checkVipPublicL3Network(String vmNicUuid, String pubL3Uuid){
boolean isPublicL2NetworkAttachedVmCluster = (Long) SQL.New("select count(l3)" +
" from VmInstanceVO vm, VmNicVO nic, L2NetworkClusterRefVO ref, L3NetworkVO l3" +
" where nic.uuid = :nicUuid" +
" and vm.uuid = nic.vmInstanceUuid" +
" and ref.clusterUuid = vm.clusterUuid" +
" and ref.l2NetworkUuid = l3.l2NetworkUuid" +
" and l3.uuid = :publicL3Uuid", Long.class)
.param("nicUuid", vmNicUuid)
.param("publicL3Uuid", pubL3Uuid)
.find() > 0;
if (!isPublicL2NetworkAttachedVmCluster){
throw new ApiMessageInterceptionException(argerr("the L2 network that the vip's L3 network is based on is not attached" +
" to the cluster where vmNic[uuid:%s] is located", vmNicUuid));
}
}
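/* A flat-network VM nic may hold at most one IPv4 EIP and one IPv6 EIP; if an EIP with the same
 * IP version as the new VIP is already bound to the nic, the request is rejected. */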
@Transactional(readOnly = true)
private void checkFlatVmNicAlreadyHasEip(String vmNicUuid, String eipUuid, String vipUuid){
VipVO newVipVO;
if (vipUuid != null) {
newVipVO = Q.New(VipVO.class).eq(VipVO_.uuid, vipUuid).find();
} else {
String uuid = Q.New(EipVO.class).eq(EipVO_.uuid, eipUuid).select(EipVO_.vipUuid).findValue();
newVipVO = Q.New(VipVO.class).eq(VipVO_.uuid, uuid).find();
}
boolean newVipVersion = NetworkUtils.isIpv4Address(newVipVO.getIp());
List<String> oldVipIps = Q.New(EipVO.class).eq(EipVO_.vmNicUuid, vmNicUuid).select(EipVO_.vipIp).listValues();
if (oldVipIps.isEmpty()) {
return;
}
for (String oldVipIp : oldVipIps) {
boolean oldVipVersion = NetworkUtils.isIpv4Address(oldVipIp);
if (oldVipVersion == newVipVersion) {
String version = oldVipVersion ? "ipv4" : "ipv6";
throw new ApiMessageInterceptionException(argerr("cannot bind more than one %s EIP to a VM nic[uuid:%s] on a flat network",
version, vmNicUuid));
}
}
}
}
| 2,761 |
3,369 | //
// LoaderVC.h
// Advanced-Example
//
#import <UIKit/UIKit.h>
@interface LoaderVC : UIViewController
@end
| 47 |
2,042 | /*
Copyright 2017 yangchong211(github.com/yangchong211)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.yc.video.player;
import android.graphics.Color;
import androidx.annotation.ColorInt;
/**
* <pre>
* @author yangchong
* blog : https://github.com/yangchong211
* time : 2018/11/9
* desc : builder class for the video player configuration attributes
* revise:
* </pre>
*/
public class VideoPlayerBuilder {
public static Builder newBuilder() {
return new Builder();
}
public final static class Builder {
private int mColor = 0;
private int[] mTinyScreenSize;
private int mCurrentPosition = -1;
private boolean mEnableAudioFocus = true;
/**
* Set the background color of the video player
* @param color color
* @return Builder
*/
public Builder setPlayerBackgroundColor(@ColorInt int color) {
//the @ColorInt annotation restricts the argument to a color int value
if (color==0){
this.mColor = Color.BLACK;
} else {
this.mColor = color;
}
return this;
}
/**
* Set the width and height of the tiny (floating) screen
* @param tinyScreenSize tinyScreenSize[0] is the width, tinyScreenSize[1] is the height
* @return Builder
*/
public Builder setTinyScreenSize(int[] tinyScreenSize) {
this.mTinyScreenSize = tinyScreenSize;
return this;
}
/**
* Seek to a preset position as soon as playback starts
* @param position the position to seek to
* @return Builder
*/
public Builder skipPositionWhenPlay(int position) {
this.mCurrentPosition = position;
return this;
}
/**
* Whether to enable the AudioFocus listener (enabled by default). It watches whether another
* component acquires the audio focus; if one does, this player reacts accordingly. See {@link AudioFocusHelper} for the implementation.
* @param enableAudioFocus whether to enable the listener
* @return Builder
*/
public Builder setEnableAudioFocus(boolean enableAudioFocus) {
this.mEnableAudioFocus = enableAudioFocus;
return this;
}
public VideoPlayerBuilder build() {
//create the VideoPlayerBuilder instance
return new VideoPlayerBuilder(this);
}
}
public final int mColor;
public final int[] mTinyScreenSize;
public final int mCurrentPosition;
public final boolean mEnableAudioFocus;
public VideoPlayerBuilder(Builder builder) {
mColor = builder.mColor;
mTinyScreenSize = builder.mTinyScreenSize;
mCurrentPosition = builder.mCurrentPosition;
mEnableAudioFocus = builder.mEnableAudioFocus;
}
}
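/*
 * Minimal usage sketch (illustrative, not part of the original class). The builder chain only uses
 * methods defined above; the videoPlayer.setVideoBuilder(...) call is a hypothetical consumer.
 *
 *   VideoPlayerBuilder builder = VideoPlayerBuilder.newBuilder()
 *           .setPlayerBackgroundColor(Color.BLACK)
 *           .setTinyScreenSize(new int[]{480, 270})   // width, height
 *           .skipPositionWhenPlay(30000)              // start 30 seconds into the video
 *           .setEnableAudioFocus(true)
 *           .build();
 *   videoPlayer.setVideoBuilder(builder);             // hypothetical setter on the player
 */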
| 1,690 |
2,577 | <gh_stars>1000+
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.qa.upgrade.gson.batch;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.test.Deployment;
import org.camunda.bpm.qa.upgrade.DescribesScenario;
import org.camunda.bpm.qa.upgrade.ScenarioSetup;
import java.util.ArrayList;
import java.util.List;
/**
* @author <NAME>
*/
public class ModificationBatchScenario {
@Deployment
public static String deploy() {
return "org/camunda/bpm/qa/upgrade/gson/oneTaskProcessModification.bpmn20.xml";
}
@DescribesScenario("ModificationBatchScenario")
public static ScenarioSetup initModificationBatch() {
return new ScenarioSetup() {
public void execute(ProcessEngine engine, String scenarioName) {
String processDefinitionId = engine.getRepositoryService().createProcessDefinitionQuery()
.processDefinitionKey("oneTaskProcessModification_710")
.singleResult()
.getId();
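// Start ten instances of the deployed process, then submit a single asynchronous modification
// batch that applies the start/cancel instructions below to all of them at once.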
List<String> processInstanceIds = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String processInstanceId = engine.getRuntimeService()
.startProcessInstanceById(processDefinitionId, "ModificationBatchScenario").getId();
processInstanceIds.add(processInstanceId);
}
engine.getRuntimeService().createModification(processDefinitionId)
.startAfterActivity("theStart")
.startBeforeActivity("theTask")
.startBeforeActivity("userTask4")
.startTransition("flow2")
.cancelAllForActivity("userTask4", false)
.processInstanceIds(processInstanceIds)
.skipCustomListeners()
.skipIoMappings()
.executeAsync();
}
};
}
}
| 851 |
312 | <gh_stars>100-1000
/*
*
* Copyright 2015 Rockchip Electronics Co. LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define MODULE_TAG "hal_h264d_vdpu_reg"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "rk_type.h"
#include "mpp_err.h"
#include "mpp_mem.h"
#include "mpp_soc.h"
#include "mpp_common.h"
#include "hal_h264d_global.h"
#include "hal_h264d_api.h"
#include "hal_h264d_vdpu_com.h"
#include "hal_h264d_vdpu2.h"
#include "hal_h264d_vdpu2_reg.h"
#include "mpp_dec_cb_param.h"
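/* Register indices (sw84..sw99) holding the 16 reference picture base addresses; used as the
 * target register index when programming per-reference address offsets via mpp_dev_set_reg_offset(). */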
const RK_U32 vdpu2_ref_idx[16] = {
84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99
};
static MPP_RET set_device_regs(H264dHalCtx_t *p_hal, H264dVdpuRegs_t *p_reg)
{
MPP_RET ret = MPP_ERR_UNKNOW;
p_reg->sw53.dec_fmt_sel = 0; //!< set H264 mode
p_reg->sw54.dec_out_endian = 1; //!< little endian
p_reg->sw54.dec_in_endian = 0; //!< big endian
p_reg->sw54.dec_strendian_e = 1; //!< little endian
p_reg->sw50.dec_tiled_msb = 0; //!< 0: raster scan 1: tiled
p_reg->sw56.dec_max_burlen = 16; //!< (0, 4, 8, 16) choice one
p_reg->sw50.dec_ascmd0_dis = 0; //!< disable
p_reg->sw50.adv_pref_dis = 0; //!< disable
p_reg->sw52.adv_pref_thrd = 8;
p_reg->sw50.adtion_latency = 0; //!< compensation for bus latency; values up to 63
p_reg->sw56.dec_data_discd_en = 0;
p_reg->sw54.dec_out_wordsp = 1;//!< little endian
p_reg->sw54.dec_in_wordsp = 1;//!< little endian
p_reg->sw54.dec_strm_wordsp = 1;//!< little endian
p_reg->sw57.timeout_sts_en = 1;
p_reg->sw57.dec_clkgate_en = 1;
p_reg->sw55.dec_irq_dis = 0;
//!< set AXI RW IDs
p_reg->sw56.dec_axi_id_rd = (0xFF & 0xFFU); //!< 0-255
p_reg->sw56.dec_axi_id_wr = (0x0 & 0xFFU); //!< 0-255
//!< Set prediction filter taps
{
RK_U32 val = 0;
p_reg->sw59.pflt_set0_tap0 = 1;
val = (RK_U32)(-5);
p_reg->sw59.pflt_set0_tap1 = val;
p_reg->sw59.pflt_set0_tap2 = 20;
}
p_reg->sw50.adtion_latency = 0;
//!< clock_gating 0:clock always on, 1: clock gating module control the key(turn off when decoder free)
p_reg->sw57.dec_clkgate_en = 1;
p_reg->sw50.dec_tiled_msb = 0; //!< 0: raster scan 1: tiled
//!< bus_burst_length = 16, bus burst
p_reg->sw56.dec_max_burlen = 16;
p_reg->sw56.dec_data_discd_en = 0;
(void)p_hal;
return ret = MPP_OK;
}
static MPP_RET set_refer_pic_idx(H264dVdpuRegs_t *p_regs, RK_U32 i, RK_U16 val)
{
switch (i) {
case 0:
p_regs->sw76.num_ref_idx0 = val;
break;
case 1:
p_regs->sw76.num_ref_idx1 = val;
break;
case 2:
p_regs->sw77.num_ref_idx2 = val;
break;
case 3:
p_regs->sw77.num_ref_idx3 = val;
break;
case 4:
p_regs->sw78.num_ref_idx4 = val;
break;
case 5:
p_regs->sw78.num_ref_idx5 = val;
break;
case 6:
p_regs->sw79.num_ref_idx6 = val;
break;
case 7:
p_regs->sw79.num_ref_idx7 = val;
break;
case 8:
p_regs->sw80.num_ref_idx8 = val;
break;
case 9:
p_regs->sw80.num_ref_idx9 = val;
break;
case 10:
p_regs->sw81.num_ref_idx10 = val;
break;
case 11:
p_regs->sw81.num_ref_idx11 = val;
break;
case 12:
p_regs->sw82.num_ref_idx12 = val;
break;
case 13:
p_regs->sw82.num_ref_idx13 = val;
break;
case 14:
p_regs->sw83.num_ref_idx14 = val;
break;
case 15:
p_regs->sw83.num_ref_idx15 = val;
break;
default:
break;
}
return MPP_OK;
}
static MPP_RET set_refer_pic_list_p(H264dVdpuRegs_t *p_regs, RK_U32 i,
RK_U16 val)
{
switch (i) {
case 0:
p_regs->sw106.init_reflist_pf0 = val;
break;
case 1:
p_regs->sw106.init_reflist_pf1 = val;
break;
case 2:
p_regs->sw106.init_reflist_pf2 = val;
break;
case 3:
p_regs->sw106.init_reflist_pf3 = val;
break;
case 4:
p_regs->sw74.init_reflist_pf4 = val;
break;
case 5:
p_regs->sw74.init_reflist_pf5 = val;
break;
case 6:
p_regs->sw74.init_reflist_pf6 = val;
break;
case 7:
p_regs->sw74.init_reflist_pf7 = val;
break;
case 8:
p_regs->sw74.init_reflist_pf8 = val;
break;
case 9:
p_regs->sw74.init_reflist_pf9 = val;
break;
case 10:
p_regs->sw75.init_reflist_pf10 = val;
break;
case 11:
p_regs->sw75.init_reflist_pf11 = val;
break;
case 12:
p_regs->sw75.init_reflist_pf12 = val;
break;
case 13:
p_regs->sw75.init_reflist_pf13 = val;
break;
case 14:
p_regs->sw75.init_reflist_pf14 = val;
break;
case 15:
p_regs->sw75.init_reflist_pf15 = val;
break;
default:
break;
}
return MPP_OK;
}
static MPP_RET set_refer_pic_list_b0(H264dVdpuRegs_t *p_regs, RK_U32 i,
RK_U16 val)
{
switch (i) {
case 0:
p_regs->sw100.init_reflist_df0 = val;
break;
case 1:
p_regs->sw100.init_reflist_df1 = val;
break;
case 2:
p_regs->sw100.init_reflist_df2 = val;
break;
case 3:
p_regs->sw100.init_reflist_df3 = val;
break;
case 4:
p_regs->sw100.init_reflist_df4 = val;
break;
case 5:
p_regs->sw100.init_reflist_df5 = val;
break;
case 6:
p_regs->sw101.init_reflist_df6 = val;
break;
case 7:
p_regs->sw101.init_reflist_df7 = val;
break;
case 8:
p_regs->sw101.init_reflist_df8 = val;
break;
case 9:
p_regs->sw101.init_reflist_df9 = val;
break;
case 10:
p_regs->sw101.init_reflist_df10 = val;
break;
case 11:
p_regs->sw101.init_reflist_df11 = val;
break;
case 12:
p_regs->sw102.init_reflist_df12 = val;
break;
case 13:
p_regs->sw102.init_reflist_df13 = val;
break;
case 14:
p_regs->sw102.init_reflist_df14 = val;
break;
case 15:
p_regs->sw102.init_reflist_df15 = val;
break;
default:
break;
}
return MPP_OK;
}
static MPP_RET set_refer_pic_list_b1(H264dVdpuRegs_t *p_regs, RK_U32 i,
RK_U16 val)
{
switch (i) {
case 0:
p_regs->sw103.init_reflist_db0 = val;
break;
case 1:
p_regs->sw103.init_reflist_db1 = val;
break;
case 2:
p_regs->sw103.init_reflist_db2 = val;
break;
case 3:
p_regs->sw103.init_reflist_db3 = val;
break;
case 4:
p_regs->sw103.init_reflist_db4 = val;
break;
case 5:
p_regs->sw103.init_reflist_db5 = val;
break;
case 6:
p_regs->sw104.init_reflist_db6 = val;
break;
case 7:
p_regs->sw104.init_reflist_db7 = val;
break;
case 8:
p_regs->sw104.init_reflist_db8 = val;
break;
case 9:
p_regs->sw104.init_reflist_db9 = val;
break;
case 10:
p_regs->sw104.init_reflist_db10 = val;
break;
case 11:
p_regs->sw104.init_reflist_db11 = val;
break;
case 12:
p_regs->sw105.init_reflist_db12 = val;
break;
case 13:
p_regs->sw105.init_reflist_db13 = val;
break;
case 14:
p_regs->sw105.init_reflist_db14 = val;
break;
case 15:
p_regs->sw105.init_reflist_db15 = val;
break;
default:
break;
}
return MPP_OK;
}
static MPP_RET set_refer_pic_base_addr(H264dVdpuRegs_t *p_regs, RK_U32 i,
RK_U32 val)
{
switch (i) {
case 0:
p_regs->sw84.ref0_st_addr = val;
break;
case 1:
p_regs->sw85.ref1_st_addr = val;
break;
case 2:
p_regs->sw86.ref2_st_addr = val;
break;
case 3:
p_regs->sw87.ref3_st_addr = val;
break;
case 4:
p_regs->sw88.ref4_st_addr = val;
break;
case 5:
p_regs->sw89.ref5_st_addr = val;
break;
case 6:
p_regs->sw90.ref6_st_addr = val;
break;
case 7:
p_regs->sw91.ref7_st_addr = val;
break;
case 8:
p_regs->sw92.ref8_st_addr = val;
break;
case 9:
p_regs->sw93.ref9_st_addr = val;
break;
case 10:
p_regs->sw94.ref10_st_addr = val;
break;
case 11:
p_regs->sw95.ref11_st_addr = val;
break;
case 12:
p_regs->sw96.ref12_st_addr = val;
break;
case 13:
p_regs->sw97.ref13_st_addr = val;
break;
case 14:
p_regs->sw98.ref14_st_addr = val;
break;
case 15:
p_regs->sw99.ref15_st_addr = val;
break;
default:
break;
}
return MPP_OK;
}
static MPP_RET set_pic_regs(H264dHalCtx_t *p_hal, H264dVdpuRegs_t *p_regs)
{
MPP_RET ret = MPP_ERR_UNKNOW;
p_regs->sw110.pic_mb_w = p_hal->pp->wFrameWidthInMbsMinus1 + 1;
p_regs->sw110.pic_mb_h = (2 - p_hal->pp->frame_mbs_only_flag)
* (p_hal->pp->wFrameHeightInMbsMinus1 + 1);
return ret = MPP_OK;
}
static MPP_RET set_vlc_regs(H264dHalCtx_t *p_hal, H264dVdpuRegs_t *p_regs)
{
RK_U32 i = 0;
MPP_RET ret = MPP_ERR_UNKNOW;
DXVA_PicParams_H264_MVC *pp = p_hal->pp;
RK_U32 validFlags = 0;
RK_U32 longTermTmp = 0, longTermflags = 0;
p_regs->sw57.dec_wr_extmen_dis = 0;
p_regs->sw57.rlc_mode_en = 0;
p_regs->sw51.qp_init_val = pp->pic_init_qp_minus26 + 26;
p_regs->sw114.max_refidx0 = pp->num_ref_idx_l0_active_minus1 + 1;
p_regs->sw111.max_refnum = pp->num_ref_frames;
p_regs->sw112.cur_frm_len = pp->log2_max_frame_num_minus4 + 4;
p_regs->sw112.curfrm_num = pp->frame_num;
p_regs->sw115.const_intra_en = pp->constrained_intra_pred_flag;
p_regs->sw112.dblk_ctrl_flag = pp->deblocking_filter_control_present_flag;
p_regs->sw112.rpcp_flag = pp->redundant_pic_cnt_present_flag;
p_regs->sw113.refpic_mk_len = p_hal->slice_long[0].drpm_used_bitlen;
p_regs->sw115.idr_pic_flag = p_hal->slice_long[0].idr_flag;
p_regs->sw113.idr_pic_id = p_hal->slice_long[0].idr_pic_id;
p_regs->sw114.pps_id = p_hal->slice_long[0].active_pps_id;
p_regs->sw114.poc_field_len = p_hal->slice_long[0].poc_used_bitlen;
/* reference picture flags, TODO separate fields */
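/* In field mode one flag bit is built per field (32 bits in total); in frame mode one bit is
 * built per frame and the 16-bit result is shifted into the upper half of the register below. */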
if (pp->field_pic_flag) {
for (i = 0; i < 32; i++) {
if (pp->RefFrameList[i / 2].bPicEntry == 0xff) { //!< invalid
longTermflags <<= 1;
validFlags <<= 1;
} else {
longTermTmp = pp->RefFrameList[i / 2].AssociatedFlag; //!< get long term flag
longTermflags = (longTermflags << 1) | longTermTmp;
validFlags = (validFlags << 1)
| ((pp->UsedForReferenceFlags >> i) & 0x01);
}
}
p_regs->sw107.refpic_term_flag = longTermflags;
p_regs->sw108.refpic_valid_flag = validFlags;
} else {
for (i = 0; i < 16; i++) {
if (pp->RefFrameList[i].bPicEntry == 0xff) { //!< invalid
longTermflags <<= 1;
validFlags <<= 1;
} else {
RK_U32 use_flag = (pp->UsedForReferenceFlags >> (2 * i)) & 0x03;
longTermTmp = pp->RefFrameList[i].AssociatedFlag;
longTermflags = (longTermflags << 1) | longTermTmp;
validFlags = (validFlags << 1) | (use_flag > 0);
}
}
p_regs->sw107.refpic_term_flag = (longTermflags << 16);
p_regs->sw108.refpic_valid_flag = (validFlags << 16);
}
for (i = 0; i < 16; i++) {
if (pp->RefFrameList[i].bPicEntry != 0xff) { //!< valid
if (pp->RefFrameList[i].AssociatedFlag) { //!< longterm flag
set_refer_pic_idx(p_regs, i, pp->LongTermPicNumList[i]); //!< pic_num
} else {
set_refer_pic_idx(p_regs, i, pp->FrameNumList[i]); //!< frame_num
}
}
}
p_regs->sw57.rd_cnt_tab_en = 1;
//!< set poc to buffer
{
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
RK_U32 *ptr = (RK_U32 *)reg_ctx->poc_ptr;
//!< set reference reorder poc
for (i = 0; i < 32; i++) {
if (pp->RefFrameList[i / 2].bPicEntry != 0xff) {
*ptr++ = pp->FieldOrderCntList[i / 2][i & 0x1];
} else {
*ptr++ = 0;
}
}
//!< set current poc
if (pp->field_pic_flag || !pp->MbaffFrameFlag) {
if (pp->field_pic_flag)
*ptr++ = pp->CurrFieldOrderCnt[pp->CurrPic.AssociatedFlag ? 1 : 0];
else
*ptr++ = MPP_MIN(pp->CurrFieldOrderCnt[0], pp->CurrFieldOrderCnt[1]);
} else {
*ptr++ = pp->CurrFieldOrderCnt[0];
*ptr++ = pp->CurrFieldOrderCnt[1];
}
#if DEBUG_REF_LIST
{
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
RK_U32 *ptr_tmp = (RK_U32 *)reg_ctx->poc_ptr;
RK_U32 *ref_reg = (RK_U32 *)&p_regs->sw76;
char file_name[128];
sprintf(file_name, "/sdcard/test/mpp_pocbase_log.txt");
FILE *fp = fopen(file_name, "ab");
char buf[1024];
RK_S32 buf_len = 0, buf_size = sizeof(buf) - 1;
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "=== poc_base filed %d fram_num %d ===\n",
pp->field_pic_flag || !pp->MbaffFrameFlag, pp->frame_num);
for (; ptr_tmp < ptr; ptr_tmp++)
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "poc 0x%08x\n", *ptr_tmp);
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "term_flag 0x%08x refpic_valid_flag 0x%08x \n",
longTermflags, validFlags);
for (i = 0; i < 8; i++)
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "ref[%d] 0x%08x\n", i, ref_reg[i]);
fprintf(fp, "%s", buf);
fflush(fp);
fclose(fp);
}
#endif
}
p_regs->sw115.cabac_en = pp->entropy_coding_mode_flag;
//!< stream position update
{
MppBuffer bitstream_buf = NULL;
p_regs->sw57.st_code_exit = 1;
mpp_buf_slot_get_prop(p_hal->packet_slots,
p_hal->in_task->input,
SLOT_BUFFER, &bitstream_buf);
p_regs->sw109.strm_start_bit = 0; //!< sodb stream start bit
p_regs->sw64.rlc_vlc_st_adr = mpp_buffer_get_fd(bitstream_buf);
p_regs->sw51.stream_len = p_hal->strm_len;
}
return ret = MPP_OK;
}
static MPP_RET set_ref_regs(H264dHalCtx_t *p_hal, H264dVdpuRegs_t *p_regs)
{
MPP_RET ret = MPP_ERR_UNKNOW;
RK_U32 i = 0;
RK_U32 num_refs = 0;
RK_U32 num_reorder = 0;
H264dRefsList_t m_lists[3][16];
DXVA_PicParams_H264_MVC *pp = p_hal->pp;
RK_U32 max_frame_num = 1 << (pp->log2_max_frame_num_minus4 + 4);
// init list
memset(m_lists, 0, sizeof(m_lists));
for (i = 0; i < 16; i++) {
RK_U32 ref_flag = pp->UsedForReferenceFlags >> (2 * i) & 0x3;
m_lists[0][i].idx = i;
if (ref_flag) {
num_refs++;
m_lists[0][i].cur_poc = pp->CurrPic.AssociatedFlag
? pp->CurrFieldOrderCnt[1] : pp->CurrFieldOrderCnt[0];
m_lists[0][i].ref_flag = ref_flag;
m_lists[0][i].lt_flag = pp->RefFrameList[i].AssociatedFlag;
if (m_lists[0][i].lt_flag) {
m_lists[0][i].ref_picnum = pp->LongTermPicNumList[i];
} else {
m_lists[0][i].ref_picnum = pp->FrameNumList[i] > pp->frame_num ?
(pp->FrameNumList[i] - max_frame_num) :
pp->FrameNumList[i];
}
if (ref_flag == 3) {
m_lists[0][i].ref_poc = MPP_MIN(pp->FieldOrderCntList[i][0], pp->FieldOrderCntList[i][1]);
} else if (ref_flag & 0x1) {
m_lists[0][i].ref_poc = pp->FieldOrderCntList[i][0];
} else if (ref_flag & 0x2) {
m_lists[0][i].ref_poc = pp->FieldOrderCntList[i][1];
}
#if DEBUG_REF_LIST
mpp_log("i %d ref_pic_num %d lt_flag %d ref_flag %d ref_poc %d cur_poc %d\n",
i, m_lists[0][i].ref_picnum, m_lists[0][i].lt_flag, ref_flag,
m_lists[0][i].ref_poc, m_lists[0][i].cur_poc);
#endif
num_reorder = i + 1;
}
}
/*
* the value of num_reorder may be greater than num_refs,
* e.g. v: valid x: invalid
* num_refs = 3, num_reorder = 4
* the index 1 will be reorder to the end
* ┌─┬─┬─┬─┬─┬─┬─┐
* │0│1│2│3│.│.│F│
* ├─┼─┼─┼─┼─┼─┼─┤
* │v│x│v│v│x│x│x│
* └─┴─┴─┴─┴─┴─┴─┘
*/
memcpy(m_lists[1], m_lists[0], sizeof(m_lists[0]));
memcpy(m_lists[2], m_lists[0], sizeof(m_lists[0]));
qsort(m_lists[0], num_reorder, sizeof(m_lists[0][0]), compare_p);
qsort(m_lists[1], num_reorder, sizeof(m_lists[1][0]), compare_b0);
qsort(m_lists[2], num_reorder, sizeof(m_lists[2][0]), compare_b1);
if (num_refs > 1 && !p_hal->pp->field_pic_flag) {
if (!memcmp(m_lists[1], m_lists[2], sizeof(m_lists[1]))) {
MPP_SWAP(H264dRefsList_t, m_lists[2][0], m_lists[2][1]);
}
}
//!< list0 list1 listP
for (i = 0; i < 16; i++) {
set_refer_pic_list_p(p_regs, i, m_lists[0][i].idx);
set_refer_pic_list_b0(p_regs, i, m_lists[1][i].idx);
set_refer_pic_list_b1(p_regs, i, m_lists[2][i].idx);
}
#if DEBUG_REF_LIST
{
char file_name[128];
sprintf(file_name, "/sdcard/test/mpp2_RefPicList_log.txt");
FILE *fp = fopen(file_name, "ab");
char buf[1024];
RK_S32 buf_len = 0, buf_size = sizeof(buf) - 1;
// fwrite(buf, 1, size, fp);
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "frame_num %d field %d bottom %d\n",
pp->frame_num, pp->field_pic_flag, pp->CurrPic.AssociatedFlag);
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "list0 : ");
for (i = 0; i < 16; i++)
buf_len += snprintf(buf + buf_len, buf_size - buf_len, " %04d", m_lists[1][i].idx);
fprintf(fp, "%s\n", buf);
buf_len = 0;
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "list1 : ");
for (i = 0; i < 16; i++)
buf_len += snprintf(buf + buf_len, buf_size - buf_len, " %04d", m_lists[2][i].idx);
fprintf(fp, "%s\n", buf);
buf_len = 0;
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "listP : ");
for (i = 0; i < 16; i++)
buf_len += snprintf(buf + buf_len, buf_size - buf_len, " %04d", m_lists[0][i].idx);
fprintf(fp, "%s\n", buf);
fflush(fp);
fclose(fp);
}
#endif
return ret = MPP_OK;
}
static MPP_RET set_asic_regs(H264dHalCtx_t *p_hal, H264dVdpuRegs_t *p_regs)
{
RK_U32 i = 0, j = 0;
RK_U32 outPhyAddr = 0;
MppBuffer frame_buf = NULL;
MPP_RET ret = MPP_ERR_UNKNOW;
DXVA_PicParams_H264_MVC *pp = p_hal->pp;
DXVA_Slice_H264_Long *p_long = &p_hal->slice_long[0];
{
#if DEBUG_REF_LIST
char file_name[128];
sprintf(file_name, "/sdcard/test/mpp2_dpb_log.txt");
FILE *fp = fopen(file_name, "ab");
char buf[2048];
static RK_U32 num = 0;
RK_S32 buf_len = 0, buf_size = sizeof(buf) - 1;
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "cnt %d frame_num %d field %d bottom %d\n",
num++, pp->frame_num, pp->field_pic_flag, pp->CurrPic.AssociatedFlag);
#endif
for (i = 0, j = 0xff; i < MPP_ARRAY_ELEMS(pp->RefFrameList); i++) {
RK_U32 val = 0;
RK_U32 top_closer = 0;
RK_U32 field_flag = 0;
RK_S32 cur_poc = 0;
RK_U32 used_flag = 0;
if (pp->RefFrameList[i].bPicEntry != 0xff) {
mpp_buf_slot_get_prop(p_hal->frame_slots,
pp->RefFrameList[i].Index7Bits,
SLOT_BUFFER, &frame_buf); //!< reference phy addr
j = i;
#if DEBUG_REF_LIST
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "refPicList[%d], frame_num=%d, poc0=%d, poc1=%d\n",
i, pp->FrameNumList[i], pp->FieldOrderCntList[i][0], pp->FieldOrderCntList[i][1]);
#endif
} else {
mpp_buf_slot_get_prop(p_hal->frame_slots,
pp->CurrPic.Index7Bits,
SLOT_BUFFER, &frame_buf); //!< current out phy addr
}
field_flag = ((pp->RefPicFiledFlags >> i) & 0x1) ? 0x2 : 0;
cur_poc = pp->CurrPic.AssociatedFlag
? pp->CurrFieldOrderCnt[1] : pp->CurrFieldOrderCnt[0];
used_flag = ((pp->UsedForReferenceFlags >> (2 * i)) & 0x3);
if (used_flag & 0x3) {
top_closer = MPP_ABS(pp->FieldOrderCntList[i][0] - cur_poc) <
MPP_ABS(pp->FieldOrderCntList[i][1] - cur_poc) ? 0x1 : 0;
} else if (used_flag & 0x2) {
top_closer = 0;
} else if (used_flag & 0x1) {
top_closer = 1;
}
val = top_closer | field_flag;
if (val) {
mpp_dev_set_reg_offset(p_hal->dev, vdpu2_ref_idx[i], val);
#if DEBUG_REF_LIST
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "ref_offset[%d] %d\n",
i, val);
#endif
}
set_refer_pic_base_addr(p_regs, i, mpp_buffer_get_fd(frame_buf));
}
#if DEBUG_REF_LIST
fprintf(fp, "%s\n", buf);
fflush(fp);
fclose(fp);
#endif
}
/* inter-view reference picture */
{
H264dVdpuPriv_t *priv = (H264dVdpuPriv_t *)p_hal->priv;
if (pp->curr_layer_id && priv->ilt_dpb && priv->ilt_dpb->valid) {
mpp_buf_slot_get_prop(p_hal->frame_slots,
priv->ilt_dpb->slot_index,
SLOT_BUFFER, &frame_buf);
p_regs->sw99.ref15_st_addr = mpp_buffer_get_fd(frame_buf); //!< inter-view base, ref15
p_regs->sw108.refpic_valid_flag |= (pp->field_pic_flag
? 0x3 : 0x10000);
}
}
p_regs->sw50.dec_fixed_quant = pp->curr_layer_id; //!< VDPU_MVC_E
p_regs->sw50.dblk_flt_dis = 0; //!< filterDisable = 0;
mpp_buf_slot_get_prop(p_hal->frame_slots,
pp->CurrPic.Index7Bits,
SLOT_BUFFER, &frame_buf); //!< current out phy addr
outPhyAddr = mpp_buffer_get_fd(frame_buf);
if (pp->field_pic_flag && pp->CurrPic.AssociatedFlag) {
mpp_dev_set_reg_offset(p_hal->dev, 63, ((pp->wFrameWidthInMbsMinus1 + 1) * 16));
}
p_regs->sw63.dec_out_st_adr = outPhyAddr; //!< outPhyAddr, pp->CurrPic.Index7Bits
p_regs->sw110.flt_offset_cb_qp = pp->chroma_qp_index_offset;
p_regs->sw110.flt_offset_cr_qp = pp->second_chroma_qp_index_offset;
/* set default value for register[41] to avoid illegal translation fd */
{
RK_U32 dirMvOffset = 0;
RK_U32 picSizeInMbs = 0;
picSizeInMbs = p_hal->pp->wFrameWidthInMbsMinus1 + 1;
picSizeInMbs = picSizeInMbs
* (2 - pp->frame_mbs_only_flag) * (pp->wFrameHeightInMbsMinus1 + 1);
dirMvOffset = picSizeInMbs
* ((p_hal->pp->chroma_format_idc == 0) ? 256 : 384);
dirMvOffset += (pp->field_pic_flag && pp->CurrPic.AssociatedFlag)
? (picSizeInMbs * 32) : 0;
if (dirMvOffset) {
RK_U32 offset = mpp_get_ioctl_version() ? dirMvOffset : dirMvOffset >> 4;
mpp_dev_set_reg_offset(p_hal->dev, 62, offset);
}
p_regs->sw62.dmmv_st_adr = mpp_buffer_get_fd(frame_buf);
}
p_regs->sw57.dmmv_wr_en = (p_long->nal_ref_idc != 0) ? 1 : 0; //!< defalut set 1
p_regs->sw115.dlmv_method_en = pp->direct_8x8_inference_flag;
p_regs->sw115.weight_pred_en = pp->weighted_pred_flag;
p_regs->sw111.wp_bslice_sel = pp->weighted_bipred_idc;
p_regs->sw114.max_refidx1 = (pp->num_ref_idx_l1_active_minus1 + 1);
p_regs->sw115.fieldpic_flag_exist = (!pp->frame_mbs_only_flag) ? 1 : 0;
p_regs->sw57.curpic_code_sel = (!pp->frame_mbs_only_flag
&& (pp->MbaffFrameFlag || pp->field_pic_flag)) ? 1 : 0;
p_regs->sw57.curpic_stru_sel = pp->field_pic_flag;
p_regs->sw57.pic_decfield_sel = (!pp->CurrPic.AssociatedFlag) ? 1 : 0; //!< bottomFieldFlag
p_regs->sw57.sequ_mbaff_en = pp->MbaffFrameFlag;
p_regs->sw115.tranf_8x8_flag_en = pp->transform_8x8_mode_flag;
p_regs->sw115.monochr_en = (p_long->profileIdc >= 100
&& pp->chroma_format_idc == 0) ? 1 : 0;
p_regs->sw115.scl_matrix_en = pp->scaleing_list_enable_flag;
{
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
if (p_hal->pp->scaleing_list_enable_flag) {
RK_U32 temp = 0;
RK_U32 *ptr = (RK_U32 *)reg_ctx->sclst_ptr;
for (i = 0; i < 6; i++) {
for (j = 0; j < 4; j++) {
temp = (p_hal->qm->bScalingLists4x4[i][4 * j + 0] << 24) |
(p_hal->qm->bScalingLists4x4[i][4 * j + 1] << 16) |
(p_hal->qm->bScalingLists4x4[i][4 * j + 2] << 8) |
(p_hal->qm->bScalingLists4x4[i][4 * j + 3]);
*ptr++ = temp;
}
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 16; j++) {
temp = (p_hal->qm->bScalingLists8x8[i][4 * j + 0] << 24) |
(p_hal->qm->bScalingLists8x8[i][4 * j + 1] << 16) |
(p_hal->qm->bScalingLists8x8[i][4 * j + 2] << 8) |
(p_hal->qm->bScalingLists8x8[i][4 * j + 3]);
*ptr++ = temp;
}
}
}
p_regs->sw61.qtable_st_adr = mpp_buffer_get_fd(reg_ctx->buf);
}
p_regs->sw57.dec_wr_extmen_dis = 0; //!< set defalut 0
p_regs->sw57.addit_ch_fmt_wen = 0;
p_regs->sw57.dec_st_work = 1;
return ret = MPP_OK;
}
/*!
***********************************************************************
* \brief
* init VDPU granite decoder
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_init(void *hal, MppHalCfg *cfg)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
INP_CHECK(ret, NULL == hal);
MEM_CHECK(ret, p_hal->priv = mpp_calloc_size(void,
sizeof(H264dVdpuPriv_t)));
MEM_CHECK(ret, p_hal->reg_ctx = mpp_calloc_size(void, sizeof(H264dVdpuRegCtx_t)));
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
//!< malloc buffers
{
RK_U32 i = 0;
RK_U32 loop = p_hal->fast_mode ? MPP_ARRAY_ELEMS(reg_ctx->reg_buf) : 1;
RK_U32 buf_size = VDPU_CABAC_TAB_SIZE + VDPU_POC_BUF_SIZE + VDPU_SCALING_LIST_SIZE;
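/* Each task buffer holds the CABAC table, the POC list and the scaling lists back to back;
 * the cabac_ptr / poc_ptr / sclst_ptr pointers below simply slice that single buffer. */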
for (i = 0; i < loop; i++) {
FUN_CHECK(ret = mpp_buffer_get(p_hal->buf_group, &reg_ctx->reg_buf[i].buf, buf_size));
reg_ctx->reg_buf[i].cabac_ptr = mpp_buffer_get_ptr(reg_ctx->reg_buf[i].buf);
reg_ctx->reg_buf[i].poc_ptr = reg_ctx->reg_buf[i].cabac_ptr + VDPU_CABAC_TAB_SIZE;
reg_ctx->reg_buf[i].sclst_ptr = reg_ctx->reg_buf[i].poc_ptr + VDPU_POC_BUF_SIZE;
reg_ctx->reg_buf[i].regs = mpp_calloc_size(void, sizeof(H264dVdpuRegs_t));
//!< copy cabac table bytes
memcpy(reg_ctx->reg_buf[i].cabac_ptr, (void *)vdpu_cabac_table, sizeof(vdpu_cabac_table));
}
}
if (!p_hal->fast_mode) {
reg_ctx->buf = reg_ctx->reg_buf[0].buf;
reg_ctx->cabac_ptr = reg_ctx->reg_buf[0].cabac_ptr;
reg_ctx->poc_ptr = reg_ctx->reg_buf[0].poc_ptr;
reg_ctx->sclst_ptr = reg_ctx->reg_buf[0].sclst_ptr;
reg_ctx->regs = reg_ctx->reg_buf[0].regs;
}
mpp_slots_set_prop(p_hal->frame_slots, SLOTS_HOR_ALIGN, vdpu_hor_align);
mpp_slots_set_prop(p_hal->frame_slots, SLOTS_VER_ALIGN, vdpu_ver_align);
{
// report hw_info to parser
const MppSocInfo *info = mpp_get_soc_info();
const void *hw_info = NULL;
RK_U32 i;
for (i = 0; i < MPP_ARRAY_ELEMS(info->dec_caps); i++) {
if (info->dec_caps[i] && info->dec_caps[i]->type == VPU_CLIENT_VDPU2) {
hw_info = info->dec_caps[i];
break;
}
}
mpp_assert(hw_info);
cfg->hw_info = hw_info;
}
__RETURN:
return MPP_OK;
__FAILED:
vdpu2_h264d_deinit(hal);
return ret;
}
/*!
***********************************************************************
* \brief
* deinit
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_deinit(void *hal)
{
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
RK_U32 i = 0;
RK_U32 loop = p_hal->fast_mode ? MPP_ARRAY_ELEMS(reg_ctx->reg_buf) : 1;
for (i = 0; i < loop; i++) {
MPP_FREE(reg_ctx->reg_buf[i].regs);
mpp_buffer_put(reg_ctx->reg_buf[i].buf);
}
MPP_FREE(p_hal->reg_ctx);
MPP_FREE(p_hal->priv);
return MPP_OK;
}
/*!
***********************************************************************
* \brief
* generate register
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_gen_regs(void *hal, HalTaskInfo *task)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dVdpuPriv_t *priv = NULL;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
INP_CHECK(ret, NULL == p_hal);
p_hal->in_task = &task->dec;
if (task->dec.flags.parse_err ||
task->dec.flags.ref_err) {
goto __RETURN;
}
priv = p_hal->priv;
priv->layed_id = p_hal->pp->curr_layer_id;
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
if (p_hal->fast_mode) {
RK_U32 i = 0;
for (i = 0; i < MPP_ARRAY_ELEMS(reg_ctx->reg_buf); i++) {
if (!reg_ctx->reg_buf[i].valid) {
task->dec.reg_index = i;
reg_ctx->buf = reg_ctx->reg_buf[i].buf;
reg_ctx->cabac_ptr = reg_ctx->reg_buf[i].cabac_ptr;
reg_ctx->poc_ptr = reg_ctx->reg_buf[i].poc_ptr;
reg_ctx->sclst_ptr = reg_ctx->reg_buf[i].sclst_ptr;
reg_ctx->regs = reg_ctx->reg_buf[i].regs;
reg_ctx->reg_buf[i].valid = 1;
break;
}
}
}
FUN_CHECK(ret = adjust_input(priv, &p_hal->slice_long[0], p_hal->pp));
FUN_CHECK(ret = set_device_regs(p_hal, (H264dVdpuRegs_t *)reg_ctx->regs));
FUN_CHECK(ret = set_pic_regs(p_hal, (H264dVdpuRegs_t *)reg_ctx->regs));
FUN_CHECK(ret = set_vlc_regs(p_hal, (H264dVdpuRegs_t *)reg_ctx->regs));
FUN_CHECK(ret = set_ref_regs(p_hal, (H264dVdpuRegs_t *)reg_ctx->regs));
FUN_CHECK(ret = set_asic_regs(p_hal, (H264dVdpuRegs_t *)reg_ctx->regs));
__RETURN:
return ret = MPP_OK;
__FAILED:
return ret;
}
/*!
***********************************************************************
* \brief
* start the hardware decoder
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_start(void *hal, HalTaskInfo *task)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
H264dVdpuRegs_t *p_regs = p_hal->fast_mode ?
(H264dVdpuRegs_t *)reg_ctx->reg_buf[task->dec.reg_index].regs :
(H264dVdpuRegs_t *)reg_ctx->regs;
RK_U32 w = p_regs->sw110.pic_mb_w * 16;
RK_U32 h = p_regs->sw110.pic_mb_h * 16;
RK_U32 cache_en = 1;
const char *soc_name = NULL;
if (task->dec.flags.parse_err ||
task->dec.flags.ref_err) {
goto __RETURN;
}
soc_name = mpp_get_soc_name();
if (strstr(soc_name, "rk3326") || strstr(soc_name, "px30") || strstr(soc_name, "rk3228H"))
cache_en = ((w * h) >= (1280 * 720)) ? 1 : 0;
p_regs->sw57.cache_en = cache_en;
p_regs->sw57.pref_sigchan = 1;
p_regs->sw56.bus_pos_sel = 1;
p_regs->sw57.intra_dbl3t = 1;
p_regs->sw57.inter_dblspeed = 1;
p_regs->sw57.intra_dblspeed = 1;
#if DEBUG_REF_LIST
{
char file_name[128];
sprintf(file_name, "/sdcard/test/mpp2_reg_dump_log.txt");
FILE *fp = fopen(file_name, "ab");
char buf[2048];
RK_S32 buf_len = 0, buf_size = sizeof(buf) - 1;
RK_U32 *reg_tmp = (RK_U32*)reg_ctx->regs;
RK_U32 i;
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "=== reg dump fram_num %d ===\n",
p_hal->pp->frame_num);
for (i = 50; i < 116; i++) {
buf_len += snprintf(buf + buf_len, buf_size - buf_len, "Regs[%d] = 0x%08x\n", i, reg_tmp[i]);
}
fprintf(fp, "%s", buf);
fflush(fp);
fclose(fp);
}
#endif
do {
MppDevRegWrCfg wr_cfg;
MppDevRegRdCfg rd_cfg;
RK_U32 reg_size = DEC_VDPU_REGISTERS * sizeof(RK_U32);
wr_cfg.reg = reg_ctx->regs;
wr_cfg.size = reg_size;
wr_cfg.offset = 0;
ret = mpp_dev_ioctl(p_hal->dev, MPP_DEV_REG_WR, &wr_cfg);
if (ret) {
mpp_err_f("set register write failed %d\n", ret);
break;
}
rd_cfg.reg = reg_ctx->regs;
rd_cfg.size = reg_size;
rd_cfg.offset = 0;
ret = mpp_dev_ioctl(p_hal->dev, MPP_DEV_REG_RD, &rd_cfg);
if (ret) {
mpp_err_f("set register read failed %d\n", ret);
break;
}
ret = mpp_dev_ioctl(p_hal->dev, MPP_DEV_CMD_SEND, NULL);
if (ret) {
mpp_err_f("send cmd failed %d\n", ret);
break;
}
} while (0);
__RETURN:
(void)task;
return ret = MPP_OK;
}
/*!
***********************************************************************
* \brief
* wait for the hardware decoder to finish
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_wait(void *hal, HalTaskInfo *task)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
H264dVdpuRegCtx_t *reg_ctx = (H264dVdpuRegCtx_t *)p_hal->reg_ctx;
H264dVdpuRegs_t *p_regs = (H264dVdpuRegs_t *)(p_hal->fast_mode ?
reg_ctx->reg_buf[task->dec.reg_index].regs :
reg_ctx->regs);
if (task->dec.flags.parse_err ||
task->dec.flags.ref_err) {
goto __SKIP_HARD;
}
ret = mpp_dev_ioctl(p_hal->dev, MPP_DEV_CMD_POLL, NULL);
if (ret)
mpp_err_f("poll cmd failed %d\n", ret);
__SKIP_HARD:
if (p_hal->dec_cb) {
DecCbHalDone param;
param.task = (void *)&task->dec;
param.regs = (RK_U32 *)reg_ctx->regs;
param.hard_err = !p_regs->sw55.dec_rdy_sts;
mpp_callback(p_hal->dec_cb, &param);
}
memset(&p_regs->sw55, 0, sizeof(RK_U32));
if (p_hal->fast_mode) {
reg_ctx->reg_buf[task->dec.reg_index].valid = 0;
}
(void)task;
return ret = MPP_OK;
}
/*!
***********************************************************************
* \brief
* reset
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_reset(void *hal)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
INP_CHECK(ret, NULL == p_hal);
memset(p_hal->priv, 0, sizeof(H264dVdpuPriv_t));
__RETURN:
return ret = MPP_OK;
}
/*!
***********************************************************************
* \brief
* flush
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_flush(void *hal)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
INP_CHECK(ret, NULL == p_hal);
__RETURN:
return ret = MPP_OK;
}
/*!
***********************************************************************
* \brief
* control
***********************************************************************
*/
//extern "C"
MPP_RET vdpu2_h264d_control(void *hal, MpiCmd cmd_type, void *param)
{
MPP_RET ret = MPP_ERR_UNKNOW;
H264dHalCtx_t *p_hal = (H264dHalCtx_t *)hal;
INP_CHECK(ret, NULL == p_hal);
(void)hal;
(void)cmd_type;
(void)param;
__RETURN:
return ret = MPP_OK;
}
| 21,088 |
945 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cluster.log.manage;
import org.apache.iotdb.cluster.log.Log;
import org.apache.iotdb.cluster.log.Snapshot;
import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog;
import org.apache.iotdb.cluster.log.snapshot.SimpleSnapshot;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class UnCommittedEntryManagerTest {
static class UnCommitEntryManagerTesterBase {
public List<Log> entries;
public long offset;
public UnCommitEntryManagerTesterBase(List<Log> entries, long offset) {
this.entries = entries;
this.offset = offset;
}
}
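// Each test case below pairs the manager's initial state (its uncommitted log entries plus the
// offset, i.e. the index of the first uncommitted entry) with the expected result of one call.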
@Test
public void getFirstUnCommittedIndex() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public long testOffset;
public UnCommittedEntryManagerTester(List<Log> entries, long offset, long testOffset) {
super(entries, offset);
this.testOffset = testOffset;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
}
},
0,
0));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
}
},
5,
5));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(6, 1));
}
},
5,
5));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
long index = instance.getFirstUnCommittedIndex();
assertEquals(test.testOffset, index);
}
}
@Test
public void maybeLastIndex() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public long testIndex;
public UnCommittedEntryManagerTester(List<Log> entries, long offset, long testIndex) {
super(entries, offset);
this.testIndex = testIndex;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
}
},
0,
-1));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
5));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
}
},
5,
6));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
long index = instance.maybeLastIndex();
assertEquals(test.testIndex, index);
}
}
@Test
public void maybeTerm() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public long index;
public long testTerm;
public Class throwClass;
public UnCommittedEntryManagerTester(
List<Log> entries, long offset, long index, long testTerm, Class throwClass) {
super(entries, offset);
this.index = index;
this.testTerm = testTerm;
this.throwClass = throwClass;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
5,
1,
null));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
4,
-1,
null));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 4));
}
},
5,
5,
1,
null));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 4));
}
},
5,
6,
4,
null));
// entries that have been committed;
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 4));
}
},
5,
4,
-1,
null));
// entries which are unavailable.
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
}
},
0,
0,
-1,
null));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
6,
-1,
null));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
try {
long term = instance.maybeTerm(test.index);
if (test.throwClass != null) {
fail("The expected exception is not thrown");
} else {
assertEquals(test.testTerm, term);
}
} catch (Exception e) {
if (!e.getClass().getName().equals(test.throwClass.getName())) {
fail("An unexpected exception was thrown.");
}
}
}
}
@Test
public void stableTo() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public long index;
public long testOffset;
public long testLen;
public UnCommittedEntryManagerTester(
List<Log> entries, long offset, long index, long testOffset, long testLen) {
super(entries, offset);
this.index = index;
this.testOffset = testOffset;
this.testLen = testLen;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
// stable to the first entry
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
5,
6,
0));
// stable to the first entry
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
}
},
5,
5,
6,
1));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
instance.stableTo(test.index);
assertEquals(test.testOffset, instance.getFirstUnCommittedIndex());
assertEquals(test.testLen, instance.getAllEntries().size());
}
}
@Test
public void applyingSnapshot() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public Snapshot snapshot;
public long testOffset;
public UnCommittedEntryManagerTester(
List<Log> entries, long offset, Snapshot snapshot, long testOffset) {
super(entries, offset);
this.snapshot = snapshot;
this.testOffset = testOffset;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
// empty entries
add(
new UnCommittedEntryManagerTester(
new ArrayList<>(), 5, new SimpleSnapshot(6, 6), 7));
// normal case
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new SimpleSnapshot(20, 20),
21));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
instance.applyingSnapshot(test.snapshot);
assertEquals(test.testOffset, instance.getFirstUnCommittedIndex());
assertEquals(0, instance.getAllEntries().size());
}
}
@Test
public void truncateAndAppendSingle() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public Log toAppend;
public long testOffset;
public List<Log> testEntries;
public UnCommittedEntryManagerTester(
List<Log> entries, long offset, Log toAppend, long testOffset, List<Log> testEntries) {
super(entries, offset);
this.toAppend = toAppend;
this.testOffset = testOffset;
this.testEntries = testEntries;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
// append to the end
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new EmptyContentLog(6, 1),
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
}
}));
// replace the uncommitted entries
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new EmptyContentLog(5, 2),
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 2));
}
}));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new EmptyContentLog(4, 2),
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
}));
// truncate the existing entries and append
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
},
5,
new EmptyContentLog(6, 2),
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 2));
}
}));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
},
5,
new EmptyContentLog(7, 2),
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 2));
}
}));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
instance.truncateAndAppend(test.toAppend);
assertEquals(test.testOffset, instance.getFirstUnCommittedIndex());
assertEquals(test.testEntries, instance.getAllEntries());
}
}
@Test
public void truncateAndAppendBatch() {
class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase {
public List<Log> toAppend;
public long testOffset;
public List<Log> testEntries;
public UnCommittedEntryManagerTester(
List<Log> entries,
long offset,
List<Log> toAppend,
long testOffset,
List<Log> testEntries) {
super(entries, offset);
this.toAppend = toAppend;
this.testOffset = testOffset;
this.testEntries = testEntries;
}
}
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
// append to the end
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
}));
// replace the uncommitted entries
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 2));
add(new EmptyContentLog(6, 2));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 2));
add(new EmptyContentLog(6, 2));
}
}));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(4, 2));
add(new EmptyContentLog(5, 2));
add(new EmptyContentLog(6, 2));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
}
}));
// truncate the existing entries and append
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(6, 2));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 2));
}
}));
add(
new UnCommittedEntryManagerTester(
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 1));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(7, 2));
add(new EmptyContentLog(8, 2));
}
},
5,
new ArrayList<Log>() {
{
add(new EmptyContentLog(5, 1));
add(new EmptyContentLog(6, 1));
add(new EmptyContentLog(7, 2));
add(new EmptyContentLog(8, 2));
}
}));
}
};
for (UnCommittedEntryManagerTester test : tests) {
UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries);
instance.truncateAndAppend(test.toAppend);
assertEquals(test.testOffset, instance.getFirstUnCommittedIndex());
assertEquals(test.testEntries, instance.getAllEntries());
}
}
@Test
public void getEntries() {
class UnCommittedEntryManagerTester {
public long low;
public long high;
public List<Log> testEntries;
public UnCommittedEntryManagerTester(long low, long high, List<Log> testEntries) {
this.low = low;
this.high = high;
this.testEntries = testEntries;
}
}
long offset = 100;
long num = 100;
long last = offset + num;
List<Log> entries = new ArrayList<>();
for (int i = 0; i < num; i++) {
entries.add(new EmptyContentLog(offset + i, offset + i));
}
UnCommittedEntryManager instance = new UnCommittedEntryManager(offset, entries);
List<UnCommittedEntryManagerTester> tests =
new ArrayList<UnCommittedEntryManagerTester>() {
{
add(new UnCommittedEntryManagerTester(offset, offset + num, entries));
add(
new UnCommittedEntryManagerTester(
offset - 1,
offset + 1,
new ArrayList<Log>() {
{
add(new EmptyContentLog(offset, offset));
}
}));
add(
new UnCommittedEntryManagerTester(
offset,
offset + 1,
new ArrayList<Log>() {
{
add(new EmptyContentLog(offset, offset));
}
}));
add(
new UnCommittedEntryManagerTester(
last - 1,
last,
new ArrayList<Log>() {
{
add(new EmptyContentLog(last - 1, last - 1));
}
}));
add(
new UnCommittedEntryManagerTester(
last - 1,
last + 1,
new ArrayList<Log>() {
{
add(new EmptyContentLog(last - 1, last - 1));
}
}));
add(new UnCommittedEntryManagerTester(offset, offset, new ArrayList<>()));
add(new UnCommittedEntryManagerTester(last, last + 1, new ArrayList<>()));
add(new UnCommittedEntryManagerTester(last + 1, last + 1, new ArrayList<>()));
}
};
for (UnCommittedEntryManagerTester test : tests) {
List<Log> answer = instance.getEntries(test.low, test.high);
assertEquals(test.testEntries, answer);
}
}
}
| 13,716 |
1,285 | <reponame>PartyPackage/Minestom
package net.minestom.server.adventure;
import java.io.StringReader;
import net.kyori.adventure.util.Codec;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Locale;
import java.util.Objects;
import org.jglrxavpok.hephaistos.nbt.NBT;
import org.jglrxavpok.hephaistos.nbt.NBTException;
import org.jglrxavpok.hephaistos.nbt.SNBTParser;
/**
* Adventure related constants, etc.
*/
public final class MinestomAdventure {
/**
* A codec to convert between strings and NBT.
*/
public static final Codec<NBT, String, NBTException, RuntimeException> NBT_CODEC
= Codec.of(encoded -> new SNBTParser(new StringReader(encoded)).parse(), NBT::toSNBT);
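// Usage sketch (illustrative only; the SNBT literal is an arbitrary example):
//   NBT nbt = MinestomAdventure.NBT_CODEC.decode("{count:1b}");   // may throw NBTException
//   String snbt = MinestomAdventure.NBT_CODEC.encode(nbt);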
/**
* Whether components should be automatically translated in outgoing packets.
*/
public static boolean AUTOMATIC_COMPONENT_TRANSLATION = false;
static final Localizable NULL_LOCALIZABLE = () -> null;
private static Locale defaultLocale = Locale.getDefault();
private MinestomAdventure() {
}
/**
* Gets the default locale used to translate components when no overriding locale has been provided.
*
* @return the default locale
*/
public static @NotNull Locale getDefaultLocale() {
return defaultLocale;
}
/**
* Sets the default locale used to translate components when no overriding locale has been provided.
*
* @param defaultLocale the new default, or {@code null} to return to {@link Locale#getDefault()}
*/
public static void setDefaultLocale(@Nullable Locale defaultLocale) {
MinestomAdventure.defaultLocale = Objects.requireNonNullElseGet(defaultLocale, Locale::getDefault);
}
}
| 612 |
319 | /**
* Copyright (c) 2011, The University of Southampton and the individual contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the University of Southampton nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openimaj.hadoop.tools.twitter.token.outputmode.jacard;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringReader;
import java.util.Scanner;
import org.openimaj.io.ReadWriteableASCII;
import com.Ostermiller.util.CSVParser;
import com.Ostermiller.util.CSVPrinter;
/**
 * An index encoding the similarity (overlap) between two sets
*
* @author <NAME> (<EMAIL>)
*
*/
public class JacardIndex implements ReadWriteableASCII {
/**
* The number of words forming the intersection between now and historic
* words
*/
public long intersection;
/**
* The number of words forming the union between now and historic words
*/
public long union;
/**
* current time period
*/
public long time;
/**
* The jacard index is: J(A,B) = |intersection(A,B)| / |union(A,B)| for this
* time period
*/
public double jacardIndex;
/**
* @param time
* @param intersection
* @param union
*/
public JacardIndex(long time, long intersection, long union) {
this.time = time;
this.intersection = intersection;
this.union = union;
this.jacardIndex = (double) intersection / (double) union;
}
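	// Worked example (illustrative, not part of the original class): two word sets sharing
	// 2 of 4 distinct words give J(A,B) = 2 / 4 = 0.5.
	//     JacardIndex index = new JacardIndex(0L, 2L, 4L);
	//     assert index.jacardIndex == 0.5;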
private JacardIndex() {
}
@Override
public void readASCII(Scanner in) throws IOException {
fromString(in.nextLine(), this);
}
private static void fromString(String nextLine, JacardIndex i) throws IOException {
final StringReader reader = new StringReader(nextLine);
final CSVParser csvreader = new CSVParser(reader);
final String[] line = csvreader.getLine();
i.time = Long.parseLong(line[0]);
i.intersection = Long.parseLong(line[1]);
i.union = Long.parseLong(line[2]);
i.jacardIndex = (double) i.intersection / (double) i.union;
}
@Override
public String asciiHeader() {
return "";
}
@Override
public void writeASCII(PrintWriter out) throws IOException {
final CSVPrinter writer = new CSVPrinter(out);
writer.write(new String[] {
"" + this.time,
"" + intersection,
"" + union
});
}
/**
* Read a new jacard index from a comma separated line
*
* @param next
* @return new JacardIndex
* @throws IOException
*/
public static JacardIndex fromString(String next) throws IOException {
final JacardIndex ind = new JacardIndex();
fromString(next, ind);
return ind;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof JacardIndex))
return false;
final JacardIndex that = (JacardIndex) other;
return that.intersection == this.intersection && that.union == this.union;
}
}
| 1,296 |
352 | <filename>tvaultapi/src/main/java/com/tmobile/cso/vault/api/process/Response.java
// =========================================================================
// Copyright 2019 T-Mobile, US
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// See the readme.txt file for additional language around disclaimer of warranties.
// =========================================================================
package com.tmobile.cso.vault.api.process;
import java.util.List;
import org.springframework.http.HttpStatus;
public class Response {
	boolean success;
	String response;
	List<String> adminPolicies;
	HttpStatus httpstatus;
	public void setSuccess(boolean success) {
		this.success = success;
	}
	public void setResponse(String response) {
		this.response = response;
	}
	public void setHttpstatus(HttpStatus httpstatus) {
		this.httpstatus = httpstatus;
	}
public HttpStatus getHttpstatus() {
return httpstatus;
}
public boolean isSuccess() {
return success;
}
public String getResponse() {
if (response != null) {
return response;
}
else {
return "{} ";
}
}
/**
* @return the adminPolicies
*/
public List<String> getAdminPolicies() {
return adminPolicies;
}
/**
* @param adminPolicies the adminPolicies to set
*/
public void setAdminPolicies(List<String> adminPolicies) {
this.adminPolicies = adminPolicies;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "Response [success=" + success + ", response=" + response + ", adminPolicies=" + adminPolicies
+ ", httpstatus=" + httpstatus + "]";
}
}
| 645 |
14,668 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_SIGNIN_SIGNIN_PROMO_H_
#define CHROME_BROWSER_SIGNIN_SIGNIN_PROMO_H_
#include <string>
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "components/signin/public/base/signin_metrics.h"
class GURL;
namespace content {
class BrowserContext;
class StoragePartition;
} // namespace content
namespace user_prefs {
class PrefRegistrySyncable;
}
// Utility functions for sign in promos.
namespace signin {
extern const char kSignInPromoQueryKeyAccessPoint[];
// TODO(https://crbug.com/1205147): Auto close is unused. Remove it.
extern const char kSignInPromoQueryKeyAutoClose[];
extern const char kSignInPromoQueryKeyForceKeepData[];
extern const char kSignInPromoQueryKeyReason[];
#if !BUILDFLAG(IS_CHROMEOS_ASH)
// These functions are only used to unlock the profile from the desktop user
// manager and the windows credential provider.
// Returns the sign in promo URL that can be used in a modal dialog with
// the given arguments in the query.
// |access_point| indicates where the sign in is being initiated.
// |reason| indicates the purpose of using this URL.
// |auto_close| whether to close the sign in promo automatically when done.
GURL GetEmbeddedPromoURL(signin_metrics::AccessPoint access_point,
signin_metrics::Reason reason,
bool auto_close);
// Returns a sign in promo URL specifically for reauthenticating |email| that
// can be used in a modal dialog.
GURL GetEmbeddedReauthURLWithEmail(signin_metrics::AccessPoint access_point,
signin_metrics::Reason reason,
const std::string& email);
#endif // !BUILDFLAG(IS_CHROMEOS_ASH)
// Returns the URL to be used to signin and turn on Sync when DICE is enabled.
// If email is not empty, then it will pass email as hint to the page so that it
// will be autofilled by Gaia.
// If |continue_url| is empty, this may redirect to myaccount.
GURL GetChromeSyncURLForDice(const std::string& email,
const std::string& continue_url);
// Returns the URL to be used to add (secondary) account when DICE is enabled.
// If email is not empty, then it will pass email as hint to the page so that it
// will be autofilled by Gaia.
// If |continue_url| is empty, this may redirect to myaccount.
GURL GetAddAccountURLForDice(const std::string& email,
const std::string& continue_url);
// Gets the partition for the embedded sign in frame/webview.
content::StoragePartition* GetSigninPartition(
content::BrowserContext* browser_context);
// Gets the access point from the query portion of the sign in promo URL.
signin_metrics::AccessPoint GetAccessPointForEmbeddedPromoURL(const GURL& url);
// Gets the sign in reason from the query portion of the sign in promo URL.
signin_metrics::Reason GetSigninReasonForEmbeddedPromoURL(const GURL& url);
// Registers the preferences the Sign In Promo needs.
void RegisterProfilePrefs(user_prefs::PrefRegistrySyncable* registry);
} // namespace signin
#endif // CHROME_BROWSER_SIGNIN_SIGNIN_PROMO_H_
| 1,107 |
575 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file SampleRejectedStatus.hpp
*/
#ifndef _FASTDDS_DDS_QOS_SAMPLEREJECTEDSTATUS_HPP_
#define _FASTDDS_DDS_QOS_SAMPLEREJECTEDSTATUS_HPP_
#include <cstdint>
#include <fastdds/dds/topic/TypeSupport.hpp>
namespace eprosima {
namespace fastdds {
namespace dds {
//! An enum with the possible values for the sample rejected reason
enum SampleRejectedStatusKind
{
//!Default value
NOT_REJECTED,
//! Exceeds the max_instance limit
REJECTED_BY_INSTANCES_LIMIT,
//! Exceeds the max_samples limit
REJECTED_BY_SAMPLES_LIMIT,
//! Exceeds the max_samples_per_instance limit
REJECTED_BY_SAMPLES_PER_INSTANCE_LIMIT
};
//! @brief A struct storing the sample rejected status
struct SampleRejectedStatus
{
/**
* Total cumulative count of samples rejected by the DataReader.
*/
uint32_t total_count = 0;
/**
* The incremental number of samples rejected since the last time the listener was called or the status was read.
*/
uint32_t total_count_change = 0;
/**
* Reason for rejecting the last sample rejected.
* If no samples have been rejected, the reason is the special value NOT_REJECTED.
*/
SampleRejectedStatusKind last_reason = NOT_REJECTED;
/**
* Handle to the instance being updated by the last sample that was rejected.
*/
InstanceHandle_t last_instance_handle;
};
} //namespace dds
} //namespace fastdds
} //namespace eprosima
#endif // _FASTDDS_DDS_QOS_SAMPLEREJECTEDSTATUS_HPP_
| 722 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace TracePoints
{
class ReaderWriterLockSlim : private DenyCopy
{
public:
ReaderWriterLockSlim()
: lock_()
{
InitializeSRWLock(&lock_);
}
~ReaderWriterLockSlim()
{
}
void AcquireShared()
{
AcquireSRWLockShared(&lock_);
}
void ReleaseShared()
{
ReleaseSRWLockShared(&lock_);
}
void AcquireExclusive()
{
AcquireSRWLockExclusive(&lock_);
}
void ReleaseExclusive()
{
ReleaseSRWLockExclusive(&lock_);
}
private:
SRWLOCK lock_;
};
}
| 432 |
1,538 | /* ______ ___ ___
* /\ _ \ /\_ \ /\_ \
* \ \ \L\ \\//\ \ \//\ \ __ __ _ __ ___
* \ \ __ \ \ \ \ \ \ \ /'__`\ /'_ `\/\`'__\/ __`\
* \ \ \/\ \ \_\ \_ \_\ \_/\ __//\ \L\ \ \ \//\ \L\ \
* \ \_\ \_\/\____\/\____\ \____\ \____ \ \_\\ \____/
* \/_/\/_/\/____/\/____/\/____/\/___L\ \/_/ \/___/
* /\____/
* \_/__/
*
* Memory management routines.
*
* By <NAME>.
*
* See readme.txt for copyright information.
*/
#include "allegro5/allegro.h"
/* globals */
static ALLEGRO_MEMORY_INTERFACE *mem = NULL;
/* Function: al_set_memory_interface
*/
void al_set_memory_interface(ALLEGRO_MEMORY_INTERFACE *memory_interface)
{
mem = memory_interface;
}
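/* Illustrative sketch (not part of the Allegro sources): installing a custom interface.
 * The mi_* member names match the calls used below; the counting logic and the use of
 * designated initializers for the remaining members are assumptions.
 *
 *    static void *counting_malloc(size_t n, int line, const char *file, const char *func)
 *    { (void)line; (void)file; (void)func; return malloc(n); }
 *    ...define mi_free, mi_realloc and mi_calloc the same way...
 *
 *    static ALLEGRO_MEMORY_INTERFACE iface = { .mi_malloc = counting_malloc, ... };
 *    al_set_memory_interface(&iface);
 */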
/* Function: al_malloc_with_context
*/
void *al_malloc_with_context(size_t n,
int line, const char *file, const char *func)
{
if (mem)
return mem->mi_malloc(n, line, file, func);
else
return malloc(n);
}
/* Function: al_free_with_context
*/
void al_free_with_context(void *ptr,
int line, const char *file, const char *func)
{
if (mem)
mem->mi_free(ptr, line, file, func);
else
free(ptr);
}
/* Function: al_realloc_with_context
*/
void *al_realloc_with_context(void *ptr, size_t n,
int line, const char *file, const char *func)
{
if (mem)
return mem->mi_realloc(ptr, n, line, file, func);
else
return realloc(ptr, n);
}
/* Function: al_calloc_with_context
*/
void *al_calloc_with_context(size_t count, size_t n,
int line, const char *file, const char *func)
{
if (mem)
return mem->mi_calloc(count, n, line, file, func);
else
return calloc(count, n);
}
/* vim: set ts=8 sts=3 sw=3 et: */
| 904 |
5,169 | {
"name": "YLPNeon",
"version": "1.0.0",
"summary": "Harness the power of Frame with a simplified, chainable and expressive syntax. Supporting iOS Layout!",
"homepage": "https://github.com/Jofranks/YLPNeon",
"license": "MIT",
"authors": {
"Peter": "<EMAIL>"
},
"platforms": {
"ios": "5.0"
},
"source": {
"git": "https://github.com/Jofranks/YLPNeon.git",
"tag": "1.0.0"
},
"source_files": "YLPNeon/YLPNeon.h",
"frameworks": "UIKit",
"requires_arc": true
}
| 216 |
23,220 | package com.alibaba.otter.canal.client.adapter.support;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Type conversion utility class
 *
 * @author rewerma 2018-8-19 06:14:23 PM
* @version 1.0.0
*/
public class JdbcTypeUtil {
private static Logger logger = LoggerFactory.getLogger(JdbcTypeUtil.class);
public static Object getRSData(ResultSet rs, String columnName, int jdbcType) throws SQLException {
if (jdbcType == Types.BIT || jdbcType == Types.BOOLEAN) {
return rs.getByte(columnName);
} else {
return rs.getObject(columnName);
}
}
public static Class<?> jdbcType2javaType(int jdbcType) {
switch (jdbcType) {
case Types.BIT:
case Types.BOOLEAN:
// return Boolean.class;
case Types.TINYINT:
return Byte.TYPE;
case Types.SMALLINT:
return Short.class;
case Types.INTEGER:
return Integer.class;
case Types.BIGINT:
return Long.class;
case Types.DECIMAL:
case Types.NUMERIC:
return BigDecimal.class;
case Types.REAL:
return Float.class;
case Types.FLOAT:
case Types.DOUBLE:
return Double.class;
case Types.CHAR:
case Types.VARCHAR:
case Types.LONGVARCHAR:
return String.class;
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
case Types.BLOB:
return byte[].class;
case Types.DATE:
return java.sql.Date.class;
case Types.TIME:
return Time.class;
case Types.TIMESTAMP:
return Timestamp.class;
default:
return String.class;
}
}
private static boolean isText(String columnType) {
return "LONGTEXT".equalsIgnoreCase(columnType) || "MEDIUMTEXT".equalsIgnoreCase(columnType)
|| "TEXT".equalsIgnoreCase(columnType) || "TINYTEXT".equalsIgnoreCase(columnType);
}
public static Object typeConvert(String tableName ,String columnName, String value, int sqlType, String mysqlType) {
if (value == null
|| (value.equals("") && !(isText(mysqlType) || sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR))) {
return null;
}
try {
Object res;
switch (sqlType) {
case Types.INTEGER:
res = Integer.parseInt(value);
break;
case Types.SMALLINT:
res = Short.parseShort(value);
break;
case Types.BIT:
case Types.TINYINT:
res = Byte.parseByte(value);
break;
case Types.BIGINT:
if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) {
res = new BigInteger(value);
} else {
res = Long.parseLong(value);
}
break;
// case Types.BIT:
case Types.BOOLEAN:
res = !"0".equals(value);
break;
case Types.DOUBLE:
case Types.FLOAT:
res = Double.parseDouble(value);
break;
case Types.REAL:
res = Float.parseFloat(value);
break;
case Types.DECIMAL:
case Types.NUMERIC:
res = new BigDecimal(value);
break;
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
case Types.BLOB:
res = value.getBytes("ISO-8859-1");
break;
case Types.DATE:
if (!value.startsWith("0000-00-00")) {
java.util.Date date = Util.parseDate(value);
if (date != null) {
res = new Date(date.getTime());
} else {
res = null;
}
} else {
res = null;
}
break;
case Types.TIME: {
java.util.Date date = Util.parseDate(value);
if (date != null) {
res = new Time(date.getTime());
} else {
res = null;
}
break;
}
case Types.TIMESTAMP:
if (!value.startsWith("0000-00-00")) {
java.util.Date date = Util.parseDate(value);
if (date != null) {
res = new Timestamp(date.getTime());
} else {
res = null;
}
} else {
res = null;
}
break;
case Types.CLOB:
default:
res = value;
break;
}
return res;
} catch (Exception e) {
logger.error("table: {} column: {}, failed convert type {} to {}", tableName, columnName, value, sqlType);
return value;
}
}
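    // Illustrative sketch (not part of the original class; table, column and value are assumptions):
    //     Object v = JdbcTypeUtil.typeConvert("t_user", "age", "30", Types.INTEGER, "int(11)");
    //     // v is an Integer equal to 30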
}
| 3,465 |
1,252 | /*
Copyright (C) 2010 <NAME> <<EMAIL>>
Permission to use, copy, modify, and distribute this software
and its documentation for any purpose and without fee is hereby
granted, provided that the above copyright notice appear in all
copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
#include <QImage>
#include <QMutex>
#include <QSize>
#include <QThread>
#include <QWaitCondition>
#include <complex>
enum MandelImpl {
VcImpl, ScalarImpl
};
class MandelBase : public QThread
{
Q_OBJECT
public:
void brot(const QSize &size, float x, float y, float scale);
protected:
MandelBase(QObject* _parent = 0);
~MandelBase();
void emitImage(const QImage &image, quint64 cycles) { emit ready(image, cycles); }
void run();
virtual void mandelMe(QImage &image, float x, float y, float scale, int maxIterations) = 0;
inline bool restart() const { return m_restart; }
signals:
void ready(const QImage &image, quint64 cycles);
private:
QMutex m_mutex;
QWaitCondition m_wait;
QSize m_size;
float m_x, m_y, m_scale;
bool m_restart;
bool m_abort;
};
template<MandelImpl Impl>
class Mandel : public MandelBase
{
public:
Mandel(QObject *_parent = 0);
protected:
void mandelMe(QImage &image, float x, float y, float scale, int maxIterations);
};
| 717 |
2,360 | <filename>var/spack/repos/builtin/packages/r-stringfish/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStringfish(RPackage):
"""Alt String Implementation
Provides an extendable, performant and multithreaded 'alt-string'
implementation backed by 'C++' vectors and strings."""
homepage = "https://github.com/traversc/stringfish"
url = "https://cloud.r-project.org/src/contrib/stringfish_0.14.2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/stringfish"
maintainers = ['dorton21']
version('0.14.2', sha256='9373cfc715cda1527fd20179435977b8e59e19d8c5ef82a31e519f93fb624ced')
depends_on('gmake', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rcppparallel', type=('build', 'run'))
| 397 |
694 | <reponame>akluth/io<gh_stars>100-1000
// metadoc Collector copyright <NAME>, 2006
// metadoc Collector license BSD revised
// metadoc Collector description A tricolor collector using a Baker treadmill.
#ifndef Collector_DEFINED
#define Collector_DEFINED 1
#include "CollectorMarker.h"
#ifdef __cplusplus
extern "C" {
#endif
#define COLLECTOR_FOREACH(self, v, code) \
COLLECTMARKER_FOREACH(self->whites, v, code;); \
COLLECTMARKER_FOREACH(self->grays, v, code;); \
COLLECTMARKER_FOREACH(self->blacks, v, code;);
//#define COLLECTOR_RECYCLE_FREED 1
#define COLLECTOR_USE_NONINCREMENTAL_MARK_SWEEP 1
typedef enum {
COLLECTOR_INITIAL_WHITE,
COLLECTOR_GRAY,
COLLECTOR_INITIAL_BLACK,
COLLECTOR_FREE
} CollectorColor;
typedef int(CollectorMarkFunc)(void *);
typedef void(CollectorWillFreeFunc)(void *);
typedef void(CollectorFreeFunc)(void *);
typedef int(CollectorCheckFunc)(void *);
typedef struct {
List *retainedValues;
void *markBeforeSweepValue;
int pauseCount;
CollectorMarker *blacks;
CollectorMarker *grays;
CollectorMarker *whites;
CollectorMarker *freed;
float marksPerAlloc;
float queuedMarks;
size_t allocated;
size_t allocatedSweepLevel;
float allocatedStep;
CollectorMarkFunc *markFunc;
CollectorWillFreeFunc *willFreeFunc;
CollectorFreeFunc *freeFunc;
long clocksUsed;
size_t sweepCount;
int debugOn;
int safeMode;
#ifdef COLLECTOR_USE_NONINCREMENTAL_MARK_SWEEP
int newMarkerCount;
int allocsPerSweep;
#endif
} Collector;
COLLECTOR_API Collector *Collector_new(void);
COLLECTOR_API void Collector_free(Collector *self);
COLLECTOR_API void Collector_check(Collector *self);
COLLECTOR_API void Collector_checkObjectPointers(
Collector *self); // if not 0, then memory is hosed
COLLECTOR_API void Collector_checkObjectsWith_(Collector *self,
CollectorCheckFunc *func);
COLLECTOR_API void Collector_setMarkBeforeSweepValue_(Collector *self, void *v);
// callbacks
COLLECTOR_API void Collector_setMarkFunc_(Collector *self,
CollectorMarkFunc *func);
COLLECTOR_API void Collector_setWillFreeFunc_(Collector *self,
CollectorWillFreeFunc *func);
COLLECTOR_API void Collector_setFreeFunc_(Collector *self,
CollectorFreeFunc *func);
// marks per alloc
COLLECTOR_API void Collector_setMarksPerAlloc_(Collector *self, float n);
COLLECTOR_API float Collector_marksPerAlloc(Collector *self);
// marks per sweep
COLLECTOR_API void Collector_setAllocatedStep_(Collector *self, float n);
COLLECTOR_API float Collector_allocatedStep(Collector *self);
#ifdef COLLECTOR_USE_NONINCREMENTAL_MARK_SWEEP
COLLECTOR_API void Collector_setAllocsPerSweep_(Collector *self, int n);
COLLECTOR_API float Collector_allocsPerSweep(Collector *self);
#endif
// debug
COLLECTOR_API void Collector_setDebug_(Collector *self, int b);
COLLECTOR_API void Collector_setSafeModeOn_(Collector *self, int b);
// retaining
COLLECTOR_API void *Collector_retain_(Collector *self, void *v);
COLLECTOR_API void Collector_stopRetaining_(Collector *self, void *v);
COLLECTOR_API void Collector_removeAllRetainedValues(Collector *self);
// adding
COLLECTOR_API CollectorMarker *Collector_newMarker(Collector *self);
COLLECTOR_API void Collector_addValue_(Collector *self, void *v);
// collection
COLLECTOR_API void Collector_initPhase(Collector *self);
COLLECTOR_API size_t Collector_sweep(Collector *self);
COLLECTOR_API size_t Collector_sweepPhase(Collector *self);
COLLECTOR_API void Collector_markPhase(Collector *self);
COLLECTOR_API size_t Collector_collect(Collector *self);
COLLECTOR_API size_t Collector_freeAllValues(Collector *self);
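/* Illustrative lifecycle sketch (not part of the original header; the callback names are assumptions):
 *
 *    Collector *c = Collector_new();
 *    Collector_setMarkFunc_(c, myMarkFunc);   // must match CollectorMarkFunc
 *    Collector_setFreeFunc_(c, myFreeFunc);   // must match CollectorFreeFunc
 *    Collector_addValue_(c, someObject);
 *    Collector_collect(c);                    // run a full mark/sweep pass
 *    Collector_free(c);
 */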
// changing colors
#define Collector_shouldMark_(self, v) Collector_makeGrayIfWhite_(self, v)
// void Collector_makeGrayIfWhite_(Collector *self, void *v);
// void Collector_makeWhite_(Collector *self, CollectorMarker *v);
// void Collector_makeGray_(Collector *self, CollectorMarker *v);
// void Collector_makeBlack_(Collector *self, CollectorMarker *v);
// int Collector_markerIsWhite_(Collector *self, CollectorMarker *m);
// int Collector_markerIsGray_(Collector *self, CollectorMarker *m);
// int Collector_markerIsBlack_(Collector *self, CollectorMarker *m);
COLLECTOR_API char *Collector_colorNameFor_(Collector *self, void *v);
// void *Collector_value_addingRefTo_(Collector *self, void *v, void *ref);
// pause/resume stack
COLLECTOR_API void Collector_pushPause(Collector *self);
COLLECTOR_API void Collector_popPause(Collector *self);
COLLECTOR_API int Collector_isPaused(Collector *self);
COLLECTOR_API double Collector_timeUsed(Collector *self);
#include "Collector_inline.h"
#ifdef __cplusplus
}
#endif
#endif
| 1,886 |
1,682 | /*
Copyright (c) 2012 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* $Id: $
*/
package com.linkedin.restli.examples.greetings.server;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.examples.greetings.api.ComplexArray;
import com.linkedin.restli.examples.greetings.api.Greeting;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.ActionParam;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* @author <NAME>
* @version $Revision: $
*/
@RestLiCollection(name = "complexArray", namespace = "com.linkedin.restli.examples.greetings.client")
public class ComplexArrayResource extends ComplexKeyResourceTemplate<ComplexArray, ComplexArray, Greeting>
{
private static Greeting DEFAULT_GREETING = new Greeting();
@Override
public Greeting get(ComplexResourceKey<ComplexArray, ComplexArray> key)
{
key.getKey().getArray();
key.getKey().getNext().getArray();
key.getParams().getArray();
key.getParams().getNext().getArray();
return DEFAULT_GREETING;
}
@Override
public Map<ComplexResourceKey<ComplexArray, ComplexArray>, Greeting> batchGet(Set<ComplexResourceKey<ComplexArray, ComplexArray>> keys)
{
Map<ComplexResourceKey<ComplexArray, ComplexArray>, Greeting> map = new HashMap<>();
for(ComplexResourceKey<ComplexArray, ComplexArray> key: keys)
{
map.put(key, get(key));
}
return map;
}
@Finder("finder")
public List<Greeting> finder(@QueryParam("array") ComplexArray array)
{
array.getArray();
array.getNext().getArray();
List<Greeting> list = new ArrayList<>();
list.add(DEFAULT_GREETING);
return list;
}
@Action(name = "action")
public int action(@ActionParam("array") ComplexArray array)
{
array.getArray();
array.getNext().getArray();
return 1;
}
}
| 895 |
831 | /*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.adtui.model;
import static com.google.common.truth.Truth.assertThat;
import org.junit.Test;
public class RangedSeriesTest {
@Test
public void testGetSeriesUsingCache() {
Range queryRange = new Range(0, 100);
DefaultDataSeries<Long> testSeries = new DefaultDataSeries<>();
for (int i = 0; i < 50; i++) {
testSeries.add(i, (long)i);
}
RangedSeries rangedSeries = new RangedSeries<>(queryRange, testSeries);
// Check that a first time a query is made, we always return the data from the underlying data series.
assertThat(rangedSeries.getSeries()).hasSize(50);
for (int i = 50; i < 100; i++) {
testSeries.add(i, (long)i);
}
// Adding data along without updating the query range should keep utilizing the cached data.
assertThat(rangedSeries.getSeries()).hasSize(50);
// Updating the query range would fetch from the underlying data series again.
queryRange.setMax(200);
assertThat(rangedSeries.getSeries()).hasSize(100);
}
@Test
public void testGetSeriesIgnoreCache() {
Range queryRange = new Range(0, 100);
DefaultDataSeries<Long> testSeries = new DefaultDataSeries<>();
for (int i = 0; i < 50; i++) {
testSeries.add(i, (long)i);
}
RangedSeries rangedSeries = new RangedSeries<>(queryRange, testSeries);
// Check that a first time a query is made, we always return the data from the underlying data series.
assertThat(rangedSeries.getSeries()).hasSize(50);
// Always pull from the underlying data series and ignore the cache if the range is set to Long.MAX_VALUE.
queryRange.setMax(Long.MAX_VALUE);
for (int i = 50; i < 75; i++) {
testSeries.add(i, (long)i);
}
assertThat(rangedSeries.getSeries()).hasSize(75);
for (int i = 75; i < 100; i++) {
testSeries.add(i, (long)i);
}
assertThat(rangedSeries.getSeries()).hasSize(100);
}
} | 827 |
1,198 | /*
Copyright 2017-2019 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "lullaby/viewer/src/builders/jsonnet.h"
#include "lullaby/tools/common/jsonnet_utils.h"
#include "lullaby/viewer/src/file_manager.h"
namespace lull {
namespace tool {
extern FileManager* g_file_manager;
static bool DefaultJsonnetImporter(const char* filename, std::string* data) {
return g_file_manager->LoadFile(filename, data);
}
std::string ConvertJsonnetToJson(const std::string& jsonnet,
const std::string& filename) {
return ConvertJsonnetToJson(jsonnet, DefaultJsonnetImporter, filename);
}
} // namespace tool
} // namespace lull
| 375 |
2,977 | class SocketIOError(Exception):
pass
class ConnectionError(SocketIOError):
pass
class ConnectionRefusedError(ConnectionError):
"""Connection refused exception.
This exception can be raised from a connect handler when the connection
is not accepted. The positional arguments provided with the exception are
returned with the error packet to the client.
"""
def __init__(self, *args):
if len(args) == 0:
self.error_args = {'message': 'Connection rejected by server'}
elif len(args) == 1:
self.error_args = {'message': str(args[0])}
else:
self.error_args = {'message': str(args[0])}
if len(args) == 2:
self.error_args['data'] = args[1]
else:
self.error_args['data'] = args[1:]
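    # Illustrative sketch (not part of this module): a python-socketio server-side connect
    # handler can reject a client by raising this exception; the handler below is an assumption.
    #
    #     @sio.event
    #     def connect(sid, environ):
    #         raise ConnectionRefusedError('authentication failed', {'retry': False})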
class TimeoutError(SocketIOError):
pass
class BadNamespaceError(SocketIOError):
pass
| 367 |
1,690 | <gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
from mock import Mock, patch
from sagemaker import image_uris
from sagemaker.amazon.kmeans import KMeans, KMeansPredictor
from sagemaker.amazon.amazon_estimator import RecordSet
ROLE = "myrole"
INSTANCE_COUNT = 1
INSTANCE_TYPE = "ml.c4.xlarge"
K = 2
COMMON_TRAIN_ARGS = {
"role": ROLE,
"instance_count": INSTANCE_COUNT,
"instance_type": INSTANCE_TYPE,
}
ALL_REQ_ARGS = dict({"k": K}, **COMMON_TRAIN_ARGS)
REGION = "us-west-2"
BUCKET_NAME = "Some-Bucket"
DESCRIBE_TRAINING_JOB_RESULT = {"ModelArtifacts": {"S3ModelArtifacts": "s3://bucket/model.tar.gz"}}
ENDPOINT_DESC = {"EndpointConfigName": "test-endpoint"}
ENDPOINT_CONFIG_DESC = {"ProductionVariants": [{"ModelName": "model-1"}, {"ModelName": "model-2"}]}
@pytest.fixture()
def sagemaker_session():
boto_mock = Mock(name="boto_session", region_name=REGION)
sms = Mock(
name="sagemaker_session",
boto_session=boto_mock,
region_name=REGION,
config=None,
local_mode=False,
s3_client=None,
s3_resource=None,
)
sms.boto_region_name = REGION
sms.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
sms.sagemaker_client.describe_training_job = Mock(
name="describe_training_job", return_value=DESCRIBE_TRAINING_JOB_RESULT
)
sms.sagemaker_client.describe_endpoint = Mock(return_value=ENDPOINT_DESC)
sms.sagemaker_client.describe_endpoint_config = Mock(return_value=ENDPOINT_CONFIG_DESC)
return sms
def test_init_required_positional(sagemaker_session):
kmeans = KMeans(ROLE, INSTANCE_COUNT, INSTANCE_TYPE, K, sagemaker_session=sagemaker_session)
assert kmeans.role == ROLE
assert kmeans.instance_count == INSTANCE_COUNT
assert kmeans.instance_type == INSTANCE_TYPE
assert kmeans.k == K
def test_init_required_named(sagemaker_session):
kmeans = KMeans(sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
assert kmeans.role == COMMON_TRAIN_ARGS["role"]
assert kmeans.instance_count == INSTANCE_COUNT
assert kmeans.instance_type == COMMON_TRAIN_ARGS["instance_type"]
assert kmeans.k == ALL_REQ_ARGS["k"]
def test_all_hyperparameters(sagemaker_session):
kmeans = KMeans(
sagemaker_session=sagemaker_session,
init_method="random",
max_iterations=3,
tol=0.5,
num_trials=5,
local_init_method="kmeans++",
half_life_time_size=0,
epochs=10,
center_factor=2,
eval_metrics=["msd", "ssd"],
**ALL_REQ_ARGS,
)
assert kmeans.hyperparameters() == dict(
k=str(ALL_REQ_ARGS["k"]),
init_method="random",
local_lloyd_max_iter="3",
local_lloyd_tol="0.5",
local_lloyd_num_trials="5",
local_lloyd_init_method="kmeans++",
half_life_time_size="0",
epochs="10",
extra_center_factor="2",
eval_metrics='["msd", "ssd"]',
force_dense="True",
)
def test_image(sagemaker_session):
kmeans = KMeans(sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
assert image_uris.retrieve("kmeans", REGION) == kmeans.training_image_uri()
@pytest.mark.parametrize("required_hyper_parameters, value", [("k", "string")])
def test_required_hyper_parameters_type(sagemaker_session, required_hyper_parameters, value):
with pytest.raises(ValueError):
test_params = ALL_REQ_ARGS.copy()
test_params[required_hyper_parameters] = value
KMeans(sagemaker_session=sagemaker_session, **test_params)
@pytest.mark.parametrize("required_hyper_parameters, value", [("k", 0)])
def test_required_hyper_parameters_value(sagemaker_session, required_hyper_parameters, value):
with pytest.raises(ValueError):
test_params = ALL_REQ_ARGS.copy()
test_params[required_hyper_parameters] = value
KMeans(sagemaker_session=sagemaker_session, **test_params)
@pytest.mark.parametrize("iterable_hyper_parameters, value", [("eval_metrics", 0)])
def test_iterable_hyper_parameters_type(sagemaker_session, iterable_hyper_parameters, value):
with pytest.raises(TypeError):
test_params = ALL_REQ_ARGS.copy()
test_params.update({iterable_hyper_parameters: value})
KMeans(sagemaker_session=sagemaker_session, **test_params)
@pytest.mark.parametrize(
"optional_hyper_parameters, value",
[
("init_method", 0),
("max_iterations", "string"),
("tol", "string"),
("num_trials", "string"),
("local_init_method", 0),
("half_life_time_size", "string"),
("epochs", "string"),
("center_factor", "string"),
],
)
def test_optional_hyper_parameters_type(sagemaker_session, optional_hyper_parameters, value):
with pytest.raises(ValueError):
test_params = ALL_REQ_ARGS.copy()
test_params.update({optional_hyper_parameters: value})
KMeans(sagemaker_session=sagemaker_session, **test_params)
@pytest.mark.parametrize(
"optional_hyper_parameters, value",
[
("init_method", "string"),
("max_iterations", 0),
("tol", -0.1),
("tol", 1.1),
("num_trials", 0),
("local_init_method", "string"),
("half_life_time_size", -1),
("epochs", 0),
("center_factor", 0),
],
)
def test_optional_hyper_parameters_value(sagemaker_session, optional_hyper_parameters, value):
with pytest.raises(ValueError):
test_params = ALL_REQ_ARGS.copy()
test_params.update({optional_hyper_parameters: value})
KMeans(sagemaker_session=sagemaker_session, **test_params)
PREFIX = "prefix"
FEATURE_DIM = 10
MINI_BATCH_SIZE = 200
@patch("sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.fit")
def test_call_fit(base_fit, sagemaker_session):
kmeans = KMeans(base_job_name="kmeans", sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
kmeans.fit(data, MINI_BATCH_SIZE)
base_fit.assert_called_once()
assert len(base_fit.call_args[0]) == 2
assert base_fit.call_args[0][0] == data
assert base_fit.call_args[0][1] == MINI_BATCH_SIZE
def test_prepare_for_training_no_mini_batch_size(sagemaker_session):
kmeans = KMeans(base_job_name="kmeans", sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
kmeans._prepare_for_training(data)
assert kmeans.mini_batch_size == 5000
def test_prepare_for_training_wrong_type_mini_batch_size(sagemaker_session):
kmeans = KMeans(base_job_name="kmeans", sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
with pytest.raises((TypeError, ValueError)):
kmeans._prepare_for_training(data, "some")
def test_prepare_for_training_wrong_value_mini_batch_size(sagemaker_session):
kmeans = KMeans(base_job_name="kmeans", sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
with pytest.raises(ValueError):
kmeans._prepare_for_training(data, 0)
def test_model_image(sagemaker_session):
kmeans = KMeans(sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
kmeans.fit(data, MINI_BATCH_SIZE)
model = kmeans.create_model()
assert image_uris.retrieve("kmeans", REGION) == model.image_uri
def test_predictor_type(sagemaker_session):
kmeans = KMeans(sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
kmeans.fit(data, MINI_BATCH_SIZE)
model = kmeans.create_model()
predictor = model.deploy(1, INSTANCE_TYPE)
assert isinstance(predictor, KMeansPredictor)
def test_predictor_custom_serialization(sagemaker_session):
kmeans = KMeans(sagemaker_session=sagemaker_session, **ALL_REQ_ARGS)
data = RecordSet(
"s3://{}/{}".format(BUCKET_NAME, PREFIX),
num_records=1,
feature_dim=FEATURE_DIM,
channel="train",
)
kmeans.fit(data, MINI_BATCH_SIZE)
model = kmeans.create_model()
custom_serializer = Mock()
custom_deserializer = Mock()
predictor = model.deploy(
1,
INSTANCE_TYPE,
serializer=custom_serializer,
deserializer=custom_deserializer,
)
assert isinstance(predictor, KMeansPredictor)
assert predictor.serializer is custom_serializer
assert predictor.deserializer is custom_deserializer
| 4,227 |
14,668 | <gh_stars>1000+
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_LACROS_CERT_DB_INITIALIZER_FACTORY_H_
#define CHROME_BROWSER_LACROS_CERT_DB_INITIALIZER_FACTORY_H_
#include "base/no_destructor.h"
#include "components/keyed_service/content/browser_context_keyed_service_factory.h"
class CertDbInitializer;
// Factory that manages creation of CertDbInitializer. The initialization is
// handled differently depending on the environment:
// * On real ChromeOS devices with TPMs:
// ** if the user is affiliated: CertDbInitializer is automatically
// created right after its profile is created. It receives a path to software
// cert database and slot IDs for Chaps from Ash and uses them.
// ** if the user is not affiliated: TODO(b/197082753): not officially supported
// yet, handled as if there's no TPM.
// * In emulated environments (e.g. when running ChromeOS on Linux) and in the
// future on ChromeOS without TPMs: Same as real ChromeOS, but Ash only sends
// the software database path.
// * In browsertests: CertDbInitializer is not created by default because it
// requires crosapi mojo interface. It is configured through the
// `SetCreateWithBrowserContextForTesting()` method. This can be overridden by
// individual tests or they can create their own instances of the service.
// * In unittests: CertDbInitializer is not created by default (see
// `ServiceIsNULLWhileTesting()`).
class CertDbInitializerFactory : public BrowserContextKeyedServiceFactory {
public:
static CertDbInitializerFactory* GetInstance();
static CertDbInitializer* GetForBrowserContext(
content::BrowserContext* context);
// Configures whether CertDbInitializer should be automatically created on
// profile creation in browser tests.
// Currently it is configured that in browser tests the service is not created
// by default. Individual tests can override it when needed.
void SetCreateWithBrowserContextForTesting(bool should_create);
private:
friend class base::NoDestructor<CertDbInitializerFactory>;
CertDbInitializerFactory();
~CertDbInitializerFactory() override = default;
// BrowserStateKeyedServiceFactory
bool ServiceIsCreatedWithBrowserContext() const override;
KeyedService* BuildServiceInstanceFor(
content::BrowserContext* context) const override;
bool ServiceIsNULLWhileTesting() const override;
bool should_create_with_browser_context_ = true;
};
#endif // CHROME_BROWSER_LACROS_CERT_DB_INITIALIZER_FACTORY_H_
| 720 |
402 | {
"id": "dependency-parsing",
"name": "Dependency Parsing",
"description": "Dependency parsing is the task of analyzing the grammatical structure of a sentence and establishing the relationships between \"head\" words and the words which modify those heads.",
"expected_inputs": "The task expects an input sentence.",
"expected_outputs": null,
"scope_and_limitations": null,
"examples": [
{"sentence" : "James ate some cheese whilst thinking about the play."},
{"sentence" : "She decided not to take the house she'd viewed yesterday."},
{"sentence" : "The proportion of PepsiCo’s revenue coming from healthier food and beverages has risen from 38% in 2006 to 50%."},
{"sentence" : "CRISPR-Cas9 is a versatile genome editing technology for studying the functions of genetic elements."}
  ]
}
| 308 |
383 | #include "stable.h"
#include "d3dutil.h" | 19 |
1,682 | <filename>d2/src/main/java/com/linkedin/d2/balancer/clients/RequestTimeoutClient.java
/*
Copyright (c) 2018 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.linkedin.d2.balancer.clients;
import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.MapUtil;
import com.linkedin.d2.balancer.D2Client;
import com.linkedin.d2.balancer.D2ClientDelegator;
import com.linkedin.d2.balancer.LoadBalancer;
import com.linkedin.d2.balancer.ServiceUnavailableException;
import com.linkedin.d2.balancer.properties.PropertyKeys;
import com.linkedin.d2.balancer.util.LoadBalancerUtil;
import com.linkedin.r2.filter.R2Constants;
import com.linkedin.r2.message.Request;
import com.linkedin.r2.message.RequestContext;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import com.linkedin.r2.message.stream.StreamRequest;
import com.linkedin.r2.message.stream.StreamResponse;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.r2.transport.http.client.TimeoutCallback;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is responsible for:
 * 1) giving the caller the guarantee that the call will always return within the
 * time they specified, even if the internal layers behave differently
 * 2) adjusting the internal REQUEST_TIMEOUT coming from the caller if it wouldn't be
 * an acceptable value for the internal layers (see the implementation details below)
 * 3) when the caller's view of the REQUEST_TIMEOUT and the internal one differ,
 * setting CLIENT_REQUEST_TIMEOUT_VIEW to reflect the caller's value in the internal stack
 *
 * Parameters: setting the following parameters in the RequestContext will trigger behaviors in this class:
 * 1) {@code R2Constants.REQUEST_TIMEOUT} to set a higher/lower timeout than the default
 * 2) {@code R2Constants.REQUEST_TIMEOUT_IGNORE_IF_HIGHER_THAN_DEFAULT} to enforce never passing the caller's
 * REQUEST_TIMEOUT value to the lower layers if it is higher than the default one.
 * E.g. when a caller has a deadline within which a function has to return, it never wants
 * the REST calls to take longer than the default max value the downstream service has set
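 *
 * Illustrative sketch (not from the original documentation; the d2Client variable is an assumption):
 * <pre>
 *   RequestContext ctx = new RequestContext();
 *   ctx.putLocalAttr(R2Constants.REQUEST_TIMEOUT, 500); // milliseconds
 *   d2Client.restRequest(request, ctx, callback);
 * </pre>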
*
* @author <NAME> (<EMAIL>)
*/
public class RequestTimeoutClient extends D2ClientDelegator
{
private static final Logger LOG = LoggerFactory.getLogger(RequestTimeoutClient.class);
private final D2Client _d2Client;
private final LoadBalancer _balancer;
private final ScheduledExecutorService _scheduler;
public RequestTimeoutClient(D2Client d2Client, LoadBalancer balancer, ScheduledExecutorService scheduler)
{
super(d2Client);
_d2Client = d2Client;
_balancer = balancer;
_scheduler = scheduler;
}
@Override
public Future<RestResponse> restRequest(RestRequest request)
{
return restRequest(request, new RequestContext());
}
@Override
public Future<RestResponse> restRequest(RestRequest request, RequestContext requestContext)
{
final FutureCallback<RestResponse> future = new FutureCallback<>();
restRequest(request, requestContext, future);
return future;
}
@Override
public void restRequest(RestRequest request, Callback<RestResponse> callback)
{
restRequest(request, new RequestContext(), callback);
}
@Override
public void restRequest(final RestRequest request, final RequestContext requestContext,
final Callback<RestResponse> callback)
{
final Callback<RestResponse> transportCallback =
decorateCallbackWithRequestTimeout(callback, request, requestContext);
_d2Client.restRequest(request, requestContext, transportCallback);
}
@Override
public void streamRequest(StreamRequest request, Callback<StreamResponse> callback)
{
streamRequest(request, new RequestContext(), callback);
}
@Override
public void streamRequest(StreamRequest request, RequestContext requestContext, Callback<StreamResponse> callback)
{
final Callback<StreamResponse> transportCallback =
decorateCallbackWithRequestTimeout(callback, request, requestContext);
_d2Client.streamRequest(request, requestContext, transportCallback);
}
/**
* Enforces the user timeout to the layer below if necessary.
*
 * The layer below must be guaranteed a request timeout that is always greater than or equal to the one set by
 * D2, so that the D2 load balancing policies are not impacted. This avoids a situation in which the
 * degrader/load balancer is never triggered.
 *
 * If the value is higher, on the other hand, it will have an impact on the degrader/load balancer. If it skews
 * the latency too much and triggers the degrader and load balancer too often, those values should be adjusted.
 * This way we guarantee that in the worst case the policies are triggered too often, rather than the opposite
 * (never triggering), which could cause a meltdown.
 *
 * The callback is guaranteed to be called at most once, whether the call succeeds or times out.
 *
 * Note: one of CLIENT_REQUEST_TIMEOUT_VIEW or REQUEST_TIMEOUT (or both) should always be set,
 * to guarantee that any part of the code can know the client's expectation on the request timeout.
 * CLIENT_REQUEST_TIMEOUT_VIEW always reflects the caller's point of view and takes precedence
 * over REQUEST_TIMEOUT's value when the goal is determining the client expectation.
*/
private <RES> Callback<RES> decorateCallbackWithRequestTimeout(Callback<RES> callback, Request request,
RequestContext requestContext)
{
// First, find default timeout value. We get the service properties for this uri
String serviceName = LoadBalancerUtil.getServiceNameFromUri(request.getURI());
Map<String, Object> transportClientProperties;
try
{
transportClientProperties =
_balancer.getLoadBalancedServiceProperties(serviceName).getTransportClientProperties();
} catch (ServiceUnavailableException e)
{
return callback;
}
int defaultRequestTimeout = MapUtil.getWithDefault(transportClientProperties, PropertyKeys.HTTP_REQUEST_TIMEOUT,
HttpClientFactory.DEFAULT_REQUEST_TIMEOUT, Integer.class);
// Start handling per request timeout
Number perRequestTimeout = ((Number) requestContext.getLocalAttr(R2Constants.REQUEST_TIMEOUT));
if (perRequestTimeout == null)
{
requestContext.putLocalAttr(R2Constants.CLIENT_REQUEST_TIMEOUT_VIEW, defaultRequestTimeout);
return callback;
}
if (perRequestTimeout.longValue() >= defaultRequestTimeout)
{
// if higher value is not allowed, let's just remove the REQUEST_TIMEOUT
Boolean requestTimeoutIgnoreIfHigher = ((Boolean) requestContext.getLocalAttr(R2Constants.REQUEST_TIMEOUT_IGNORE_IF_HIGHER_THAN_DEFAULT));
if (requestTimeoutIgnoreIfHigher != null && requestTimeoutIgnoreIfHigher)
{
// client has no intention to adjust default timeout in R2 layer
requestContext.putLocalAttr(R2Constants.CLIENT_REQUEST_TIMEOUT_VIEW, defaultRequestTimeout);
requestContext.removeLocalAttr(R2Constants.REQUEST_TIMEOUT);
}
// if REQUEST_TIMEOUT_IGNORE_IF_HIGHER_THAN_DEFAULT is not true, just return. The R2 client further down will pick up the longer timeout.
return callback;
}
// if the request timeout is lower than the one set in d2, we will remove the timeout value to prevent R2 client from picking it up
requestContext.removeLocalAttr(R2Constants.REQUEST_TIMEOUT);
// we put the client experienced timeout in requestContext so client further down will always be aware of the client expectation
requestContext.putLocalAttr(R2Constants.CLIENT_REQUEST_TIMEOUT_VIEW, perRequestTimeout);
// we will create a timeout callback which will simulate a shorter timeout behavior
TimeoutCallback<RES> timeoutCallback =
new TimeoutCallback<>(_scheduler, perRequestTimeout.longValue(), TimeUnit.MILLISECONDS, callback,
"per request timeout");
return timeoutCallback;
}
} | 2,628 |
369 | // Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
//
// Created by mati on 08.05.19.
//
#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#include "memory/usermem.h"
void free(void *pv)
{
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_free((uint32_t)pv,caller);
#endif
return userfree(pv);
}
void *malloc(size_t xWantedSize)
{
void * ptr = usermalloc(xWantedSize);
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_malloc((uint32_t)ptr,(uint32_t)caller,xWantedSize);
#endif
return ptr;
}
void* _malloc_r (struct _reent *r, size_t sz)
{
void * ptr = usermalloc(sz);
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_malloc((uint32_t)ptr,(uint32_t)caller,sz);
#endif
return ptr;
}
void* calloc (size_t num, size_t size)
{
size_t total = num * size;
void * p = usermalloc(total);
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_malloc((uint32_t)p,(uint32_t)caller,total);
#endif
if (!p) return NULL;
return memset(p, 0, total);
}
void *realloc(void *aptr, size_t nbytes)
{
return userrealloc(aptr, nbytes);
}
void* _calloc_r (struct _reent *r, size_t a, size_t b)
{
size_t total = a * b;
void * p = usermalloc(total);
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_malloc((uint32_t)p,(uint32_t)caller,total);
#endif
if (!p) return NULL;
return memset(p, 0, total);
}
void _free_r (struct _reent *r, void* x)
{
#if PROJECT_CONFIG_MEM_LEAKS_CHECKS == 1
uint32_t caller = (uint32_t)__builtin_return_address (0);
memleaks_log_free((uint32_t)x,caller);
#endif
return userfree(x);
}
void* _realloc_r (struct _reent *r, void* x, size_t sz)
{
return realloc(x, sz);
}
void _putchar(char character)
{
// Use of printf is banned
}
| 986 |
331 | #include <catch.hpp>
#include <algorithm>
#include <vector>
#include <cliopts.hpp>
#include "test_utils.hpp"
TEST_CASE("1: Parse command line arguments", "cli-parse") {
const int argc = 20;
const char* argv[] = {
"prog-name",
"-f", "/path/to/a/file",
"--file", "/path/to/another/file",
"-o", "/path/to/output",
"-I", "/path/to/include",
"--include", "/file/to/include",
"-D", "SHORT_DEF",
"--define", "LONG_DEF",
"-U", "SHORT_UDEF",
"--undefine", "LONG_UDEF",
"--verbose"
};
auto result = fuzzypp::cliopts::CliOptions::parse_command_line(argc, const_cast<char**>(argv));
REQUIRE(result.has_value());
std::vector<std::string> expected_files {
fuzzypp::tests::to_native_path("/path/to/a/file"),
fuzzypp::tests::to_native_path("/path/to/another/file")
};
REQUIRE(std::equal(result->files.cbegin(), result->files.cend(), expected_files.cbegin()));
REQUIRE(result->output_directory == fuzzypp::tests::to_native_path("/path/to/output"));
std::vector<std::string> expected_incl_path { fuzzypp::tests::to_native_path("/path/to/include") };
REQUIRE(std::equal(result->include_paths.cbegin(), result->include_paths.cend(), expected_incl_path.cbegin()));
std::vector<std::string> expected_incl_file { fuzzypp::tests::to_native_path("/file/to/include") };
REQUIRE(std::equal(result->include_files.cbegin(), result->include_files.cend(), expected_incl_file.cbegin()));
std::vector<std::string> expected_defs { "SHORT_DEF", "LONG_DEF" };
REQUIRE(std::equal(result->defines.cbegin(), result->defines.cend(), expected_defs.cbegin()));
std::vector<std::string> expected_udefs { "SHORT_UDEF", "LONG_UDEF" };
REQUIRE(std::equal(result->undefines.cbegin(), result->undefines.cend(), expected_udefs.cbegin()));
REQUIRE(result->verbose);
}
TEST_CASE("2: Fail to parse invalid arguments", "cli-parse") {
const int argc = 2;
const char* argv[] = {
"prog-name",
"-f"
};
auto result = fuzzypp::cliopts::CliOptions::parse_command_line(argc, const_cast<char**>(argv));
REQUIRE_FALSE(result.has_value());
}
TEST_CASE("3: Ignore unknown command line arguments", "cli-parse") {
const int argc = 5;
const char* argv[] = {
"prog-name",
"-f", "/path/to/a/file",
"--unknown",
"-z"
};
auto result = fuzzypp::cliopts::CliOptions::parse_command_line(argc, const_cast<char**>(argv));
REQUIRE(result.has_value());
std::vector<std::string> expected_files { fuzzypp::tests::to_native_path("/path/to/a/file") };
REQUIRE(std::equal(result->files.cbegin(), result->files.cend(), expected_files.cbegin()));
}
TEST_CASE("1: Pass validation", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/1/source.cpp");
auto header_file = fuzzypp::tests::create_temp_file("cli-validate/1/source.hpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/1/other/header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/1/output" };
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { header_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE_FALSE(result.has_value());
}
TEST_CASE("2: Fail if an input file contains '..'", "cli-validate") {
auto header_file = fuzzypp::tests::create_temp_file("cli-validate/2/source.hpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/2/other/header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/2/output" };
auto bad_input_file = std::filesystem::temp_directory_path() /= "cli-validate/2/..";
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { bad_input_file },
std::vector<std::filesystem::path> { header_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "File paths must not contain '..'.");
}
TEST_CASE("3: Fail if no input files are specified", "cli-validate") {
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { },
std::vector<std::filesystem::path> { },
std::vector<std::filesystem::path> { },
std::vector<std::string> { },
std::vector<std::string> { },
std::filesystem::path {},
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "At least one input file must be specified.");
}
TEST_CASE("4: Fail if no output files are specified", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/4/source.cpp");
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { },
std::vector<std::filesystem::path> { },
std::vector<std::string> { },
std::vector<std::string> { },
std::filesystem::path {},
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "The output directory must be specified.");
}
TEST_CASE("5: Fail if no output files are specified", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/5/source.cpp");
auto lying_output_directory = fuzzypp::tests::create_temp_file("cli-validate/5/outdir");
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { },
std::vector<std::filesystem::path> { },
std::vector<std::string> { },
std::vector<std::string> { },
lying_output_directory,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "The specified output directory must be a directory.");
}
TEST_CASE("6: Fail if an input file does not exist", "cli-validate") {
auto header_file = fuzzypp::tests::create_temp_file("cli-validate/6/source.hpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/6/other/header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/6/output" };
auto fake_input_file = std::filesystem::temp_directory_path() /= "cli-validate/6/source.cpp";
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { fake_input_file },
std::vector<std::filesystem::path> { header_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "File [" + fake_input_file.string() + "] does not exist.");
}
TEST_CASE("7: Fail if an included file does not exist", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/7/source.cpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/7/other/header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/7/output" };
auto fake_include_file = std::filesystem::temp_directory_path() /= "cli-validate/7/source.hpp";
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { fake_include_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "Include file [" + fake_include_file.string() + "] does not exist.");
}
TEST_CASE("8: Fail if an included file contains '..'", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/8/source.cpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/8/other/header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/8/output" };
auto bad_include_file = std::filesystem::temp_directory_path() /= "cli-validate/8/..";
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { bad_include_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "Include file paths must not contain '..'.");
}
TEST_CASE("9: Fail if an included path does not exist", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/9/source.cpp");
auto header_file = fuzzypp::tests::create_temp_file("cli-validate/9/source.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/9/output" };
auto fake_include_path = std::filesystem::temp_directory_path() /= "cli-validate/9/other";
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { header_file },
std::vector<std::filesystem::path> { fake_include_path },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "Include path [" + fake_include_path.string() + "] does not exist.");
}
TEST_CASE("10: Fail if an included path contains '..'", "cli-validate") {
auto source_file = fuzzypp::tests::create_temp_file("cli-validate/10/source.cpp");
auto header_file = fuzzypp::tests::create_temp_file("cli-validate/10/source.hpp");
auto other_header = fuzzypp::tests::create_temp_file("cli-validate/10/other/../header.hpp");
auto output_path = std::filesystem::temp_directory_path() /= std::filesystem::path { "cli-validate/10/output" };
auto opts = fuzzypp::cliopts::CliOptions {
std::vector<std::filesystem::path> { source_file },
std::vector<std::filesystem::path> { header_file },
std::vector<std::filesystem::path> { other_header.parent_path() },
std::vector<std::string> { "DEF" },
std::vector<std::string> { "UNDEF" },
output_path,
false
};
auto result = opts.validate_options();
REQUIRE(result.has_value());
REQUIRE(*result == "Include paths must not contain '..'.");
}
| 4,607 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-8cq5-h2jx-4m9x",
"modified": "2022-05-01T02:27:42Z",
"published": "2022-05-01T02:27:42Z",
"aliases": [
"CVE-2005-4499"
],
"details": "The Downloadable RADIUS ACLs feature in Cisco PIX and VPN 3000 concentrators, when creating an ACL on the Cisco Secure Access Control Server (CS ACS), generates a random internal name for an ACL that is also used as a hidden user name and password, which allows remote attackers to gain privileges by sniffing the username from the cleartext portion of a RADIUS session, then using the password to log in to another device that uses CS ACS.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2005-4499"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/18141"
},
{
"type": "WEB",
"url": "http://www.cisco.com/en/US/products/sw/secursw/ps2086/products_field_notice09186a00805bf1c4.shtml"
},
{
"type": "WEB",
"url": "http://www.osvdb.org/22193"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/420020/100/0/threaded"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/420103/100/0/threaded"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/16025"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 669 |
585 | <reponame>KevinKecc/caffe2<filename>caffe2/mobile/contrib/opengl/core/GLImageAllocator.cc
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GLImageAllocator.h"
#include "arm_neon_support.h"
template <class T>
GLImageVector<T>* GLImageAllocator<T>::newImage(
int num_images, int width, int height, int channels, int tile_x, int tile_y, bool is_output) {
GLImageVector<T>* images =
new GLImageVector<T>(num_images, width, height, channels, tile_x, tile_y);
for (int i = 0; i < num_images; i++) {
images->push_back(
new GLImage<T>(width, height, channels, tile_x, tile_y, [&](int slice) -> const GLTexture* {
bool usePadding = is_output;
return new GLPlainTexture(type, nullptr, width * tile_x, height * tile_y, usePadding);
}));
}
return images;
}
template <class T>
GLImageVector<T>* GLImageAllocator<T>::newImage(
int num_images,
int width,
int height,
int channels,
int tile_x,
int tile_y,
std::function<const GLTexture*(const int width, const int height)> textureAllocator) {
GLImageVector<T>* images =
new GLImageVector<T>(num_images, width, height, channels, tile_x, tile_y);
for (int i = 0; i < num_images; i++) {
images->push_back(
new GLImage<T>(width, height, channels, tile_x, tile_y, [&](int slice) -> const GLTexture* {
return textureAllocator(width, height);
}));
}
return images;
}
template <class T>
GLImageVector<T>* GLImageAllocator<T>::ShareTexture(const GLuint textureID,
int num_images,
int width,
int height,
int channels,
int tile_x,
int tile_y) {
GLImageVector<T>* images =
new GLImageVector<T>(num_images, width, height, channels, tile_x, tile_y);
for (int i = 0; i < num_images; i++) {
images->push_back(
new GLImage<T>(width, height, channels, tile_x, tile_y, [&](int slice) -> const GLTexture* {
return new GLPlainTexture(
GLImageAllocator<T>::type, textureID, width * tile_x, height * tile_y);
}));
}
return images;
}
template <>
const GLTexture::Type& GLImageAllocator<float16_t>::type = GLTexture::FP16;
template <>
const GLTexture::Type& GLImageAllocator<uint8_t>::type = GLTexture::UI8;
template class GLImageAllocator<float16_t>;
template class GLImageAllocator<uint8_t>;
| 1,373 |
14,668 | <filename>chrome/browser/android/omnibox/autocomplete_controller_android.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/android/omnibox/autocomplete_controller_android.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/bind.h"
#include "base/check.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/memory/singleton.h"
#include "base/metrics/field_trial_params.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_util.h"
#include "base/task/post_task.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "chrome/browser/android/autocomplete/tab_matcher_android.h"
#include "chrome/browser/android/tab_android.h"
#include "chrome/browser/autocomplete/chrome_autocomplete_provider_client.h"
#include "chrome/browser/autocomplete/chrome_autocomplete_scheme_classifier.h"
#include "chrome/browser/autocomplete/shortcuts_backend_factory.h"
#include "chrome/browser/predictors/autocomplete_action_predictor.h"
#include "chrome/browser/predictors/autocomplete_action_predictor_factory.h"
#include "chrome/browser/profiles/incognito_helpers.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/profiles/profile_android.h"
#include "chrome/browser/profiles/profile_manager.h"
#include "chrome/browser/search_engines/template_url_service_factory.h"
#include "chrome/browser/ui/android/omnibox/jni_headers/AutocompleteController_jni.h"
#include "chrome/common/webui_url_constants.h"
#include "components/browser_ui/util/android/url_constants.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
#include "components/omnibox/browser/autocomplete_classifier.h"
#include "components/omnibox/browser/autocomplete_input.h"
#include "components/omnibox/browser/autocomplete_match.h"
#include "components/omnibox/browser/autocomplete_match_type.h"
#include "components/omnibox/browser/autocomplete_provider.h"
#include "components/omnibox/browser/autocomplete_provider_client.h"
#include "components/omnibox/browser/autocomplete_result.h"
#include "components/omnibox/browser/base_search_provider.h"
#include "components/omnibox/browser/omnibox_controller_emitter.h"
#include "components/omnibox/browser/omnibox_event_global_tracker.h"
#include "components/omnibox/browser/omnibox_log.h"
#include "components/omnibox/browser/suggestion_answer.h"
#include "components/omnibox/browser/voice_suggest_provider.h"
#include "components/omnibox/common/omnibox_features.h"
#include "components/open_from_clipboard/clipboard_recent_content.h"
#include "components/search_engines/omnibox_focus_type.h"
#include "components/search_engines/template_url.h"
#include "components/search_engines/template_url_service.h"
#include "components/sessions/content/session_tab_helper.h"
#include "content/public/browser/browser_task_traits.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/render_process_host.h"
#include "content/public/browser/web_contents.h"
#include "content/public/common/url_constants.h"
#include "net/cookies/cookie_util.h"
#include "third_party/metrics_proto/omnibox_event.pb.h"
#include "ui/base/page_transition_types.h"
#include "ui/base/window_open_disposition.h"
#include "url/android/gurl_android.h"
#include "url/gurl.h"
using base::android::AttachCurrentThread;
using base::android::ConvertJavaStringToUTF16;
using base::android::ConvertJavaStringToUTF8;
using base::android::ConvertUTF16ToJavaString;
using base::android::ConvertUTF8ToJavaString;
using base::android::JavaParamRef;
using base::android::JavaRef;
using base::android::ScopedJavaLocalRef;
using metrics::OmniboxEventProto;
namespace {
// The delay between the Omnibox being opened and a spare renderer being
// started. Starting a spare renderer is a very expensive operation, so this
// value must be large enough for the Omnibox to be fully rendered and
// otherwise idle, but not so large that the user navigates before the spare
// renderer is started. Experimentation with 1s, 2s and 3s found that 1s worked
// best.
static constexpr int OMNIBOX_SPARE_RENDERER_DELAY_MS = 1000;
void RecordClipboardMetrics(AutocompleteMatchType::Type match_type) {
if (match_type != AutocompleteMatchType::CLIPBOARD_URL &&
match_type != AutocompleteMatchType::CLIPBOARD_TEXT &&
match_type != AutocompleteMatchType::CLIPBOARD_IMAGE) {
return;
}
base::TimeDelta age =
ClipboardRecentContent::GetInstance()->GetClipboardContentAge();
UMA_HISTOGRAM_LONG_TIMES_100("MobileOmnibox.PressedClipboardSuggestionAge",
age);
if (match_type == AutocompleteMatchType::CLIPBOARD_URL) {
UMA_HISTOGRAM_LONG_TIMES_100(
"MobileOmnibox.PressedClipboardSuggestionAge.URL", age);
} else if (match_type == AutocompleteMatchType::CLIPBOARD_TEXT) {
UMA_HISTOGRAM_LONG_TIMES_100(
"MobileOmnibox.PressedClipboardSuggestionAge.TEXT", age);
} else if (match_type == AutocompleteMatchType::CLIPBOARD_IMAGE) {
UMA_HISTOGRAM_LONG_TIMES_100(
"MobileOmnibox.PressedClipboardSuggestionAge.IMAGE", age);
}
}
/**
* A prefetcher class responsible for triggering zero suggest prefetch.
* The prefetch occurs as a side-effect of calling OnOmniboxFocused() on
* the AutocompleteController object.
*/
class ZeroSuggestPrefetcher {
public:
explicit ZeroSuggestPrefetcher(Profile* profile);
private:
void SelfDestruct();
std::unique_ptr<AutocompleteController> controller_;
base::OneShotTimer expire_timer_;
};
ZeroSuggestPrefetcher::ZeroSuggestPrefetcher(Profile* profile)
: controller_(new AutocompleteController(
std::make_unique<ChromeAutocompleteProviderClient>(profile),
AutocompleteProvider::TYPE_ZERO_SUGGEST)) {
AutocompleteInput input(std::u16string(), metrics::OmniboxEventProto::NTP,
ChromeAutocompleteSchemeClassifier(profile));
input.set_current_url(GURL(chrome::kChromeUINewTabURL));
input.set_focus_type(OmniboxFocusType::ON_FOCUS);
controller_->Start(input);
// Delete ourselves after 10s. This is enough time to cache results or
// give up if the results haven't been received.
expire_timer_.Start(FROM_HERE, base::Milliseconds(10000), this,
&ZeroSuggestPrefetcher::SelfDestruct);
}
void ZeroSuggestPrefetcher::SelfDestruct() {
delete this;
}
} // namespace
AutocompleteControllerAndroid::AutocompleteControllerAndroid(
Profile* profile,
std::unique_ptr<ChromeAutocompleteProviderClient> client)
: profile_{profile},
java_controller_{Java_AutocompleteController_Constructor(
AttachCurrentThread(),
ProfileAndroid::FromProfile(profile)->GetJavaObject(),
reinterpret_cast<intptr_t>(this))},
provider_client_{client.get()},
autocomplete_controller_{std::make_unique<AutocompleteController>(
std::move(client),
AutocompleteClassifier::DefaultOmniboxProviders())} {
autocomplete_controller_->AddObserver(this);
OmniboxControllerEmitter* emitter =
OmniboxControllerEmitter::GetForBrowserContext(profile_);
if (emitter)
autocomplete_controller_->AddObserver(emitter);
}
void AutocompleteControllerAndroid::Start(JNIEnv* env,
const JavaRef<jstring>& j_text,
jint j_cursor_pos,
const JavaRef<jstring>& j_desired_tld,
const JavaRef<jstring>& j_current_url,
jint j_page_classification,
bool prevent_inline_autocomplete,
bool prefer_keyword,
bool allow_exact_keyword_match,
bool want_asynchronous_matches) {
autocomplete_controller_->result().DestroyJavaObject();
std::string desired_tld;
GURL current_url;
if (!j_current_url.is_null())
current_url = GURL(ConvertJavaStringToUTF16(env, j_current_url));
if (!j_desired_tld.is_null())
desired_tld = base::android::ConvertJavaStringToUTF8(env, j_desired_tld);
std::u16string text = ConvertJavaStringToUTF16(env, j_text);
size_t cursor_pos = j_cursor_pos == -1 ? std::u16string::npos : j_cursor_pos;
input_ = AutocompleteInput(
text, cursor_pos, desired_tld,
OmniboxEventProto::PageClassification(j_page_classification),
ChromeAutocompleteSchemeClassifier(profile_));
input_.set_current_url(current_url);
input_.set_prevent_inline_autocomplete(prevent_inline_autocomplete);
input_.set_prefer_keyword(prefer_keyword);
input_.set_allow_exact_keyword_match(allow_exact_keyword_match);
input_.set_want_asynchronous_matches(want_asynchronous_matches);
autocomplete_controller_->Start(input_);
}
ScopedJavaLocalRef<jobject> AutocompleteControllerAndroid::Classify(
JNIEnv* env,
const JavaParamRef<jstring>& j_text,
bool focused_from_fakebox) {
// The old AutocompleteResult is about to be invalidated.
autocomplete_controller_->result().DestroyJavaObject();
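  // Note: the flag toggled below suppresses the OnResultChanged() ->
  // NotifySuggestionsReceived() round-trip to Java while the synchronous
  // Start() call is in flight; Classify() reads the result directly instead.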
inside_synchronous_start_ = true;
Start(env, j_text, -1, nullptr, nullptr, true, false, false, false,
focused_from_fakebox);
inside_synchronous_start_ = false;
DCHECK(autocomplete_controller_->done());
const AutocompleteResult& result = autocomplete_controller_->result();
if (result.empty())
return ScopedJavaLocalRef<jobject>();
return ScopedJavaLocalRef<jobject>(
result.begin()->GetOrCreateJavaObject(env));
}
void AutocompleteControllerAndroid::OnOmniboxFocused(
JNIEnv* env,
const JavaParamRef<jstring>& j_omnibox_text,
const JavaParamRef<jstring>& j_current_url,
jint j_page_classification,
const JavaParamRef<jstring>& j_current_title) {
// Prevents double triggering of zero suggest when OnOmniboxFocused is issued
// in quick succession (due to odd timing in the Android focus callbacks).
if (!autocomplete_controller_->done())
return;
std::u16string url = ConvertJavaStringToUTF16(env, j_current_url);
std::u16string current_title = ConvertJavaStringToUTF16(env, j_current_title);
const GURL current_url = GURL(url);
std::u16string omnibox_text = ConvertJavaStringToUTF16(env, j_omnibox_text);
// If omnibox text is empty, set it to the current URL for the purposes of
// populating the verbatim match.
if (omnibox_text.empty() && !current_url.SchemeIs(content::kChromeUIScheme) &&
!current_url.SchemeIs(browser_ui::kChromeUINativeScheme))
omnibox_text = url;
auto page_class =
OmniboxEventProto::PageClassification(j_page_classification);
// Proactively start up a renderer, to reduce the time to display search
// results, especially if a Service Worker is used. This is done in a PostTask
  // with an experiment-configured delay so that the CPU usage associated with
// starting a new renderer process does not impact the Omnibox initialization.
// Note that there's a small chance the renderer will be started after the
// next navigation if the delay is too long, but the spare renderer will
// probably get used anyways by a later navigation.
if (!profile_->IsOffTheRecord() &&
base::FeatureList::IsEnabled(omnibox::kOmniboxSpareRenderer) &&
page_class != OmniboxEventProto::ANDROID_SEARCH_WIDGET &&
page_class != OmniboxEventProto::START_SURFACE_HOMEPAGE &&
page_class != OmniboxEventProto::START_SURFACE_NEW_TAB &&
!BaseSearchProvider::IsNTPPage(page_class)) {
auto renderer_delay_ms = base::GetFieldTrialParamByFeatureAsInt(
omnibox::kOmniboxSpareRenderer, "omnibox_spare_renderer_delay_ms",
OMNIBOX_SPARE_RENDERER_DELAY_MS);
base::PostDelayedTask(
FROM_HERE, {content::BrowserThread::UI},
base::BindOnce(&AutocompleteControllerAndroid::WarmUpRenderProcess,
weak_ptr_factory_.GetWeakPtr()),
base::Milliseconds(renderer_delay_ms));
}
input_ = AutocompleteInput(omnibox_text, page_class,
ChromeAutocompleteSchemeClassifier(profile_));
input_.set_current_url(current_url);
input_.set_current_title(current_title);
input_.set_focus_type(OmniboxFocusType::ON_FOCUS);
autocomplete_controller_->Start(input_);
}
void AutocompleteControllerAndroid::Stop(JNIEnv* env,
bool clear_results) {
autocomplete_controller_->Stop(clear_results);
}
void AutocompleteControllerAndroid::ResetSession(JNIEnv* env) {
autocomplete_controller_->ResetSession();
}
void AutocompleteControllerAndroid::OnSuggestionSelected(
JNIEnv* env,
jint selected_index,
const jint j_window_open_disposition,
const JavaParamRef<jstring>& j_current_url,
jint j_page_classification,
jlong elapsed_time_since_first_modified,
jint completed_length,
const JavaParamRef<jobject>& j_web_contents) {
std::u16string url = ConvertJavaStringToUTF16(env, j_current_url);
const GURL current_url = GURL(url);
const base::TimeTicks& now(base::TimeTicks::Now());
content::WebContents* web_contents =
content::WebContents::FromJavaWebContents(j_web_contents);
const auto& match =
autocomplete_controller_->result().match_at(selected_index);
SuggestionAnswer::LogAnswerUsed(match.answer);
TemplateURLService* template_url_service =
TemplateURLServiceFactory::GetForProfile(profile_);
if (template_url_service &&
template_url_service->IsSearchResultsPageFromDefaultSearchProvider(
match.destination_url)) {
UMA_HISTOGRAM_BOOLEAN("Omnibox.Search.OffTheRecord",
profile_->IsOffTheRecord());
}
RecordClipboardMetrics(match.type);
// The following histogram should be recorded for both TYPED and pasted
// URLs, but should still exclude reloads.
if (ui::PageTransitionTypeIncludingQualifiersIs(match.transition,
ui::PAGE_TRANSITION_TYPED) ||
ui::PageTransitionTypeIncludingQualifiersIs(match.transition,
ui::PAGE_TRANSITION_LINK)) {
net::cookie_util::RecordCookiePortOmniboxHistograms(match.destination_url);
}
AutocompleteMatch::LogSearchEngineUsed(
match, TemplateURLServiceFactory::GetForProfile(profile_));
OmniboxLog log(
// For zero suggest, record an empty input string instead of the
// current URL.
input_.focus_type() != OmniboxFocusType::DEFAULT ? std::u16string()
: input_.text(),
false, /* don't know */
input_.type(), false, /* not keyword mode */
OmniboxEventProto::INVALID, true, selected_index,
static_cast<WindowOpenDisposition>(j_window_open_disposition), false,
sessions::SessionTabHelper::IdForTab(web_contents),
OmniboxEventProto::PageClassification(j_page_classification),
base::Milliseconds(elapsed_time_since_first_modified), completed_length,
now - autocomplete_controller_->last_time_default_match_changed(),
autocomplete_controller_->result());
autocomplete_controller_->AddProviderAndTriggeringLogs(&log);
OmniboxEventGlobalTracker::GetInstance()->OnURLOpened(&log);
predictors::AutocompleteActionPredictorFactory::GetForProfile(profile_)
->OnOmniboxOpenedUrl(log);
}
void AutocompleteControllerAndroid::DeleteSuggestion(JNIEnv* env, jint index) {
const auto& match = autocomplete_controller_->result().match_at(index);
if (match.SupportsDeletion())
autocomplete_controller_->DeleteMatch(match);
}
ScopedJavaLocalRef<jobject> AutocompleteControllerAndroid::
UpdateMatchDestinationURLWithAdditionalAssistedQueryStats(
JNIEnv* env,
jint selected_index,
jlong elapsed_time_since_input_change,
const JavaParamRef<jstring>& jnew_query_text,
const JavaParamRef<jobjectArray>& jnew_query_params) {
AutocompleteMatch match(
autocomplete_controller_->result().match_at(selected_index));
if (!jnew_query_text.is_null()) {
std::u16string query =
base::android::ConvertJavaStringToUTF16(env, jnew_query_text);
if (!match.search_terms_args) {
match.search_terms_args =
std::make_unique<TemplateURLRef::SearchTermsArgs>(query);
} else {
match.search_terms_args->search_terms = query;
}
}
if (!jnew_query_params.is_null() && match.search_terms_args) {
std::vector<std::string> params;
base::android::AppendJavaStringArrayToStringVector(env, jnew_query_params,
                                                       &params);
    // The query params are from the query tiles server and don't need to be
// escaped.
match.search_terms_args->additional_query_params =
base::JoinString(params, "&");
}
autocomplete_controller_
->UpdateMatchDestinationURLWithAdditionalAssistedQueryStats(
base::Milliseconds(elapsed_time_since_input_change), &match);
return url::GURLAndroid::FromNativeGURL(env, match.destination_url);
}
ScopedJavaLocalRef<jobject>
AutocompleteControllerAndroid::GetMatchingTabForSuggestion(JNIEnv* env,
jint index) {
const AutocompleteMatch& match =
autocomplete_controller_->result().match_at(index);
return match.GetMatchingJavaTab().get(env);
}
void AutocompleteControllerAndroid::Shutdown() {
// Cancel all pending actions and clear any remaining matches.
autocomplete_controller_.reset();
Java_AutocompleteController_notifyNativeDestroyed(AttachCurrentThread(),
java_controller_);
}
void AutocompleteControllerAndroid::SetVoiceMatches(
JNIEnv* env,
const JavaParamRef<jobjectArray>& j_voice_matches,
const JavaParamRef<jfloatArray>& j_confidence_scores) {
auto* const voice_suggest_provider =
autocomplete_controller_->voice_suggest_provider();
DCHECK(voice_suggest_provider)
<< "Voice matches received with no registered VoiceSuggestProvider. "
<< "Either disable voice input, or provision VoiceSuggestProvider.";
std::vector<std::u16string> voice_matches;
std::vector<float> confidence_scores;
AppendJavaStringArrayToStringVector(env, j_voice_matches, &voice_matches);
JavaFloatArrayToFloatVector(env, j_confidence_scores, &confidence_scores);
DCHECK(voice_matches.size() == confidence_scores.size());
voice_suggest_provider->ClearCache();
for (size_t index = 0; index < voice_matches.size(); ++index) {
voice_suggest_provider->AddVoiceSuggestion(voice_matches[index],
confidence_scores[index]);
}
}
ScopedJavaLocalRef<jobject> AutocompleteControllerAndroid::GetJavaObject()
const {
return ScopedJavaLocalRef<jobject>(java_controller_);
}
AutocompleteControllerAndroid::~AutocompleteControllerAndroid() = default;
void AutocompleteControllerAndroid::OnResultChanged(
AutocompleteController* controller,
bool default_match_changed) {
if (!inside_synchronous_start_)
NotifySuggestionsReceived(autocomplete_controller_->result());
}
void AutocompleteControllerAndroid::NotifySuggestionsReceived(
const AutocompleteResult& autocomplete_result) {
JNIEnv* env = AttachCurrentThread();
autocomplete_controller_->SetTailSuggestContentPrefixes();
// Get the inline-autocomplete text.
std::u16string inline_autocompletion;
if (auto* default_match = autocomplete_result.default_match())
inline_autocompletion = default_match->inline_autocompletion;
ScopedJavaLocalRef<jstring> inline_text =
ConvertUTF16ToJavaString(env, inline_autocompletion);
Java_AutocompleteController_onSuggestionsReceived(
env, java_controller_, autocomplete_result.GetOrCreateJavaObject(env),
inline_text);
}
void AutocompleteControllerAndroid::WarmUpRenderProcess() const {
// It is ok for this to get called multiple times since all the requests
// will get de-duplicated to the first one.
content::RenderProcessHost::WarmupSpareRenderProcessHost(profile_);
}
// static
AutocompleteControllerAndroid*
AutocompleteControllerAndroid::Factory::GetForProfile(Profile* profile) {
return static_cast<AutocompleteControllerAndroid*>(
GetInstance()->GetServiceForBrowserContext(profile, true));
}
AutocompleteControllerAndroid::Factory*
AutocompleteControllerAndroid::Factory::GetInstance() {
return base::Singleton<AutocompleteControllerAndroid::Factory>::get();
}
content::BrowserContext*
AutocompleteControllerAndroid::Factory::GetBrowserContextToUse(
content::BrowserContext* context) const {
return chrome::GetBrowserContextOwnInstanceInIncognito(context);
}
AutocompleteControllerAndroid::Factory::Factory()
: BrowserContextKeyedServiceFactory(
"AutocompleteControllerAndroid",
BrowserContextDependencyManager::GetInstance()) {
DependsOn(TemplateURLServiceFactory::GetInstance());
DependsOn(ShortcutsBackendFactory::GetInstance());
}
AutocompleteControllerAndroid::Factory::~Factory() = default;
KeyedService* AutocompleteControllerAndroid::Factory::BuildServiceInstanceFor(
content::BrowserContext* context) const {
auto* profile = static_cast<Profile*>(context);
return new AutocompleteControllerAndroid(
profile, std::make_unique<ChromeAutocompleteProviderClient>(profile));
}
static ScopedJavaLocalRef<jobject> JNI_AutocompleteController_GetForProfile(
JNIEnv* env,
const JavaParamRef<jobject>& jprofile) {
AutocompleteControllerAndroid* native_bridge =
AutocompleteControllerAndroid::Factory::GetForProfile(
ProfileAndroid::FromProfileAndroid(jprofile));
if (!native_bridge)
return {};
return native_bridge->GetJavaObject();
}
static void JNI_AutocompleteController_PrefetchZeroSuggestResults(JNIEnv* env) {
Profile* profile = ProfileManager::GetActiveUserProfile();
if (!profile)
return;
// ZeroSuggestPrefetcher deletes itself after it's done prefetching.
new ZeroSuggestPrefetcher(profile);
}
| 8,282 |
5,169 | <reponame>Gantios/Specs
{
"name": "MTNetworkService",
"version": "1.0.1",
"swift_versions": "5.0",
"summary": "MTNetworkService is a framework that allow consume web service.",
"description": "I created this library to other developers, who wants implement web service in his/her proyects",
"homepage": "https://github.com/MT-Technology/NetworkService",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"MT-Technology": "<EMAIL>"
},
"source": {
"git": "https://github.com/MT-Technology/NetworkService.git",
"tag": "1.0.1"
},
"platforms": {
"ios": "11.0"
},
"source_files": "MTNetworkService/Classes/**/*",
"swift_version": "5.0"
}
| 268 |
799 | {
"defaultIncidentType": "TheHive",
"feed": false,
"id": "TheHive - Classifier",
"keyTypeMap": {
"case": "TheHive"
},
"name": "TheHive - Classifier",
"propagationLabels": [
"all"
],
"transformer": {
"complex": null,
"simple": "_type"
},
"type": "classification",
"version": -1,
"fromVersion": "6.0.0",
"description": "Default classifier for TheHive Project cases"
}
| 168 |
1,097 | // Commander X16 Emulator
// Copyright (c) 2019, 2020 <NAME>
// All rights reserved. License: 2-clause BSD
#define VER "38"
#define VER_NAME "Kyoto"
#define VER_INFO "### Release 38 (\"Kyoto\")\n"
| 70 |
1,041 | <reponame>GiannisMP/ebean
package io.ebeaninternal.server.querydefn;
import io.ebeaninternal.api.BindValuesKey;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class BindValuesKeyTest {
@Test
public void update_with_null() {
BindValuesKey hash = new BindValuesKey();
hash.add(1).add(null).add("hello");
BindValuesKey hash2 = new BindValuesKey();
hash2.add(1).add(null).add("hello");
assertThat(hash).isEqualTo(hash2);
}
@Test
public void notEqual() {
BindValuesKey hash = new BindValuesKey();
hash.add(1).add(null).add("hello");
BindValuesKey hash2 = new BindValuesKey();
hash2.add(1).add("hello");
BindValuesKey hash3 = new BindValuesKey();
    hash3.add(1).add(null);
assertThat(hash).isNotEqualTo(hash2);
assertThat(hash).isNotEqualTo(hash3);
assertThat(hash2).isNotEqualTo(hash3);
}
}
| 351 |
937 | <gh_stars>100-1000
package com.oath.cyclops.internal.stream.spliterators.push;
import cyclops.reactive.ReactiveSeq;
import cyclops.reactive.Spouts;
import java.util.function.BiPredicate;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
/**
* Created by johnmcclean on 12/01/2017.
*/
public class CombineOperator<T,A,R> extends BaseOperator<T,ReactiveSeq<T>> {
private final BiPredicate<? super T, ? super T> predicate;
private final BinaryOperator<T> accumulator;
static final Object UNSET = new Object();
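    // Illustrative behaviour (hypothetical arguments): with a predicate of
    // (a, b) -> a + b < 10 and an accumulator of Integer::sum, the input
    // 1, 2, 3, 20, 5 is emitted as the groups [6, 20] and [5]. Adjacent values
    // are folded together while the predicate holds; the running value plus
    // the first non-matching value are then emitted as one ReactiveSeq.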
public CombineOperator(Operator<T> source, BiPredicate<? super T, ? super T> predicate, BinaryOperator<T> accumulator){
super(source);
this.predicate = predicate;
this.accumulator = accumulator;
}
@Override
public StreamSubscription subscribe(Consumer<? super ReactiveSeq<T>> onNext, Consumer<? super Throwable> onError, Runnable onComplete) {
final Object[] current = {UNSET};
StreamSubscription[] upstream = {null};
StreamSubscription sub = new StreamSubscription(){
@Override
public void request(long n) {
if(n<=0) {
onError.accept(new IllegalArgumentException("3.9 While the Subscription is not cancelled, Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the argument is <= 0."));
return;
}
if(!isOpen)
return;
super.request(n);
upstream[0].request(n );
}
@Override
public void cancel() {
upstream[0].cancel();
super.cancel();
}
};
upstream[0] = source.subscribe(next-> {
try {
if(current[0]== UNSET){
current[0]=next;
} else if (predicate.test((T)current[0], next)) {
current[0] = accumulator.apply((T)current[0], next);
} else {
final T result = (T)current[0];
current[0] = (T) UNSET;
onNext.accept(Spouts.of(result, next));
return;
}
request( upstream,1l);
} catch (Throwable t) {
onError.accept(t);
}
}
,t->{onError.accept(t);
sub.requested.decrementAndGet();
if(sub.isActive())
request( upstream,1);
},()->{
if(current[0]!= UNSET)
onNext.accept(Spouts.of((T)current[0]));
onComplete.run();
});
return sub;
}
volatile int test = 0;
@Override
public void subscribeAll(Consumer<? super ReactiveSeq<T>> onNext, Consumer<? super Throwable> onError, Runnable onCompleteDs) {
final Object[] current = {UNSET};
boolean[] completed = {false};
source.subscribeAll(next-> {
try {
if(current[0]== UNSET){
current[0]=next;
} else if (predicate.test((T)current[0], next)) {
current[0] = accumulator.apply((T)current[0], next);
} else {
final T result = (T)current[0];
current[0] = (T) UNSET;
onNext.accept(Spouts.of(result, next));
return;
}
} catch (Throwable t) {
onError.accept(t);
}
}
,onError,()->{
if(!completed[0]) {
if (current[0] != UNSET)
onNext.accept(Spouts.of((T) current[0]));
onCompleteDs.run();
completed[0]=true;
}
});
}
}
| 2,350 |
2,293 | <filename>dynaconf/vendor/click/globals.py
_A=None
from threading import local
_local=local()
def get_current_context(silent=False):
try:return _local.stack[-1]
except (AttributeError,IndexError):
if not silent:raise RuntimeError('There is no active click context.')
def push_context(ctx):_local.__dict__.setdefault('stack',[]).append(ctx)
def pop_context():_local.stack.pop()
def resolve_color_default(color=_A):
A=color
if A is not _A:return A
B=get_current_context(silent=True)
if B is not _A:return B.color | 186 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Saint-Porchaire","circ":"5ème circonscription","dpt":"Charente-Maritime","inscrits":1276,"abs":715,"votants":561,"blancs":39,"nuls":8,"exp":514,"res":[{"nuance":"DIV","nom":"<NAME>","voix":277},{"nuance":"LR","nom":"<NAME>","voix":237}]} | 116 |
2,151 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/appcache/appcache_interceptor.h"
#include <utility>
#include "content/browser/appcache/appcache_backend_impl.h"
#include "content/browser/appcache/appcache_host.h"
#include "content/browser/appcache/appcache_request_handler.h"
#include "content/browser/appcache/appcache_service_impl.h"
#include "content/browser/appcache/appcache_url_request.h"
#include "content/browser/appcache/appcache_url_request_job.h"
#include "content/browser/appcache/chrome_appcache_service.h"
#include "content/browser/bad_message.h"
#include "content/browser/loader/resource_message_filter.h"
#include "content/browser/loader/resource_requester_info.h"
#include "content/common/appcache_interfaces.h"
#include "net/url_request/url_request.h"
static int kHandlerKey; // Value is not used.
namespace content {
void AppCacheInterceptor::SetHandler(
net::URLRequest* request,
std::unique_ptr<AppCacheRequestHandler> handler) {
request->SetUserData(&kHandlerKey, std::move(handler));
}
AppCacheRequestHandler* AppCacheInterceptor::GetHandler(
net::URLRequest* request) {
return static_cast<AppCacheRequestHandler*>(
request->GetUserData(&kHandlerKey));
}
void AppCacheInterceptor::SetExtraRequestInfo(net::URLRequest* request,
AppCacheServiceImpl* service,
int process_id,
int host_id,
ResourceType resource_type,
bool should_reset_appcache) {
if (!service || (host_id == kAppCacheNoHostId))
return;
AppCacheBackendImpl* backend = service->GetBackend(process_id);
if (!backend)
return;
// TODO(michaeln): An invalid host id is indicative of bad data
// from a child process. How should we handle that here?
AppCacheHost* host = backend->GetHost(host_id);
if (!host)
return;
SetExtraRequestInfoForHost(request, host, resource_type,
should_reset_appcache);
}
void AppCacheInterceptor::SetExtraRequestInfoForHost(
net::URLRequest* request,
AppCacheHost* host,
ResourceType resource_type,
bool should_reset_appcache) {
// Create a handler for this request and associate it with the request.
std::unique_ptr<AppCacheRequestHandler> handler =
host->CreateRequestHandler(AppCacheURLRequest::Create(request),
resource_type, should_reset_appcache);
if (handler)
SetHandler(request, std::move(handler));
}
void AppCacheInterceptor::GetExtraResponseInfo(net::URLRequest* request,
int64_t* cache_id,
GURL* manifest_url) {
DCHECK(*cache_id == kAppCacheNoCacheId);
DCHECK(manifest_url->is_empty());
AppCacheRequestHandler* handler = GetHandler(request);
if (handler)
handler->GetExtraResponseInfo(cache_id, manifest_url);
}
AppCacheInterceptor::AppCacheInterceptor() {
}
AppCacheInterceptor::~AppCacheInterceptor() {
}
net::URLRequestJob* AppCacheInterceptor::MaybeInterceptRequest(
net::URLRequest* request, net::NetworkDelegate* network_delegate) const {
AppCacheRequestHandler* handler = GetHandler(request);
if (!handler)
return nullptr;
AppCacheJob* job = handler->MaybeLoadResource(network_delegate);
return job ? job->AsURLRequestJob() : nullptr;
}
net::URLRequestJob* AppCacheInterceptor::MaybeInterceptRedirect(
net::URLRequest* request,
net::NetworkDelegate* network_delegate,
const GURL& location) const {
AppCacheRequestHandler* handler = GetHandler(request);
if (!handler)
return nullptr;
AppCacheJob* job =
handler->MaybeLoadFallbackForRedirect(network_delegate, location);
return job ? job->AsURLRequestJob() : nullptr;
}
net::URLRequestJob* AppCacheInterceptor::MaybeInterceptResponse(
net::URLRequest* request, net::NetworkDelegate* network_delegate) const {
AppCacheRequestHandler* handler = GetHandler(request);
if (!handler)
return nullptr;
AppCacheJob* job = handler->MaybeLoadFallbackForResponse(network_delegate);
return job ? job->AsURLRequestJob() : nullptr;
}
} // namespace content
| 1,659 |
305 | // Test without serialization:
// RUN: %clang_cc1 -triple x86_64-pc-linux -fdouble-square-bracket-attributes \
// RUN: -Wno-deprecated-declarations -ast-dump -ast-dump-filter Test %s \
// RUN: | FileCheck --strict-whitespace %s
//
// Test with serialization:
// RUN: %clang_cc1 -triple x86_64-pc-linux -fdouble-square-bracket-attributes \
// RUN: -Wno-deprecated-declarations -emit-pch -o %t %s
// RUN: %clang_cc1 -x c -triple x86_64-pc-linux -fdouble-square-bracket-attributes \
// RUN: -Wno-deprecated-declarations -include-pch %t -ast-dump-all -ast-dump-filter Test /dev/null \
// RUN: | sed -e "s/ <undeserialized declarations>//" -e "s/ imported//" \
// RUN: | FileCheck --strict-whitespace %s
int Test1 [[deprecated]];
// CHECK: VarDecl{{.*}}Test1
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:13> "" ""
enum [[deprecated("Frobble")]] Test2 {
Test3 [[deprecated]]
};
// CHECK: EnumDecl{{.*}}Test2
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:8, col:28> "Frobble" ""
// CHECK-NEXT: EnumConstantDecl{{.*}}Test3
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:11> "" ""
struct [[deprecated]] Test4 {
[[deprecated("Frobble")]] int Test5, Test6;
int Test7 [[deprecated]] : 12;
};
// CHECK: RecordDecl{{.*}}Test4
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:10> "" ""
// CHECK-NEXT: FieldDecl{{.*}}Test5
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:5, col:25> "Frobble" ""
// CHECK-NEXT: FieldDecl{{.*}}Test6
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:5, col:25> "Frobble" ""
// CHECK-NEXT: FieldDecl{{.*}}Test7
// CHECK-NEXT: Constant{{.*}}'int'
// CHECK-NEXT: IntegerLiteral{{.*}}'int' 12
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:15> "" ""
struct [[deprecated]] Test8;
// CHECK: RecordDecl{{.*}}Test8
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:10> "" ""
[[deprecated]] void Test9(int Test10 [[deprecated]]);
// CHECK: FunctionDecl{{.*}}Test9
// CHECK-NEXT: ParmVarDecl{{.*}}Test10
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:40> "" ""
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:3> "" ""
void Test11 [[deprecated]](void);
// CHECK: FunctionDecl{{.*}}Test11
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:15> "" ""
void Test12(void) [[deprecated]] {}
// CHECK: FunctionDecl{{.*}}Test12
// CHECK-NEXT: CompoundStmt
// CHECK-NEXT: DeprecatedAttr 0x{{[^ ]*}} <col:21> "" ""
| 1,106 |
626 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <memory>
#include "QirRuntimeApi_I.hpp"
#include "QSharpSimApi_I.hpp"
namespace Microsoft
{
namespace Quantum
{
class CustomSimulator : public IRuntimeDriver, public IQuantumGateSet, public IDiagnostics
{
public:
CustomSimulator()
{
}
~CustomSimulator() override
{
}
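        // NOTE: the driver, diagnostics and gate-set methods below are left as
        // empty stubs; this file is only a skeleton for a custom backend. A
        // working simulator would have to fill them in, and the non-void ones
        // (UseZero(), UseOne(), AllocateQubit(), Measure(), ...) must return a
        // value, since falling off the end of a non-void function is undefined
        // behaviour.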
///
/// Implementation of IRuntimeDriver
///
void ReleaseResult(Result r) override
{
}
bool AreEqualResults(Result r1, Result r2) override
{
}
ResultValue GetResultValue(Result r) override
{
}
Result UseZero() override
{
}
Result UseOne() override
{
}
Qubit AllocateQubit() override
{
}
void ReleaseQubit(Qubit q) override
{
}
std::string QubitToString(Qubit q) override
{
}
///
/// Implementation of IDiagnostics
///
bool Assert(long numTargets, PauliId* bases, Qubit* targets, Result result, const char* failureMessage) override
{
}
bool AssertProbability(long numTargets, PauliId bases[], Qubit targets[], double probabilityOfZero, double precision, const char* failureMessage) override
{
}
// Deprecated, use `DumpMachine()` and `DumpRegister()` instead.
void GetState(TGetStateCallback callback) override
{
}
void DumpMachine(const void* location) override
{
}
void DumpRegister(const void* location, const QirArray* qubits) override
{
}
///
/// Implementation of IQuantumGateSet
///
void X(Qubit q) override
{
}
void ControlledX(long numControls, Qubit controls[], Qubit target) override
{
}
void Y(Qubit q) override
{
}
void ControlledY(long numControls, Qubit controls[], Qubit target) override
{
}
void Z(Qubit q) override
{
}
void ControlledZ(long numControls, Qubit controls[], Qubit target) override
{
}
void H(Qubit q) override
{
}
void ControlledH(long numControls, Qubit controls[], Qubit target) override
{
}
void S(Qubit q) override
{
}
void ControlledS(long numControls, Qubit controls[], Qubit target) override
{
}
void AdjointS(Qubit q) override
{
}
void ControlledAdjointS(long numControls, Qubit controls[], Qubit target) override
{
}
void T(Qubit q) override
{
}
void ControlledT(long numControls, Qubit controls[], Qubit target) override
{
}
void AdjointT(Qubit q) override
{
}
void ControlledAdjointT(long numControls, Qubit controls[], Qubit target) override
{
}
void R(PauliId axis, Qubit target, double theta) override
{
}
void ControlledR(long numControls, Qubit controls[], PauliId axis, Qubit target, double theta) override
{
}
void Exp(long numTargets, PauliId paulis[], Qubit targets[], double theta) override
{
}
void ControlledExp(long numControls, Qubit controls[], long numTargets, PauliId paulis[], Qubit targets[], double theta) override
{
}
Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override
{
}
}; // class CustomSimulator
std::unique_ptr<IRuntimeDriver> CreateCustomSimulator()
{
return std::make_unique<CustomSimulator>();
}
} // namespace Quantum
} // namespace Microsoft
| 1,845 |
843 | # Generated by Django 2.2.6 on 2019-11-07 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blocklist', '0002_auto_20191107_1302'),
]
operations = [
migrations.RemoveField(
model_name='block',
name='addon',
),
migrations.AlterField(
model_name='block',
name='guid',
field=models.CharField(max_length=255, unique=True),
),
]
| 237 |
3,710 | #include <memory>
// TnzCore includes
#include "tgl.h"
// TnzExt includes
#include "ext/meshtexturizer.h"
// tcg includes
#include "tcg/tcg_list.h"
// Qt includes
#include <QString>
#include <QCache>
#include <QMutex>
#include <QMutexLocker>
#include "ext/ttexturesstorage.h"
//***************************************************************************************
// Local namespace - structures
//***************************************************************************************
struct TexturesContainer {
MeshTexturizer
m_texturizer; //!< The mesh texturizer - actual textures container
tcg::list<QString> m_keys; //!< Keys in the storage
public:
TexturesContainer() {}
private:
TexturesContainer(const TexturesContainer &);
TexturesContainer &operator=(const TexturesContainer &);
};
//***************************************************************************************
// Local namespace - variables
//***************************************************************************************
namespace {
QMutex l_mutex(QMutex::Recursive); // A mutex is needed to synchronize access
// to the following objects
std::map<int, TexturesContainer *>
l_texturesContainers; // Texture Containers by display lists space id
QCache<QString, DrawableTextureDataP> l_objects(500 * 1024);  // 500 MB cache for now
                                                              // NOTE: MUST be allocated before the following
} // namespace
//***************************************************************************************
// Local namespace - global functions
//***************************************************************************************
namespace {
inline QString textureString(int dlSpaceId, const std::string &texId) {
return QString::number(dlSpaceId) + "_" + QString::fromStdString(texId);
}
//-------------------------------------------------------------------------------------
inline void deleteTexturesContainer(
const std::pair<int, TexturesContainer *> &pair) {
delete pair.second;
}
}
//***************************************************************************************
// DrawableTextureData implementation
//***************************************************************************************
DrawableTextureData::~DrawableTextureData() {
QMutexLocker locker(&l_mutex);
TexturesContainer *texContainer = l_texturesContainers[m_dlSpaceId];
if (m_dlSpaceId >= 0) {
// Load the container's display lists space (remember current OpenGL
// context, too)
TGLDisplayListsProxy *proxy =
TGLDisplayListsManager::instance()->dlProxy(m_dlSpaceId);
TGlContext currentContext = tglGetCurrentContext();
// Unbind the textures
{
QMutexLocker locker(proxy->mutex());
proxy->makeCurrent();
texContainer->m_texturizer.unbindTexture(m_texId);
}
// Restore OpenGL context - equivalent to tglDoneCurrent if currentContext
// == TGlContext()
tglMakeCurrent(currentContext);
} else
// Temporary - use current OpenGL context directly
texContainer->m_texturizer.unbindTexture(m_texId);
texContainer->m_keys.erase(m_objIdx);
}
//***************************************************************************************
// TTexturesStorage implementation
//***************************************************************************************
TTexturesStorage::TTexturesStorage() {
// This singleton is dependent on TGLDisplayListsManager
TGLDisplayListsManager::instance()->addObserver(this);
}
//-------------------------------------------------------------------------------------
TTexturesStorage::~TTexturesStorage() {
l_objects.clear();
std::for_each(l_texturesContainers.begin(), l_texturesContainers.end(),
deleteTexturesContainer);
}
//-------------------------------------------------------------------------------------
TTexturesStorage *TTexturesStorage::instance() {
static TTexturesStorage theInstance;
return &theInstance;
}
//-------------------------------------------------------------------------------------
DrawableTextureDataP TTexturesStorage::loadTexture(const std::string &textureId,
const TRaster32P &ras,
const TRectD &geometry) {
// Try to retrieve the proxy associated to current OpenGL context
TGlContext currentContext = tglGetCurrentContext();
int dlSpaceId =
TGLDisplayListsManager::instance()->displayListsSpaceId(currentContext);
QString texString(::textureString(dlSpaceId, textureId));
// Deal with containers
QMutexLocker locker(&l_mutex);
// If necessary, allocate a textures container
std::map<int, TexturesContainer *>::iterator it =
l_texturesContainers.find(dlSpaceId);
if (it == l_texturesContainers.end())
it = l_texturesContainers
.insert(std::make_pair(dlSpaceId, new TexturesContainer))
.first;
MeshTexturizer &texturizer = it->second->m_texturizer;
DrawableTextureDataP dataPtr = std::make_shared<DrawableTextureData>();
DrawableTextureData *data = dataPtr.get();
data->m_dlSpaceId = dlSpaceId;
data->m_texId = texturizer.bindTexture(ras, geometry);
data->m_objIdx = it->second->m_keys.push_back(texString);
data->m_textureData = texturizer.getTextureData(data->m_texId);
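  // The QCache cost is expressed in KB (l_objects above was sized as
  // 500 * 1024 KB), hence the raster byte size is shifted right by 10 below.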
l_objects.insert(texString, new DrawableTextureDataP(dataPtr),
(ras->getLx() * ras->getLy() * ras->getPixelSize()) >> 10);
if (dlSpaceId < 0) {
// obj is a temporary. It was pushed in the cache to make space for it -
// however, it must not be
// stored. Remove it now.
l_objects.remove(texString);
}
return dataPtr;
}
//-------------------------------------------------------------------------------------
void TTexturesStorage::unloadTexture(const std::string &textureId) {
QMutexLocker locker(&l_mutex);
// Remove the specified texture from ALL the display lists spaces
std::map<int, TexturesContainer *>::iterator it,
iEnd(l_texturesContainers.end());
for (it = l_texturesContainers.begin(); it != iEnd; ++it)
l_objects.remove(::textureString(it->first, textureId));
}
//-----------------------------------------------------------------------------------
void TTexturesStorage::onDisplayListDestroyed(int dlSpaceId) {
QMutexLocker locker(&l_mutex);
// Remove the textures container associated with dlSpaceId
std::map<int, TexturesContainer *>::iterator it =
l_texturesContainers.find(dlSpaceId);
if (it == l_texturesContainers.end()) return;
tcg::list<QString>::iterator st, sEnd(it->second->m_keys.end());
for (st = it->second->m_keys.begin(); st != sEnd;) // Note that the increment
// is performed BEFORE the
// texture is removed.
l_objects.remove(*st++); // This is because texture removal may destroy the
// key being addressed,
// whose iterator would then be invalidated.
delete it->second;
l_texturesContainers.erase(it);
}
//-------------------------------------------------------------------------------------
DrawableTextureDataP TTexturesStorage::getTextureData(
const std::string &textureId) {
// Get current display lists space
TGlContext currentContext = tglGetCurrentContext();
int dlSpaceId =
TGLDisplayListsManager::instance()->displayListsSpaceId(currentContext);
// If there is no known associated display lists space, the texture cannot be
// stored.
if (dlSpaceId < 0) return DrawableTextureDataP();
QMutexLocker locker(&l_mutex);
// Search the texture object
QString texString(::textureString(dlSpaceId, textureId));
if (!l_objects.contains(texString)) return DrawableTextureDataP();
return *l_objects.object(texString);
}
| 2,780 |
348 | {"nom":"Clermont-Ferrand","circ":"1ère circonscription","dpt":"Puy-de-Dôme","inscrits":48124,"abs":29417,"votants":18707,"blancs":992,"nuls":451,"exp":17264,"res":[{"nuance":"REM","nom":"<NAME>","voix":9589},{"nuance":"FI","nom":"M. <NAME>","voix":7675}]} | 105 |
333 | package com.alipay.api.response;
import java.util.List;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.internal.mapping.ApiListField;
import com.alipay.api.AlipayResponse;
/**
* ALIPAY API: alipay.data.zbdm.lineage.query response.
*
* @author <NAME>
* @since 1.0, 2021-02-02 14:19:24
*/
public class AlipayDataZbdmLineageQueryResponse extends AlipayResponse {
private static final long serialVersionUID = 3796544224483852889L;
/**
	 * Parameter name: the set of edges returned by lineage exploration.
	 Usage scenario: returns each edge's start id and end id, already reversed.
	 How to obtain: fetched from geabase.
*/
@ApiListField("edges")
@ApiField("string")
private List<String> edges;
/**
	 * Parameter name: the set of vertices returned by lineage exploration.
	 Usage scenario: returns the tables/fields within 20 levels upstream or downstream of a table/field.
	 How to obtain: fetched from geabase.
*/
@ApiListField("vertices")
@ApiField("string")
private List<String> vertices;
public void setEdges(List<String> edges) {
this.edges = edges;
}
public List<String> getEdges( ) {
return this.edges;
}
public void setVertices(List<String> vertices) {
this.vertices = vertices;
}
public List<String> getVertices( ) {
return this.vertices;
}
}
| 615 |
2,517 | // This file is part of CAF, the C++ Actor Framework. See the file LICENSE in
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#define CAF_SUITE intrusive_ptr
#include "caf/intrusive_ptr.hpp"
#include "core-test.hpp"
// this test doesn't verify thread-safety of intrusive_ptr
// however, it is thread safe since it uses atomic operations only
#include <vector>
#include <cstddef>
#include "caf/ref_counted.hpp"
#include "caf/make_counted.hpp"
using namespace caf;
namespace {
int class0_instances = 0;
int class1_instances = 0;
class class0;
class class1;
using class0ptr = intrusive_ptr<class0>;
using class1ptr = intrusive_ptr<class1>;
class class0 : public ref_counted {
public:
explicit class0(bool subtype = false) : subtype_(subtype) {
if (!subtype) {
++class0_instances;
}
}
~class0() override {
if (!subtype_) {
--class0_instances;
}
}
bool is_subtype() const {
return subtype_;
}
virtual class0ptr create() const {
return make_counted<class0>();
}
private:
bool subtype_;
};
class class1 : public class0 {
public:
class1() : class0(true) {
++class1_instances;
}
~class1() override {
--class1_instances;
}
class0ptr create() const override {
return make_counted<class1>();
}
};
class0ptr get_test_rc() {
return make_counted<class0>();
}
class0ptr get_test_ptr() {
return get_test_rc();
}
struct fixture {
~fixture() {
CAF_CHECK_EQUAL(class0_instances, 0);
CAF_CHECK_EQUAL(class1_instances, 0);
}
};
} // namespace
CAF_TEST_FIXTURE_SCOPE(atom_tests, fixture)
CAF_TEST(make_counted) {
auto p = make_counted<class0>();
CAF_CHECK_EQUAL(class0_instances, 1);
CAF_CHECK(p->unique());
}
CAF_TEST(reset) {
class0ptr p;
p.reset(new class0, false);
CAF_CHECK_EQUAL(class0_instances, 1);
CAF_CHECK(p->unique());
}
CAF_TEST(get_test_rc) {
class0ptr p1;
p1 = get_test_rc();
class0ptr p2 = p1;
CAF_CHECK_EQUAL(class0_instances, 1);
CAF_CHECK_EQUAL(p1->unique(), false);
}
CAF_TEST(list) {
std::vector<class0ptr> pl;
pl.push_back(get_test_ptr());
pl.push_back(get_test_rc());
pl.push_back(pl.front()->create());
CAF_CHECK(pl.front()->unique());
CAF_CHECK_EQUAL(class0_instances, 3);
}
CAF_TEST(full_test) {
auto p1 = make_counted<class0>();
CAF_CHECK_EQUAL(p1->is_subtype(), false);
CAF_CHECK_EQUAL(p1->unique(), true);
CAF_CHECK_EQUAL(class0_instances, 1);
CAF_CHECK_EQUAL(class1_instances, 0);
p1.reset(new class1, false);
CAF_CHECK_EQUAL(p1->is_subtype(), true);
CAF_CHECK_EQUAL(p1->unique(), true);
CAF_CHECK_EQUAL(class0_instances, 0);
CAF_CHECK_EQUAL(class1_instances, 1);
auto p2 = make_counted<class1>();
p1 = p2;
CAF_CHECK_EQUAL(p1->unique(), false);
CAF_CHECK_EQUAL(class0_instances, 0);
CAF_CHECK_EQUAL(class1_instances, 1);
CAF_CHECK_EQUAL(p1, static_cast<class0*>(p2.get()));
}
CAF_TEST_FIXTURE_SCOPE_END()
| 1,269 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Helsinki","circ":"3ème circonscription","dpt":"Français établis hors de France","inscrits":1760,"abs":1370,"votants":390,"blancs":6,"nuls":3,"exp":381,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":210},{"nuance":"SOC","nom":"Mme <NAME>","voix":171}]} | 121 |
1,379 | <filename>clients/cpp/src/SocketStoreClientFactory.cpp
/* -*- C++ -*-; c-basic-offset: 4; indent-tabs-mode: nil */
/*
* Implementation for SocketStoreClientFactory class.
*
* Copyright (c) 2009 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
#include <voldemort/BootstrapFailureException.h>
#include <voldemort/SocketStoreClientFactory.h>
#include "SocketStore.h"
#include "RoutedStore.h"
#include "RequestFormat.h"
#include "DefaultStoreClient.h"
#include "Cluster.h"
#include "InconsistencyResolvingStore.h"
#include "TimeBasedInconsistencyResolver.h"
#include "VectorClockInconsistencyResolver.h"
#include "RoundRobinRoutingStrategy.h"
#include <iostream>
#include <exception>
#include <ctype.h>
namespace Voldemort {
using namespace boost;
using namespace std;
static const string METADATA_STORE_NAME("metadata");
static const string CLUSTER_KEY("cluster.xml");
static const string STORES_KEY("stores.xml");
static const string ROLLBACK_CLUSTER_KEY("rollback.cluster.xml");
class SocketStoreClientFactoryImpl {
public:
SocketStoreClientFactoryImpl(ClientConfig& conf);
~SocketStoreClientFactoryImpl();
/**
* Get a raw socket store object
*/
Store* getStore(const string& storeName,
const string& host,
int port,
RequestFormat::RequestFormatType type);
/**
* Retrieve the given metadata key using the bootstrap list
*/
VersionedValue bootstrapMetadata(const string& key);
Store* getStore(const string& storeName,
const string& host,
int port,
RequestFormat::RequestFormatType type,
bool shouldReroute);
shared_ptr<ClientConfig> config;
shared_ptr<ConnectionPool> connPool;
RequestFormat::RequestFormatType requestFormatType;
};
SocketStoreClientFactoryImpl::SocketStoreClientFactoryImpl(ClientConfig& conf)
: config(new ClientConfig(conf)), connPool(new ConnectionPool(config)),
requestFormatType(RequestFormat::PROTOCOL_BUFFERS)
{
}
SocketStoreClientFactoryImpl::~SocketStoreClientFactoryImpl() {
}
Store* SocketStoreClientFactoryImpl::getStore(const string& storeName,
const string& host,
int port,
RequestFormat::RequestFormatType type,
bool shouldReroute) {
return new SocketStore(storeName,
host,
port,
config,
connPool,
type,
shouldReroute);
}
#define THROW_BOOTSTRAP \
throw BootstrapFailureException("Invalid bootstrap URL " + url + \
": Expected tcp://host:port");
#define EXPECT_C(char, next) \
if (tolower(*it) != char) \
THROW_BOOTSTRAP; \
state = next;
static void parseBootstrapUrl(const string& url, string& host, int& port) {
static const int STATE_T = 0;
static const int STATE_C = 1;
static const int STATE_P = 2;
static const int STATE_COL1 = 3;
static const int STATE_SL1 = 4;
static const int STATE_SL2 = 5;
static const int STATE_HOSTSTRING = 6;
static const int STATE_PORTSTRING = 7;
stringstream hostStr, portStr;
int state = STATE_T;
string::const_iterator it;
for (it = url.begin(); it != url.end(); ++it) {
switch(state) {
case STATE_T:
EXPECT_C('t', STATE_C);
break;
case STATE_C:
EXPECT_C('c', STATE_P);
break;
case STATE_P:
EXPECT_C('p', STATE_COL1);
break;
case STATE_COL1:
EXPECT_C(':', STATE_SL1);
break;
case STATE_SL1:
EXPECT_C('/', STATE_SL2);
break;
case STATE_SL2:
EXPECT_C('/', STATE_HOSTSTRING);
break;
case STATE_HOSTSTRING:
if (isalnum(*it) || *it == '.' || *it == '-')
hostStr << *it;
else if (*it == ':')
state = STATE_PORTSTRING;
else
THROW_BOOTSTRAP;
break;
case STATE_PORTSTRING:
if (isdigit(*it))
portStr << *it;
else
THROW_BOOTSTRAP;
break;
}
}
if (hostStr.str().length() == 0)
THROW_BOOTSTRAP;
if (portStr.str().length() == 0)
THROW_BOOTSTRAP;
host = hostStr.str();
portStr >> port;
if (port == 0)
THROW_BOOTSTRAP;
}
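// Added illustration (hypothetical values): "tcp://voldemort-host1:6666" yields
// host = "voldemort-host1" and port = 6666, while anything that deviates from
// the exact tcp://host:port form (e.g. "http://host:6666" or "tcp://host")
// triggers THROW_BOOTSTRAP and raises BootstrapFailureException.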
VersionedValue SocketStoreClientFactoryImpl::bootstrapMetadata(const string& key) {
std::list<std::string>* boots = config->getBootstrapUrls();
std::list<std::string>::const_iterator it;
for (it = boots->begin(); it != boots->end(); ++it) {
try {
string host;
int port;
parseBootstrapUrl(*it, host, port);
auto_ptr<Store> store(getStore(METADATA_STORE_NAME,
host,
port,
requestFormatType,
false));
auto_ptr<std::list<VersionedValue> > vvs(store->get(key));
if (vvs->size() == 1) {
return vvs->front();
}
} catch (std::exception& e) {
/* XXX - TODO Need a real logging mechanism */
cerr << "Warning: Could not bootstrap '" << *it << "': "
<< e.what() << endl;
}
}
throw BootstrapFailureException("No available bootstrap servers found!");
}
SocketStoreClientFactory::SocketStoreClientFactory(ClientConfig& conf) {
pimpl_ = new SocketStoreClientFactoryImpl(conf);
}
SocketStoreClientFactory::~SocketStoreClientFactory() {
if (pimpl_)
delete pimpl_;
}
StoreClient* SocketStoreClientFactory::getStoreClient(const std::string& storeName) {
shared_ptr<InconsistencyResolver> nullResolver;
return getStoreClient(storeName, nullResolver);
}
StoreClient* SocketStoreClientFactory::
getStoreClient(const std::string& storeName,
shared_ptr<InconsistencyResolver>& resolver) {
shared_ptr<Store> store(getRawStore(storeName, resolver));
return new DefaultStoreClient(store,
resolver,
pimpl_->config,
this);
}
Store* SocketStoreClientFactory::getRawStore(const std::string& storeName,
shared_ptr<InconsistencyResolver>& resolver) {
VersionedValue clustervv = pimpl_->bootstrapMetadata(CLUSTER_KEY);
const std::string* clusterXml = clustervv.getValue();
shared_ptr<Cluster> cluster(new Cluster(*clusterXml));
shared_ptr<std::map<int, shared_ptr<Store> > >
clusterMap(new std::map<int, shared_ptr<Store> >());
const std::map<int, boost::shared_ptr<Node> >* nodeMap = cluster->getNodeMap();
std::map<int, boost::shared_ptr<Node> >::const_iterator it;
for (it = nodeMap->begin(); it != nodeMap->end(); ++it) {
shared_ptr<Store> store(pimpl_->getStore(storeName,
it->second->getHost(),
it->second->getSocketPort(),
pimpl_->requestFormatType,
true));
(*clusterMap)[it->second->getId()] = store;
}
//VersionedValue storevv = pimpl_->bootstrapMetadata(STORES_KEY);
//const std::string* storesXml = storevv.getValue();
// Routed store
shared_ptr<RoutingStrategy>
routingStrategy(new RoundRobinRoutingStrategy(pimpl_->config,
cluster));
shared_ptr<Store> routedStore(new RoutedStore(storeName,
pimpl_->config,
cluster,
clusterMap,
routingStrategy));
InconsistencyResolvingStore* conStore = new InconsistencyResolvingStore(routedStore);
try {
shared_ptr<InconsistencyResolver>
vcResolver(new VectorClockInconsistencyResolver());
conStore->addResolver(vcResolver);
if (resolver.get()) {
conStore->addResolver(resolver);
} else {
shared_ptr<InconsistencyResolver>
tbResolver(new TimeBasedInconsistencyResolver());
conStore->addResolver(tbResolver);
}
} catch (...) {
if (conStore) delete conStore;
throw;
}
return conStore;
}
} /* namespace Voldemort */
| 4,545 |
1,821 | <reponame>hangqiu/pixie
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "src/carnot/planner/compiler/ast_visitor.h"
#include "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.pb.h"
namespace px {
namespace carnot {
namespace planner {
namespace compiler {
/**
 * Takes a tracepoint specification in PXL format and compiles it into a logical tracepoint program.
*/
StatusOr<carnot::planner::dynamic_tracing::ir::logical::TracepointDeployment> CompileTracepoint(
std::string_view query);
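// Illustrative call-site sketch (added; the PXL snippet and error handling are
// placeholders, not taken from this header):
//
//   constexpr std::string_view kTracepointPxl = "...PXL tracepoint spec...";
//   auto deployment_or = CompileTracepoint(kTracepointPxl);
//   if (!deployment_or.ok()) {
//     // surface deployment_or.status() to the caller
//   }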
} // namespace compiler
} // namespace planner
} // namespace carnot
} // namespace px
| 377 |
1,682 | <reponame>haroldl/rest.li<filename>restli-int-test-server/src/main/java/com/linkedin/restli/examples/greetings/server/NullGreetingsResourceImpl.java
/*
Copyright (c) 2014 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.linkedin.restli.examples.greetings.server;
import com.linkedin.common.callback.Callback;
import com.linkedin.data.template.StringArray;
import com.linkedin.parseq.BaseTask;
import com.linkedin.parseq.Task;
import com.linkedin.parseq.promise.Promise;
import com.linkedin.parseq.promise.Promises;
import com.linkedin.parseq.promise.SettablePromise;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.examples.greetings.api.Empty;
import com.linkedin.restli.examples.greetings.api.Greeting;
import com.linkedin.restli.examples.greetings.api.SearchMetadata;
import com.linkedin.restli.examples.greetings.api.Tone;
import com.linkedin.restli.server.ActionResult;
import com.linkedin.restli.server.BatchCreateRequest;
import com.linkedin.restli.server.BatchCreateResult;
import com.linkedin.restli.server.BatchDeleteRequest;
import com.linkedin.restli.server.BatchPatchRequest;
import com.linkedin.restli.server.BatchResult;
import com.linkedin.restli.server.BatchUpdateRequest;
import com.linkedin.restli.server.BatchUpdateResult;
import com.linkedin.restli.server.CollectionResult;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.CallbackParam;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.PagingContextParam;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.annotations.RestMethod;
import com.linkedin.restli.server.resources.CollectionResourceTemplate;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* Tests to observe restli's resilience for resource methods returning null. We are simply reusing
* the Greetings model here for our own null-generating purposes.
*
* @author <NAME>
*/
@RestLiCollection(name = "nullGreeting", namespace = "com.linkedin.restli.examples.greetings.client")
public class NullGreetingsResourceImpl extends CollectionResourceTemplate<Long, Greeting>
{
private static final String[] GREETINGS =
{"Good morning!", "Guten Morgen!", "Buenos dias!", "Bon jour!", "Buon Giorno!"};
private static final Tone[] TONES = {Tone.FRIENDLY, Tone.SINCERE, Tone.INSULTING};
private static final int INITIAL_SIZE = 20;
private static final String[] INITIAL_MESSAGES = new String[INITIAL_SIZE];
private static final Tone[] INITIAL_TONES = new Tone[INITIAL_SIZE];
private static Long ID_SEQ = 0l;
private static final Map<Long, Greeting> DB = new HashMap<>();
private static final ScheduledExecutorService SCHEDULER = Executors.newScheduledThreadPool(1);
private static final int DELAY = 100;
static
{
// generate some "random" initial data
for (int i = 0; i < INITIAL_SIZE; i++)
{
INITIAL_MESSAGES[i] = GREETINGS[i % GREETINGS.length];
}
for (int i = 0; i < INITIAL_SIZE; i++)
{
INITIAL_TONES[i] = TONES[i % TONES.length];
}
for (int i = 0; i < INITIAL_SIZE; i++)
{
Greeting g =
new Greeting().setId(ID_SEQ++).setMessage(INITIAL_MESSAGES[i]).setTone(INITIAL_TONES[i]);
DB.put(g.getId(), g);
}
}
public NullGreetingsResourceImpl()
{
}
@RestMethod.Create
public CreateResponse create(Greeting entity)
{
//Based off of the message in the greeting, we send back various types of nulls
if (entity.getMessage().equalsIgnoreCase("nullCreateResponse"))
{
//Return a null CreateResponse
return null;
}
else
{
//Return a valid CreateResponse but with a null HttpStatus
final HttpStatus nullStatus = null;
return new CreateResponse(nullStatus);
}
//Note, we don't need a test for returning a null entityID
}
@Finder("searchReturnNullList")
public List<Greeting> searchReturnNullList(@PagingContextParam PagingContext ctx, @QueryParam("tone") Tone tone)
{
if (tone == Tone.INSULTING)
{
//return a null list
return null;
}
else
{
//return a list with a null element in it
final List<Greeting> greetings = new ArrayList<>();
greetings.add(null);
greetings.add(DB.get(1));
return greetings;
}
}
@Finder("searchReturnNullCollectionList")
public CollectionResult<Greeting, SearchMetadata> searchReturnNullCollectionList(@PagingContextParam PagingContext ctx,
@QueryParam("tone") Tone tone)
{
if (tone == Tone.INSULTING)
{
//return a null CollectionResult
return null;
}
else if (tone == Tone.SINCERE)
{
//return a CollectionResult with a null list
return new CollectionResult<>(null);
}
else
{
//return a CollectionResult with a list that has a null element in it
final List<Greeting> greetings = new ArrayList<>();
greetings.add(null);
greetings.add(DB.get(1));
return new CollectionResult<>(greetings);
}
}
@RestMethod.Get
public Greeting get(Long key)
{
return null;
}
@RestMethod.GetAll
public CollectionResult<Greeting, Empty> getAllCollectionResult(@PagingContextParam PagingContext ctx)
{
return null;
}
@RestMethod.BatchGet
public BatchResult<Long, Greeting> batchGetBatchResult(Set<Long> ids)
{
final Map<Long, Greeting> greetingMap = new HashMap<>();
greetingMap.put(0l, DB.get(0l));
if (ids.contains(1l))
{
//Return null BatchResult
return null;
}
else if (ids.contains(2l))
{
//Return BatchResult with null maps
return new BatchResult<>(null, null, null);
}
else if (ids.contains(3l))
{
//Return a BatchResult with a null key in the status map.
final Map<Long, HttpStatus> statusMap = new HashMap<>();
statusMap.put(null, null);
return new BatchResult<>(greetingMap, statusMap, null);
}
else if (ids.contains(4l))
{
//Return a BatchResult that has a map with a null key.
greetingMap.put(null, null);
return new BatchResult<>(greetingMap, null, null);
}
else
{
/*
* Return a BatchResult with java.util.concurrent.ConcurrentHashMaps.
* This test is in place because certain map implementations, such as ConcurrentHashMap, can throw an NPE when
* calling contains(null). We want to verify that checking for the existence of nulls in maps returned by
* Rest.li resource methods do not cause such NPEs.
* This is one of the few cases in this file where an error will not be generated by Rest.li.
*/
final Map<Long, Greeting> concurrentGreetingMap = new ConcurrentHashMap<>(greetingMap);
return new BatchResult<>(concurrentGreetingMap,
new ConcurrentHashMap<>(), new ConcurrentHashMap<>());
}
}
@RestMethod.Update
public UpdateResponse update(Long key, Greeting entity)
{
if (key == 1l)
{
//Return null UpdateResponse
return null;
}
else
{
//Return an UpdateResponse with a null HttpStatus
return new UpdateResponse(null);
}
}
@RestMethod.BatchCreate
public BatchCreateResult<Long, Greeting> batchCreate(BatchCreateRequest<Long, Greeting> entities)
{
List<CreateResponse> responses = new ArrayList<>(1);
if (entities.getInput().size() == 0)
{
//Return null
return null;
}
else if (entities.getInput().size() == 1)
{
//Return a new BatchCreateResult with a null list
return new BatchCreateResult<>(null);
}
else
{
//Return a new BatchCreateResult with a response list that has a null inside of it
responses.add(new CreateResponse(1l));
responses.add(null);
return new BatchCreateResult<>(responses);
}
}
@RestMethod.BatchUpdate
public BatchUpdateResult<Long, Greeting> batchUpdate(BatchUpdateRequest<Long, Greeting> entities)
{
final Map<Long, UpdateResponse> responseMap = new HashMap<>();
responseMap.put(3l, new UpdateResponse(HttpStatus.S_201_CREATED));
final Map<Long, RestLiServiceException> errorsMap = new HashMap<>();
errorsMap.put(8l, new RestLiServiceException(HttpStatus.S_202_ACCEPTED));
if (entities.getData().containsKey(1l))
{
//Return a null BatchUpdateResult
return null;
}
else if (entities.getData().containsKey(2l))
{
//Return a BatchUpdateResult with a null results Map
return new BatchUpdateResult<>(null);
}
else if (entities.getData().containsKey(3l))
{
//Return a BatchUpdateResult with a null errors Map
return new BatchUpdateResult<>(responseMap, null);
}
else if (entities.getData().containsKey(4l))
{
//Return a BatchUpdateResult with a errors Map that has a null key in it
errorsMap.put(null, new RestLiServiceException(HttpStatus.S_202_ACCEPTED));
return new BatchUpdateResult<>(responseMap, errorsMap);
}
else if (entities.getData().containsKey(5l))
{
//Return a BatchUpdateResult with a errors Map that has a null value in it
errorsMap.put(9l, null);
return new BatchUpdateResult<>(responseMap, errorsMap);
}
else if (entities.getData().containsKey(6l))
{
//Return a BatchUpdateResult with a map that has a null key in it
responseMap.put(null, new UpdateResponse(HttpStatus.S_201_CREATED));
return new BatchUpdateResult<>(responseMap);
}
else
{
/*
* Return a BatchUpdateResult with java.util.concurrent.ConcurrentHashMap(s).
* This test is in place because certain map implementations, such as ConcurrentHashMap, can throw an NPE when
* calling contains(null). We want to verify that checking for the existence of nulls in maps returned by
* Rest.li resource methods do not cause such NPEs.
* This is one of the few cases in this file where an error will not be generated by Rest.li.
*/
final Map<Long, UpdateResponse> concurrentResponseMap = new ConcurrentHashMap<>(responseMap);
return new BatchUpdateResult<>(concurrentResponseMap, new ConcurrentHashMap<>());
}
}
@RestMethod.BatchPartialUpdate
public BatchUpdateResult<Long, Greeting> batchUpdate(BatchPatchRequest<Long, Greeting> entityUpdates)
{
final Map<Long, UpdateResponse> responseMap = new HashMap<>();
responseMap.put(3l, new UpdateResponse(HttpStatus.S_201_CREATED));
if (entityUpdates.getData().containsKey(1l))
{
//Return a null BatchUpdateResult
return null;
}
else if (entityUpdates.getData().containsKey(2l))
{
//Return a BatchUpdateResult with a null results Map
return new BatchUpdateResult<>(null);
}
else if (entityUpdates.getData().containsKey(3l))
{
//Return a BatchUpdateResult with a null errors Map
return new BatchUpdateResult<>(responseMap, null);
}
else
{
//Return a BatchUpdateResult with a map that has a null key in it
responseMap.put(null, new UpdateResponse(HttpStatus.S_201_CREATED));
return new BatchUpdateResult<>(responseMap);
}
}
@RestMethod.BatchDelete
public BatchUpdateResult<Long, Greeting> batchDelete(BatchDeleteRequest<Long, Greeting> deleteRequest)
{
return null;
}
@RestMethod.Delete
public UpdateResponse delete(Long key)
{
return null;
}
@Action(name = "returnNullStringArray")
public StringArray returnNullStringArray()
{
return null;
}
@Action(name = "returnStringArrayWithNullElement")
public StringArray returnStringArrayWithNullElement()
{
//Return a StringArray with a null element
return new StringArray("abc", null, "def");
}
@Action(name = "returnNullActionResult")
public ActionResult<Integer> returnNull()
{
return null;
}
@Action(name = "returnActionResultWithNullValue")
public ActionResult<Integer> returnActionResultWithNullValue()
{
//Return an ActionResult with a null Value
final Integer nullInteger = null;
return new ActionResult<>(nullInteger);
}
@Action(name = "returnActionResultWithNullStatus")
public ActionResult<Integer> returnActionResultWithNullStatus()
{
//Return an ActionResult with a null HttpStatus
return new ActionResult<>(3, null);
}
@Finder("finderCallbackNullList")
public void finderCallbackNull(@PagingContextParam final PagingContext a, @QueryParam("tone") final Tone b,
@CallbackParam final Callback<List<Greeting>> callback)
{
final Runnable requestHandler = new Runnable()
{
public void run()
{
try
{
//Depending on the tone, we return a null list or a list with a null element
callback.onSuccess(searchReturnNullList(a, b));
}
catch (final Throwable throwable)
{
callback.onError(throwable);
}
}
};
SCHEDULER.schedule(requestHandler, DELAY, TimeUnit.MILLISECONDS);
}
@Finder("finderPromiseNullList")
public Promise<List<Greeting>> finderPromiseNullList(@PagingContextParam final PagingContext a, @QueryParam("tone") final Tone b)
{
final SettablePromise<List<Greeting>> result = Promises.settable();
final Runnable requestHandler = new Runnable()
{
public void run()
{
try
{
//Depending on the tone, we return a null list or a list with a null element
result.done(searchReturnNullList(a, b));
}
catch (final Throwable throwable)
{
result.fail(throwable);
}
}
};
SCHEDULER.schedule(requestHandler, DELAY, TimeUnit.MILLISECONDS);
return result;
}
@Finder("finderTaskNullList")
public Task<List<Greeting>> finderTaskNullList(@PagingContextParam final PagingContext a, @QueryParam("tone") final Tone b)
{
return new BaseTask<List<Greeting>>()
{
protected Promise<List<Greeting>> run(final com.linkedin.parseq.Context context) throws Exception
{
//Depending on the tone, we return a null list or a list with a null element
return Promises.value(searchReturnNullList(a, b));
}
};
}
}
| 5,483 |
370 | <filename>ehreader/src/tw/skyarrow/ehreader/app/download/DownloadDeleteConfirmDialog.java
package tw.skyarrow.ehreader.app.download;
import android.app.AlertDialog;
import android.app.Dialog;
import android.content.DialogInterface;
import android.os.Bundle;
import android.support.v4.app.DialogFragment;
import tw.skyarrow.ehreader.R;
/**
* Created by SkyArrow on 2014/2/2.
*/
public class DownloadDeleteConfirmDialog extends DialogFragment {
public static final String TAG = "DownloadDeleteConfirmDialog";
public static final String EXTRA_GALLERY = "id";
private long galleryId;
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
AlertDialog.Builder dialog = new AlertDialog.Builder(getActivity());
Bundle args = getArguments();
galleryId = args.getLong(EXTRA_GALLERY);
dialog.setTitle(R.string.delete_gallery_title)
.setMessage(R.string.delete_gallery_msg)
.setPositiveButton(R.string.delete, onSubmitClick)
.setNegativeButton(R.string.cancel, null);
return dialog.create();
}
private DialogInterface.OnClickListener onSubmitClick = new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialogInterface, int i) {
DialogFragment dialog = new DownloadDeleteDialog();
Bundle args = new Bundle();
args.putLong(DownloadDeleteDialog.EXTRA_GALLERY, galleryId);
dialog.setArguments(args);
dialog.show(getActivity().getSupportFragmentManager(), DownloadDeleteDialog.TAG);
}
};
}
| 599 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.gesturenav;
import static org.chromium.chrome.browser.gesturenav.NavigationSheetCoordinator.NAVIGATION_LIST_ITEM_TYPE_ID;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.drawable.BitmapDrawable;
import android.graphics.drawable.Drawable;
import android.text.TextUtils;
import android.view.View;
import org.chromium.chrome.R;
import org.chromium.chrome.browser.flags.ChromeFeatureList;
import org.chromium.chrome.browser.profiles.Profile;
import org.chromium.chrome.browser.ui.favicon.FaviconHelper;
import org.chromium.chrome.browser.ui.favicon.FaviconUtils;
import org.chromium.components.browser_ui.widget.RoundedIconGenerator;
import org.chromium.components.browser_ui.widget.TintedDrawable;
import org.chromium.components.embedder_support.util.UrlConstants;
import org.chromium.components.embedder_support.util.UrlUtilities;
import org.chromium.content_public.browser.NavigationEntry;
import org.chromium.content_public.browser.NavigationHistory;
import org.chromium.ui.modelutil.MVCListAdapter.ListItem;
import org.chromium.ui.modelutil.MVCListAdapter.ModelList;
import org.chromium.ui.modelutil.PropertyKey;
import org.chromium.ui.modelutil.PropertyModel;
import org.chromium.ui.modelutil.PropertyModel.WritableObjectPropertyKey;
import org.chromium.url.GURL;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* Mediator class for navigation sheet.
*/
class NavigationSheetMediator {
private static final String INCOGNITO_HISTORY_ENTRIES_FLAG =
ChromeFeatureList.UPDATE_HISTORY_ENTRY_POINTS_IN_INCOGNITO;
private final ClickListener mClickListener;
private final FaviconHelper mFaviconHelper;
private final RoundedIconGenerator mIconGenerator;
private final int mFaviconSize;
private final ModelList mModelList;
private final Drawable mHistoryIcon;
private final Drawable mDefaultIcon;
private final Drawable mIncognitoIcon;
private final String mNewTabText;
private final String mNewIncognitoTabText;
private final Profile mProfile;
private NavigationHistory mHistory;
/**
* Performs an action when a navigation item is clicked.
*/
interface ClickListener {
/**
* @param index Index from {@link NavigationEntry#getIndex()}.
* @param position Position of the clicked item in the list, starting from 0.
*/
void click(int position, int index);
}
static class ItemProperties {
/** The favicon for the list item. */
public static final WritableObjectPropertyKey<Drawable> ICON =
new WritableObjectPropertyKey<>();
/** The text shown next to the favicon. */
public static final WritableObjectPropertyKey<String> LABEL =
new WritableObjectPropertyKey<>();
/** {@link View#OnClickListener} to execute when each item is clicked. */
public static final WritableObjectPropertyKey<View.OnClickListener> CLICK_LISTENER =
new WritableObjectPropertyKey<>();
public static final PropertyKey[] ALL_KEYS = {ICON, LABEL, CLICK_LISTENER};
}
NavigationSheetMediator(
Context context, ModelList modelList, Profile profile, ClickListener listener) {
mModelList = modelList;
mClickListener = listener;
mProfile = profile;
mFaviconHelper = new FaviconHelper();
mIconGenerator = FaviconUtils.createCircularIconGenerator(context.getResources());
mFaviconSize = context.getResources().getDimensionPixelSize(R.dimen.default_favicon_size);
mHistoryIcon = TintedDrawable.constructTintedDrawable(
context, R.drawable.ic_history_googblue_24dp, R.color.default_icon_color_tint_list);
mDefaultIcon = TintedDrawable.constructTintedDrawable(
context, R.drawable.ic_chrome, R.color.default_icon_color_tint_list);
mIncognitoIcon = TintedDrawable.constructTintedDrawable(
context, R.drawable.incognito_small, R.color.default_icon_color_tint_list);
mNewTabText = context.getResources().getString(R.string.menu_new_tab);
mNewIncognitoTabText = context.getResources().getString(R.string.menu_new_incognito_tab);
}
/**
* Populate the sheet with the navigation history.
* @param history {@link NavigationHistory} object.
*/
void populateEntries(NavigationHistory history) {
mHistory = history;
Set<GURL> requestedUrls = new HashSet<>();
for (int i = 0; i < mHistory.getEntryCount(); i++) {
PropertyModel model = new PropertyModel(Arrays.asList(ItemProperties.ALL_KEYS));
NavigationEntry entry = mHistory.getEntryAtIndex(i);
model.set(ItemProperties.LABEL, getEntryText(entry));
final int position = i;
model.set(ItemProperties.CLICK_LISTENER,
(view) -> { mClickListener.click(position, entry.getIndex()); });
mModelList.add(new ListItem(NAVIGATION_LIST_ITEM_TYPE_ID, model));
if (entry.getFavicon() != null) continue;
final GURL pageUrl = entry.getUrl();
if (!requestedUrls.contains(pageUrl)) {
FaviconHelper.FaviconImageCallback imageCallback =
(bitmap, iconUrl) -> onFaviconAvailable(pageUrl, bitmap);
if (!pageUrl.getSpec().equals(UrlConstants.HISTORY_URL)) {
mFaviconHelper.getLocalFaviconImageForURL(
mProfile, pageUrl, mFaviconSize, imageCallback);
requestedUrls.add(pageUrl);
} else {
mModelList.get(i).model.set(ItemProperties.ICON, mHistoryIcon);
}
}
}
}
/**
* Remove the property model.
*/
void clear() {
mModelList.clear();
}
/**
     * Called when favicon data requested by {@link #populateEntries(NavigationHistory)} is retrieved.
* @param pageUrl the page for which the favicon was retrieved.
* @param favicon the favicon data.
*/
private void onFaviconAvailable(GURL pageUrl, Bitmap favicon) {
// This callback can come after the sheet is hidden (which clears modelList).
// Do nothing if that happens.
if (mModelList.size() == 0) return;
for (int i = 0; i < mHistory.getEntryCount(); i++) {
if (pageUrl.equals(mHistory.getEntryAtIndex(i).getUrl())) {
Drawable drawable;
if (favicon == null) {
drawable = UrlUtilities.isNTPUrl(pageUrl)
? getNTPIcon()
: new BitmapDrawable(mIconGenerator.generateIconForUrl(pageUrl));
} else {
drawable = new BitmapDrawable(favicon);
}
mModelList.get(i).model.set(ItemProperties.ICON, drawable);
}
}
}
private String getEntryText(NavigationEntry entry) {
String entryText = entry.getTitle();
if (UrlUtilities.isNTPUrl(entry.getUrl())) entryText = getNTPText();
if (TextUtils.isEmpty(entryText)) entryText = entry.getVirtualUrl().getSpec();
if (TextUtils.isEmpty(entryText)) entryText = entry.getUrl().getSpec();
return entryText;
}
private Drawable getNTPIcon() {
return mProfile.isOffTheRecord()
&& ChromeFeatureList.isEnabled(INCOGNITO_HISTORY_ENTRIES_FLAG)
? mIncognitoIcon
: mDefaultIcon;
}
private String getNTPText() {
return mProfile.isOffTheRecord()
&& ChromeFeatureList.isEnabled(INCOGNITO_HISTORY_ENTRIES_FLAG)
? mNewIncognitoTabText
: mNewTabText;
}
}
| 3,277 |
1,760 | #include "bits/stdc++.h"
using namespace std;
void setIO(string s) {
ios_base::sync_with_stdio(0); cin.tie(0);
freopen((s+".in").c_str(),"r",stdin);
freopen((s+".out").c_str(),"w",stdout);
}
int N;
bool G[300][300],GG[300][300];
long long ans;
void rot() {
for (int i = 0; i < N; ++i)
for (int j = 0; j < N; ++j)
GG[N-1-j][i] = G[i][j];
for (int i = 0; i < N; ++i)
for (int j = 0; j < N; ++j)
G[i][j] = GG[i][j];
}
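// Added explanation of the counting scheme used below: solve() fixes two
// anti-diagonals (cells with i+j == a and i+j == b, where b > a and both have the
// same parity), treats a marked cell on diagonal a as the apex and a pair of
// marked cells dif = (b-a)/2 apart on diagonal b as the opposite edge. `cur`
// holds how many such edges are currently pairable with the apex and is updated
// incrementally as the apex slides down the diagonal, so each (a, b) pair costs
// O(N). Repeating after each of the four grid rotations covers all orientations.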
void solve() { // corner in diagonal with sum a, other two vertices in diagonal with sum b
for (int a = 0; a < 2*N-1; ++a)
for (int b = a+2; b < 2*N-1; b += 2) {
int dif = (b-a)/2, st = max(0,a-(N-1)), en = min(a,N-1);
int cur = 0;
for (int i = st; i <= en; ++i) {
if (i == st) // consider (i,a-i) -> stuff in row b
for (int j = max(i,b-(N-1)); j < min(i+dif,N-dif); ++j)
cur += G[j][b-j] && G[j+dif][b-j-dif];
if (G[i][a-i]) ans += cur;
if (i+2*dif < N && b-(i+dif) < N)
cur += G[i+dif][b-i-dif] && G[i+2*dif][b-i-2*dif];
if (i+dif < N && b-i < N)
cur -= G[i][b-i] && G[i+dif][b-i-dif];
}
}
}
int main() {
setIO("triangles");
cin >> N;
for (int i = 0; i < N; ++i)
for (int j = 0; j < N; ++j) {
char c; cin >> c;
G[i][j] = c == '*';
}
for (int i = 0; i < 4; ++i) solve(), rot();
cout << ans << "\n";
} | 733 |
8,865 | <reponame>hnakamur/golang-1.13-race-detector-runtime-deb
//===-- dd_rtl.h ----------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef DD_RTL_H
#define DD_RTL_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __dsan {
typedef DDFlags Flags;
struct Mutex {
DDMutex dd;
};
struct Thread {
DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
bool ignore_interceptors;
};
struct Callback : DDCallback {
Thread *thr;
Callback(Thread *thr);
u32 Unwind() override;
};
typedef AddrHashMap<Mutex, 31051> MutexHashMap;
struct Context {
DDetector *dd;
BlockingMutex report_mutex;
MutexHashMap mutex_map;
};
inline Flags* flags() {
static Flags flags;
return &flags;
}
void Initialize();
void InitializeInterceptors();
void ThreadInit(Thread *thr);
void ThreadDestroy(Thread *thr);
void MutexBeforeLock(Thread *thr, uptr m, bool writelock);
void MutexAfterLock(Thread *thr, uptr m, bool writelock, bool trylock);
void MutexBeforeUnlock(Thread *thr, uptr m, bool writelock);
void MutexDestroy(Thread *thr, uptr m);
} // namespace __dsan
#endif // DD_RTL_H
| 581 |
356 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
def execute_rule(**kwargs):
    """Return True if the SQL statement contains a subquery inside its top-level SELECT list."""
    sql = kwargs.get("sql")
    m = 0  # current parenthesis nesting depth
    n = 0  # number of top-level SELECT keywords seen
    sql_content = []  # captured top-level "SELECT ... FROM" column lists
    sqlbegin = 0
    sqlend = 0
    str_len = len(sql)
for k in range(str_len):
if sql[k] == "(":
m = m + 1
if sql[k] == ")":
m = m - 1
if sql[k: k + 6] == "select" and m == 0:
sqlbegin = k + 7
n = n + 1
if sql[k: k + 4] == "from" and m == 0:
sqlend = k - 1
sql_content.append(sql[sqlbegin:sqlend])
for value in sql_content:
if "select" in value:
return True
return False
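# Illustrative usage (added; the example statements are hypothetical):
#   execute_rule(sql="select a, (select max(b) from t2) from t1")  # -> True
#   execute_rule(sql="select a, b from t1")                        # -> False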
| 356 |
368 | /*
Plugin-SDK (Grand Theft Auto 3) source file
Authors: GTA Community. See more here
https://github.com/DK22Pac/plugin-sdk
Do not delete this comment block. Respect others' work!
*/
#include "CWanted.h"
PLUGIN_SOURCE_FILE
PLUGIN_VARIABLE int &CWanted::MaximumWantedLevel = *reinterpret_cast<int *>(GLOBAL_ADDRESS_BY_VERSION(0x5F7714, 0x5F74FC, 0x6044F4));
PLUGIN_VARIABLE int &CWanted::nMaximumWantedLevel = *reinterpret_cast<int *>(GLOBAL_ADDRESS_BY_VERSION(0x5F7718, 0x5F7500, 0x6044F8));
int ctor_addr(CCrimeBeingQd) = ADDRESS_BY_VERSION(0x4EFB20, 0x4EFBD0, 0x4EFB60);
int ctor_gaddr(CCrimeBeingQd) = GLOBAL_ADDRESS_BY_VERSION(0x4EFB20, 0x4EFBD0, 0x4EFB60);
int addrof_o(CCrimeBeingQd::operator=, void (CCrimeBeingQd::*)(CCrimeBeingQd const &)) = ADDRESS_BY_VERSION(0x5966A0, 0x596950, 0x596840);
int gaddrof_o(CCrimeBeingQd::operator=, void (CCrimeBeingQd::*)(CCrimeBeingQd const &)) = GLOBAL_ADDRESS_BY_VERSION(0x5966A0, 0x596950, 0x596840);
void CCrimeBeingQd::operator=(CCrimeBeingQd const &right) {
plugin::CallMethodDynGlobal<CCrimeBeingQd *, CCrimeBeingQd const &>(gaddrof_o(CCrimeBeingQd::operator=, void (CCrimeBeingQd::*)(CCrimeBeingQd const &)), this, right);
}
int addrof(CWanted::AddCrimeToQ) = ADDRESS_BY_VERSION(0x4ADFD0, 0x4AE0C0, 0x4AE050);
int gaddrof(CWanted::AddCrimeToQ) = GLOBAL_ADDRESS_BY_VERSION(0x4ADFD0, 0x4AE0C0, 0x4AE050);
bool CWanted::AddCrimeToQ(eCrimeType crimeType, int crimeId, CVector const &pos, bool bAlreadyReported, bool bPoliceDontReallyCare) {
return plugin::CallMethodAndReturnDynGlobal<bool, CWanted *, eCrimeType, int, CVector const &, bool, bool>(gaddrof(CWanted::AddCrimeToQ), this, crimeType, crimeId, pos, bAlreadyReported, bPoliceDontReallyCare);
}
int addrof(CWanted::AreArmyRequired) = ADDRESS_BY_VERSION(0x4ADBE0, 0x4ADCD0, 0x4ADC60);
int gaddrof(CWanted::AreArmyRequired) = GLOBAL_ADDRESS_BY_VERSION(0x4ADBE0, 0x4ADCD0, 0x4ADC60);
bool CWanted::AreArmyRequired() {
return plugin::CallMethodAndReturnDynGlobal<bool, CWanted *>(gaddrof(CWanted::AreArmyRequired), this);
}
int addrof(CWanted::AreFbiRequired) = ADDRESS_BY_VERSION(0x4ADBC0, 0x4ADCB0, 0x4ADC40);
int gaddrof(CWanted::AreFbiRequired) = GLOBAL_ADDRESS_BY_VERSION(0x4ADBC0, 0x4ADCB0, 0x4ADC40);
bool CWanted::AreFbiRequired() {
return plugin::CallMethodAndReturnDynGlobal<bool, CWanted *>(gaddrof(CWanted::AreFbiRequired), this);
}
int addrof(CWanted::AreSwatRequired) = ADDRESS_BY_VERSION(0x4ADBA0, 0x4ADC90, 0x4ADC20);
int gaddrof(CWanted::AreSwatRequired) = GLOBAL_ADDRESS_BY_VERSION(0x4ADBA0, 0x4ADC90, 0x4ADC20);
bool CWanted::AreSwatRequired() {
return plugin::CallMethodAndReturnDynGlobal<bool, CWanted *>(gaddrof(CWanted::AreSwatRequired), this);
}
int addrof(CWanted::ClearQdCrimes) = ADDRESS_BY_VERSION(0x4ADF20, 0x4AE010, 0x4ADFA0);
int gaddrof(CWanted::ClearQdCrimes) = GLOBAL_ADDRESS_BY_VERSION(0x4ADF20, 0x4AE010, 0x4ADFA0);
void CWanted::ClearQdCrimes() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::ClearQdCrimes), this);
}
int addrof(CWanted::Initialise) = ADDRESS_BY_VERSION(0x4AD6E0, 0x4AD7D0, 0x4AD760);
int gaddrof(CWanted::Initialise) = GLOBAL_ADDRESS_BY_VERSION(0x4AD6E0, 0x4AD7D0, 0x4AD760);
void CWanted::Initialise() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::Initialise), this);
}
int addrof(CWanted::NumOfHelisRequired) = ADDRESS_BY_VERSION(0x4ADC00, 0x4ADCF0, 0x4ADC80);
int gaddrof(CWanted::NumOfHelisRequired) = GLOBAL_ADDRESS_BY_VERSION(0x4ADC00, 0x4ADCF0, 0x4ADC80);
int CWanted::NumOfHelisRequired() {
return plugin::CallMethodAndReturnDynGlobal<int, CWanted *>(gaddrof(CWanted::NumOfHelisRequired), this);
}
int addrof(CWanted::RegisterCrime) = ADDRESS_BY_VERSION(0x4AD9F0, 0x4ADAE0, 0x4ADA70);
int gaddrof(CWanted::RegisterCrime) = GLOBAL_ADDRESS_BY_VERSION(0x4AD9F0, 0x4ADAE0, 0x4ADA70);
void CWanted::RegisterCrime(eCrimeType crimeType, CVector const &pos, unsigned int crimeId, bool bPoliceDontReallyCare) {
plugin::CallMethodDynGlobal<CWanted *, eCrimeType, CVector const &, unsigned int, bool>(gaddrof(CWanted::RegisterCrime), this, crimeType, pos, crimeId, bPoliceDontReallyCare);
}
int addrof(CWanted::RegisterCrime_Immediately) = ADDRESS_BY_VERSION(0x4ADA10, 0x4ADB00, 0x4ADA90);
int gaddrof(CWanted::RegisterCrime_Immediately) = GLOBAL_ADDRESS_BY_VERSION(0x4ADA10, 0x4ADB00, 0x4ADA90);
void CWanted::RegisterCrime_Immediately(eCrimeType crimeType, CVector const &pos, unsigned int crimeId, bool bPoliceDontReallyCare) {
plugin::CallMethodDynGlobal<CWanted *, eCrimeType, CVector const &, unsigned int, bool>(gaddrof(CWanted::RegisterCrime_Immediately), this, crimeType, pos, crimeId, bPoliceDontReallyCare);
}
int addrof(CWanted::ReportCrimeNow) = ADDRESS_BY_VERSION(0x4AE110, 0x4AE200, 0x4AE190);
int gaddrof(CWanted::ReportCrimeNow) = GLOBAL_ADDRESS_BY_VERSION(0x4AE110, 0x4AE200, 0x4AE190);
void CWanted::ReportCrimeNow(eCrimeType crimeType, CVector const &pos, bool bPoliceDontReallyCare) {
plugin::CallMethodDynGlobal<CWanted *, eCrimeType, CVector const &, bool>(gaddrof(CWanted::ReportCrimeNow), this, crimeType, pos, bPoliceDontReallyCare);
}
int addrof(CWanted::Reset) = ADDRESS_BY_VERSION(0x4AD790, 0x4AD880, 0x4AD810);
int gaddrof(CWanted::Reset) = GLOBAL_ADDRESS_BY_VERSION(0x4AD790, 0x4AD880, 0x4AD810);
void CWanted::Reset() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::Reset), this);
}
int addrof(CWanted::ResetPolicePursuit) = ADDRESS_BY_VERSION(0x4ADC40, 0x4ADD30, 0x4ADCC0);
int gaddrof(CWanted::ResetPolicePursuit) = GLOBAL_ADDRESS_BY_VERSION(0x4ADC40, 0x4ADD30, 0x4ADCC0);
void CWanted::ResetPolicePursuit() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::ResetPolicePursuit), this);
}
int addrof(CWanted::SetWantedLevel) = ADDRESS_BY_VERSION(0x4ADA50, 0x4ADB40, 0x4ADAD0);
int gaddrof(CWanted::SetWantedLevel) = GLOBAL_ADDRESS_BY_VERSION(0x4ADA50, 0x4ADB40, 0x4ADAD0);
void CWanted::SetWantedLevel(int level) {
plugin::CallMethodDynGlobal<CWanted *, int>(gaddrof(CWanted::SetWantedLevel), this, level);
}
int addrof(CWanted::SetWantedLevelNoDrop) = ADDRESS_BY_VERSION(0x4ADAC0, 0x4ADBB0, 0x4ADB40);
int gaddrof(CWanted::SetWantedLevelNoDrop) = GLOBAL_ADDRESS_BY_VERSION(0x4ADAC0, 0x4ADBB0, 0x4ADB40);
void CWanted::SetWantedLevelNoDrop(int level) {
plugin::CallMethodDynGlobal<CWanted *, int>(gaddrof(CWanted::SetWantedLevelNoDrop), this, level);
}
int addrof(CWanted::Update) = ADDRESS_BY_VERSION(0x4AD7B0, 0x4AD8A0, 0x4AD830);
int gaddrof(CWanted::Update) = GLOBAL_ADDRESS_BY_VERSION(0x4AD7B0, 0x4AD8A0, 0x4AD830);
void CWanted::Update() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::Update), this);
}
int addrof(CWanted::UpdateCrimesQ) = ADDRESS_BY_VERSION(0x4AE090, 0x4AE180, 0x4AE110);
int gaddrof(CWanted::UpdateCrimesQ) = GLOBAL_ADDRESS_BY_VERSION(0x4AE090, 0x4AE180, 0x4AE110);
void CWanted::UpdateCrimesQ() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::UpdateCrimesQ), this);
}
int addrof(CWanted::UpdateWantedLevel) = ADDRESS_BY_VERSION(0x4AD900, 0x4AD9F0, 0x4AD980);
int gaddrof(CWanted::UpdateWantedLevel) = GLOBAL_ADDRESS_BY_VERSION(0x4AD900, 0x4AD9F0, 0x4AD980);
void CWanted::UpdateWantedLevel() {
plugin::CallMethodDynGlobal<CWanted *>(gaddrof(CWanted::UpdateWantedLevel), this);
}
int addrof(CWanted::SetMaximumWantedLevel) = ADDRESS_BY_VERSION(0x4ADAE0, 0x4ADBD0, 0x4ADB60);
int gaddrof(CWanted::SetMaximumWantedLevel) = GLOBAL_ADDRESS_BY_VERSION(0x4ADAE0, 0x4ADBD0, 0x4ADB60);
void CWanted::SetMaximumWantedLevel(int level) {
plugin::CallDynGlobal<int>(gaddrof(CWanted::SetMaximumWantedLevel), level);
}
int addrof(CWanted::WorkOutPolicePresence) = ADDRESS_BY_VERSION(0x4ADD00, 0x4ADDF0, 0x4ADD80);
int gaddrof(CWanted::WorkOutPolicePresence) = GLOBAL_ADDRESS_BY_VERSION(0x4ADD00, 0x4ADDF0, 0x4ADD80);
int CWanted::WorkOutPolicePresence(CVector pos, float radius) {
return plugin::CallAndReturnDynGlobal<int, CVector, float>(gaddrof(CWanted::WorkOutPolicePresence), pos, radius);
}
| 3,407 |
456 | <filename>disabled/atom_flight_manual.json
{
"index_name": "atom_flight_manual",
"start_urls": [
"https://docsearch-atom-flight-manual.netlify.com/"
],
"stop_urls": [],
"selectors": {
"lvl0": {
"selector": "//div[contains(@class, 'toc')]//li[contains(concat(' ', @class, ' '), ' selected ')][1]/preceding::h4[1]",
"type": "xpath",
"global": true,
"default_value": "Documentation"
},
"lvl1": ".document-content h3",
"lvl2": ".document-content h4",
"lvl3": ".document-content h5",
"lvl4": ".document-content h6",
"text": ".document-content p, .document-content li"
},
"nb_hits": 1779
}
| 288 |
5,098 | /* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Google LLC.
*/
#ifndef PLATFORMS_ASIC_SW_KERNEL_COMMON_GASKET_GASKET_TYPES_H_
#define PLATFORMS_ASIC_SW_KERNEL_COMMON_GASKET_GASKET_TYPES_H_
#ifdef __KERNEL__
#include <linux/types.h>
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef unsigned long long uint64;
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef long long int64;
#else
#include <stddef.h>
#include "base/integral_types.h"
#endif
#endif
| 241 |
2,113 | //-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#ifndef _RENDERTERRAINMGR_H_
#define _RENDERTERRAINMGR_H_
#ifndef _RENDERBINMANAGER_H_
#include "renderInstance/renderBinManager.h"
#endif
#ifndef _GFXVERTEXBUFFER_H_
#include "gfx/gfxVertexBuffer.h"
#endif
#ifndef _GFXPRIMITIVEBUFFER_H_
#include "gfx/gfxPrimitiveBuffer.h"
#endif
class TerrCell;
class GFXTextureObject;
class TerrainCellMaterial;
/// The render instance for terrain cells.
struct TerrainRenderInst : public RenderInst
{
GFXVertexBuffer *vertBuff;
GFXPrimitiveBuffer *primBuff;
GFXPrimitive prim;
BaseMatInstance *mat;
const MatrixF *objectToWorldXfm;
TerrainCellMaterial *cellMat;
/// The lights we pass to the material for
/// this cell in order light importance.
LightInfo *lights[8];
void clear()
{
dMemset( this, 0, sizeof( TerrainRenderInst ) );
type = RenderPassManager::RIT_Terrain;
}
};
///
class RenderTerrainMgr : public RenderBinManager
{
typedef RenderBinManager Parent;
protected:
Vector<TerrainRenderInst*> mInstVector;
static bool smRenderWireframe;
static S32 smCellsRendered;
static S32 smOverrideCells;
static S32 smDrawCalls;
static bool _clearStats( GFXDevice::GFXDeviceEventType type );
// RenderBinManager
virtual void internalAddElement( RenderInst *inst );
public:
RenderTerrainMgr();
RenderTerrainMgr( F32 renderOrder, F32 processAddOrder );
virtual ~RenderTerrainMgr();
// ConsoleObject
static void initPersistFields();
DECLARE_CONOBJECT(RenderTerrainMgr);
// RenderBinManager
virtual void sort();
virtual void render( SceneRenderState *state );
virtual void clear();
};
#endif // _RENDERTERRAINMGR_H_
| 901 |
2,338 | // RUN: %clang_cc1 -x c++ %s -fblocks -fsyntax-only -Wcast-function-type -triple x86_64-- -verify
int x(long);
typedef int (f1)(long);
typedef int (f2)(void*);
typedef int (f3)(...);
typedef void (f4)(...);
typedef void (f5)(void);
typedef int (f6)(long, int);
typedef int (f7)(long,...);
typedef int (&f8)(long, int);
f1 *a;
f2 *b;
f3 *c;
f4 *d;
f5 *e;
f6 *f;
f7 *g;
struct S
{
void foo (int*);
void bar (int);
};
typedef void (S::*mf)(int);
void foo() {
a = (f1 *)x;
b = (f2 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f2 *' (aka 'int (*)(void *)') converts to incompatible function type}} */
b = reinterpret_cast<f2 *>(x); /* expected-warning {{cast from 'int (*)(long)' to 'f2 *' (aka 'int (*)(void *)') converts to incompatible function type}} */
c = (f3 *)x;
d = (f4 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f4 *' (aka 'void (*)(...)') converts to incompatible function type}} */
e = (f5 *)x;
f = (f6 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f6 *' (aka 'int (*)(long, int)') converts to incompatible function type}} */
g = (f7 *)x;
mf p1 = (mf)&S::foo; /* expected-warning {{cast from 'void (S::*)(int *)' to 'mf' (aka 'void (S::*)(int)') converts to incompatible function type}} */
f8 f2 = (f8)x; /* expected-warning {{cast from 'int (long)' to 'f8' (aka 'int (&)(long, int)') converts to incompatible function type}} */
(void)f2;
int (^y)(long);
f = (f6 *)y; /* expected-warning {{cast from 'int (^)(long)' to 'f6 *' (aka 'int (*)(long, int)') converts to incompatible function type}} */
}
| 630 |
351 | <reponame>joshtburdick/deepTools
import sys
import os
from deeptoolsintervals import GTF
from deeptools.bamHandler import openBam
import matplotlib as mpl
mpl.use('Agg')
from deeptools import cm # noqa: F401
import numpy as np
debug = 0
def smartLabel(label):
"""
Given a file name, likely with a path, return the file name without the path
and with the file extension removed. Thus, something like /path/to/some.special.file
    should return some.special, since only the last extension (if present)
should be stripped.
"""
lab = os.path.splitext(os.path.basename(label))[0]
if lab == '':
# Maybe we have a dot file?
lab = os.path.basename(label)
return lab
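# Added illustration (hypothetical paths):
#   smartLabel("/path/to/some.special.file") -> "some.special"
#   smartLabel("/path/to/.bashrc")           -> ".bashrc"  (dot-files keep their full name)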
def smartLabels(labels):
return [smartLabel(x) for x in labels]
def convertCmap(c, vmin=0, vmax=1):
cmap = mpl.cm.get_cmap(c)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cmap_rgb = []
for i in range(255):
k = mpl.colors.colorConverter.to_rgb(cmap(norm(i)))
cmap_rgb.append(k)
h = 1.0 / 254
colorScale = []
for k in range(255):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
colorScale.append([k * h, 'rgb' + str((C[0], C[1], C[2]))])
return colorScale
def getTLen(read, notAbs=False):
"""
Get the observed template length of a read. For a paired-end read, this is
normally just the TLEN field. For SE reads this is the observed coverage of
the genome (excluding splicing).
"""
if abs(read.template_length) > 0:
if notAbs:
return read.template_length
return abs(read.template_length)
tlen = 0
try:
# the cigartuples property apparently didn't always exist
for op, opLen in read.cigartuples:
if op == 0:
tlen += opLen
elif op == 2:
tlen += opLen
elif op == 7:
tlen += opLen
elif op == 8:
tlen += opLen
except:
pass
return tlen
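# Added illustration: for a single-end read with template_length == 0 and CIGAR
# 10M2D5M3I20M, the cigartuples are (0,10),(2,2),(0,5),(1,3),(0,20), so the
# function returns 10 + 2 + 5 + 20 = 37 reference bases covered; insertions
# (op 1) and spliced-out regions (op 3) are not counted.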
def getGC_content(tb, chrom, fragStart, fragEnd, fraction=True):
bases = tb.bases(chrom, fragStart, fragEnd, fraction=False)
if fragEnd > tb.chroms(chrom):
fragEnd = tb.chroms(chrom)
if sum(bases.values()) < 0.95 * (fragEnd - fragStart):
raise Exception("WARNING: too many NNNs present in {}:{}-{}".format(chrom, fragStart, fragEnd))
return None
if fraction:
return (bases['G'] + bases['C']) / float(fragEnd - fragStart)
return bases['G'] + bases['C']
def tbitToBamChrName(tbitNames, bamNames):
""" checks if the chromosome names from the two-bit and bam file coincide.
In case they do not coincide, a fix is tried. If successful, then
a mapping table is returned.
tbitNames and bamNames should be lists
"""
chrNameBitToBam = dict((x, x) for x in tbitNames)
if set(bamNames) != set(tbitNames):
sys.stderr.write("Bam and 2bit do not have matching "
"chromosome names:\n2bit:{}\n\nbam:{}"
"\n\n".format(tbitNames, bamNames))
if len(set(bamNames).intersection(set(tbitNames))) > 0:
sys.stderr.write("Using the following common chromosomes between "
"bam chromosome names and 2bit chromosome "
"names:\n")
for item in set(bamNames).intersection(set(tbitNames)):
sys.stderr.write(item + "\n")
chrNameBitToBam = dict([(x, x) for x in
set(bamNames).intersection(set(tbitNames))])
elif set(["chr" + x if x != 'dmel_mitochondrion_genome'
else 'chrM' for x in bamNames]) == set(tbitNames):
sys.stderr.write("Adding 'chr' seems to solve the problem. "
"Continuing ...")
chrNameBitToBam = dict([("chr" + x
if x != 'dmel_mitochondrion_genome'
else 'chrM', x) for x in bamNames])
elif set([x for x in tbitNames if x.count('random') == 0 and
x.count('chrM') == 0]) == set(bamNames):
if debug:
print("Removing random and mitochondrial chromosomes"
"fixes the problem")
chrNameBitToBam = dict([(x, x) for x in tbitNames
if x.count('random') == 0 and
x.count('chrM') == 0])
elif len(set(["chr" + x for x in bamNames if x != 'dmel_mitochondrion_genome']).intersection(set(tbitNames))) > 0:
bamNames2 = ["chr" + x for x in bamNames if x != 'dmel_mitochondrion_genome']
sys.stderr.write("Adding 'chr' seems to solve the problem for the following "
"chromosomes...")
for item in set(bamNames2).intersection(set(tbitNames)):
sys.stderr.write(item + "\n")
chrNameBitToBam = {"chrM": "MT"}
for i in range(len(bamNames)):
if bamNames2[i] in tbitNames:
chrNameBitToBam.update({bamNames2[i]: bamNames[i]})
elif len(set([x[3:] for x in bamNames if x.startswith("chr")]).intersection(set(tbitNames))) > 0:
bamNames = [x for x in bamNames]
bamNames2 = [x[3:] for x in bamNames if x.startswith("chr")]
if debug:
sys.stderr.write("Removing 'chr' seems to solve the problem for the following "
"chromosomes...")
for item in set(bamNames).intersection(set(tbitNames)):
sys.stderr.write(item + "\n")
chrNameBitToBam = {"MT": "chrM"}
for i in range(len(bamNames)):
if bamNames2[i] in tbitNames:
chrNameBitToBam.update({bamNames2[i]: bamNames[i]})
else:
if debug:
print("Index and reference do not have matching ")
"chromosome names"
exit(0)
return chrNameBitToBam
def getCommonChrNames(bamFileHandles, verbose=True):
r"""
Compares the names and lengths of a list of bam file handles.
    The input is a list of pysam file handles.
    The function returns a tuple containing the list of common (chromosome name,
    length) pairs and the set of (name, length) pairs that are not shared by all files.
Hopefully, only _random and chrM are not common.
"""
def get_chrom_and_size(bam_handler):
"""
Reads the chromosome/scaffold name and the length from
the bam file and returns a list of (chromname, size) tuples
:param bam_handler:
:return: list of (chrom, size) tuples
"""
try:
# BAM file
return [(x, y) for x, y in zip(bam_handler.references, bam_handler.lengths)]
except:
return [(k, v) for k, v in bam_handler.chroms().items()]
def print_chr_names_and_size(chr_set):
sys.stderr.write("chromosome\tlength\n")
for name, size in chr_set:
sys.stderr.write("{0:>15}\t{1:>10}\n".format(name, size))
common_chr = set(get_chrom_and_size(bamFileHandles[0]))
non_common_chr = set()
for j in range(1, len(bamFileHandles)):
_names_and_size = set(get_chrom_and_size(bamFileHandles[j]))
if len(common_chr & _names_and_size) == 0:
# try to add remove 'chr' from the chromosome name
_corr_names_size = set()
for chrom_name, size in _names_and_size:
if chrom_name.startswith('chr'):
_corr_names_size.add((chrom_name[3:], size))
else:
_corr_names_size.add(('chr' + chrom_name, size))
if len(common_chr & _corr_names_size) == 0:
message = "No common chromosomes found. Are the bam files files " \
"from the same species and same assemblies?\n"
sys.stderr.write(message)
print_chr_names_and_size(common_chr)
sys.stderr.write("\nand the following is the list of the unmatched chromosome and chromosome\n"
"lengths from file\n{}\n".format(bamFileHandles.name))
print_chr_names_and_size(_names_and_size)
exit(1)
else:
_names_and_size = _corr_names_size
non_common_chr |= common_chr ^ _names_and_size
common_chr = common_chr & _names_and_size
if len(non_common_chr) > 0:
sys.stderr.write("\nThe following chromosome names did not match between the the bam files\n")
print_chr_names_and_size(non_common_chr)
# the common chromosomes has to be sorted as in the original
# bam files
chr_sizes = []
for tuple in get_chrom_and_size(bamFileHandles[0]):
if tuple in common_chr:
chr_sizes.append(tuple)
return chr_sizes, non_common_chr
def copyFileInMemory(filePath, suffix=''):
"""
copies a file into the special /dev/shm device which
moves the file into memory.
    This process speeds up multiprocessor access to such files.
"""
# fallback for windows users
if os.name == 'nt':
return filePath
memFileName = getTempFileName(suffix=suffix)
import shutil
shutil.copyfile(filePath, memFileName)
return memFileName
def getTempFileName(suffix=''):
"""
Return a temporary file name. The calling function is responsible for
deleting this upon completion.
"""
import tempfile
_tempFile = tempfile.NamedTemporaryFile(prefix="_deeptools_",
suffix=suffix,
delete=False)
memFileName = _tempFile.name
_tempFile.close()
return memFileName
def gtfOptions(allArgs=None):
"""
This is used a couple places to setup arguments to mapReduce
"""
transcriptID = "transcript"
exonID = "exon"
transcript_id_designator = "transcript_id"
keepExons = False
if allArgs is not None:
allArgs = vars(allArgs)
transcriptID = allArgs.get("transcriptID", transcriptID)
exonID = allArgs.get("exonID", exonID)
transcript_id_designator = allArgs.get("transcript_id_designator", transcript_id_designator)
keepExons = allArgs.get("keepExons", keepExons)
return transcriptID, exonID, transcript_id_designator, keepExons
def toString(s):
"""
This takes care of python2/3 differences
"""
if isinstance(s, str):
return s
if isinstance(s, bytes):
if sys.version_info[0] == 2:
return str(s)
return s.decode('ascii')
if isinstance(s, list):
return [toString(x) for x in s]
return s
def toBytes(s):
"""
Like toString, but for functions requiring bytes in python3
"""
if sys.version_info[0] == 2:
return s
if isinstance(s, bytes):
return s
if isinstance(s, str):
return bytes(s, 'ascii')
if isinstance(s, list):
return [toBytes(x) for x in s]
return s
def mungeChromosome(chrom, chromList):
"""
A generic chromosome munging function. "chrom" is munged by adding/removing "chr" such that it appears in chromList
On error, None is returned, but a common chromosome list should be used beforehand to avoid this possibility
"""
if chrom in chromList:
return chrom
if chrom == "MT" and "chrM" in chromList:
return "chrM"
if chrom == "chrM" and "MT" in chromList:
return "MT"
if chrom.startswith("chr") and chrom[3:] in chromList:
return chrom[3:]
if "chr" + chrom in chromList:
return "chr" + chrom
# This shouldn't actually happen
return None
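# A minimal illustration of the munging above (hypothetical inputs, not part of
# the original module):
#   mungeChromosome("chr1", ["1", "2", "MT"]) -> "1"
#   mungeChromosome("1", ["chr1", "chr2", "chrM"]) -> "chr1"
#   mungeChromosome("MT", ["chr1", "chrM"]) -> "chrM"
#   mungeChromosome("chrUn_xyz", ["1", "2"]) -> None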
def bam_total_reads(bam_handle, chroms_to_ignore, stats):
"""
Count the total number of mapped reads in a BAM file, excluding
the chromosomes given in the chroms_to_ignore list
"""
if chroms_to_ignore:
return sum([s[0] for k, s in stats.items() if k not in chroms_to_ignore])
else:
return sum([s[0] for s in stats.values()])
def bam_blacklisted_worker(args):
bam, chrom, start, end = args
fh = openBam(bam)
blacklisted = 0
for r in fh.fetch(reference=chrom, start=start, end=end):
if r.is_unmapped:
continue
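# Count only reads that fall entirely within the queried (blacklisted) interval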
if r.reference_start >= start and r.reference_start + r.infer_query_length(always=False) - 1 <= end:
blacklisted += 1
fh.close()
return blacklisted
def bam_blacklisted_reads(bam_handle, chroms_to_ignore, blackListFileName=None, numberOfProcessors=1):
blacklisted = 0
if blackListFileName is None:
return blacklisted
# Get the chromosome lengths
chromLens = {x: y for x, y in zip(bam_handle.references, bam_handle.lengths)}
bl = GTF(blackListFileName)
hasOverlaps, minOverlap = bl.hasOverlaps(returnDistance=True)
if hasOverlaps:
sys.exit("Your blacklist file(s) has (have) regions that overlap. Proceeding with such a file would result in deepTools incorrectly calculating scaling factors. As such, you MUST fix this issue before being able to proceed.\n")
if minOverlap < 1000:
sys.stderr.write("WARNING: The minimum distance between intervals in your blacklist is {}. It makes little biological sense to include small regions between two blacklisted regions. Instead, these should likely be blacklisted as well.\n".format(minOverlap))
regions = []
for chrom in bl.chroms:
if (not chroms_to_ignore or chrom not in chroms_to_ignore) and chrom in chromLens:
for reg in bl.findOverlaps(chrom, 0, chromLens[chrom]):
regions.append([bam_handle.filename, chrom, reg[0], reg[1]])
if len(regions) > 0:
import multiprocessing
if len(regions) > 1 and numberOfProcessors > 1:
pool = multiprocessing.Pool(numberOfProcessors)
res = pool.map_async(bam_blacklisted_worker, regions).get(9999999)
else:
res = [bam_blacklisted_worker(x) for x in regions]
for val in res:
blacklisted += val
return blacklisted
| 6,456 |
14,425 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.*;
import org.apache.hadoop.hdfs.server.common.FileRegion;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.junit.Test;
import static org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
import static org.junit.Assert.*;
/**
* Test for the text based block format for provided block maps.
*/
public class TestTextBlockAliasMap {
static final String OUTFILE_PATH = "hdfs://dummyServer:0000/";
static final String OUTFILE_BASENAME = "dummyFile";
static final Path OUTFILE = new Path(OUTFILE_PATH, OUTFILE_BASENAME + "txt");
static final String BPID = "BPID-0";
void check(TextWriter.Options opts, final Path vp,
final Class<? extends CompressionCodec> vc) throws IOException {
TextFileRegionAliasMap mFmt = new TextFileRegionAliasMap() {
@Override
public TextWriter createWriter(Path file, CompressionCodec codec,
String delim, Configuration conf) throws IOException {
assertEquals(vp, file);
if (null == vc) {
assertNull(codec);
} else {
assertEquals(vc, codec.getClass());
}
return null; // ignored
}
};
mFmt.getWriter(opts, BPID);
}
void check(TextReader.Options opts, final Path vp,
final Class<? extends CompressionCodec> vc) throws IOException {
TextFileRegionAliasMap aliasMap = new TextFileRegionAliasMap() {
@Override
public TextReader createReader(Path file, String delim, Configuration cfg,
String blockPoolID) throws IOException {
assertEquals(vp, file);
if (null != vc) {
CompressionCodecFactory factory = new CompressionCodecFactory(cfg);
CompressionCodec codec = factory.getCodec(file);
assertEquals(vc, codec.getClass());
}
return null; // ignored
}
};
aliasMap.getReader(opts, BPID);
}
@Test
public void testWriterOptions() throws Exception {
TextWriter.Options opts = TextWriter.defaults();
assertTrue(opts instanceof WriterOptions);
WriterOptions wopts = (WriterOptions) opts;
Path def =
new Path(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR_DEFAULT);
assertEquals(def, wopts.getDir());
assertNull(wopts.getCodec());
Path cp = new Path(OUTFILE_PATH, "blocks_" + BPID + ".csv");
opts.dirName(new Path(OUTFILE_PATH));
check(opts, cp, null);
opts.codec("gzip");
cp = new Path(OUTFILE_PATH, "blocks_" + BPID + ".csv.gz");
check(opts, cp, org.apache.hadoop.io.compress.GzipCodec.class);
}
@Test
public void testReaderOptions() throws Exception {
TextReader.Options opts = TextReader.defaults();
assertTrue(opts instanceof ReaderOptions);
ReaderOptions ropts = (ReaderOptions) opts;
Path cp = new Path(OUTFILE_PATH, fileNameFromBlockPoolID(BPID));
opts.filename(cp);
check(opts, cp, null);
cp = new Path(OUTFILE_PATH, "blocks_" + BPID + ".csv.gz");
opts.filename(cp);
check(opts, cp, org.apache.hadoop.io.compress.GzipCodec.class);
}
@Test
public void testCSVReadWrite() throws Exception {
final DataOutputBuffer out = new DataOutputBuffer();
FileRegion r1 = new FileRegion(4344L, OUTFILE, 0, 1024);
FileRegion r2 = new FileRegion(4345L, OUTFILE, 1024, 1024);
FileRegion r3 = new FileRegion(4346L, OUTFILE, 2048, 512);
try (TextWriter csv = new TextWriter(new OutputStreamWriter(out), ",")) {
csv.store(r1);
csv.store(r2);
csv.store(r3);
}
Iterator<FileRegion> i3;
try (TextReader csv = new TextReader(null, null, null, ",") {
@Override
public InputStream createStream() {
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength());
return in;
}}) {
Iterator<FileRegion> i1 = csv.iterator();
assertEquals(r1, i1.next());
Iterator<FileRegion> i2 = csv.iterator();
assertEquals(r1, i2.next());
assertEquals(r2, i2.next());
assertEquals(r3, i2.next());
assertEquals(r2, i1.next());
assertEquals(r3, i1.next());
assertFalse(i1.hasNext());
assertFalse(i2.hasNext());
i3 = csv.iterator();
}
try {
i3.next();
} catch (IllegalStateException e) {
return;
}
fail("Invalid iterator");
}
@Test
public void testCSVReadWriteTsv() throws Exception {
final DataOutputBuffer out = new DataOutputBuffer();
FileRegion r1 = new FileRegion(4344L, OUTFILE, 0, 1024);
FileRegion r2 = new FileRegion(4345L, OUTFILE, 1024, 1024);
FileRegion r3 = new FileRegion(4346L, OUTFILE, 2048, 512);
try (TextWriter csv = new TextWriter(new OutputStreamWriter(out), "\t")) {
csv.store(r1);
csv.store(r2);
csv.store(r3);
}
Iterator<FileRegion> i3;
try (TextReader csv = new TextReader(null, null, null, "\t") {
@Override
public InputStream createStream() {
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength());
return in;
}}) {
Iterator<FileRegion> i1 = csv.iterator();
assertEquals(r1, i1.next());
Iterator<FileRegion> i2 = csv.iterator();
assertEquals(r1, i2.next());
assertEquals(r2, i2.next());
assertEquals(r3, i2.next());
assertEquals(r2, i1.next());
assertEquals(r3, i1.next());
assertFalse(i1.hasNext());
assertFalse(i2.hasNext());
i3 = csv.iterator();
}
try {
i3.next();
} catch (IllegalStateException e) {
return;
}
fail("Invalid iterator");
}
}
| 2,675 |
453 | #include "headers/atan2d2.h"
static __inline double _atan2(double y, double x)
{
return spu_extract(_atan2d2(spu_promote(y, 0), spu_promote(x, 0)), 0);
}
| 69 |
636 | <reponame>hadoop835/fluent-mybatis
package cn.org.atool.fluent.mybatis.segment.model;
/**
 * Insert a specific statement or comment at a particular position in a SQL statement
*
* @author darui.wu
*/
public enum HintType {
/**
* hint select ...
* hint update ...
* hint delete ...
*/
Before_All,
/**
* select hint ... from table ...
* update hint table set ....
* delete hint from table ...
*/
After_CrudKey,
/**
* select ... from hint table ...
* update hint table set ...
* delete from hint table(...)
*/
Before_Table,
/**
* select ... from table hint ...
* update table hint set ...
* delete from table hint(...)
*/
After_Table
} | 311 |
545 | #include <vector>
#include <iostream>
int main() {
std::vector<int> vi;
std::cout << "Size: " << vi.size()
<< "\tCapacity : " << vi.capacity() << std::endl;
vi.push_back(1);
std::cout << "Size: " << vi.size()
<< "\tCapacity : " << vi.capacity() << std::endl;
for (std::vector<int>::size_type ix = 0; ix != 100; ++ix)
vi.push_back(ix);
std::cout << "Size: " << vi.size()
<< "\tCapacity : " << vi.capacity() << std::endl;
vi.shrink_to_fit();
std::cout << "Size: " << vi.size()
<< "\tCapacity : " << vi.capacity() << std::endl;
return 0;
}
| 281 |
4,879 | #include "drape/gl_gpu_program.hpp"
#include "drape/gl_functions.hpp"
#include "drape/render_state.hpp"
#include "drape/support_manager.hpp"
#include "base/logging.hpp"
#include <set>
namespace dp
{
GLGpuProgram::GLGpuProgram(std::string const & programName,
ref_ptr<Shader> vertexShader, ref_ptr<Shader> fragmentShader)
: GpuProgram(programName)
, m_vertexShader(vertexShader)
, m_fragmentShader(fragmentShader)
{
m_programID = GLFunctions::glCreateProgram();
GLFunctions::glAttachShader(m_programID, m_vertexShader->GetID());
GLFunctions::glAttachShader(m_programID, m_fragmentShader->GetID());
std::string errorLog;
if (!GLFunctions::glLinkProgram(m_programID, errorLog))
LOG(LERROR, ("Program ", programName, " link error = ", errorLog));
// On Tegra3, glGetActiveUniform doesn't work if you detach shaders after linking.
LoadUniformLocations();
// On Tegra2 we cannot detach shaders at all.
// https://devtalk.nvidia.com/default/topic/528941/alpha-blending-not-working-on-t20-and-t30-under-ice-cream-sandwich/
if (!SupportManager::Instance().IsTegraDevice())
{
GLFunctions::glDetachShader(m_programID, m_vertexShader->GetID());
GLFunctions::glDetachShader(m_programID, m_fragmentShader->GetID());
}
}
GLGpuProgram::~GLGpuProgram()
{
if (SupportManager::Instance().IsTegraDevice())
{
GLFunctions::glDetachShader(m_programID, m_vertexShader->GetID());
GLFunctions::glDetachShader(m_programID, m_fragmentShader->GetID());
}
GLFunctions::glDeleteProgram(m_programID);
}
void GLGpuProgram::Bind()
{
// Deactivate all unused textures.
uint8_t const usedSlots = TextureState::GetLastUsedSlots();
for (uint8_t i = m_textureSlotsCount; i < usedSlots; i++)
{
GLFunctions::glActiveTexture(gl_const::GLTexture0 + i);
GLFunctions::glBindTexture(0);
}
GLFunctions::glUseProgram(m_programID);
}
void GLGpuProgram::Unbind()
{
GLFunctions::glUseProgram(0);
}
int8_t GLGpuProgram::GetAttributeLocation(std::string const & attributeName) const
{
return GLFunctions::glGetAttribLocation(m_programID, attributeName);
}
int8_t GLGpuProgram::GetUniformLocation(std::string const & uniformName) const
{
auto const it = m_uniforms.find(uniformName);
if (it == m_uniforms.end())
return -1;
return it->second.m_location;
}
glConst GLGpuProgram::GetUniformType(std::string const & uniformName) const
{
auto const it = m_uniforms.find(uniformName);
if (it == m_uniforms.end())
return -1;
return it->second.m_type;
}
GLGpuProgram::UniformsInfo const & GLGpuProgram::GetUniformsInfo() const
{
return m_uniforms;
}
void GLGpuProgram::LoadUniformLocations()
{
static std::set<glConst> const kSupportedTypes = {
gl_const::GLFloatType, gl_const::GLFloatVec2, gl_const::GLFloatVec3, gl_const::GLFloatVec4,
gl_const::GLIntType, gl_const::GLIntVec2, gl_const::GLIntVec3, gl_const::GLIntVec4,
gl_const::GLFloatMat4, gl_const::GLSampler2D};
auto const uniformsCount = GLFunctions::glGetProgramiv(m_programID, gl_const::GLActiveUniforms);
for (int i = 0; i < uniformsCount; ++i)
{
int32_t size = 0;
UniformInfo info;
std::string name;
GLFunctions::glGetActiveUniform(m_programID, static_cast<uint32_t>(i), &size, &info.m_type, name);
CHECK(kSupportedTypes.find(info.m_type) != kSupportedTypes.cend(),
("Used uniform has unsupported type. Program =", m_programName, "Type =", info.m_type));
info.m_location = GLFunctions::glGetUniformLocation(m_programID, name);
m_uniforms[name] = std::move(info);
}
m_numericUniformsCount = CalculateNumericUniformsCount();
m_textureSlotsCount = static_cast<uint8_t>(m_uniforms.size() - m_numericUniformsCount);
}
uint32_t GLGpuProgram::CalculateNumericUniformsCount() const
{
uint32_t counter = 0;
for (auto const & u : m_uniforms)
{
if (u.second.m_type != gl_const::GLSampler2D)
counter++;
}
return counter;
}
} // namespace dp
| 1,548 |
424 | <filename>vertx-pg-client/src/main/java/io/vertx/pgclient/SslMode.java
/*
* Copyright (C) 2018 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.vertx.pgclient;
import io.vertx.codegen.annotations.VertxGen;
/**
* The different values for the sslmode parameter provide different levels of
* protection. See more information in <a href=
* "https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION">Protection
* Provided in Different Modes</a>.
*/
@VertxGen
public enum SslMode {
/**
* only try a non-SSL connection.
*/
DISABLE("disable"),
/**
* first try a non-SSL connection; if that fails, try an SSL connection.
*/
ALLOW("allow"),
/**
* first try an SSL connection; if that fails, try a non-SSL connection.
*/
PREFER("prefer"),
/**
* only try an SSL connection. If a root CA file is present, verify the certificate in the same way as if verify-ca was specified.
*/
REQUIRE("require"),
/**
* only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
*/
VERIFY_CA("verify-ca"),
/**
* only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
*/
VERIFY_FULL("verify-full");
public static final SslMode[] VALUES = SslMode.values();
public final String value;
SslMode(String value) {
this.value = value;
}
public static SslMode of(String value) {
for (SslMode sslMode : VALUES) {
if (sslMode.value.equalsIgnoreCase(value)) {
return sslMode;
}
}
throw new IllegalArgumentException("Could not find an appropriate SSL mode for the value [" + value + "].");
}
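/*
* Illustrative usage only (not part of the original class): the lookup above is
* case-insensitive, so a value taken from configuration can be mapped directly
* to an enum constant, e.g.
*
*   SslMode mode = SslMode.of("REQUIRE"); // -> SslMode.REQUIRE
*   SslMode bad = SslMode.of("tls");      // -> throws IllegalArgumentException
*/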
}
| 705 |
7,676 | """Ray-Horovod Elastic training unit tests.
This is currently not run on the Ray CI.
"""
from contextlib import contextmanager
import psutil
import os
import socket
import time
import mock
import pytest
import ray
from horovod.common.util import gloo_built
from horovod.runner.elastic.discovery import HostDiscovery
from horovod.ray.elastic import ElasticRayExecutor, RayHostDiscovery
@pytest.fixture
def ray_shutdown():
yield
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def ray_8_cpus():
ray.init(num_cpus=8, resources={
f"node:host-{i}": 1 for i in range(10)})
yield
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def ray_8_cpus_gpus():
if "CUDA_VISIBLE_DEVICES" in os.environ:
if len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) < 8:
pytest.skip("Avoiding mismatched GPU machine.")
ray.init(num_cpus=8, num_gpus=8, resources={
f"node:host-{i}": 1 for i in range(10)})
try:
yield
finally:
# The code after the yield will run as teardown code.
ray.shutdown()
class TestRayDiscoverySuite:
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_cpu_discovery(self, ray_shutdown):
ray.init(num_cpus=4, num_gpus=1)
discovery = RayHostDiscovery(cpus_per_slot=1)
mapping = discovery.find_available_hosts_and_slots()
assert len(mapping) == 1
assert list(mapping.values()) == [4]
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_gpu_discovery(self, ray_shutdown):
ray.init(num_cpus=4, num_gpus=1)
discovery = RayHostDiscovery(use_gpu=True, cpus_per_slot=1)
mapping = discovery.find_available_hosts_and_slots()
assert len(mapping) == 1
assert list(mapping.values()) == [1]
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_gpu_slot_discovery(self, ray_shutdown):
ray.init(num_cpus=4, num_gpus=4)
discovery = RayHostDiscovery(
use_gpu=True, cpus_per_slot=1, gpus_per_slot=2)
mapping = discovery.find_available_hosts_and_slots()
assert len(mapping) == 1
assert list(mapping.values()) == [2]
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_multinode(self, monkeypatch):
def create_multi_node_mock():
host_names = ["host-1", "host-2", "host-3"]
resources = {"GPU": 2, "CPU": 8}
def create_node_entry(hostname):
return {
"NodeManagerAddress": hostname,
"Resources": resources.copy(),
"alive": True
}
return map(create_node_entry, host_names)
monkeypatch.setattr(ray, "nodes", create_multi_node_mock)
discovery = RayHostDiscovery(use_gpu=True, cpus_per_slot=1)
mapping = discovery.find_available_hosts_and_slots()
assert len(mapping) == 3
assert list(mapping.values()) == [2, 2, 2]
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_multinode_gpus_per_slot(self, monkeypatch):
def create_multi_node_mock():
host_names = ["host-1", "host-2", "host-3"]
resources = {"GPU": 2, "CPU": 8}
def create_node_entry(hostname):
return {
"NodeManagerAddress": hostname,
"Resources": resources.copy(),
"alive": True
}
return map(create_node_entry, host_names)
monkeypatch.setattr(ray, "nodes", create_multi_node_mock)
discovery = RayHostDiscovery(use_gpu=True, gpus_per_slot=2)
mapping = discovery.find_available_hosts_and_slots()
assert len(mapping) == 3
assert list(mapping.values()) == [1, 1, 1]
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_multinode_mismatch(self, monkeypatch):
def create_multi_node_mock():
host_names = ["host-1", "host-2", "host-3"]
resources = {"CPU": 8}
def create_node_entry(hostname):
return {
"NodeManagerAddress": hostname,
"Resources": resources.copy(),
"alive": True
}
return map(create_node_entry, host_names)
monkeypatch.setattr(ray, "nodes", create_multi_node_mock)
discovery = RayHostDiscovery(use_gpu=True, cpus_per_slot=1)
mapping = discovery.find_available_hosts_and_slots()
assert sum(mapping.values()) == 0
class SimpleTestDiscovery(HostDiscovery):
def __init__(self, schedule, wait_for_previous_set=True):
self._schedule = schedule
self._generator = self.host_generator()
self.executor = None
# The previous set of hosts
# We need a reference to them, as iterators can only provide the next set
self.prevlist = None
self.wait_for_previous_set = wait_for_previous_set
def host_generator(self):
for iters, hosts in self._schedule:
iters = iters or 500 # max
for i in range(iters):
yield hosts
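# Illustration (not part of the original helper): a schedule entry (iters, hosts)
# makes host_generator yield "hosts" for "iters" consecutive discovery calls,
# and iters=None is treated as 500, i.e. effectively "until the end". For example, with
#   schedule = [(2, ['host-1:2']), (None, ['host-1:2', 'host-2:1'])]
# the first two discovery calls see {'host-1': 2}, and every later call sees
# {'host-1': 2, 'host-2': 1}.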
def find_available_hosts_and_slots(self):
hostlist = next(self._generator)
# Ensure discovery waits for the previous set to register
self._wait_for_previous_set_registration(hostlist)
hosts = {}
for item in hostlist:
host, slots = item.split(":")
slots = int(slots)
hosts[host] = slots
return hosts
def _wait_for_previous_set_registration(self, hostlist):
"""
Ensure that at least one host from the previous set of hosts has
been registered.
Without this, the discovery script will "discover" the new
set of hosts before the current set can register.
This would result in a race condition.
Consider a discovery schedule:
```
discovery_schedule = [
(10, ['host-1:2']),
(30, ['host-1:2', 'host-2:1', 'host-3:1']),
(None, ['host-2:1']),
]
```
The initial set is: ['host-1:2']. Before this is registered in the driver, the discovery script
discovers the set: ['host-1:2', 'host-2:1', 'host-3:1'], and adds ['host-2:1', 'host-3:1'].
However, since ['host-1:2'] has not registered, there is no coordinator to notify the workers.
When host-1 and host-3 are removed, driver.resume will call _activate_workers, which will update the host assignments.
It checks whether the previous and current sets of hosts intersect. It finds that the previous
set is ['host-1:2'], and the current set is ['host-2:1'], since there was no notification for the added and removed
hosts.
This ensures that the previous set of hosts can register before the current set is discovered.
"""
if self.wait_for_previous_set is False:
return
while self.prevlist and self.executor:
for item in self.prevlist:
host, slots = item.split(":")
slot = self.executor.driver.get_slot_info(host, 0)
# Avoid the empty slot
if (not slot.hostname) or self.executor.driver.get_worker_client(slot):
break
else:
time.sleep(0.001)
continue
break
self.prevlist = hostlist
class StatusCallback:
def __init__(self):
self._journal = []
def __call__(self, info_dict):
self._journal.append(info_dict)
def fetch(self):
return self._journal.copy()
def _create_training_function(iterations):
def training_fn():
import time
import torch
import horovod.torch as hvd
from horovod.ray import ray_logger
hvd.init()
model = torch.nn.Sequential(torch.nn.Linear(2, 2))
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
ray_logger.log({"started": True, "pid": os.getpid()})
@hvd.elastic.run
def train(state):
for state.epoch in range(state.epoch, iterations):
ray_logger.log({"training": True, "pid": os.getpid()})
time.sleep(0.1)
state.commit() # triggers scale-up, scale-down
ray_logger.log({"finished": True, "pid": os.getpid()})
state = hvd.elastic.TorchState(
model, optimizer, batch=0, epoch=0, commits=0, rendezvous=0)
train(state)
return True
return training_fn
@contextmanager
def fault_tolerance_patches():
with mock.patch(
'horovod.runner.elastic.driver.DISCOVER_HOSTS_FREQUENCY_SECS',
0.1):
with mock.patch(
"horovod.runner.util.network.get_driver_ip",
return_value=socket.gethostbyname(socket.gethostname())):
yield
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_fault_tolerance_hosts_added_and_removed(ray_8_cpus):
with fault_tolerance_patches():
discovery_schedule = [
(10, ['host-1:2']),
(30, ['host-1:2', 'host-2:1', 'host-3:1']),
(None, ['host-2:1']),
]
nics = list(psutil.net_if_addrs().keys())[0]
settings = ElasticRayExecutor.create_settings(min_np=1, nics={nics})
settings.discovery = SimpleTestDiscovery(discovery_schedule)
executor = ElasticRayExecutor(
settings, cpus_per_slot=1, override_discovery=False)
settings.discovery.executor = executor
training_fn = _create_training_function(iterations=50)
executor.start()
trace = StatusCallback()
results = executor.run(training_fn, callbacks=[trace])
assert len(results) == 1
events = trace.fetch()
assert sum(int("started" in e) for e in events) == 4, events
assert sum(int("finished" in e) for e in events) == 1, events
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
@pytest.mark.skip(reason='https://github.com/horovod/horovod/issues/3197')
def test_fault_tolerance_hosts_remove_and_add(ray_8_cpus):
with fault_tolerance_patches():
discovery_schedule = [
(10, ['host-1:2', 'host-2:1', 'host-3:2']),
(10, ['host-1:2']),
(None, ['host-1:2', 'host-4:1', 'host-5:1']),
]
nics = list(psutil.net_if_addrs().keys())[0]
settings = ElasticRayExecutor.create_settings(min_np=1, nics={nics})
settings.discovery = SimpleTestDiscovery(discovery_schedule)
executor = ElasticRayExecutor(
settings, cpus_per_slot=1, override_discovery=False)
training_fn = _create_training_function(iterations=30)
executor.start()
trace = StatusCallback()
results = executor.run(training_fn, callbacks=[trace])
assert len(results) == 4
events = trace.fetch()
assert sum(int("started" in e) for e in events) == 7, events
assert sum(int("finished" in e) for e in events) == 4, events
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_max_np(ray_8_cpus):
with fault_tolerance_patches():
discovery_schedule = [
(10, ['host-1:2']),
(None, ['host-1:2', 'host-4:1', 'host-5:1']),
]
nics = list(psutil.net_if_addrs().keys())[0]
settings = ElasticRayExecutor.create_settings(
min_np=1, max_np=2, nics={nics})
settings.discovery = SimpleTestDiscovery(discovery_schedule)
executor = ElasticRayExecutor(
settings, cpus_per_slot=1, override_discovery=False)
training_fn = _create_training_function(iterations=20)
executor.start()
trace = StatusCallback()
results = executor.run(training_fn, callbacks=[trace])
assert len(results) == 2
events = trace.fetch()
assert sum(int("started" in e) for e in events) == 2, events
assert sum(int("finished" in e) for e in events) == 2, events
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_min_np(ray_8_cpus):
with fault_tolerance_patches():
discovery_schedule = [
(10, ['host-1:1']),
(10, ['host-1:1', 'host-4:1', 'host-5:1']),
(None, ['host-1:1', 'host-4:1', 'host-5:1', 'host-6:1']),
]
nics = list(psutil.net_if_addrs().keys())[0]
settings = ElasticRayExecutor.create_settings(
min_np=4, max_np=4, nics={nics})
settings.discovery = SimpleTestDiscovery(discovery_schedule)
executor = ElasticRayExecutor(
settings, cpus_per_slot=1, override_discovery=False)
training_fn = _create_training_function(iterations=30)
executor.start()
trace = StatusCallback()
results = executor.run(training_fn, callbacks=[trace])
assert len(results) == 4
events = trace.fetch()
assert sum(int("started" in e) for e in events) == 4, events
assert sum(int("finished" in e) for e in events) == 4, events
@pytest.mark.skipif(
not gloo_built(), reason='Gloo is required for Ray integration')
def test_gpu_e2e(ray_8_cpus_gpus):
with fault_tolerance_patches():
discovery_schedule = [
(10, ['host-1:1']),
(10, ['host-1:1', 'host-4:1', 'host-5:1']),
(None, ['host-1:1', 'host-4:1', 'host-5:1', 'host-6:1']),
]
nics = list(psutil.net_if_addrs().keys())[0]
settings = ElasticRayExecutor.create_settings(
min_np=4, max_np=4, nics={nics})
settings.discovery = SimpleTestDiscovery(discovery_schedule)
executor = ElasticRayExecutor(
settings, gpus_per_slot=1, use_gpu=True, override_discovery=False)
training_fn = _create_training_function(iterations=30)
executor.start()
trace = StatusCallback()
results = executor.run(training_fn, callbacks=[trace])
assert len(results) == 4
events = trace.fetch()
assert sum(int("started" in e) for e in events) == 4, events
assert sum(int("finished" in e) for e in events) == 4, events
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv[1:] + ["-v", "-x", __file__]))
| 6,651 |
324 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.ultradns.ws.predicates;
import static org.jclouds.ultradns.ws.predicates.ZonePredicates.typeEqualTo;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import org.jclouds.ultradns.ws.domain.Zone;
import org.jclouds.ultradns.ws.domain.Zone.DNSSECStatus;
import org.jclouds.ultradns.ws.domain.Zone.Type;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "ZonePredicatesTest")
public class ZonePredicatesTest {
Zone zone = Zone.builder()
.name("jclouds.org.")
.typeCode(1)
.accountId("AAAAAAAAAAAAAAAA")
.ownerId("EEEEEEEEEEEEEEEE")
.id("0000000000000001")
.dnssecStatus(DNSSECStatus.UNSIGNED).build();
@Test
public void testTypeEqualsWhenEqual() {
assertTrue(typeEqualTo(Type.PRIMARY).apply(zone));
}
@Test
public void testTypeEqualsWhenNotEqual() {
assertFalse(typeEqualTo(Type.SECONDARY).apply(zone));
}
}
| 642 |
711 | /*
* Copyright 2014 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.apiman.manager.api.beans.policies;
import java.io.Serializable;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
/**
* The bean/model used when updating a new policy definition.
*
* @author <EMAIL>
*/
@JsonInclude(Include.NON_NULL)
public class UpdatePolicyDefinitionBean implements Serializable {
private static final long serialVersionUID = 350049376316732992L;
private String name;
private String description;
private String icon;
/**
* Constructor.
*/
public UpdatePolicyDefinitionBean() {
}
/**
* @return the name
*/
public String getName() {
return name;
}
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
/**
* @return the description
*/
public String getDescription() {
return description;
}
/**
* @param description the description to set
*/
public void setDescription(String description) {
this.description = description;
}
/**
* @return the icon
*/
public String getIcon() {
return icon;
}
/**
* @param icon the icon to set
*/
public void setIcon(String icon) {
this.icon = icon;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
@SuppressWarnings("nls")
public String toString() {
return "UpdatePolicyDefinitionBean [name=" + name + ", description=" + description + ", icon=" + icon
+ "]";
}
}
| 798 |
5,079 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright (C) 2000-2004 <NAME> <<EMAIL>>
# 2003 <NAME>, <NAME>
# 2004 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# NOTE:
# This parser doesn't understand the ALT-TRANS element.
from odf.xml import make_parser
from xml.sax.handler import ContentHandler
from xml.sax import handler, InputSource
from cStringIO import StringIO
from types import StringType, UnicodeType
#constants
_FILE_ATTRS = ['original', 'source-language', 'datatype', 'date',
'target-language', 'product-name', 'product-version', 'build-num']
_PHASE_ATTRS = ['phase-name', 'process-name', 'tool', 'date', 'contact-name',
'contact-email', 'company-name']
class XLIFFHandler(ContentHandler):
""" This is used to parse the xliff file
"""
def __init__(self):
"""constructor """
self.__currentTag = ''
self.__filetag = []
self.__phase_group = []
self.__source = 0
self.__body = {}
self.__data = []
self.__inside_alttrans = 0
self.__tuid = ''
#functions related with <file> tag
def getFileTag(self):
return self.__filetag
def setFileTag(self, dict):
self.__filetag.extend(dict)
#functions related with <phase-group> tag
def getPhaseGroup(self):
return self.__phase_group
def setPhaseGroup(self, dict):
self.__phase_group.append(dict)
def getBody(self):
return self.__body
def setBody(self, key, value):
self.__body[key] = value
def startElement(self, name, attrs):
self.__currentTag = name
if name == 'alt-trans':
self.__inside_alttrans = 1
# Make the attributes available
# Implicit assumption: There is only one <file> element.
if name == 'file':
tmp = attrs.items()
for i in [elem for elem in attrs.keys() if elem not in _FILE_ATTRS]:
tmp.remove((i, attrs[i]))
self.setFileTag(tmp)
if name == 'phase':
tmp = attrs.items()
for i in [elem for elem in attrs.keys() if elem not in _PHASE_ATTRS]:
tmp.remove((i, attrs[i]))
self.setPhaseGroup(tmp)
if name == 'trans-unit':
self.__tuid = attrs['id']
self.__source = u''
self.__target = u''
self.__note = u''
def endElement(self, name):
if name == 'alt-trans':
self.__inside_alttrans = 0
if name == 'source' and self.__inside_alttrans == 0:
content = u''.join(self.__data).strip()
self.__data = []
self.__source = content
if name == 'target' and self.__inside_alttrans == 0:
content = u''.join(self.__data).strip()
self.__data = []
self.__target = content
if name == 'note' and self.__inside_alttrans == 0:
content = u''.join(self.__data).strip()
self.__data = []
self.__note = content
if name == 'trans-unit':
self.setBody(self.__tuid, {'source':self.__source,
'target':self.__target, 'note':self.__note})
self.__currentTag = ''
def characters(self, content):
currentTag = self.__currentTag
if currentTag in ( 'source', 'target', 'note'):
self.__data.append(content)
class HandleXliffParsing:
""" class for parse xliff files """
def __init__(self):
""" """
pass
def parseXLIFFSTring(self, xml_string):
""" """
chandler = XLIFFHandler()
parser = make_parser()
# Tell the parser to use our handler
parser.setContentHandler(chandler)
# Don't load the DTD from the Internet
parser.setFeature(handler.feature_external_ges, 0)
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(xml_string))
try:
parser.parse(inpsrc)
return chandler
except:
return None
def parseXLIFFFile(self, file):
# Create a parser
parser = make_parser()
chandler = XLIFFHandler()
# Tell the parser to use our handler
parser.setContentHandler(chandler)
# Don't load the DTD from the Internet
parser.setFeature(handler.feature_external_ges, 0)
inputsrc = InputSource()
try:
if type(file) is StringType:
inputsrc.setByteStream(StringIO(file))
else:
filecontent = file.read()
inputsrc.setByteStream(StringIO(filecontent))
parser.parse(inputsrc)
return chandler
except:
return None
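# A usage sketch (not part of the original module); "sample.xlf" is a
# hypothetical file name:
#   parser = HandleXliffParsing()
#   handler = parser.parseXLIFFFile(open("sample.xlf"))
#   if handler is not None:
#       units = handler.getBody()  # {trans-unit id: {'source': ..., 'target': ..., 'note': ...}}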
| 2,355 |
1,841 | <filename>test/cuda/test_autotuner_utility.cc
/**
* Copyright (c) 2017-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "tc/aten/aten_compiler.h"
#include "tc/autotuner/autotuner.h"
#include "tc/autotuner/genetic_search.h"
#include "tc/autotuner/utils.h"
#include "tc/core/cuda/cuda.h"
#include "tc/core/cuda/cuda_backend.h"
#include "tc/core/cuda/cuda_tc_executor.h"
#include "tc/core/scope_guard.h"
#include "tc/lang/canonicalize.h"
using namespace tc;
using namespace tc::aten;
using namespace tc::autotune;
using CudaOptionsCache =
Autotuner<CudaBackend, GeneticSearch>::OptionsCacheType;
TEST(DivisorsAndPowers, Default) {
auto dp = powers2andCeilDivisors(10);
std::vector<size_t> expected{1, 2, 3, 4, 5, 8, 10, 16};
ASSERT_EQ(dp, expected);
dp = powers2andCeilDivisors(72);
expected = {1, 2, 3, 4, 5, 8, 9, 16, 18, 32, 36, 64, 72, 128};
ASSERT_EQ(dp, expected);
dp = powers2andCeilDivisors(35);
expected = {1, 2, 3, 4, 5, 8, 9, 16, 18, 32, 35, 64};
ASSERT_EQ(dp, expected);
dp = powers2andCeilDivisors(130);
expected = {1, 2, 3, 4, 5, 8, 9, 16, 17, 32, 33, 64, 65, 128, 130, 256};
ASSERT_EQ(dp, expected);
}
std::vector<CudaMappingOptions> restoreCandidates(
CudaOptionsCache& optionsCache,
const std::string& tc,
std::vector<at::Tensor>& inputs,
std::vector<at::Tensor>& outputs) {
auto inputDLTensors = makeDLConstTensors(inputs);
auto outputDLTensors = makeDLTensors(outputs);
return optionsCache.getTopKOptions(
lang::canonicalTc(tc),
makeTensorInfoVector(extractRawPtrs(inputDLTensors)),
makeTensorInfoVector(extractRawPtrs(outputDLTensors)),
CudaGPUInfo::GPUInfo().getCudaDeviceStr(),
FLAGS_tuner_gen_restore_number);
}
TEST(RestoreCandidates, NotATCid) {
CudaOptionsCache optionsCache;
std::vector<at::Tensor> inputs{at::CUDA(at::kFloat).rand({10, 16}),
at::CUDA(at::kFloat).rand({16, 20})};
ASSERT_THROW(
restoreCandidates(optionsCache, "bla", inputs, inputs),
lang::ErrorReport);
}
static constexpr auto tc_ = R"(
def matmul(float(M,N) A, float(N,K) B) -> (output) {
output(m, k) +=! A(m, r_n) * B(r_n, k)
})";
TEST(RestoreCandidates, NoRuntimeRecorded) {
CudaOptionsCache optionsCache;
std::vector<at::Tensor> inputs{at::CUDA(at::kFloat).rand({10, 16}),
at::CUDA(at::kFloat).rand({16, 20})};
auto options = CudaMappingOptions::makeMlpMappingOptions();
auto pExecutor = compile<CudaBackend>(tc_, "matmul", inputs, options);
std::vector<at::Tensor> outputs = prepareOutputs(tc_, "matmul", inputs);
run(*pExecutor, inputs, outputs);
FLAGS_tuner_gen_restore_number = 1;
ASSERT_EQ(restoreCandidates(optionsCache, tc_, inputs, outputs).size(), 0u);
}
TEST(RestoreCandidates, Hit) {
CudaOptionsCache optionsCache;
std::vector<at::Tensor> inputs{at::CUDA(at::kFloat).rand({10, 16}),
at::CUDA(at::kFloat).rand({16, 20})};
auto options = CudaMappingOptions::makeMlpMappingOptions();
auto pExecutor = compile<CudaBackend>(tc_, "matmul", inputs, options);
std::vector<at::Tensor> outputs = prepareOutputs(tc_, "matmul", inputs);
auto timings = profile(*pExecutor, inputs, outputs);
auto inputDLTensors = makeDLConstTensors(inputs);
auto outputDLTensors = makeDLTensors(outputs);
optionsCache.recordRuntime(
lang::canonicalTc(tc_),
makeTensorInfoVector(extractRawPtrs(inputDLTensors)),
makeTensorInfoVector(extractRawPtrs(outputDLTensors)),
CudaGPUInfo::GPUInfo().getCudaDeviceStr(),
options,
timings.kernelRuntime);
{
options = CudaMappingOptions::makeNaiveMappingOptions();
auto pExecutor = compile<CudaBackend>(tc_, "matmul", inputs, options);
auto timings = profile(*pExecutor, inputs, outputs);
optionsCache.recordRuntime(
lang::canonicalTc(tc_),
makeTensorInfoVector(extractRawPtrs(inputDLTensors)),
makeTensorInfoVector(extractRawPtrs(outputDLTensors)),
CudaGPUInfo::GPUInfo().getCudaDeviceStr(),
options,
timings.kernelRuntime);
}
FLAGS_tuner_gen_restore_number = 2;
auto restored = restoreCandidates(optionsCache, tc_, inputs, outputs);
ASSERT_EQ(restored.size(), 2u);
FLAGS_tuner_gen_restore_number = 1;
restored = restoreCandidates(optionsCache, tc_, inputs, outputs);
ASSERT_EQ(restored.size(), 1u);
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
::gflags::ParseCommandLineFlags(&argc, &argv, true);
::google::InitGoogleLogging(argv[0]);
return RUN_ALL_TESTS();
}
| 2,049 |
852 | <gh_stars>100-1000
#include "FWCore/Framework/interface/MakerMacros.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "DQMServices/Core/interface/DQMStore.h"
#include "DQMServices/Core/interface/DQMEDAnalyzer.h"
#include "DQMOffline/Trigger/plugins/TriggerDQMBase.h"
#include "CommonTools/TriggerUtils/interface/GenericTriggerEventFlag.h"
#include "CommonTools/Utils/interface/StringCutObjectSelector.h"
#include "DataFormats/JetReco/interface/CaloJet.h"
#include "DataFormats/JetReco/interface/CaloJetCollection.h"
#include "DataFormats/TrackReco/interface/Track.h"
#include "DataFormats/TrackReco/interface/TrackFwd.h"
#include <string>
#include <vector>
class NoBPTXMonitor : public DQMEDAnalyzer, public TriggerDQMBase {
public:
typedef dqm::reco::MonitorElement MonitorElement;
typedef dqm::reco::DQMStore DQMStore;
NoBPTXMonitor(const edm::ParameterSet&);
~NoBPTXMonitor() throw() override;
static void fillDescriptions(edm::ConfigurationDescriptions& descriptions);
protected:
void bookHistograms(DQMStore::IBooker&, edm::Run const&, edm::EventSetup const&) override;
void analyze(edm::Event const& iEvent, edm::EventSetup const& iSetup) override;
private:
const std::string folderName_;
const bool requireValidHLTPaths_;
bool hltPathsAreValid_;
edm::EDGetTokenT<reco::CaloJetCollection> jetToken_;
edm::EDGetTokenT<reco::TrackCollection> muonToken_;
std::vector<double> jetE_variable_binning_;
MEbinning jetE_binning_;
MEbinning jetEta_binning_;
MEbinning jetPhi_binning_;
std::vector<double> muonPt_variable_binning_;
MEbinning muonPt_binning_;
MEbinning muonEta_binning_;
MEbinning muonPhi_binning_;
MEbinning ls_binning_;
MEbinning bx_binning_;
ObjME jetENoBPTX_;
ObjME jetENoBPTX_variableBinning_;
ObjME jetEVsLS_;
ObjME jetEVsBX_;
ObjME jetEtaNoBPTX_;
ObjME jetEtaVsLS_;
ObjME jetEtaVsBX_;
ObjME jetPhiNoBPTX_;
ObjME jetPhiVsLS_;
ObjME jetPhiVsBX_;
ObjME muonPtNoBPTX_;
ObjME muonPtNoBPTX_variableBinning_;
ObjME muonPtVsLS_;
ObjME muonPtVsBX_;
ObjME muonEtaNoBPTX_;
ObjME muonEtaVsLS_;
ObjME muonEtaVsBX_;
ObjME muonPhiNoBPTX_;
ObjME muonPhiVsLS_;
ObjME muonPhiVsBX_;
std::unique_ptr<GenericTriggerEventFlag> num_genTriggerEventFlag_;
std::unique_ptr<GenericTriggerEventFlag> den_genTriggerEventFlag_;
StringCutObjectSelector<reco::CaloJet, true> jetSelection_;
StringCutObjectSelector<reco::Track, true> muonSelection_;
unsigned int njets_;
unsigned int nmuons_;
};
NoBPTXMonitor::NoBPTXMonitor(const edm::ParameterSet& iConfig)
: folderName_(iConfig.getParameter<std::string>("FolderName")),
requireValidHLTPaths_(iConfig.getParameter<bool>("requireValidHLTPaths")),
hltPathsAreValid_(false),
jetToken_(consumes<reco::CaloJetCollection>(iConfig.getParameter<edm::InputTag>("jets"))),
muonToken_(consumes<reco::TrackCollection>(iConfig.getParameter<edm::InputTag>("muons"))),
jetE_variable_binning_(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<std::vector<double> >("jetEBinning")),
jetE_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("jetEPSet"))),
jetEta_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("jetEtaPSet"))),
jetPhi_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("jetPhiPSet"))),
muonPt_variable_binning_(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<std::vector<double> >("muonPtBinning")),
muonPt_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("muonPtPSet"))),
muonEta_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("muonEtaPSet"))),
muonPhi_binning_(getHistoPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("muonPhiPSet"))),
ls_binning_(
getHistoPSet(iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("lsPSet"))),
bx_binning_(getHistoLSPSet(
iConfig.getParameter<edm::ParameterSet>("histoPSet").getParameter<edm::ParameterSet>("bxPSet"))),
num_genTriggerEventFlag_(new GenericTriggerEventFlag(
iConfig.getParameter<edm::ParameterSet>("numGenericTriggerEventPSet"), consumesCollector(), *this)),
den_genTriggerEventFlag_(new GenericTriggerEventFlag(
iConfig.getParameter<edm::ParameterSet>("denGenericTriggerEventPSet"), consumesCollector(), *this)),
jetSelection_(iConfig.getParameter<std::string>("jetSelection")),
muonSelection_(iConfig.getParameter<std::string>("muonSelection")),
njets_(iConfig.getParameter<unsigned int>("njets")),
nmuons_(iConfig.getParameter<unsigned int>("nmuons")) {}
NoBPTXMonitor::~NoBPTXMonitor() throw() {
if (num_genTriggerEventFlag_) {
num_genTriggerEventFlag_.reset();
}
if (den_genTriggerEventFlag_) {
den_genTriggerEventFlag_.reset();
}
}
void NoBPTXMonitor::bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) {
// Initialize the GenericTriggerEventFlag
if (num_genTriggerEventFlag_ && num_genTriggerEventFlag_->on()) {
num_genTriggerEventFlag_->initRun(iRun, iSetup);
}
if (den_genTriggerEventFlag_ && den_genTriggerEventFlag_->on()) {
den_genTriggerEventFlag_->initRun(iRun, iSetup);
}
// check if every HLT path specified in numerator and denominator has a valid match in the HLT Menu
hltPathsAreValid_ = (num_genTriggerEventFlag_ && den_genTriggerEventFlag_ && num_genTriggerEventFlag_->on() &&
den_genTriggerEventFlag_->on() && num_genTriggerEventFlag_->allHLTPathsAreValid() &&
den_genTriggerEventFlag_->allHLTPathsAreValid());
// if valid HLT paths are required,
// create DQM outputs only if all paths are valid
if (requireValidHLTPaths_ and (not hltPathsAreValid_)) {
return;
}
std::string histname, histtitle;
std::string currentFolder = folderName_;
ibooker.setCurrentFolder(currentFolder);
histname = "jetE";
histtitle = "jetE";
bookME(ibooker, jetENoBPTX_, histname, histtitle, jetE_binning_.nbins, jetE_binning_.xmin, jetE_binning_.xmax);
setMETitle(jetENoBPTX_, "Jet E [GeV]", "Events / [GeV]");
histname = "jetE_variable";
histtitle = "jetE";
bookME(ibooker, jetENoBPTX_variableBinning_, histname, histtitle, jetE_variable_binning_);
setMETitle(jetENoBPTX_variableBinning_, "Jet E [GeV]", "Events / [GeV]");
histname = "jetEVsLS";
histtitle = "jetE vs LS";
bookME(ibooker,
jetEVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
jetE_binning_.xmin,
jetE_binning_.xmax);
setMETitle(jetEVsLS_, "LS", "Jet E [GeV]");
histname = "jetEVsBX";
histtitle = "jetE vs BX";
bookME(ibooker,
jetEVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
jetE_binning_.xmin,
jetE_binning_.xmax,
false);
setMETitle(jetEVsBX_, "BX", "Jet E [GeV]");
histname = "jetEta";
histtitle = "jetEta";
bookME(
ibooker, jetEtaNoBPTX_, histname, histtitle, jetEta_binning_.nbins, jetEta_binning_.xmin, jetEta_binning_.xmax);
setMETitle(jetEtaNoBPTX_, "Jet #eta", "Events");
histname = "jetEtaVsLS";
histtitle = "jetEta vs LS";
bookME(ibooker,
jetEtaVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
jetEta_binning_.xmin,
jetEta_binning_.xmax,
false);
setMETitle(jetEtaVsLS_, "LS", "Jet #eta");
histname = "jetEtaVsBX";
histtitle = "jetEta vs BX";
bookME(ibooker,
jetEtaVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
jetEta_binning_.xmin,
jetEta_binning_.xmax,
false);
setMETitle(jetEtaVsBX_, "BX", "Jet #eta");
histname = "jetPhi";
histtitle = "jetPhi";
bookME(
ibooker, jetPhiNoBPTX_, histname, histtitle, jetPhi_binning_.nbins, jetPhi_binning_.xmin, jetPhi_binning_.xmax);
setMETitle(jetPhiNoBPTX_, "Jet #phi", "Events");
histname = "jetPhiVsLS";
histtitle = "jetPhi vs LS";
bookME(ibooker,
jetPhiVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
jetPhi_binning_.xmin,
jetPhi_binning_.xmax,
false);
setMETitle(jetPhiVsLS_, "LS", "Jet #phi");
histname = "jetPhiVsBX";
histtitle = "jetPhi vs BX";
bookME(ibooker,
jetPhiVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
jetPhi_binning_.xmin,
jetPhi_binning_.xmax,
false);
setMETitle(jetPhiVsBX_, "BX", "Jet #phi");
histname = "muonPt";
histtitle = "muonPt";
bookME(
ibooker, muonPtNoBPTX_, histname, histtitle, muonPt_binning_.nbins, muonPt_binning_.xmin, muonPt_binning_.xmax);
setMETitle(muonPtNoBPTX_, "DisplacedStandAlone Muon p_{T} [GeV]", "Events / [GeV]");
histname = "muonPt_variable";
histtitle = "muonPt";
bookME(ibooker, muonPtNoBPTX_variableBinning_, histname, histtitle, muonPt_variable_binning_);
setMETitle(muonPtNoBPTX_variableBinning_, "DisplacedStandAlone Muon p_{T} [GeV]", "Events / [GeV]");
histname = "muonPtVsLS";
histtitle = "muonPt vs LS";
bookME(ibooker,
muonPtVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
muonPt_binning_.xmin,
muonPt_binning_.xmax,
false);
setMETitle(muonPtVsLS_, "LS", "DisplacedStandAlone Muon p_{T} [GeV]");
histname = "muonPtVsBX";
histtitle = "muonPt vs BX";
bookME(ibooker,
muonPtVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
muonPt_binning_.xmin,
muonPt_binning_.xmax,
false);
setMETitle(muonPtVsBX_, "BX", "DisplacedStandAlone Muon p_{T} [GeV]");
histname = "muonEta";
histtitle = "muonEta";
bookME(ibooker,
muonEtaNoBPTX_,
histname,
histtitle,
muonEta_binning_.nbins,
muonEta_binning_.xmin,
muonEta_binning_.xmax);
setMETitle(muonEtaNoBPTX_, "DisplacedStandAlone Muon #eta", "Events");
histname = "muonEtaVsLS";
histtitle = "muonEta vs LS";
bookME(ibooker,
muonEtaVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
muonEta_binning_.xmin,
muonEta_binning_.xmax,
false);
setMETitle(muonEtaVsLS_, "LS", "DisplacedStandAlone Muon #eta");
histname = "muonEtaVsBX";
histtitle = "muonEta vs BX";
bookME(ibooker,
muonEtaVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
muonEta_binning_.xmin,
muonEta_binning_.xmax,
false);
setMETitle(muonEtaVsBX_, "BX", "DisplacedStandAlone Muon #eta");
histname = "muonPhi";
histtitle = "muonPhi";
bookME(ibooker,
muonPhiNoBPTX_,
histname,
histtitle,
muonPhi_binning_.nbins,
muonPhi_binning_.xmin,
muonPhi_binning_.xmax);
setMETitle(muonPhiNoBPTX_, "DisplacedStandAlone Muon #phi", "Events");
histname = "muonPhiVsLS";
histtitle = "muonPhi vs LS";
bookME(ibooker,
muonPhiVsLS_,
histname,
histtitle,
ls_binning_.nbins,
ls_binning_.xmin,
ls_binning_.xmax,
muonPhi_binning_.xmin,
muonPhi_binning_.xmax,
false);
setMETitle(muonPhiVsLS_, "LS", "DisplacedStandAlone Muon #phi");
histname = "muonPhiVsBX";
histtitle = "muonPhi vs BX";
bookME(ibooker,
muonPhiVsBX_,
histname,
histtitle,
bx_binning_.nbins,
bx_binning_.xmin,
bx_binning_.xmax,
muonPhi_binning_.xmin,
muonPhi_binning_.xmax,
false);
setMETitle(muonPhiVsBX_, "BX", "DisplacedStandAlone Muon #phi");
}
void NoBPTXMonitor::analyze(edm::Event const& iEvent, edm::EventSetup const& iSetup) {
// if valid HLT paths are required,
// analyze event only if all paths are valid
if (requireValidHLTPaths_ and (not hltPathsAreValid_)) {
return;
}
// Filter out events if Trigger Filtering is requested
if (den_genTriggerEventFlag_->on() && !den_genTriggerEventFlag_->accept(iEvent, iSetup)) {
return;
}
const int ls = iEvent.id().luminosityBlock();
const int bx = iEvent.bunchCrossing();
edm::Handle<reco::CaloJetCollection> jetHandle;
iEvent.getByToken(jetToken_, jetHandle);
if ((unsigned int)(jetHandle->size()) < njets_)
return;
std::vector<reco::CaloJet> jets;
for (auto const& j : *jetHandle) {
if (jetSelection_(j))
jets.push_back(j);
}
if ((unsigned int)(jets.size()) < njets_)
return;
double jetE = -999;
double jetEta = -999;
double jetPhi = -999;
if (!jets.empty()) {
jetE = jets[0].energy();
jetEta = jets[0].eta();
jetPhi = jets[0].phi();
}
edm::Handle<reco::TrackCollection> DSAHandle;
iEvent.getByToken(muonToken_, DSAHandle);
if ((unsigned int)(DSAHandle->size()) < nmuons_)
return;
std::vector<reco::Track> muons;
for (auto const& m : *DSAHandle) {
if (muonSelection_(m))
muons.push_back(m);
}
if ((unsigned int)(muons.size()) < nmuons_)
return;
double muonPt = -999;
double muonEta = -999;
double muonPhi = -999;
if (!muons.empty()) {
muonPt = muons[0].pt();
muonEta = muons[0].eta();
muonPhi = muons[0].phi();
}
// passes numerator-trigger (fill-numerator flag)
const bool trg_passed = (num_genTriggerEventFlag_->on() && num_genTriggerEventFlag_->accept(iEvent, iSetup));
// filling histograms
jetENoBPTX_.fill(trg_passed, jetE);
jetENoBPTX_variableBinning_.fill(trg_passed, jetE);
jetEtaNoBPTX_.fill(trg_passed, jetEta);
jetPhiNoBPTX_.fill(trg_passed, jetPhi);
muonPtNoBPTX_.fill(trg_passed, muonPt);
muonPtNoBPTX_variableBinning_.fill(trg_passed, muonPt);
muonEtaNoBPTX_.fill(trg_passed, muonEta);
muonPhiNoBPTX_.fill(trg_passed, muonPhi);
jetEVsLS_.fill(trg_passed, ls, jetE);
if (trg_passed) {
jetEVsBX_.numerator->Fill(bx, jetE);
jetEtaVsLS_.numerator->Fill(ls, jetEta);
jetEtaVsBX_.numerator->Fill(bx, jetEta);
jetPhiVsLS_.numerator->Fill(ls, jetPhi);
jetPhiVsBX_.numerator->Fill(bx, jetPhi);
muonPtVsLS_.numerator->Fill(ls, muonPt);
muonPtVsBX_.numerator->Fill(bx, muonPt);
muonEtaVsLS_.numerator->Fill(ls, muonEta);
muonEtaVsBX_.numerator->Fill(bx, muonEta);
muonPhiVsLS_.numerator->Fill(ls, muonPhi);
muonPhiVsBX_.numerator->Fill(bx, muonPhi);
}
}
void NoBPTXMonitor::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
edm::ParameterSetDescription desc;
desc.add<std::string>("FolderName", "HLT/NoBPTX");
desc.add<bool>("requireValidHLTPaths", true);
desc.add<edm::InputTag>("jets", edm::InputTag("ak4CaloJets"));
desc.add<edm::InputTag>("muons", edm::InputTag("displacedStandAloneMuons"));
desc.add<std::string>("jetSelection", "pt > 0");
desc.add<std::string>("muonSelection", "pt > 0");
desc.add<unsigned int>("njets", 0);
desc.add<unsigned int>("nmuons", 0);
edm::ParameterSetDescription genericTriggerEventPSet;
genericTriggerEventPSet.add<bool>("andOr");
genericTriggerEventPSet.add<edm::InputTag>("dcsInputTag", edm::InputTag("scalersRawToDigi"));
genericTriggerEventPSet.add<std::vector<int> >("dcsPartitions", {});
genericTriggerEventPSet.add<bool>("andOrDcs", false);
genericTriggerEventPSet.add<bool>("errorReplyDcs", true);
genericTriggerEventPSet.add<std::string>("dbLabel", "");
genericTriggerEventPSet.add<bool>("andOrHlt", true);
genericTriggerEventPSet.add<edm::InputTag>("hltInputTag", edm::InputTag("TriggerResults::HLT"));
genericTriggerEventPSet.add<std::vector<std::string> >("hltPaths", {});
genericTriggerEventPSet.add<std::string>("hltDBKey", "");
genericTriggerEventPSet.add<bool>("errorReplyHlt", false);
genericTriggerEventPSet.add<unsigned int>("verbosityLevel", 1);
desc.add<edm::ParameterSetDescription>("numGenericTriggerEventPSet", genericTriggerEventPSet);
desc.add<edm::ParameterSetDescription>("denGenericTriggerEventPSet", genericTriggerEventPSet);
edm::ParameterSetDescription histoPSet;
edm::ParameterSetDescription jetEPSet;
edm::ParameterSetDescription jetEtaPSet;
edm::ParameterSetDescription jetPhiPSet;
edm::ParameterSetDescription muonPtPSet;
edm::ParameterSetDescription muonEtaPSet;
edm::ParameterSetDescription muonPhiPSet;
edm::ParameterSetDescription lsPSet;
edm::ParameterSetDescription bxPSet;
fillHistoPSetDescription(jetEPSet);
fillHistoPSetDescription(jetEtaPSet);
fillHistoPSetDescription(jetPhiPSet);
fillHistoPSetDescription(muonPtPSet);
fillHistoPSetDescription(muonEtaPSet);
fillHistoPSetDescription(muonPhiPSet);
fillHistoPSetDescription(lsPSet);
fillHistoLSPSetDescription(bxPSet);
histoPSet.add<edm::ParameterSetDescription>("jetEPSet", jetEPSet);
histoPSet.add<edm::ParameterSetDescription>("jetEtaPSet", jetEtaPSet);
histoPSet.add<edm::ParameterSetDescription>("jetPhiPSet", jetPhiPSet);
histoPSet.add<edm::ParameterSetDescription>("muonPtPSet", muonPtPSet);
histoPSet.add<edm::ParameterSetDescription>("muonEtaPSet", muonEtaPSet);
histoPSet.add<edm::ParameterSetDescription>("muonPhiPSet", muonPhiPSet);
histoPSet.add<edm::ParameterSetDescription>("lsPSet", lsPSet);
histoPSet.add<edm::ParameterSetDescription>("bxPSet", bxPSet);
std::vector<double> bins = {0., 20., 40., 60., 80., 90., 100., 110., 120., 130., 140., 150., 160.,
170., 180., 190., 200., 220., 240., 260., 280., 300., 350., 400., 450., 1000.};
histoPSet.add<std::vector<double> >("jetEBinning", bins);
histoPSet.add<std::vector<double> >("muonPtBinning", bins);
desc.add<edm::ParameterSetDescription>("histoPSet", histoPSet);
descriptions.add("NoBPTXMonitoring", desc);
}
// Define this as a plug-in
DEFINE_FWK_MODULE(NoBPTXMonitor);
| 8,310 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/033/03312304.json
{"nom":"Neuffons","circ":"12ème circonscription","dpt":"Gironde","inscrits":135,"abs":66,"votants":69,"blancs":8,"nuls":3,"exp":58,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":29},{"nuance":"FI","nom":"M. <NAME>","voix":29}]} | 131 |
375 | import numpy as np
import sys
from random import randint
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.io import loadmat
from scipy.io import savemat
mat = loadmat('./cache/script2.mat')
codeJ = mat['codeJ']
dim_voc = 539
bsz = 1
dim_h = 100
dim_cate_new = 19
dim_color = 17
dim_gender = 2
dim_sleeve = 4
num_layers = 2
class define_network(nn.Module):
def __init__(self):
super(define_network, self).__init__()
self.rnn = nn.RNN(dim_voc, dim_h, num_layers)
self.net_cate_new = nn.Linear(dim_h, dim_cate_new)
self.net_color = nn.Linear(dim_h, dim_color)
self.net_gender = nn.Linear(dim_h, dim_gender)
self.net_sleeve = nn.Linear(dim_h, dim_sleeve)
def forward(self, x):
h0 = Variable(torch.zeros(num_layers, bsz, dim_h).cuda())
_, hn = self.rnn(x, h0)
hn2 = hn[-1]
y_cate_new = self.net_cate_new(hn2)
y_color = self.net_color(hn2)
y_gender = self.net_gender(hn2)
y_sleeve = self.net_sleeve(hn2)
return hn2, y_cate_new, y_color, y_gender, y_sleeve
model = define_network()
model.cuda()
model.load_state_dict(torch.load('rnn_latest.pth'))
model.eval()
test_hn2 = np.zeros((len(codeJ), dim_h))
for sample_id in range(len(codeJ)):
c = codeJ[sample_id][0]
l = len(c)
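    # encode the word-index sequence as a (seq_len, batch, vocab) one-hot tensor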
cuda_c_onehot = torch.zeros(l, bsz, dim_voc).cuda()
for i in range(l):
cuda_c_onehot[i][0][int(c[i][0]-1)] = 1
cuda_c_onehot_v = Variable(cuda_c_onehot)
hn2, _, _, _, _ = model(cuda_c_onehot_v)
test_hn2[sample_id] = hn2.data[0].cpu().numpy()
result = {"hn2": test_hn2}
savemat("./cache/test_lang_initial.mat", result)
| 822 |
439 | /**
* Baidu.com,Inc.
* Copyright (c) 2000-2013 All Rights Reserved.
*/
package com.baidu.hsb.parser.ast.fragment.tableref;
import java.sql.SQLSyntaxErrorException;
import java.util.ArrayList;
import java.util.List;
import com.baidu.hsb.parser.ast.expression.primary.Identifier;
import com.baidu.hsb.parser.visitor.SQLASTVisitor;
/**
* used in <code>FROM</code> fragment
*
* @author <EMAIL>
*/
public class TableReferences implements TableReference {
protected static List<TableReference> ensureListType(List<TableReference> list) {
if (list instanceof ArrayList)
return list;
return new ArrayList<TableReference>(list);
}
private final List<TableReference> list;
/**
* @return never null
*/
public List<TableReference> getTableReferenceList() {
return list;
}
public TableReferences(List<TableReference> list) throws RuntimeException {
if (list == null || list.isEmpty()) {
throw new RuntimeException("at least one table reference");
}
this.list = ensureListType(list);
}
@Override
public Object removeLastConditionElement() {
if (list != null && !list.isEmpty()) {
return list.get(list.size() - 1).removeLastConditionElement();
}
return null;
}
@Override
public boolean isSingleTable() {
if (list == null) {
return false;
}
int count = 0;
TableReference first = null;
for (TableReference ref : list) {
if (ref != null && 1 == ++count) {
first = ref;
}
}
return count == 1 && first.isSingleTable();
}
@Override
public int getPrecedence() {
return TableReference.PRECEDENCE_REFS;
}
@Override
public void accept(SQLASTVisitor visitor) {
visitor.visit(this);
}
/*
* (non-Javadoc)
*
* @see com.baidu.hsb.parser.ast.fragment.tableref.TableReference#getTables()
*/
@Override
public List<Identifier> getTables() {
List<Identifier> list = new ArrayList<Identifier>();
for (TableReference tr : this.list) {
list.addAll(tr.getTables());
}
return list;
}
}
| 950 |
335 | <reponame>Safal08/Hacktoberfest-1<filename>M/Mute_verb.json
{
"word": "Mute",
"definitions": [
"Deaden, muffle, or soften the sound of.",
"Muffle the sound of (a musical instrument), especially by the use of a mute.",
"Reduce the strength or intensity of."
],
"parts-of-speech": "Verb"
} | 136 |
5,015 | package com.flipboard.bottomsheet.sample;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import com.flipboard.bottomsheet.BottomSheetLayout;
import com.flipboard.bottomsheet.R;
import com.flipboard.bottomsheet.commons.ImagePickerSheetView;
/**
* Activity demonstrating the use of {@link ImagePickerSheetView}
*/
public final class BottomSheetFragmentActivity extends AppCompatActivity {
protected BottomSheetLayout bottomSheetLayout;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_bottom_sheet_fragment);
bottomSheetLayout = (BottomSheetLayout) findViewById(R.id.bottomsheet);
findViewById(R.id.bottomsheet_fragment_button).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
new MyFragment().show(getSupportFragmentManager(), R.id.bottomsheet);
}
});
}
}
| 389 |
1,020 | <filename>codegen/src/test/java/org/robobinding/codegen/presentationmodel/processor/PresentationModelInfoProcessor.java<gh_stars>1000+
package org.robobinding.codegen.presentationmodel.processor;
import org.robobinding.codegen.apt.Logger;
import org.robobinding.codegen.apt.ProcessingContext;
import org.robobinding.codegen.presentationmodel.PresentationModelInfo;
/**
* @since 1.0
* @author <NAME>
*
*/
public class PresentationModelInfoProcessor extends PresentationModelProcessor {
public PresentationModelInfo result;
@Override
protected void generateAllClasses(PresentationModelInfo presentationModelInfo, ProcessingContext context, Logger log) {
this.result = presentationModelInfo;
}
}
| 208 |
2,338 | <gh_stars>1000+
//===-- ObjCLocalizeStringLiteralTests.cpp ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "TestTU.h"
#include "TweakTesting.h"
#include "gmock/gmock-matchers.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace clang {
namespace clangd {
namespace {
TWEAK_TEST(ObjCLocalizeStringLiteral);
TEST_F(ObjCLocalizeStringLiteralTest, Test) {
ExtraArgs.push_back("-x");
ExtraArgs.push_back("objective-c");
// Ensure the action can be initiated in the string literal.
EXPECT_AVAILABLE(R"(id x = ^[[@[[^"^t^est^"]]]];)");
// Ensure that the action can't be initiated in other places.
EXPECT_UNAVAILABLE(R"([[i^d ^[[x]] ^= @"test";^]])");
// Ensure that the action is not available for regular C strings.
EXPECT_UNAVAILABLE(R"(const char * x= "^test";)");
const char *Input = R"(id x = [[@"test"]];)";
const char *Output = R"(id x = NSLocalizedString(@"test", @"");)";
EXPECT_EQ(apply(Input), Output);
}
} // namespace
} // namespace clangd
} // namespace clang
| 462 |
724 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Ops for NAGO."""
import torch
from torch import nn
class depthwise_separable_conv_general(nn.Module):
"""Depthwise seperable convolution operation."""
def __init__(self, nin, nout, stride, kernel_size=3, padding=None):
"""Initialize depthwise_separable_conv_general."""
super().__init__()
if padding is None:
padding = (kernel_size - 1) // 2
self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size, stride=stride, padding=padding, groups=nin)
self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)
def forward(self, x):
"""Implement forward."""
out = self.depthwise(x)
out = self.pointwise(out)
return out
class Triplet_unit(nn.Module):
"""Node operation unit in the bottom-level graph."""
def __init__(self, inplanes, outplanes, dropout_p=0, stride=1, kernel_size=3):
"""Initialize Triplet_unit."""
super(Triplet_unit, self).__init__()
self.relu = nn.ReLU()
self.conv = depthwise_separable_conv_general(inplanes, outplanes, stride, kernel_size=kernel_size)
self.bn = nn.BatchNorm2d(outplanes)
self.dropout_p = dropout_p
if dropout_p > 0:
self.dropout = nn.Dropout(dropout_p)
def forward(self, x):
"""Implement forward."""
out = self.relu(x)
out = self.conv(out)
out = self.bn(out)
if self.dropout_p > 0:
out = self.dropout(out)
return out
class PassThrough(nn.Module):
"""Class PassThrough."""
def forward(self, x):
"""Forward method."""
return x
class BoundedScalarMultiply(nn.Module):
"""Class BoundedScalarMultiply."""
def __init__(self):
super().__init__()
self.mean = nn.Parameter(torch.ones(1))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
"""Forward method."""
return self.sigmoid(self.mean) * x
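# Illustrative usage sketch (shapes below are hypothetical, not taken from this file):
# a Triplet_unit applies ReLU -> depthwise-separable conv -> BatchNorm (+ optional dropout).
#
#     node = Triplet_unit(inplanes=32, outplanes=64, dropout_p=0.1, stride=2)
#     y = node(torch.randn(1, 32, 56, 56))   # -> torch.Size([1, 64, 28, 28])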
| 983 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.smarty.editor.completion;
import java.awt.Font;
import java.awt.Graphics;
import java.net.URL;
import javax.swing.ImageIcon;
import javax.swing.text.Caret;
import org.netbeans.api.editor.completion.Completion;
import org.netbeans.editor.BaseDocument;
import org.netbeans.editor.Utilities;
import org.netbeans.modules.editor.indent.api.Indent;
import org.netbeans.spi.editor.completion.*;
import java.awt.Color;
import java.awt.event.KeyEvent;
import javax.swing.text.BadLocationException;
import javax.swing.text.JTextComponent;
import org.netbeans.spi.editor.completion.support.AsyncCompletionTask;
import org.netbeans.spi.editor.completion.support.CompletionUtilities;
/**
* Code completion result item base class
*
* @author <NAME>
*/
public class TplCompletionItem implements CompletionItem {
protected static final int DEFAULT_SORT_PRIORITY = 22;
//------------------------------------------
protected String text, help, helpUrl;
protected boolean shift;
protected TplCompletionItem(String text) {
this.text = text;
}
protected TplCompletionItem(String text, String help) {
this.text = text;
this.help = help;
}
protected TplCompletionItem(String text, String help, String helpUrl) {
this(text);
this.help = help;
this.helpUrl = helpUrl;
}
public String getItemText() {
return text;
}
public String getItemHelp() {
return help;
}
public int getSortPriority() {
return DEFAULT_SORT_PRIORITY;
}
public CharSequence getSortText() {
return getItemText();
}
public CharSequence getInsertPrefix() {
return getItemText();
}
public void processKeyEvent(KeyEvent e) {
shift = (e.getKeyCode() == KeyEvent.VK_ENTER && e.getID() == KeyEvent.KEY_PRESSED && e.isShiftDown());
}
public void defaultAction(JTextComponent component) {
if (component != null) {
if (!shift) {
Completion.get().hideDocumentation();
Completion.get().hideCompletion();
}
substituteText(component, CodeCompletionUtils.getSubstitutionLenght(component.getDocument(), component.getCaretPosition()));
}
}
protected int getMoveBackLength() {
return 0; //default
}
/**
* Subclasses may override to customize the completed text
* if they do not want to override the substituteText method.
*/
protected String getSubstituteText() {
return getItemText();
}
protected boolean substituteText(JTextComponent c, int len) {
return substituteText(c, len, getMoveBackLength());
}
protected boolean substituteText(final JTextComponent c, final int len, int moveBack) {
return substituteText(c, getSubstituteText(), len, moveBack);
}
protected boolean substituteText(final JTextComponent c, final String substituteText, final int len, int moveBack) {
final BaseDocument doc = (BaseDocument) c.getDocument();
final boolean[] result = new boolean[1];
result[0] = true;
doc.runAtomic(new Runnable() {
public void run() {
try {
int substitutionOffset = c.getCaretPosition() - CodeCompletionUtils.getSubstitutionLenght(c.getDocument(), c.getCaretPosition());
                    //test whether we are trying to insert something that is already present in the text
String currentText = doc.getText(substitutionOffset, (doc.getLength() - substitutionOffset) < substituteText.length() ? (doc.getLength() - substitutionOffset) : substituteText.length());
if (!substituteText.equals(currentText)) {
//remove common part
doc.remove(substitutionOffset, len);
doc.insertString(substitutionOffset, substituteText, null);
} else {
c.setCaretPosition(c.getCaret().getDot() + substituteText.length() - len);
}
} catch (BadLocationException ex) {
result[0] = false;
}
}
});
//format the inserted text
reindent(c);
if (moveBack != 0) {
Caret caret = c.getCaret();
int dot = caret.getDot();
caret.setDot(dot - moveBack);
}
return result[0];
}
private void reindent(JTextComponent component) {
final BaseDocument doc = (BaseDocument) component.getDocument();
final int dotPos = component.getCaretPosition();
final Indent indent = Indent.get(doc);
indent.lock();
try {
doc.runAtomic(new Runnable() {
public void run() {
try {
int startOffset = Utilities.getRowStart(doc, dotPos);
int endOffset = Utilities.getRowEnd(doc, dotPos);
indent.reindent(startOffset, endOffset);
} catch (BadLocationException ex) {
//ignore
}
}
});
} finally {
indent.unlock();
}
}
public boolean instantSubstitution(JTextComponent component) {
if (component != null) {
try {
int substitutionOffset = CodeCompletionUtils.getSubstitutionLenght(component.getDocument(), component.getCaretPosition());
int caretOffset = component.getSelectionEnd();
if (caretOffset > substitutionOffset) {
String currentText = component.getDocument().getText(substitutionOffset, caretOffset - substitutionOffset);
if (!getSubstituteText().toString().startsWith(currentText)) {
return false;
}
}
} catch (BadLocationException ble) {
}
}
defaultAction(component);
return true;
}
public int getPreferredWidth(Graphics g, Font defaultFont) {
return CompletionUtilities.getPreferredWidth(getLeftHtmlText(), getRightHtmlText(), g, defaultFont);
}
public void render(Graphics g, Font defaultFont, Color defaultColor, Color backgroundColor, int width, int height, boolean selected) {
CompletionUtilities.renderHtml(getIcon(), getLeftHtmlText(), getRightHtmlText(), g, defaultFont, Color.BLACK, width, height, selected);
}
protected ImageIcon getIcon() {
return new ImageIcon(getClass().getResource("/org/netbeans/modules/php/smarty/resources/tpl-cc-icon.png"));
}
protected String getLeftHtmlText() {
return getItemText();
}
protected String getRightHtmlText() {
return null;
}
    /** Returns help for the item. If the item doesn't have help, returns null.
     * Subclasses can override this method and compose the help at runtime.
*/
public String getHelp() {
return getItemHelp();
}
    /** Returns whether the item has help.
*/
public boolean hasHelp() {
return (help != null && help.length() > 0);
}
    /** Returns a URL, or null if the help is not a URL or is not defined.
*/
public URL getHelpURL() {
if (help == null || help.equals("")) {
return null;
}
try {
return new URL(helpUrl);
} catch (java.io.IOException e) {
}
return null;
}
public CompletionTask createDocumentationTask() {
return new AsyncCompletionTask(new TplCompletionProvider.DocQuery(this));
}
public CompletionTask createToolTipTask() {
return null;
}
public static class BuiltInFunctionsCompletionItem extends TplCompletionItem {
protected static final String BUILT_IN_FUNC_COLOR = "529854";
public BuiltInFunctionsCompletionItem(String value, String help, String helpUrl) {
super(value, help, helpUrl);
}
@Override
protected String getLeftHtmlText() {
return "<font color=#" + BUILT_IN_FUNC_COLOR + ">" + getItemText() + "</font>"; //NOI18N
}
@Override
public int getSortPriority() {
return 20;
}
}
public static class CustomFunctionsCompletionItem extends BuiltInFunctionsCompletionItem {
protected static final String CUSTOM_FUNC_COLOR = "3B713B";
public CustomFunctionsCompletionItem(String value, String help, String helpUrl) {
super(value, help, helpUrl);
}
@Override
protected String getLeftHtmlText() {
return "<font color=#" + CUSTOM_FUNC_COLOR + ">" + getItemText() + "</font>"; //NOI18N
}
}
public static class FunctionParametersCompletionItem extends TplCompletionItem {
protected static final String CUSTOM_FUNC_COLOR = "D6822D";
public FunctionParametersCompletionItem(String value, String help) {
super(value, help);
}
@Override
protected String getLeftHtmlText() {
return "<font color=#" + CUSTOM_FUNC_COLOR + ">" + getItemText() + "</font>"; //NOI18N
}
@Override
public int getSortPriority() {
return 18;
}
}
public static class VariableModifiersCompletionItem extends TplCompletionItem {
protected static final String ATTR_NAME_COLOR = hexColorCode(Color.blue.darker());
public VariableModifiersCompletionItem(String value, String help, String helpUrl) {
super(value, help, helpUrl);
}
@Override
protected String getLeftHtmlText() {
return "<font color=#" + ATTR_NAME_COLOR + ">" + getItemText() + "</font>"; //NOI18N
}
@Override
public int getSortPriority() {
return 25;
}
}
public static final String hexColorCode(Color c) {
return Integer.toHexString(c.getRGB()).substring(2);
}
}
| 4,429 |
8,092 | <reponame>npodewitz/airflow<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from airflow.decorators.base import TaskDecorator
from airflow.decorators.branch_python import branch_task
from airflow.decorators.python import python_task
from airflow.decorators.python_virtualenv import virtualenv_task
from airflow.decorators.task_group import task_group
from airflow.models.dag import dag
from airflow.providers_manager import ProvidersManager
# Please keep this in sync with the .pyi's __all__.
__all__ = [
"TaskDecorator",
"TaskDecoratorCollection",
"dag",
"task",
"task_group",
"python_task",
"virtualenv_task",
"branch_task",
]
class TaskDecoratorCollection:
"""Implementation to provide the ``@task`` syntax."""
python: Any = staticmethod(python_task)
virtualenv = staticmethod(virtualenv_task)
branch = staticmethod(branch_task)
__call__ = python # Alias '@task' to '@task.python'.
def __getattr__(self, name: str) -> TaskDecorator:
"""Dynamically get provider-registered task decorators, e.g. ``@task.docker``."""
if name.startswith("__"):
raise AttributeError(f"{type(self).__name__} has no attribute {name!r}")
decorators = ProvidersManager().taskflow_decorators
if name not in decorators:
raise AttributeError(f"task decorator {name!r} not found")
return decorators[name]
task = TaskDecoratorCollection()
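# Illustrative usage sketch (assumes a working Airflow installation; the DAG context
# is omitted here):
#
#     from airflow.decorators import task
#
#     @task            # alias for @task.python, see TaskDecoratorCollection.__call__
#     def add(x, y):
#         return x + y
#
# Provider-registered decorators such as @task.docker are resolved lazily through
# TaskDecoratorCollection.__getattr__ once the matching provider package is installed.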
| 709 |
6,270 | <reponame>gabegorelick/aws-sdk-js<filename>.changes/2.1000.0.json
[
{
"type": "feature",
"category": "CodeBuild",
"description": "CodeBuild now allows you to select how batch build statuses are sent to the source provider for a project."
},
{
"type": "feature",
"category": "EFS",
"description": "EFS adds a new exception for short identifiers to be thrown after its migration to long resource identifiers."
}
] | 178 |
3,477 | // Copyright Microsoft and Project Verona Contributors.
// SPDX-License-Identifier: MIT
#pragma once
#include "compiler/visitor.h"
namespace verona::compiler
{
template<typename... Args>
class RecursiveExprVisitor : public ExprVisitor<void, Args...>
{
protected:
void visit_field(FieldExpr& expr, Args... args) override
{
this->visit_expr(*expr.expr, args...);
}
void visit_assign_local(AssignLocalExpr& expr, Args... args) override
{
this->visit_expr(*expr.right, args...);
}
void visit_assign_field(AssignFieldExpr& expr, Args... args) override
{
this->visit_expr(*expr.expr, args...);
this->visit_expr(*expr.right, args...);
}
void visit_seq(SeqExpr& expr, Args... args) override
{
for (const auto& e : expr.elements)
{
this->visit_expr(*e, args...);
}
this->visit_expr(*expr.last, args...);
}
void visit_call(CallExpr& expr, Args... args) override
{
this->visit_expr(*expr.receiver, args...);
for (const auto& arg : expr.arguments)
{
this->visit_expr(*arg->inner, args...);
}
}
void visit_when(WhenExpr& expr, Args... args) override
{
for (const auto& cown : expr.cowns)
{
if (auto argument = dynamic_cast<WhenArgumentAs*>(cown.get()))
{
this->visit_expr(*argument->inner, args...);
}
}
this->visit_expr(*expr.body, args...);
}
void visit_while(WhileExpr& expr, Args... args) override
{
this->visit_expr(*expr.condition, args...);
this->visit_expr(*expr.body, args...);
}
void visit_if(IfExpr& expr, Args... args) override
{
this->visit_expr(*expr.condition, args...);
this->visit_expr(*expr.then_block, args...);
if (expr.else_block)
{
this->visit_expr(*expr.else_block->body, args...);
}
}
void visit_block(BlockExpr& expr, Args... args) override
{
this->visit_expr(*expr.inner, args...);
}
void visit_define_local(DefineLocalExpr& expr, Args... args) override
{
if (expr.right)
{
this->visit_expr(*expr.right, args...);
}
}
void visit_match_expr(MatchExpr& expr, Args... args) override
{
this->visit_expr(*expr.expr, args...);
for (auto& arm : expr.arms)
{
this->visit_expr(*arm->expr, args...);
}
}
void visit_view_expr(ViewExpr& expr, Args... args) override
{
this->visit_expr(*expr.expr, args...);
}
void visit_new_expr(NewExpr& expr, Args... args) override {}
void visit_symbol(SymbolExpr& expr, Args... args) override {}
void visit_empty(EmptyExpr& expr, Args... args) override {}
void
visit_integer_literal_expr(IntegerLiteralExpr& expr, Args... args) override
{}
void
visit_string_literal_expr(StringLiteralExpr& expr, Args... args) override
{}
void
visit_binary_operator_expr(BinaryOperatorExpr& expr, Args... args) override
{
this->visit_expr(*expr.left, args...);
this->visit_expr(*expr.right, args...);
}
};
}
| 1,364 |
337 | class B extends A {
}
interface Y extends X {
}
interface U extends Z {
} | 28 |
715 | <reponame>iabhimanyu/Algorithms
import java.util.*;
import java.io.*;
class Factorial {
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
int n = sc.nextInt();
System.out.println("the factorial of this number is: " + factorial(n));
}
    // a recursive helper to calculate n!; static so it can be called from main
    public static int factorial(int n){
if(n==0)
return 1;
else
return n*factorial(n-1);
}
}
| 159 |
2,151 | <filename>jre_emul/android/platform/libcore/ojluni/src/main/java/java/nio/ByteBuffer.java
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
// -- This file was mechanically generated: Do not edit! -- //
package java.nio;
import libcore.io.Memory;
/**
* A byte buffer.
*
* <p> This class defines six categories of operations upon
* byte buffers:
*
* <ul>
*
* <li><p> Absolute and relative {@link #get() </code><i>get</i><code>} and
* {@link #put(byte) </code><i>put</i><code>} methods that read and write
* single bytes; </p></li>
*
* <li><p> Relative {@link #get(byte[]) </code><i>bulk get</i><code>}
* methods that transfer contiguous sequences of bytes from this buffer
* into an array; </p></li>
*
* <li><p> Relative {@link #put(byte[]) </code><i>bulk put</i><code>}
* methods that transfer contiguous sequences of bytes from a
* byte array or some other byte
* buffer into this buffer; </p></li>
*
* <li><p> Absolute and relative {@link #getChar() </code><i>get</i><code>}
* and {@link #putChar(char) </code><i>put</i><code>} methods that read and
* write values of other primitive types, translating them to and from
* sequences of bytes in a particular byte order; </p></li>
*
* <li><p> Methods for creating <i><a href="#views">view buffers</a></i>,
* which allow a byte buffer to be viewed as a buffer containing values of
* some other primitive type; and </p></li>
*
*
*
* <li><p> Methods for {@link #compact </code>compacting<code>}, {@link
* #duplicate </code>duplicating<code>}, and {@link #slice
* </code>slicing<code>} a byte buffer. </p></li>
*
* </ul>
*
* <p> Byte buffers can be created either by {@link #allocate
* </code><i>allocation</i><code>}, which allocates space for the buffer's
* content, or by {@link #wrap(byte[]) </code><i>wrapping</i><code>} an
* existing byte array into a buffer.
*
* <a name="direct">
* <h4> Direct <i>vs.</i> non-direct buffers </h4>
*
* <p> A byte buffer is either <i>direct</i> or <i>non-direct</i>. Given a
* direct byte buffer, the Java virtual machine will make a best effort to
* perform native I/O operations directly upon it. That is, it will attempt to
* avoid copying the buffer's content to (or from) an intermediate buffer
* before (or after) each invocation of one of the underlying operating
* system's native I/O operations.
*
* <p> A direct byte buffer may be created by invoking the {@link
* #allocateDirect(int) allocateDirect} factory method of this class. The
* buffers returned by this method typically have somewhat higher allocation
* and deallocation costs than non-direct buffers. The contents of direct
* buffers may reside outside of the normal garbage-collected heap, and so
* their impact upon the memory footprint of an application might not be
* obvious. It is therefore recommended that direct buffers be allocated
* primarily for large, long-lived buffers that are subject to the underlying
* system's native I/O operations. In general it is best to allocate direct
 * buffers only when they yield a measurable gain in program performance.
*
* <p> A direct byte buffer may also be created by {@link
* java.nio.channels.FileChannel#map </code>mapping<code>} a region of a file
* directly into memory. An implementation of the Java platform may optionally
* support the creation of direct byte buffers from native code via JNI. If an
* instance of one of these kinds of buffers refers to an inaccessible region
* of memory then an attempt to access that region will not change the buffer's
* content and will cause an unspecified exception to be thrown either at the
* time of the access or at some later time.
*
* <p> Whether a byte buffer is direct or non-direct may be determined by
* invoking its {@link #isDirect isDirect} method. This method is provided so
* that explicit buffer management can be done in performance-critical code.
*
* <a name="bin">
* <h4> Access to binary data </h4>
*
* <p> This class defines methods for reading and writing values of all other
* primitive types, except <tt>boolean</tt>. Primitive values are translated
* to (or from) sequences of bytes according to the buffer's current byte
* order, which may be retrieved and modified via the {@link #order order}
* methods. Specific byte orders are represented by instances of the {@link
* ByteOrder} class. The initial order of a byte buffer is always {@link
* ByteOrder#BIG_ENDIAN BIG_ENDIAN}.
*
* <p> For access to heterogeneous binary data, that is, sequences of values of
* different types, this class defines a family of absolute and relative
* <i>get</i> and <i>put</i> methods for each type. For 32-bit floating-point
* values, for example, this class defines:
*
* <blockquote><pre>
* float {@link #getFloat()}
* float {@link #getFloat(int) getFloat(int index)}
* void {@link #putFloat(float) putFloat(float f)}
* void {@link #putFloat(int, float) putFloat(int index, float f)}</pre></blockquote>
*
* <p> Corresponding methods are defined for the types <tt>char</tt>,
* <tt>short</tt>, <tt>int</tt>, <tt>long</tt>, and <tt>double</tt>. The index
* parameters of the absolute <i>get</i> and <i>put</i> methods are in terms of
* bytes rather than of the type being read or written.
*
* <a name="views">
*
* <p> For access to homogeneous binary data, that is, sequences of values of
* the same type, this class defines methods that can create <i>views</i> of a
* given byte buffer. A <i>view buffer</i> is simply another buffer whose
* content is backed by the byte buffer. Changes to the byte buffer's content
* will be visible in the view buffer, and vice versa; the two buffers'
* position, limit, and mark values are independent. The {@link
* #asFloatBuffer() asFloatBuffer} method, for example, creates an instance of
* the {@link FloatBuffer} class that is backed by the byte buffer upon which
* the method is invoked. Corresponding view-creation methods are defined for
* the types <tt>char</tt>, <tt>short</tt>, <tt>int</tt>, <tt>long</tt>, and
* <tt>double</tt>.
*
* <p> View buffers have three important advantages over the families of
* type-specific <i>get</i> and <i>put</i> methods described above:
*
* <ul>
*
* <li><p> A view buffer is indexed not in terms of bytes but rather in terms
* of the type-specific size of its values; </p></li>
*
* <li><p> A view buffer provides relative bulk <i>get</i> and <i>put</i>
* methods that can transfer contiguous sequences of values between a buffer
* and an array or some other buffer of the same type; and </p></li>
*
* <li><p> A view buffer is potentially much more efficient because it will
* be direct if, and only if, its backing byte buffer is direct. </p></li>
*
* </ul>
*
* <p> The byte order of a view buffer is fixed to be that of its byte buffer
* at the time that the view is created. </p>
*
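 * <p> As an illustrative sketch (values chosen only for this example), a view
 * buffer can be created and written through as follows:
 *
 * <blockquote><pre>
 * ByteBuffer bb = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
 * IntBuffer ib = bb.asIntBuffer();   // view backed by bb's content
 * ib.put(0, 42);                     // bytes 2a 00 00 00 become visible in bb</pre></blockquote>
 *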
* <h4> Invocation chaining </h4>
*
* <p> Methods in this class that do not otherwise have a value to return are
* specified to return the buffer upon which they are invoked. This allows
* method invocations to be chained.
*
* The sequence of statements
*
* <blockquote><pre>
* bb.putInt(0xCAFEBABE);
* bb.putShort(3);
* bb.putShort(45);</pre></blockquote>
*
* can, for example, be replaced by the single statement
*
* <blockquote><pre>
* bb.putInt(0xCAFEBABE).putShort(3).putShort(45);</pre></blockquote>
*
* @author <NAME>
* @author JSR-51 Expert Group
* @since 1.4
*/
public abstract class ByteBuffer
extends Buffer
implements Comparable<ByteBuffer> {
// These fields are declared here rather than in Heap-X-Buffer in order to
// reduce the number of virtual method invocations needed to access these
// values, which is especially costly when coding small buffers.
//
final byte[] hb; // Non-null only for heap buffers
final int offset;
boolean isReadOnly; // Valid only for heap buffers
// Creates a new buffer with the given mark, position, limit, capacity,
// backing array, and array offset
//
ByteBuffer(int mark, int pos, int lim, int cap, // package-private
byte[] hb, int offset) {
super(mark, pos, lim, cap, 0);
this.hb = hb;
this.offset = offset;
}
// Creates a new buffer with the given mark, position, limit, and capacity
//
ByteBuffer(int mark, int pos, int lim, int cap) { // package-private
this(mark, pos, lim, cap, null, 0);
}
/**
* Allocates a new direct byte buffer.
*
* <p> The new buffer's position will be zero, its limit will be its
* capacity, its mark will be undefined, and each of its elements will be
* initialized to zero. Whether or not it has a
* {@link #hasArray </code>backing array<code>} is unspecified.
*
* @param capacity The new buffer's capacity, in bytes
* @return The new byte buffer
* @throws IllegalArgumentException If the <tt>capacity</tt> is a negative integer
*/
public static ByteBuffer allocateDirect(int capacity) {
if (capacity < 0) {
throw new IllegalArgumentException("capacity < 0: " + capacity);
}
DirectByteBuffer.MemoryRef memoryRef = new DirectByteBuffer.MemoryRef(capacity);
return new DirectByteBuffer(capacity, memoryRef);
}
/**
* Allocates a new byte buffer.
*
* <p> The new buffer's position will be zero, its limit will be its
* capacity, its mark will be undefined, and each of its elements will be
* initialized to zero. It will have a {@link #array
* </code>backing array<code>}, and its {@link #arrayOffset </code>array
* offset<code>} will be zero.
*
* @param capacity The new buffer's capacity, in bytes
* @return The new byte buffer
* @throws IllegalArgumentException If the <tt>capacity</tt> is a negative integer
*/
public static ByteBuffer allocate(int capacity) {
if (capacity < 0)
throw new IllegalArgumentException();
return new HeapByteBuffer(capacity, capacity);
}
/**
* Wraps a byte array into a buffer.
*
* <p> The new buffer will be backed by the given byte array;
* that is, modifications to the buffer will cause the array to be modified
* and vice versa. The new buffer's capacity will be
* <tt>array.length</tt>, its position will be <tt>offset</tt>, its limit
* will be <tt>offset + length</tt>, and its mark will be undefined. Its
* {@link #array </code>backing array<code>} will be the given array, and
* its {@link #arrayOffset </code>array offset<code>} will be zero. </p>
*
* @param array The array that will back the new buffer
* @param offset The offset of the subarray to be used; must be non-negative and
* no larger than <tt>array.length</tt>. The new buffer's position
* will be set to this value.
* @param length The length of the subarray to be used;
* must be non-negative and no larger than
* <tt>array.length - offset</tt>.
* The new buffer's limit will be set to <tt>offset + length</tt>.
* @return The new byte buffer
* @throws IndexOutOfBoundsException If the preconditions on the <tt>offset</tt> and
* <tt>length</tt>
* parameters do not hold
*/
public static ByteBuffer wrap(byte[] array,
int offset, int length) {
try {
return new HeapByteBuffer(array, offset, length);
} catch (IllegalArgumentException x) {
throw new IndexOutOfBoundsException();
}
}
/**
* Wraps a byte array into a buffer.
*
* <p> The new buffer will be backed by the given byte array;
* that is, modifications to the buffer will cause the array to be modified
* and vice versa. The new buffer's capacity and limit will be
* <tt>array.length</tt>, its position will be zero, and its mark will be
* undefined. Its {@link #array </code>backing array<code>} will be the
* given array, and its {@link #arrayOffset </code>array offset<code>} will
* be zero. </p>
*
* @param array The array that will back this buffer
* @return The new byte buffer
*/
public static ByteBuffer wrap(byte[] array) {
return wrap(array, 0, array.length);
}
/**
* Creates a new byte buffer whose content is a shared subsequence of
* this buffer's content.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer, and its mark
* will be undefined. The new buffer will be direct if, and only if, this
* buffer is direct, and it will be read-only if, and only if, this buffer
* is read-only. </p>
*
* @return The new byte buffer
*/
public abstract ByteBuffer slice();
/**
* Creates a new byte buffer that shares this buffer's content.
*
* <p> The content of the new buffer will be that of this buffer. Changes
* to this buffer's content will be visible in the new buffer, and vice
* versa; the two buffers' position, limit, and mark values will be
* independent.
*
* <p> The new buffer's capacity, limit, position, and mark values will be
* identical to those of this buffer. The new buffer will be direct if,
* and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return The new byte buffer
*/
public abstract ByteBuffer duplicate();
/**
* Creates a new, read-only byte buffer that shares this buffer's
* content.
*
* <p> The content of the new buffer will be that of this buffer. Changes
* to this buffer's content will be visible in the new buffer; the new
* buffer itself, however, will be read-only and will not allow the shared
* content to be modified. The two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's capacity, limit, position, and mark values will be
* identical to those of this buffer.
*
* <p> If this buffer is itself read-only then this method behaves in
* exactly the same way as the {@link #duplicate duplicate} method. </p>
*
* @return The new, read-only byte buffer
*/
public abstract ByteBuffer asReadOnlyBuffer();
// -- Singleton get/put methods --
/**
* Relative <i>get</i> method. Reads the byte at this buffer's
* current position, and then increments the position. </p>
*
* @return The byte at the buffer's current position
* @throws BufferUnderflowException If the buffer's current position is not smaller than its
* limit
*/
public abstract byte get();
/**
* Relative <i>put</i> method <i>(optional operation)</i>.
*
* <p> Writes the given byte into this buffer at the current
* position, and then increments the position. </p>
*
* @param b The byte to be written
* @return This buffer
* @throws BufferOverflowException If this buffer's current position is not smaller than its
* limit
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer put(byte b);
/**
* Absolute <i>get</i> method. Reads the byte at the given
* index. </p>
*
* @param index The index from which the byte will be read
* @return The byte at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit
*/
public abstract byte get(int index);
/**
* Absolute <i>put</i> method <i>(optional operation)</i>.
*
* <p> Writes the given byte into this buffer at the given
* index. </p>
*
* @param index The index at which the byte will be written
* @param b The byte value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer put(int index, byte b);
// -- Bulk get operations --
/**
* Relative bulk <i>get</i> method.
*
* <p> This method transfers bytes from this buffer into the given
* destination array. If there are fewer bytes remaining in the
* buffer than are required to satisfy the request, that is, if
* <tt>length</tt> <tt>></tt> <tt>remaining()</tt>, then no
* bytes are transferred and a {@link BufferUnderflowException} is
* thrown.
*
* <p> Otherwise, this method copies <tt>length</tt> bytes from this
* buffer into the given array, starting at the current position of this
* buffer and at the given offset in the array. The position of this
* buffer is then incremented by <tt>length</tt>.
*
* <p> In other words, an invocation of this method of the form
* <tt>src.get(dst, off, len)</tt> has exactly the same effect as
* the loop
*
* <pre>
* for (int i = off; i < off + len; i++)
* dst[i] = src.get(); </pre>
*
* except that it first checks that there are sufficient bytes in
* this buffer and it is potentially much more efficient. </p>
*
* @param dst The array into which bytes are to be written
* @param offset The offset within the array of the first byte to be
* written; must be non-negative and no larger than
* <tt>dst.length</tt>
* @param length The maximum number of bytes to be written to the given
* array; must be non-negative and no larger than
* <tt>dst.length - offset</tt>
* @return This buffer
* @throws BufferUnderflowException If there are fewer than <tt>length</tt> bytes
* remaining in this buffer
* @throws IndexOutOfBoundsException If the preconditions on the <tt>offset</tt> and
* <tt>length</tt>
* parameters do not hold
*/
public ByteBuffer get(byte[] dst, int offset, int length) {
checkBounds(offset, length, dst.length);
if (length > remaining())
throw new BufferUnderflowException();
int end = offset + length;
for (int i = offset; i < end; i++)
dst[i] = get();
return this;
}
/**
* Relative bulk <i>get</i> method.
*
* <p> This method transfers bytes from this buffer into the given
* destination array. An invocation of this method of the form
* <tt>src.get(a)</tt> behaves in exactly the same way as the invocation
*
* <pre>
* src.get(a, 0, a.length) </pre>
*
* @return This buffer
* @throws BufferUnderflowException If there are fewer than <tt>length</tt> bytes
* remaining in this buffer
*/
public ByteBuffer get(byte[] dst) {
return get(dst, 0, dst.length);
}
// -- Bulk put operations --
/**
* Relative bulk <i>put</i> method <i>(optional operation)</i>.
*
* <p> This method transfers the bytes remaining in the given source
* buffer into this buffer. If there are more bytes remaining in the
* source buffer than in this buffer, that is, if
* <tt>src.remaining()</tt> <tt>></tt> <tt>remaining()</tt>,
* then no bytes are transferred and a {@link
* BufferOverflowException} is thrown.
*
* <p> Otherwise, this method copies
* <i>n</i> = <tt>src.remaining()</tt> bytes from the given
* buffer into this buffer, starting at each buffer's current position.
* The positions of both buffers are then incremented by <i>n</i>.
*
* <p> In other words, an invocation of this method of the form
* <tt>dst.put(src)</tt> has exactly the same effect as the loop
*
* <pre>
* while (src.hasRemaining())
* dst.put(src.get()); </pre>
*
* except that it first checks that there is sufficient space in this
* buffer and it is potentially much more efficient. </p>
*
* @param src The source buffer from which bytes are to be read;
* must not be this buffer
* @return This buffer
* @throws BufferOverflowException If there is insufficient space in this buffer
* for the remaining bytes in the source buffer
* @throws IllegalArgumentException If the source buffer is this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public ByteBuffer put(ByteBuffer src) {
if (!isAccessible()) {
throw new IllegalStateException("buffer is inaccessible");
}
if (isReadOnly) {
throw new ReadOnlyBufferException();
}
if (src == this) {
throw new IllegalArgumentException();
}
int n = src.remaining();
if (n > remaining()) {
throw new BufferOverflowException();
}
// Note that we use offset instead of arrayOffset because arrayOffset is specified to
// throw for read only buffers. Our use of arrayOffset here is provably safe, we only
// use it to read *from* readOnly buffers.
if (this.hb != null && src.hb != null) {
// System.arraycopy is intrinsified by art and therefore tiny bit faster than memmove
System.arraycopy(src.hb, src.position() + src.offset, hb, position() + offset, n);
} else {
// Use the buffer object (and the raw memory address) if it's a direct buffer. Note that
// isDirect() doesn't imply !hasArray(), ByteBuffer.allocateDirect allocated buffer will
// have a backing, non-gc-movable byte array. JNI allocated direct byte buffers WILL NOT
// have a backing array.
final Object srcObject = src.isDirect() ? src : src.array();
int srcOffset = src.position();
if (!src.isDirect()) {
srcOffset += src.offset;
}
final ByteBuffer dst = this;
final Object dstObject = dst.isDirect() ? dst : dst.array();
int dstOffset = dst.position();
if (!dst.isDirect()) {
dstOffset += dst.offset;
}
Memory.memmove(dstObject, dstOffset, srcObject, srcOffset, n);
}
src.position(src.limit());
this.position(this.position() + n);
return this;
}
/**
* Relative bulk <i>put</i> method <i>(optional operation)</i>.
*
* <p> This method transfers bytes into this buffer from the given
* source array. If there are more bytes to be copied from the array
* than remain in this buffer, that is, if
* <tt>length</tt> <tt>></tt> <tt>remaining()</tt>, then no
* bytes are transferred and a {@link BufferOverflowException} is
* thrown.
*
* <p> Otherwise, this method copies <tt>length</tt> bytes from the
* given array into this buffer, starting at the given offset in the array
* and at the current position of this buffer. The position of this buffer
* is then incremented by <tt>length</tt>.
*
* <p> In other words, an invocation of this method of the form
* <tt>dst.put(src, off, len)</tt> has exactly the same effect as
* the loop
*
* <pre>
* for (int i = off; i < off + len; i++)
* dst.put(a[i]); </pre>
*
* except that it first checks that there is sufficient space in this
* buffer and it is potentially much more efficient. </p>
*
* @param src The array from which bytes are to be read
* @param offset The offset within the array of the first byte to be read;
* must be non-negative and no larger than <tt>array.length</tt>
* @param length The number of bytes to be read from the given array;
* must be non-negative and no larger than
* <tt>array.length - offset</tt>
* @return This buffer
* @throws BufferOverflowException If there is insufficient space in this buffer
* @throws IndexOutOfBoundsException If the preconditions on the <tt>offset</tt> and
* <tt>length</tt>
* parameters do not hold
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public ByteBuffer put(byte[] src, int offset, int length) {
checkBounds(offset, length, src.length);
if (length > remaining())
throw new BufferOverflowException();
int end = offset + length;
for (int i = offset; i < end; i++)
this.put(src[i]);
return this;
}
/**
* Relative bulk <i>put</i> method <i>(optional operation)</i>.
*
* <p> This method transfers the entire content of the given source
* byte array into this buffer. An invocation of this method of the
* form <tt>dst.put(a)</tt> behaves in exactly the same way as the
* invocation
*
* <pre>
* dst.put(a, 0, a.length) </pre>
*
* @return This buffer
* @throws BufferOverflowException If there is insufficient space in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public final ByteBuffer put(byte[] src) {
return put(src, 0, src.length);
}
// -- Other stuff --
/**
* Tells whether or not this buffer is backed by an accessible byte
* array.
*
* <p> If this method returns <tt>true</tt> then the {@link #array() array}
* and {@link #arrayOffset() arrayOffset} methods may safely be invoked.
* </p>
*
* @return <tt>true</tt> if, and only if, this buffer
* is backed by an array and is not read-only
*/
public final boolean hasArray() {
return (hb != null) && !isReadOnly();
}
/**
* Returns the byte array that backs this
* buffer <i>(optional operation)</i>.
*
* <p> Modifications to this buffer's content will cause the returned
* array's content to be modified, and vice versa.
*
* <p> Invoke the {@link #hasArray hasArray} method before invoking this
* method in order to ensure that this buffer has an accessible backing
* array. </p>
*
* @return The array that backs this buffer
* @throws ReadOnlyBufferException If this buffer is backed by an array but is read-only
* @throws UnsupportedOperationException If this buffer is not backed by an accessible array
*/
public final byte[] array() {
if (hb == null)
throw new UnsupportedOperationException();
if (isReadOnly)
throw new ReadOnlyBufferException();
return hb;
}
/**
* Returns the offset within this buffer's backing array of the first
* element of the buffer <i>(optional operation)</i>.
*
* <p> If this buffer is backed by an array then buffer position <i>p</i>
* corresponds to array index <i>p</i> + <tt>arrayOffset()</tt>.
*
* <p> Invoke the {@link #hasArray hasArray} method before invoking this
* method in order to ensure that this buffer has an accessible backing
* array. </p>
*
* @return The offset within this buffer's array
* of the first element of the buffer
* @throws ReadOnlyBufferException If this buffer is backed by an array but is read-only
* @throws UnsupportedOperationException If this buffer is not backed by an accessible array
*/
public final int arrayOffset() {
if (hb == null)
throw new UnsupportedOperationException();
if (isReadOnly)
throw new ReadOnlyBufferException();
return offset;
}
/**
* Compacts this buffer <i>(optional operation)</i>.
*
* <p> The bytes between the buffer's current position and its limit,
* if any, are copied to the beginning of the buffer. That is, the
* byte at index <i>p</i> = <tt>position()</tt> is copied
* to index zero, the byte at index <i>p</i> + 1 is copied
* to index one, and so forth until the byte at index
* <tt>limit()</tt> - 1 is copied to index
* <i>n</i> = <tt>limit()</tt> - <tt>1</tt> - <i>p</i>.
* The buffer's position is then set to <i>n+1</i> and its limit is set to
* its capacity. The mark, if defined, is discarded.
*
* <p> The buffer's position is set to the number of bytes copied,
* rather than to zero, so that an invocation of this method can be
* followed immediately by an invocation of another relative <i>put</i>
* method. </p>
*
*
*
* <p> Invoke this method after writing data from a buffer in case the
* write was incomplete. The following loop, for example, copies bytes
* from one channel to another via the buffer <tt>buf</tt>:
*
* <blockquote><pre>
* buf.clear(); // Prepare buffer for use
* while (in.read(buf) >= 0 || buf.position != 0) {
* buf.flip();
* out.write(buf);
* buf.compact(); // In case of partial write
* }</pre></blockquote>
*
* @return This buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer compact();
/**
* Tells whether or not this byte buffer is direct. </p>
*
* @return <tt>true</tt> if, and only if, this buffer is direct
*/
public abstract boolean isDirect();
/**
* Returns a string summarizing the state of this buffer. </p>
*
* @return A summary string
*/
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append(getClass().getName());
sb.append("[pos=");
sb.append(position());
sb.append(" lim=");
sb.append(limit());
sb.append(" cap=");
sb.append(capacity());
sb.append("]");
return sb.toString();
}
/**
* Returns the current hash code of this buffer.
*
* <p> The hash code of a byte buffer depends only upon its remaining
* elements; that is, upon the elements from <tt>position()</tt> up to, and
* including, the element at <tt>limit()</tt> - <tt>1</tt>.
*
* <p> Because buffer hash codes are content-dependent, it is inadvisable
* to use buffers as keys in hash maps or similar data structures unless it
* is known that their contents will not change. </p>
*
* @return The current hash code of this buffer
*/
public int hashCode() {
int h = 1;
int p = position();
for (int i = limit() - 1; i >= p; i--)
h = 31 * h + (int) get(i);
return h;
}
/**
* Tells whether or not this buffer is equal to another object.
*
* <p> Two byte buffers are equal if, and only if,
*
* <p><ol>
*
* <li><p> They have the same element type, </p></li>
*
* <li><p> They have the same number of remaining elements, and
* </p></li>
*
* <li><p> The two sequences of remaining elements, considered
* independently of their starting positions, are pointwise equal.
*
*
*
*
*
*
*
* </p></li>
*
* </ol>
*
* <p> A byte buffer is not equal to any other type of object. </p>
*
* @param ob The object to which this buffer is to be compared
* @return <tt>true</tt> if, and only if, this buffer is equal to the
* given object
*/
public boolean equals(Object ob) {
if (this == ob)
return true;
if (!(ob instanceof ByteBuffer))
return false;
ByteBuffer that = (ByteBuffer) ob;
if (this.remaining() != that.remaining())
return false;
int p = this.position();
for (int i = this.limit() - 1, j = that.limit() - 1; i >= p; i--, j--)
if (!equals(this.get(i), that.get(j)))
return false;
return true;
}
private static boolean equals(byte x, byte y) {
return x == y;
}
/**
* Compares this buffer to another.
*
* <p> Two byte buffers are compared by comparing their sequences of
* remaining elements lexicographically, without regard to the starting
* position of each sequence within its corresponding buffer.
*
*
*
*
*
*
*
*
* Pairs of {@code byte} elements are compared as if by invoking
* {@link Byte#compare(byte, byte)}.
*
*
* <p> A byte buffer is not comparable to any other type of object.
*
* @return A negative integer, zero, or a positive integer as this buffer
* is less than, equal to, or greater than the given buffer
*/
public int compareTo(ByteBuffer that) {
int n = this.position() + Math.min(this.remaining(), that.remaining());
for (int i = this.position(), j = that.position(); i < n; i++, j++) {
int cmp = compare(this.get(i), that.get(j));
if (cmp != 0)
return cmp;
}
return this.remaining() - that.remaining();
}
private static int compare(byte x, byte y) {
return Byte.compare(x, y);
}
// -- Other char stuff --
// -- Other byte stuff: Access to binary data --
boolean bigEndian // package-private
= true;
boolean nativeByteOrder // package-private
= (Bits.byteOrder() == ByteOrder.BIG_ENDIAN);
/**
* Retrieves this buffer's byte order.
*
* <p> The byte order is used when reading or writing multibyte values, and
* when creating buffers that are views of this byte buffer. The order of
* a newly-created byte buffer is always {@link ByteOrder#BIG_ENDIAN
* BIG_ENDIAN}. </p>
*
* @return This buffer's byte order
*/
public final ByteOrder order() {
return bigEndian ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;
}
/**
* Modifies this buffer's byte order. </p>
*
* @param bo The new byte order,
* either {@link ByteOrder#BIG_ENDIAN BIG_ENDIAN}
* or {@link ByteOrder#LITTLE_ENDIAN LITTLE_ENDIAN}
* @return This buffer
*/
public final ByteBuffer order(ByteOrder bo) {
bigEndian = (bo == ByteOrder.BIG_ENDIAN);
nativeByteOrder =
(bigEndian == (Bits.byteOrder() == ByteOrder.BIG_ENDIAN));
return this;
}
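    // Illustrative sketch (example values, not taken from this file) of how the
    // configured order affects multibyte writes:
    //
    //     ByteBuffer bb = ByteBuffer.allocate(2);
    //     bb.order(ByteOrder.LITTLE_ENDIAN).putShort((short) 0x0102); // bytes: 02 01
    //     bb.clear();
    //     bb.order(ByteOrder.BIG_ENDIAN).putShort((short) 0x0102);    // bytes: 01 02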
// Unchecked accessors, for use by ByteBufferAs-X-Buffer classes
//
abstract byte _get(int i); // package-private
abstract void _put(int i, byte b); // package-private
/**
* Relative <i>get</i> method for reading a char value.
*
* <p> Reads the next two bytes at this buffer's current position,
* composing them into a char value according to the current byte order,
* and then increments the position by two. </p>
*
* @return The char value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than two bytes
* remaining in this buffer
*/
public abstract char getChar();
/**
* Relative <i>put</i> method for writing a char
* value <i>(optional operation)</i>.
*
* <p> Writes two bytes containing the given char value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by two. </p>
*
* @param value The char value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than two bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putChar(char value);
/**
* Absolute <i>get</i> method for reading a char value.
*
* <p> Reads two bytes at the given index, composing them into a
* char value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The char value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus one
*/
public abstract char getChar(int index);
char getCharUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, char[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing a char
* value <i>(optional operation)</i>.
*
* <p> Writes two bytes containing the given char value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The char value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus one
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putChar(int index, char value);
void putCharUnchecked(int index, char value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, char[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as a char buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* two, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new char buffer
*/
public abstract CharBuffer asCharBuffer();
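    // Sketch of the view-buffer behaviour described above (illustrative only; the sizes
    // and values are assumptions, not taken from the original source):
    //
    //   ByteBuffer bytes = ByteBuffer.allocate(8);   // 8 bytes remaining
    //   CharBuffer chars = bytes.asCharBuffer();     // capacity and limit are 8 / 2 = 4
    //   bytes.putChar(0, 'A');
    //   chars.get(0) == 'A'                          // changes in one buffer show in the other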
/**
* Relative <i>get</i> method for reading a short value.
*
* <p> Reads the next two bytes at this buffer's current position,
* composing them into a short value according to the current byte order,
* and then increments the position by two. </p>
*
* @return The short value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than two bytes
* remaining in this buffer
*/
public abstract short getShort();
/**
* Relative <i>put</i> method for writing a short
* value <i>(optional operation)</i>.
*
* <p> Writes two bytes containing the given short value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by two. </p>
*
* @param value The short value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than two bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putShort(short value);
/**
* Absolute <i>get</i> method for reading a short value.
*
* <p> Reads two bytes at the given index, composing them into a
* short value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The short value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus one
*/
public abstract short getShort(int index);
short getShortUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, short[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing a short
* value <i>(optional operation)</i>.
*
* <p> Writes two bytes containing the given short value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The short value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus one
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putShort(int index, short value);
void putShortUnchecked(int index, short value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, short[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as a short buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* two, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new short buffer
*/
public abstract ShortBuffer asShortBuffer();
/**
* Relative <i>get</i> method for reading an int value.
*
* <p> Reads the next four bytes at this buffer's current position,
* composing them into an int value according to the current byte order,
* and then increments the position by four. </p>
*
* @return The int value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than four bytes
* remaining in this buffer
*/
public abstract int getInt();
/**
* Relative <i>put</i> method for writing an int
* value <i>(optional operation)</i>.
*
* <p> Writes four bytes containing the given int value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by four. </p>
*
* @param value The int value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than four bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putInt(int value);
/**
* Absolute <i>get</i> method for reading an int value.
*
     * <p> Reads four bytes at the given index, composing them into an
* int value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The int value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus three
*/
public abstract int getInt(int index);
int getIntUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, int[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing an int
* value <i>(optional operation)</i>.
*
* <p> Writes four bytes containing the given int value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The int value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus three
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putInt(int index, int value);
void putIntUnchecked(int index, int value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, int[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as an int buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* four, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new int buffer
*/
public abstract IntBuffer asIntBuffer();
/**
* Relative <i>get</i> method for reading a long value.
*
* <p> Reads the next eight bytes at this buffer's current position,
* composing them into a long value according to the current byte order,
* and then increments the position by eight. </p>
*
* @return The long value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than eight bytes
* remaining in this buffer
*/
public abstract long getLong();
/**
* Relative <i>put</i> method for writing a long
* value <i>(optional operation)</i>.
*
* <p> Writes eight bytes containing the given long value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by eight. </p>
*
* @param value The long value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than eight bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putLong(long value);
/**
* Absolute <i>get</i> method for reading a long value.
*
* <p> Reads eight bytes at the given index, composing them into a
* long value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The long value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus seven
*/
public abstract long getLong(int index);
long getLongUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, long[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing a long
* value <i>(optional operation)</i>.
*
* <p> Writes eight bytes containing the given long value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The long value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus seven
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putLong(int index, long value);
void putLongUnchecked(int index, long value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, long[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as a long buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* eight, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new long buffer
*/
public abstract LongBuffer asLongBuffer();
/**
* Relative <i>get</i> method for reading a float value.
*
* <p> Reads the next four bytes at this buffer's current position,
* composing them into a float value according to the current byte order,
* and then increments the position by four. </p>
*
* @return The float value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than four bytes
* remaining in this buffer
*/
public abstract float getFloat();
/**
* Relative <i>put</i> method for writing a float
* value <i>(optional operation)</i>.
*
* <p> Writes four bytes containing the given float value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by four. </p>
*
* @param value The float value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than four bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putFloat(float value);
/**
* Absolute <i>get</i> method for reading a float value.
*
* <p> Reads four bytes at the given index, composing them into a
* float value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The float value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus three
*/
public abstract float getFloat(int index);
float getFloatUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, float[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing a float
* value <i>(optional operation)</i>.
*
* <p> Writes four bytes containing the given float value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The float value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus three
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putFloat(int index, float value);
void putFloatUnchecked(int index, float value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, float[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as a float buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* four, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new float buffer
*/
public abstract FloatBuffer asFloatBuffer();
/**
* Relative <i>get</i> method for reading a double value.
*
* <p> Reads the next eight bytes at this buffer's current position,
* composing them into a double value according to the current byte order,
* and then increments the position by eight. </p>
*
* @return The double value at the buffer's current position
* @throws BufferUnderflowException If there are fewer than eight bytes
* remaining in this buffer
*/
public abstract double getDouble();
/**
* Relative <i>put</i> method for writing a double
* value <i>(optional operation)</i>.
*
* <p> Writes eight bytes containing the given double value, in the
* current byte order, into this buffer at the current position, and then
* increments the position by eight. </p>
*
* @param value The double value to be written
* @return This buffer
* @throws BufferOverflowException If there are fewer than eight bytes
* remaining in this buffer
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putDouble(double value);
/**
* Absolute <i>get</i> method for reading a double value.
*
* <p> Reads eight bytes at the given index, composing them into a
* double value according to the current byte order. </p>
*
* @param index The index from which the bytes will be read
* @return The double value at the given index
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus seven
*/
public abstract double getDouble(int index);
double getDoubleUnchecked(int index) {
throw new UnsupportedOperationException();
}
void getUnchecked(int pos, double[] dst, int dstOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Absolute <i>put</i> method for writing a double
* value <i>(optional operation)</i>.
*
* <p> Writes eight bytes containing the given double value, in the
* current byte order, into this buffer at the given index. </p>
*
* @param index The index at which the bytes will be written
* @param value The double value to be written
* @return This buffer
* @throws IndexOutOfBoundsException If <tt>index</tt> is negative
* or not smaller than the buffer's limit,
* minus seven
* @throws ReadOnlyBufferException If this buffer is read-only
*/
public abstract ByteBuffer putDouble(int index, double value);
void putDoubleUnchecked(int index, double value) {
throw new UnsupportedOperationException();
}
void putUnchecked(int pos, double[] dst, int srcOffset, int length) {
throw new UnsupportedOperationException();
}
/**
* Creates a view of this byte buffer as a double buffer.
*
* <p> The content of the new buffer will start at this buffer's current
* position. Changes to this buffer's content will be visible in the new
* buffer, and vice versa; the two buffers' position, limit, and mark
* values will be independent.
*
* <p> The new buffer's position will be zero, its capacity and its limit
* will be the number of bytes remaining in this buffer divided by
* eight, and its mark will be undefined. The new buffer will be direct
* if, and only if, this buffer is direct, and it will be read-only if, and
* only if, this buffer is read-only. </p>
*
* @return A new double buffer
*/
public abstract DoubleBuffer asDoubleBuffer();
/**
* @hide
*/
public boolean isAccessible() {
return true;
}
/**
* @hide
*/
public void setAccessible(boolean value) {
throw new UnsupportedOperationException();
}
}
| 22,317 |
1,071 | package com.oracle.springapp.dao.impl;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.support.JdbcDaoSupport;
import org.springframework.stereotype.Repository;
import com.oracle.springapp.dao.AllTablesDAO;
import com.oracle.springapp.model.AllTables;
/**
 * Simple DAO implementation which uses Spring's JdbcDaoSupport to query table
 * metadata from the Oracle ALL_TABLES view.
*
*/
@Repository
public class AllTablesDAOImpl extends JdbcDaoSupport implements AllTablesDAO {
@Autowired
private DataSource dataSource;
@PostConstruct
public void initialize() {
setDataSource(dataSource);
System.out.println("Datasource used: " + dataSource);
}
@Override
public List<AllTables> getTableNames() {
final String sql = "SELECT owner, table_name, status, num_rows FROM all_tables where rownum < 20";
return getJdbcTemplate().query(sql,
(rs, rowNum) -> new AllTables(rs.getString("owner"),
rs.getString("table_name"),
rs.getString("status"),
rs.getInt("num_rows")
));
}
}
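// Hypothetical usage sketch (not part of the original file): with this @Repository scanned
// and a DataSource bean configured, a caller could simply inject the DAO interface.
//
//   @Autowired
//   private AllTablesDAO allTablesDAO;
//
//   List<AllTables> tables = allTablesDAO.getTableNames();  // up to 19 rows (rownum < 20)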
| 410 |
1,219 | <gh_stars>1000+
/*
* Copyright 2013 Bazaarvoice, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bazaarvoice.jolt;
import com.beust.jcommander.internal.Sets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class JsonUtilsTest {
private Diffy diffy = new Diffy();
private Map ab = ImmutableMap.builder().put( "a", "b" ).build();
private Map cd = ImmutableMap.builder().put( "c", "d" ).build();
private Map top = ImmutableMap.builder().put( "A", ab ).put( "B", cd ).build();
private String jsonSourceString = "{ " +
" \"a\": { " +
" \"b\": [ " +
" 0, " +
" 1, " +
" 2, " +
" 1.618 " +
" ] " +
" }, " +
" \"p\": [ " +
" \"m\", " +
" \"n\", " +
" { " +
" \"1\": 1, " +
" \"2\": 2, " +
" \"pi\": 3.14159 " +
" } " +
" ], " +
" \"x\": \"y\" " +
"}\n";
private Object jsonSource;
@BeforeClass
@SuppressWarnings("unchecked")
public void setup() throws IOException {
jsonSource = JsonUtils.jsonToObject(jsonSourceString);
// added for type cast checking
Set<String> aSet = Sets.newHashSet();
aSet.add("i");
aSet.add("j");
((Map) jsonSource).put("s", aSet);
}
@DataProvider
public Object[][] removeRecursiveCases() {
Map empty = ImmutableMap.builder().build();
Map barToFoo = ImmutableMap.builder().put( "bar", "foo" ).build();
Map fooToBar = ImmutableMap.builder().put( "foo", "bar" ).build();
return new Object[][] {
{ null, null, null },
{ null, "foo", null },
{ "foo", null, "foo" },
{ "foo", "foo", "foo" },
{ Maps.newHashMap(), "foo", empty },
{ Maps.newHashMap( barToFoo ), "foo", barToFoo },
{ Maps.newHashMap( fooToBar ), "foo", empty },
{ Lists.newArrayList(), "foo", ImmutableList.builder().build() },
{
Lists.newArrayList( ImmutableList.builder()
.add( Maps.newHashMap( barToFoo ) )
.build() ),
"foo",
ImmutableList.builder()
.add( barToFoo )
.build()
},
{
Lists.newArrayList( ImmutableList.builder()
.add( Maps.newHashMap( fooToBar ) )
.build() ),
"foo",
ImmutableList.builder()
.add( empty )
.build()
}
};
}
@Test(dataProvider = "removeRecursiveCases")
@SuppressWarnings("deprecation")
public void testRemoveRecursive(Object json, String key, Object expected) throws IOException {
JsonUtils.removeRecursive( json, key );
Diffy.Result result = diffy.diff( expected, json );
if (!result.isEmpty()) {
Assert.fail( "Failed.\nhere is a diff:\nexpected: " + JsonUtils.toJsonString( result.expected ) + "\n actual: " + JsonUtils.toJsonString( result.actual ) );
}
}
@Test
@SuppressWarnings("deprecation")
public void runFixtureTests() throws IOException {
String testFixture = "/jsonUtils/jsonUtils-removeRecursive.json";
@SuppressWarnings("unchecked")
List<Map<String, Object>> tests = (List<Map<String, Object>>) JsonUtils.classpathToObject( testFixture );
for ( Map<String,Object> testUnit : tests ) {
Object data = testUnit.get( "input" );
String toRemove = (String) testUnit.get( "remove" );
Object expected = testUnit.get( "expected" );
JsonUtils.removeRecursive( data, toRemove );
Diffy.Result result = diffy.diff( expected, data );
if (!result.isEmpty()) {
Assert.fail( "Failed.\nhere is a diff:\nexpected: " + JsonUtils.toJsonString(result.expected) + "\n actual: " + JsonUtils.toJsonString(result.actual));
}
}
}
@Test
public void validateJacksonClosesInputStreams() {
final Set<String> closedSet = new HashSet<>();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream( "{ \"a\" : \"b\" }".getBytes() ) {
@Override
public void close() throws IOException {
closedSet.add("closed");
super.close();
}
};
// Pass our wrapped InputStream to Jackson via JsonUtils.
Map<String,Object> map = JsonUtils.jsonToMap( byteArrayInputStream );
// Verify that we in fact loaded some data
Assert.assertNotNull( map );
Assert.assertEquals( 1, map.size() );
// Verify that the close method was in fact called on the InputStream
Assert.assertEquals( 1, closedSet.size() );
}
@DataProvider (parallel = true)
public Iterator<Object[]> coordinates() throws IOException {
List<Object[]> testCases = com.beust.jcommander.internal.Lists.newArrayList();
testCases.add(new Object[] { 0, new Object[] {"a", "b", 0}} );
testCases.add(new Object[] { 1, new Object[] {"a", "b", 1}} );
testCases.add(new Object[] { 2, new Object[] {"a", "b", 2}} );
testCases.add(new Object[] { 1.618, new Object[] {"a", "b", 3}} );
testCases.add(new Object[] { "m", new Object[] {"p", 0}} );
testCases.add(new Object[] { "n", new Object[] {"p", 1}} );
testCases.add(new Object[] { 1, new Object[] {"p", 2, "1"}} );
testCases.add(new Object[] { 2, new Object[] {"p", 2, "2"}} );
testCases.add(new Object[] { 3.14159, new Object[] {"p", 2, "pi"}} );
testCases.add(new Object[] { "y", new Object[] {"x"}} );
testCases.add(new Object[] { ((Map) jsonSource).get("a"), new Object[] {"a"}} );
testCases.add(new Object[] { ((Map)(((Map) jsonSource).get("a"))).get("b"), new Object[] {"a", "b"}} );
testCases.add(new Object[] { ((List)((Map)(((Map) jsonSource).get("a"))).get("b")).get(0), new Object[] {"a", "b", 0}} );
testCases.add(new Object[] { ((List)((Map)(((Map) jsonSource).get("a"))).get("b")).get(1), new Object[] {"a", "b", 1}} );
testCases.add(new Object[] { ((List)((Map)(((Map) jsonSource).get("a"))).get("b")).get(2), new Object[] {"a", "b", 2}} );
testCases.add(new Object[] { ((List)((Map)(((Map) jsonSource).get("a"))).get("b")).get(3), new Object[] {"a", "b", 3}} );
testCases.add(new Object[] { ((Map) jsonSource).get("p"), new Object[] {"p"}} );
testCases.add(new Object[] { ((List)(((Map) jsonSource).get("p"))).get(0), new Object[] {"p", 0}} );
testCases.add(new Object[] { ((List)(((Map) jsonSource).get("p"))).get(1), new Object[] {"p", 1}} );
testCases.add(new Object[] { ((List)(((Map) jsonSource).get("p"))).get(2), new Object[] {"p", 2}} );
testCases.add(new Object[] { ((Map)((List)(((Map) jsonSource).get("p"))).get(2)).get("1"), new Object[] {"p", 2, "1"}} );
testCases.add(new Object[] { ((Map)((List)(((Map) jsonSource).get("p"))).get(2)).get("2"), new Object[] {"p", 2, "2"}} );
testCases.add(new Object[] { ((Map)((List)(((Map) jsonSource).get("p"))).get(2)).get("pi"), new Object[] {"p", 2, "pi"}} );
testCases.add(new Object[] { ((Map) jsonSource).get("x"), new Object[] {"x"}} );
return testCases.iterator();
}
/**
* Method: navigate(Object source, Object... paths)
*/
@Test (dataProvider = "coordinates")
@SuppressWarnings("deprecation")
public void navigator(Object expected, Object[] path) throws Exception {
Object actual = JsonUtils.navigate(jsonSource, path);
Assert.assertEquals(actual, expected);
}
}
| 4,801 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Coudeville-sur-Mer","circ":"3ème circonscription","dpt":"Manche","inscrits":691,"abs":381,"votants":310,"blancs":31,"nuls":7,"exp":272,"res":[{"nuance":"REM","nom":"<NAME>","voix":186},{"nuance":"LR","nom":"<NAME>","voix":86}]} | 112 |
6,717 | //******************************************************************************
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
//******************************************************************************
#pragma once
#import "LinkedList.h"
#import <Starboard/SmartTypes.h>
#import <UIKit/NSLayoutAnchor.h>
#import <UIKit/NSLayoutXAxisAnchor.h>
#import <UIKit/NSLayoutYAxisAnchor.h>
#import <UIKit/NSLayoutDimension.h>
#import <UIKit/NSStringDrawingContext.h>
#import <UIKit/UILayoutSupport.h>
#import <UIKit/UITouch.h>
#import <UIKit/UIView.h>
#import "UWP/InteropBase.h"
#import "UWP/WindowsUIXamlControls.h"
#include "COMIncludes.h"
#import <winrt/Windows.Foundation.h>
#import <winrt/Windows.UI.Xaml.h>
#import <winrt/Windows.UI.Xaml.Input.h>
#include "COMIncludes_End.h"
@class UIWindow;
// Round subpixel values to be able to perform per-pixel UI placement/calculations
inline float doPixelRound(float f) {
return (float)(floorf((f * 2) + 0.5) / 2.0f);
}
// Round subpixel values to be able to perform per-pixel UI placement/calculations
inline CGSize doPixelRound(CGSize size) {
size.width = doPixelRound(size.width);
size.height = doPixelRound(size.height);
return size;
}
// Round subpixel values to be able to perform per-pixel UI placement/calculations
inline CGRect doPixelRound(CGRect frame) {
frame.origin.x = doPixelRound(frame.origin.x);
frame.origin.y = doPixelRound(frame.origin.y);
frame.size.width = doPixelRound(frame.size.width);
frame.size.height = doPixelRound(frame.size.height);
return frame;
}
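// Worked values for the rounding above (illustrative only): doPixelRound snaps to the nearest
// half pixel, e.g. doPixelRound(1.2f) == 1.0f and doPixelRound(1.3f) == 1.5f.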
class UIViewPrivateState : public LLTreeNode<UIViewPrivateState, UIView> {
public:
id superview; // id
StrongId<UIColor> backgroundColor;
id curTouch, curTouchEvent, curTouchSet;
uint32_t tag;
BOOL userInteractionEnabled;
BOOL multipleTouchEnabled;
UIViewContentMode contentMode;
StrongId<NSMutableArray> currentTouches;
StrongId<NSMutableArray> gestures;
StrongId<NSMutableArray> constraints;
bool _isChangingParent;
bool _constraintsNeedUpdate;
StrongId<NSMutableArray> _layoutGuides;
StrongId<NSLayoutDimension> _heightAnchor;
StrongId<NSLayoutDimension> _widthAnchor;
StrongId<NSLayoutXAxisAnchor> _centerXAnchor;
StrongId<NSLayoutXAxisAnchor> _leadingAnchor;
StrongId<NSLayoutXAxisAnchor> _leftAnchor;
StrongId<NSLayoutXAxisAnchor> _rightAnchor;
StrongId<NSLayoutXAxisAnchor> _trailingAnchor;
StrongId<NSLayoutYAxisAnchor> _bottomAnchor;
StrongId<NSLayoutYAxisAnchor> _centerYAnchor;
StrongId<NSLayoutYAxisAnchor> _firstBaselineAnchor;
StrongId<NSLayoutYAxisAnchor> _lastBaselineAnchor;
StrongId<NSLayoutYAxisAnchor> _topAnchor;
UIViewAutoresizing autoresizingMask;
CGSize _contentHuggingPriority;
CGSize _contentCompressionResistancePriority;
BOOL autoresizesSubviews;
BOOL translatesAutoresizingMaskIntoConstraints;
CGRect _resizeRoundingError;
winrt::event_token _pointerPressedEventRegistration = { };
winrt::event_token _pointerMovedEventRegistration = { };
winrt::event_token _pointerReleasedEventRegistration = { };
winrt::event_token _pointerCanceledEventRegistration = { };
winrt::event_token _pointerCaptureLostEventRegistration = { };
UIViewPrivateState(UIView* owner) {
setSelf(owner);
superview = nil;
backgroundColor = nil;
curTouch = nil;
curTouchEvent = nil;
curTouchSet = nil;
tag = 0;
userInteractionEnabled = YES;
multipleTouchEnabled = NO;
contentMode = UIViewContentModeScaleToFill;
currentTouches = [[NSMutableArray alloc] initWithCapacity:16];
gestures = [NSMutableArray new];
constraints = [NSMutableArray new];
translatesAutoresizingMaskIntoConstraints = YES;
_isChangingParent = false;
_constraintsNeedUpdate = false;
_contentHuggingPriority.height = 250.0f;
_contentHuggingPriority.width = 250.0f;
_contentCompressionResistancePriority.height = 750.0f;
_contentCompressionResistancePriority.width = 750.0f;
_layoutGuides = [NSMutableArray new];
memset(&_resizeRoundingError, 0, sizeof(_resizeRoundingError));
autoresizesSubviews = YES;
autoresizingMask = UIViewAutoresizingNone;
}
};
// This is a bit of a hack (since didMoveToWindow should only be in UIView-derived classes)
// but we use this to resign firstResponder-ship so carets stop blinking when moving between windows.
@interface UIResponder ()
- (void)didMoveToWindow;
@end
@interface UIView () {
@public
UIViewPrivateState* priv;
}
- (UITouchPhase)_processPointerEvent:(const winrt::Windows::UI::Xaml::Input::PointerRoutedEventArgs&)pointerEventArgs forTouchPhase:(UITouchPhase)touchPhase;
+ (void)_setPageTransitionForView:(UIView*)view fromLeft:(BOOL)fromLeft;
- (void)_applyConstraints;
- (void)_setShouldLayout;
+ (void)_setNestedAnimationsEnabled:(BOOL)enable;
- (void)__setContentsImage:(id)image;
- (UIWindow*)_getWindowInternal;
- (BOOL)_isEnabled;
- (winrt::Windows::UI::Xaml::FrameworkElement)_winrtXamlElement;
@end
@interface NSLayoutConstraint ()
- (void)_setView:(UIView*)view;
- (void)_printConstraint;
+ (void)_printConstraints:(NSArray*)constraints;
@end
@interface _UILayoutGuide : UIView <UILayoutSupport>
@end
@interface NSStringDrawingContext ()
- (void)_setInternalTotalBounds:(CGRect)rect;
@end
inline void RunSynchronouslyOnMainThread(void (^block)()) {
if ([NSThread isMainThread]) {
block();
} else {
dispatch_sync(dispatch_get_main_queue(), block);
}
}
| 2,272 |
370 | # -*- coding: utf-8 -*-
"""
Highmaps Demos
Detailed map, US counties: http://www.highcharts.com/maps/demo/us-counties
"""
from highcharts import Highmap
from highcharts.highmaps.common import RawJavaScriptText
H = Highmap()
"""
This example shows how to build the map of US unemployment rates at county level in April 2015,
following the highmaps demo: http://www.highcharts.com/maps/demo/us-counties
However, this example requires several steps in the JavaScript environment:
1. a JS function to get "mapline" data using the Highcharts geojson helper:
   Highcharts.geojson(Highcharts.maps['countries/us/us-all-all'], 'mapline')
   where Highcharts.maps holds the map data loaded from http://code.highcharts.com/mapdata/countries/us/us-all-all.js
2. a JS function to change the name of each map point using Highcharts.each
3. datasets for the maplines. Since these datasets are not defined in Python, they have to be added
   with "RawJavaScriptText('[lines[0]]')", which unquotes the Python string '[lines[0]]' in the JavaScript
   environment (from '[lines[0]]' to [lines[0]])
This is not an ideal way to generate the map with the python-highcharts API, because it still relies on a
lot of JavaScript. The example us-counties-2.py shows how to do the same thing in a pure Python environment.
"""
options = {
'chart': {
'borderWidth': 1,
'marginRight': 50
},
'title': {
'text': 'US Counties unemployment rates, April 2015'
},
'legend': {
'title': {
'text': 'Unemployment<br>rate',
'style': {
'color': "(Highcharts.theme && Highcharts.theme.textColor) || 'black'"
}
},
'layout': 'vertical',
'align': 'right',
'floating': True,
'valueDecimals': 0,
'valueSuffix': '%',
'backgroundColor': "(Highcharts.theme && Highcharts.theme.legendBackgroundColor) || 'rgba(255, 255, 255, 0.85)'",
'symbolRadius': 0,
'symbolHeight': 14
},
'mapNavigation': {
'enabled': True
},
'colorAxis': {
'dataClasses': [{
'from': 0,
'to': 2,
'color': "#F1EEF6"
}, {
'from': 2,
'to': 4,
'color': "#D4B9DA"
}, {
'from': 4,
'to': 6,
'color': "#C994C7"
}, {
'from': 6,
'to': 8,
'color': "#DF65B0"
}, {
'from': 8,
'to': 10,
'color': "#DD1C77"
}, {
'from': 10,
'color': "#980043"
}]
},
'plotOptions': {
'mapline': {
'showInLegend': False,
'enableMouseTracking': False
}
},
}
H.set_dict_options(options)
data_url = 'http://www.highcharts.com/samples/data/jsonp.php?filename=us-counties-unemployment.json&callback=?'
H.add_data_from_jsonp(data_url, 'json_data', 'map', 'Unemployment rate', joinBy = ['hc-key', 'code'],
tooltip = {
'valueSuffix': '%'
},
borderWidth = 0.5,
states = {
'hover': {
'color': '#bada55'
}
}
)
H.add_data_set(RawJavaScriptText('[lines[0]]'), 'mapline', 'State borders', color = 'white')
H.add_data_set(RawJavaScriptText('[lines[1]]'), 'mapline', 'Separator', color = 'gray')
H.set_map_source('http://code.highcharts.com/mapdata/countries/us/us-all-all.js', jsonp_map = False)
H.add_JSscript("var lines = Highcharts.geojson(Highcharts.maps['countries/us/us-all-all'], 'mapline');", 'head')
H.add_JSscript("Highcharts.each(geojson, function (mapPoint) {\
mapPoint.name = mapPoint.name + ', ' + mapPoint.properties['hc-key'].substr(3, 2);\
});", 'head')
H.htmlcontent
| 2,027 |
789 | package io.advantageous.qbit.jsend;
/**
* https://labs.omniti.com/labs/jsend
*/
public enum JSendStatus {
SUCCESS("success"), FAIL("fail"), ERROR("error");
private final String status;
JSendStatus(final String status) {
this.status = status;
}
@Override
public String toString() {
return status;
}
}
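// Illustrative use (hypothetical, following the JSend spec linked above): the lower-case
// string is what ends up in the JSON envelope's "status" field.
//
//   String status = JSendStatus.SUCCESS.toString();   // "success"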
| 139 |
751 | <reponame>xerothermic/vpp<filename>test/vpp_vxlan_gpe_tunnel.py
from vpp_interface import VppInterface
from vpp_papi import VppEnum
INDEX_INVALID = 0xffffffff
DEFAULT_PORT = 4790
UNDEFINED_PORT = 0
def find_vxlan_gpe_tunnel(test, src, dst, s_port, d_port, vni):
ts = test.vapi.vxlan_gpe_tunnel_v2_dump(INDEX_INVALID)
src_port = DEFAULT_PORT
if s_port != UNDEFINED_PORT:
src_port = s_port
dst_port = DEFAULT_PORT
if d_port != UNDEFINED_PORT:
dst_port = d_port
for t in ts:
if src == str(t.local) and \
dst == str(t.remote) and \
src_port == t.local_port and \
dst_port == t.remote_port and \
t.vni == vni:
return t.sw_if_index
return INDEX_INVALID
class VppVxlanGpeTunnel(VppInterface):
"""
VPP VXLAN GPE interface
"""
def __init__(self, test, src_addr, dst_addr, vni,
src_port=UNDEFINED_PORT, dst_port=UNDEFINED_PORT,
mcast_sw_if_index=INDEX_INVALID,
encap_vrf_id=None,
decap_vrf_id=None, protocol=3):
""" Create VXLAN GPE Tunnel interface """
super(VppVxlanGpeTunnel, self).__init__(test)
self.src = src_addr
self.dst = dst_addr
self.vni = vni
self.src_port = src_port
self.dst_port = dst_port
self.mcast_sw_if_index = mcast_sw_if_index
self.encap_vrf_id = encap_vrf_id
self.decap_vrf_id = decap_vrf_id
self.protocol = 3
def add_vpp_config(self):
reply = self.test.vapi.vxlan_gpe_add_del_tunnel_v2(
is_add=1, local=self.src, remote=self.dst, vni=self.vni,
local_port=self.src_port, remote_port=self.dst_port,
mcast_sw_if_index=self.mcast_sw_if_index,
encap_vrf_id=self.encap_vrf_id,
decap_vrf_id=self.decap_vrf_id,
protocol=self.protocol)
self.set_sw_if_index(reply.sw_if_index)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self.test.vapi.vxlan_gpe_add_del_tunnel_v2(
is_add=0, local=self.src, remote=self.dst, vni=self.vni,
local_port=self.src_port, remote_port=self.dst_port,
mcast_sw_if_index=self.mcast_sw_if_index,
encap_vrf_id=self.encap_vrf_id,
decap_vrf_id=self.decap_vrf_id,
protocol=self.protocol)
def query_vpp_config(self):
return (INDEX_INVALID != find_vxlan_gpe_tunnel(self._test,
self.src,
self.dst,
self.src_port,
self.dst_port,
self.vni))
def object_id(self):
return "vxlan-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
self.src, self.dst)
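# Hypothetical usage inside a test case (the pg interface attributes are assumed from the
# surrounding VPP test framework, not defined in this file):
#
#   tun = VppVxlanGpeTunnel(self, self.pg0.local_ip4, self.pg0.remote_ip4, vni=10)
#   tun.add_vpp_config()            # creates the tunnel and registers it for cleanup
#   assert tun.query_vpp_config()   # the tunnel is found via vxlan_gpe_tunnel_v2_dump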
| 1,747 |
831 | /*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.build.attribution.ui.panels;
import com.android.build.attribution.ui.data.CriticalPathPluginUiData;
import com.android.build.attribution.ui.data.TaskUiData;
import com.intellij.ui.components.JBLabel;
import com.intellij.ui.components.panels.HorizontalLayout;
import com.intellij.util.ui.ColorIcon;
import java.awt.Color;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import javax.swing.JPanel;
import javax.swing.SwingConstants;
import org.jetbrains.annotations.NotNull;
@SuppressWarnings("UseJBColor")
public interface CriticalPathChartLegend {
ChartColor MISC_COLOR = new ChartColor(new Color(0xBDBDBD));
ChartColor OTHER_TASKS_COLOR = new ChartColor(new Color(0xA2DFFE));
Color OTHER_TASKS_TEXT_COLOR = Color.BLACK;
ChartColor androidPluginColor = new ChartColor(new Color(0xE66F9A));
ChartColor externalPluginColor = new ChartColor(new Color(0x1A7AFF));
ChartColor buildsrcPluginColor = new ChartColor(new Color(0xA78BD9));
ChartColor[] categoricalGooglePalette = new ChartColor[]{
new ChartColor(new Color(0x97B1C0)),
new ChartColor(new Color(0xA2DFFE)),
new ChartColor(new Color(0xF79C6E)),
new ChartColor(new Color(0x74E288)),
new ChartColor(new Color(0xA78BD9)),
new ChartColor(new Color(0xE66F9A)),
new ChartColor(new Color(0x52E5CF)),
new ChartColor(new Color(0xDFCC9F)),
new ChartColor(new Color(0x0093D4)),
new ChartColor(new Color(0x158F7F)),
new ChartColor(new Color(0x824BDF)),
new ChartColor(new Color(0xC1571A)),
new ChartColor(new Color(0x335A99)),
new ChartColor(new Color(0xADAC38)),
new ChartColor(new Color(0xB8388E)),
new ChartColor(new Color(0x1A7AFF))
};
class ChartColor {
public final Color baseColor;
public final Color selectionColor;
public ChartColor(Color baseColor) {
this.baseColor = baseColor;
this.selectionColor = new Color(baseColor.getRed() / 2, baseColor.getGreen() / 2, baseColor.getBlue() / 2);
}
}
static JPanel createTasksLegendPanel() {
JPanel panel = new JPanel(new HorizontalLayout(10));
panel.add(new JBLabel("Android/Java/Kotlin Plugin", new ColorIcon(10, androidPluginColor.baseColor), SwingConstants.RIGHT));
panel.add(new JBLabel("Other Plugin", new ColorIcon(10, externalPluginColor.baseColor), SwingConstants.RIGHT));
panel.add(new JBLabel("Project Customization", new ColorIcon(10, buildsrcPluginColor.baseColor), SwingConstants.RIGHT));
return panel;
}
static ChartColor resolveTaskColor(TaskUiData taskData) {
switch (taskData.getSourceType()) {
case BUILD_SRC:
return buildsrcPluginColor;
case ANDROID_PLUGIN:
return androidPluginColor;
case THIRD_PARTY:
return externalPluginColor;
default:
throw new IllegalArgumentException("Unknown type: " + taskData.getSourceType());
}
}
PluginColorPalette pluginColorPalette = new PluginColorPalette();
class PluginColorPalette {
private int paletteCursor = 0;
private Map<String, ChartColor> pluginToColorMapping = new HashMap<>();
public void reset() {
paletteCursor = 0;
pluginToColorMapping.clear();
}
@NotNull
public ChartColor getColor(@NotNull String name) {
return pluginToColorMapping
        // clamp to the last palette entry so running out of colors cannot index past the array
        .computeIfAbsent(name, key -> categoricalGooglePalette[Math.min(paletteCursor++, categoricalGooglePalette.length - 1)]);
}
@NotNull
public ChartColor getOneColorForAll(@NotNull ArrayList<CriticalPathPluginUiData> aggregatedPlugins) {
ChartColor otherPluginsGroupColor = getColor("Other");
aggregatedPlugins.forEach(plugin -> pluginToColorMapping.put(plugin.getName(), otherPluginsGroupColor));
return otherPluginsGroupColor;
}
}
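  // Illustrative behaviour of the palette above (hypothetical plugin id): repeated names keep
  // their colour, and each new name consumes the next entry of categoricalGooglePalette.
  //
  //   pluginColorPalette.getColor("com.android.application");   // first palette colour
  //   pluginColorPalette.getColor("com.android.application");   // same colour is returned again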
}
| 1,528 |
366 | <filename>videos/manimTutorial/part5_Coordinates.py
from manimlib.imports import *
from manim_sandbox.utils.imports import *
from manim_sandbox.videos.manimTutorial.utils import *
from manim_projects.tony_useful.imports import *
numberline_t2c = {
"NumberLine": BLUE_D,
"x_min": ORANGE,
"x_max": ORANGE,
"include_ticks": ORANGE,
"include_tip": ORANGE,
"include_numbers": ORANGE,
"unit_size": ORANGE,
"tick_frequency": ORANGE,
"label_direction": ORANGE,
"n2p": BLUE_D,
"p2n": BLUE_D,
"number_to_point": BLUE_D,
"point_to_number": BLUE_D,
"add_numbers": BLUE_D,
"Dot": BLUE_D,
"get_center": BLUE_D,
}
axes_t2c = {
"Axes": BLUE_D,
"c2p": BLUE_D,
"p2c": BLUE_D,
"coords_to_point": BLUE_D,
"point_to_coords": BLUE_D,
"number_line_config": ORANGE,
"x_axis_config": ORANGE,
"y_axis_config": ORANGE,
"x(y)_axis_config": ORANGE,
"center_point": ORANGE,
"add_coordinates": BLUE_D,
"get_axis_labels": BLUE_D,
"y_min": ORANGE,
"y_max": ORANGE,
'"unit_size"': GOLD_D,
'"tick_frequency"': GOLD_D,
}
numberplane_t2c = {
"NumberPlane": BLUE_D,
"axis_config": ORANGE,
'"stroke_color"': GOLD_D,
"apply_function": BLUE_D,
"matrix": BLUE_D,
"prepare_for_nonlinear_transform": BLUE_D,
"ComplexPlane": BLUE_D,
"apply_complex_function": BLUE_D,
"lambda": BLUE,
"sin": BLUE,
"cos": BLUE,
"exp": BLUE,
"2j": average_color(BLUE, PINK),
}
pf_t2c = {
"ParametricFunction": BLUE_D,
"FunctionGraph": BLUE_D,
"t_min": ORANGE,
"t_max": ORANGE,
"def": BLUE_D,
"PI": average_color(BLUE, PINK),
"func2": DARK_GRAY,
}
class OpeningScene(Scene_):
def construct(self):
t2c = {"manim": average_color(PINK, RED),
"坐标系": BLUE, "图像": GREEN}
text_color = DARK_GRAY
font = "庞门正道标题体"
text_1 = Text("大家好!", font=font, color=text_color, size=2, t2c=t2c).to_edge(UP * 2, buff=1)
text_2 = Text("欢迎来到manim视频教程", font=font,
color=text_color, size=2, t2c=t2c).to_edge(UP * 3.2, buff=1)
text_3 = Text("这一期我们将学习manim中", font=font, color=text_color, size=2, t2c=t2c).to_edge(UP * 1.8, buff=1)
text_4 = Text("坐标系与图像的相关知识", font=font, color=text_color, size=2, t2c=t2c).to_edge(UP * 3., buff=1)
text_34, text_12 = VGroup(text_3, text_4), VGroup(text_1, text_2)
methods = [["NumberLine", "ticks", "tips", "numbers", "n2p", "p2n"],
["Axes", "labels", "coordinates", "c2p", "p2c"],
["NumerPlane", "ComplexPlane", "nonlinear_transform"],
["ParametricFunction", "FunctionGraph"]]
m_group_1 = VGroup(*[Text(tex + ', ', size=0.84, font='Consolas', stroke_width=2, color=BLUE_D) for tex in methods[0]]).arrange(RIGHT)
m_group_2 = VGroup(*[Text(tex + ', ', size=0.84, font='Consolas', stroke_width=2, color=BLUE_D) for tex in methods[1]]).arrange(RIGHT)
m_group_3 = VGroup(*[Text(tex, size=0.84, font='Consolas', stroke_width=2, color=BLUE_D) for tex in methods[2]]).arrange(RIGHT)
m_group_4 = VGroup(*[Text(tex, size=0.84, font='Consolas', stroke_width=2, color=BLUE_D) for tex in methods[3]]).arrange(RIGHT)
m_group = VGroup(m_group_1, m_group_2, m_group_3, m_group_4).arrange(DOWN, aligned_edge=LEFT, buff=0.42)
methodes_group = VGroup(*m_group_1, *m_group_2, *m_group_3, *m_group_4).next_to(text_34, DOWN, buff=0.5)
# self.add(picture)
self.wait(0.5)
self.play(Write(text_1))
self.wait(0.5)
self.play(WriteRandom(text_2), run_time=1.5)
self.wait(1.8)
self.play(ReplacementTransform(text_12, text_34), run_time=1.2)
self.wait(1.2)
self.play(FadeInRandom(methodes_group), run_time=2.4)
self.wait(2.6)
self.play(FadeOutRandom(methodes_group), FadeOutRandom(text_3),
FadeOutRandom(text_4), run_time=1.8)
self.wait(1)
class NumberLineTutorial(Scene_):
CONFIG = {
# "fade_all": False,
}
def start(self):
t2c = {"manim": GOLD,
"NumberLine": GREEN}
title = VGroup(
Text("Chapter Ⅰ.", font="Monaco for Powerline", color=BLUE_D, size=1, t2c=t2c),
Text("使用NumberLine构建数轴", font="Source Han Sans CN Bold", color=DARK_GRAY, size=1, t2c=t2c),
).arrange(RIGHT, buff=0.5, aligned_edge=DOWN)
self.wait()
self.play(DrawBorderThenFill(title))
self.wait(2)
self.play(FadeOutAndShiftDown(title))
def construct(self):
self.start()
CodeLine.CONFIG["t2c"].update(numberline_t2c)
CodeLine.CONFIG["size"] = 0.55
captions = [
"在manim中,可以使用NumberLine构建一个数轴",
"通过x_min和x_max调整数轴的最小值最大值",
"数轴默认附带刻度,可以通过设置include_ticks取消刻度",
"使用include_tip添加箭头,include_numbers添加默认刻度数字",
"unit_size表示数轴上的单位长度为manim中的多少单位",
"tick_frequency表示数轴上添加刻度的频率(每...个单位一个)",
"label_direction表示刻度数字在对应刻度的位置,默认为DOWN",
"除了使用默认刻度数字之外,还可以通过add_numbers手动添加需要的数字",
"构建了数轴之后,可以使用它的相关方法,最常用的是n2p和p2n",
"n2p是number_to_point的缩写,给出一个数字,返回数轴上这个点的坐标",
"p2n是point_to_number的缩写,与n2p正好相反",
]
self.caps = VGroup(
*[
CodeLine(cap, font='Source Han Sans CN Bold', size=0.64).to_edge(DOWN * 1.2)
for cap in captions
]
)
codes = CodeLines(
">>> axis = NumberLine(",
"~~~~~~~~x_min=-2, x_max=2,",
"~~~~~~~~include_ticks=False,",
"~~~~~~~~include_tip=True,",
"~~~~~~~~include_numbers=True,",
"~~~~~~~~unit_size=1.5,",
"~~~~~~~~tick_frequency=0.5,",
"~~~~~~~~label_direction=UP,",
"~~~~).shift(LEFT*3)",
">>> axis.add_numbers(-1, 2)",
">>> dot = Dot(axis.n2p(1))",
">>> axis.p2n(dot.get_center())",
"1",
)
codebg = CodeBackground(codes, buff=0.3)
VGroup(codes, codebg).to_edge(RIGHT, buff=0.6).shift(UP*0.3)
axis = NumberLine(color=BLACK, plot_depth=-2)
axis2 = NumberLine(color=BLACK, x_min=-2, x_max=2, plot_depth=-2).shift(LEFT*3)
axis3 = NumberLine(color=BLACK, x_min=-2, x_max=2, plot_depth=-2, include_ticks=False).shift(LEFT*3)
axis4 = NumberLine(color=BLACK, x_min=-2, x_max=2, plot_depth=-2, include_tip=True).shift(LEFT*3)
axis5 = NumberLine(color=BLACK, x_min=-2, x_max=2, plot_depth=-2, include_tip=True, include_numbers=True).shift(LEFT*3)
axis6 = NumberLine(color=BLACK, x_min=-2, x_max=2, unit_size=1.5, include_tip=True, include_numbers=True).shift(LEFT*3)
axis7 = NumberLine(color=BLACK, x_min=-2, x_max=2, unit_size=1.5, include_tip=True, include_numbers=True, tick_frequency=0.5).shift(LEFT*3)
axis8 = NumberLine(color=BLACK, x_min=-2, x_max=2, unit_size=1.5, include_tip=True, include_numbers=True, tick_frequency=0.5, label_direction=UP).shift(LEFT*3)
self.wait()
self.play(Write(self.caps[0]))
self.wait()
self.play(FadeInFromDown(codebg))
self.play(Write(VGroup(codes[0], codes[8][4])))
self.wait()
self.play(ShowCreation(axis))
self.wait(3)
self.next_caps()
self.play(Write(codes[1]))
self.play(Write(codes[8][5:]))
self.wait()
self.play(Transform(axis, axis2))
self.wait(2)
self.next_caps()
self.play(Write(codes[2]))
self.wait()
self.play(Transform(axis, axis3))
self.wait(2)
self.play(Transform(
codes[2][-6:], CodeLine("True,").move_to(codes[2][-6:], aligned_edge=LEFT)
))
self.wait()
self.play(Transform(axis, axis2))
self.wait()
self.next_caps()
self.play(Write(codes[3]))
self.play(Write(codes[4]))
self.wait()
self.play(FadeOut(axis), FadeIn(axis5))
self.wait(2)
self.next_caps()
self.play(Write(codes[5]))
self.wait()
self.play(Transform(axis5, axis6))
self.wait()
brace = Brace(Line(axis5.n2p(0), axis5.n2p(1)), UP, color=DARK_GRAY)
text = CodeLine("1.5", size=0.72).next_to(brace, UP)
self.play(FadeInFrom(VGroup(brace, text), UP))
self.wait(2)
self.play(FadeOut(VGroup(brace, text)))
self.next_caps()
self.play(Write(codes[6]))
self.wait()
self.play(Transform(axis5, axis7))
self.wait(3)
self.next_caps()
self.play(Write(codes[7]))
self.wait()
self.play(FadeOut(axis5), FadeIn(axis8))
self.wait(2)
self.play(Transform(
codes[4][-5:], CodeLine("False,").move_to(codes[4][-5:], aligned_edge=LEFT)
))
self.wait()
axis = NumberLine(color=BLACK, x_min=-2, x_max=2, unit_size=1.5, include_tip=True, tick_frequency=0.5, label_direction=UP).shift(LEFT*3)
self.play(FadeOut(axis8), FadeIn(axis))
self.wait(0.5)
self.next_caps()
self.play(Write(codes[9]))
self.wait()
self.play(FadeInFromDown(axis.get_number_mobjects(-1, 2)))
self.wait(3)
self.next_caps()
self.wait(2)
self.next_caps()
self.play(Write(codes[10]))
self.wait()
dot = Dot(axis.n2p(1), color=BLUE_D, radius=0.1)
self.play(Write(dot))
self.wait(3)
self.next_caps()
self.play(Write(codes[11]))
self.wait()
self.play(Write(codes[12]))
self.wait(4)
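# Quick reference sketch of the NumberLine API demonstrated in this chapter (values are
# illustrative; the keyword arguments match the ones used above):
#
#   axis = NumberLine(x_min=-2, x_max=2, unit_size=1.5, include_tip=True)
#   p = axis.n2p(1)    # scene-space coordinates of the number 1 on the axis
#   n = axis.p2n(p)    # back to the number, n == 1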
class AxesTutorial(Scene_):
CONFIG = {
# "fade_all": False,
}
def start(self):
t2c = {"manim": GOLD,
"Axes": GREEN}
title = VGroup(
Text("Chapter II.", font="Monaco for Powerline", color=BLUE_D, size=1, t2c=t2c),
Text("使用Axes构建坐标系", font="Source Han Sans CN Bold", color=DARK_GRAY, size=1, t2c=t2c),
).arrange(RIGHT, buff=0.5, aligned_edge=DOWN)
self.wait()
self.play(DrawBorderThenFill(title))
self.wait(2)
self.play(FadeOutAndShiftDown(title))
def construct(self):
self.start()
CodeLine.CONFIG["t2c"].update(numberline_t2c)
CodeLine.CONFIG["t2c"].update(axes_t2c)
CodeLine.CONFIG["size"] = 0.48
captions = [
"使用Axes构建一个直角坐标系,默认全屏带箭头",
"通过x_min,x_max,y_min,y_max来更改最大最小值",
"传入center_point可以指定原点在屏幕上的位置",
"通过number_line_config传入一个字典,表示两个轴的通用属性(见上部分)",
"还可以使用x(y)_axis_config传入字典表示某个轴的特有属性",
"Axes可以使用add_coordinates传入两个列表(表示x/y轴坐标)来手动添加两轴上的坐标数字",
"如果没有传入列表,则默认添加出所有数字",
"通过get_axis_labels方法返回xy轴的标签(一个VGroup包含两个label)",
"和NumberLine类似,Axes含有c2p和p2c两个常用方法",
"c2p即coords_to_point,根据Axes坐标系内坐标返回屏幕坐标系内该点坐标",
"p2c即point_to_coords,是c2p的逆操作",
]
self.caps = VGroup(
*[
CodeLine(cap, font='Source Han Sans CN Bold', size=0.64).to_edge(DOWN * 1.2)
for cap in captions
]
)
codes = CodeLines(
">>> axes = Axes(",
"~~~~~~~~x_min=-2, x_max=2,",
"~~~~~~~~y_min=-2, y_max=2,",
"~~~~~~~~center_point=LEFT*3",
"~~~~~~~~number_line_config={",
"~~~~~~~~~~~~\"unit_size\": 1.5,",
"~~~~~~~~},",
"~~~~~~~~x_axis_config={",
"~~~~~~~~~~~~\"tick_frequency\": 0.5",
"~~~~~~~~},",
"~~~~)",
">>> axes.add_coordinates(",
"~~~~~~~~[-1, 2], [-2, 1] )",
">>> axes.add_coordinates()",
">>> self.add(axes.get_axis_labels())",
">>> dot = Dot(axes.c2p(1, 2))",
">>> axes.p2c(dot.get_center())",
"(1, 2)",
buff=0.12,
)
codebg = CodeBackground(codes, buff=0.25)
VGroup(codes, codebg).to_edge(RIGHT, buff=0.7).shift(UP*0.35)
nlc = {"color": BLACK}
axes = Axes(number_line_config=nlc, plot_depth=-5)
axes1 = Axes(
number_line_config=nlc,
x_min=-2, x_max=2, y_min=-2, y_max=2,
center_point=LEFT*3,
plot_depth=-5,
)
nlc2 = {"color": BLACK, "unit_size": 1.5}
axes2 = Axes(
number_line_config=nlc2,
x_min=-2, x_max=2, y_min=-2, y_max=2,
center_point=LEFT*3,
plot_depth=-5,
)
axes3 = Axes(
number_line_config=nlc2,
x_min=-2, x_max=2, y_min=-2, y_max=2,
center_point=LEFT*3,
x_axis_config={"tick_frequency": 0.5},
plot_depth=-5,
)
nlc3 = {"color": BLACK, "unit_size": 1.5, "include_numbers": True}
axes4 = Axes(
number_line_config=nlc2,
x_min=-2, x_max=2, y_min=-2, y_max=2,
center_point=LEFT*3,
x_axis_config={"tick_frequency": 0.5},
plot_depth=-5,
).add_coordinates([-1, 2], [-2, 1], number_config={"color": BLACK})
# self.add(codebg, codes)
self.wait()
self.play(Write(self.caps[0]))
self.wait()
self.play(FadeInFromDown(codebg))
self.play(Write(VGroup(codes[0], codes[10])))
self.wait()
self.play(ShowCreation(axes))
self.wait(3)
self.next_caps()
self.play(Write(codes[1]))
self.play(Write(codes[2]))
self.wait(0.5)
self.next_caps()
self.play(Write(codes[3]))
self.wait()
self.play(Transform(axes, axes1))
self.wait(3)
self.next_caps()
self.play(Write(VGroup(codes[4], codes[6])))
self.wait(0.5)
self.play(Write(codes[5]))
self.wait()
self.play(Transform(axes, axes2))
self.wait(3)
self.next_caps()
self.play(Write(VGroup(codes[7], codes[9])))
self.wait(0.5)
self.play(Write(codes[8]))
self.wait()
self.play(Transform(axes, axes3))
self.next_caps()
self.play(Write(VGroup(codes[11], codes[12])))
self.wait()
nc = {"color": BLACK}
# labels1 = axes.get_coordinate_labels([-1, 2], [-2, 1], number_config=nc)
self.play(FadeOut(axes), FadeIn(axes4))
self.wait(3)
self.next_caps()
self.play(Write(codes[13]))
self.wait()
# labels2 = axes.get_coordinate_labels(number_config=nc)
axes = Axes(
number_line_config=nlc2,
x_min=-2, x_max=2, y_min=-2, y_max=2,
center_point=LEFT*3,
x_axis_config={"tick_frequency": 0.5},
plot_depth=-5,
).add_coordinates(number_config=nc)
self.play(FadeOut(axes4), FadeIn(axes))
self.wait(3)
self.next_caps()
self.play(Write(codes[14]))
self.wait()
xy_labels = axes.get_axis_labels().set_color(BLACK)
self.play(Write(xy_labels))
self.wait(3)
self.next_caps()
self.wait(2)
self.next_caps()
self.play(Write(codes[15]))
dot = Dot(axes.c2p(1, 2), color=BLUE_D, radius=0.1)
self.wait()
self.play(Write(dot))
self.wait(2)
self.next_caps()
self.play(Write(codes[16]))
self.wait(1)
self.play(Write(codes[17]))
self.wait(4)
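# Quick reference sketch of the Axes API from this chapter (illustrative values):
#
#   axes = Axes(x_min=-2, x_max=2, y_min=-2, y_max=2, center_point=LEFT*3)
#   p = axes.c2p(1, 2)        # point in the scene for the coordinates (1, 2)
#   coords = axes.p2c(p)      # back to (1, 2)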
class NumberPlaneTutorial(Scene_):
CONFIG = {
# "fade_all": False,
}
def start(self):
t2c = {"manim": GOLD,
"NumberPlane": GREEN}
title = VGroup(
Text("Chapter III.", font="Monaco for Powerline", color=BLUE_D, size=1, t2c=t2c),
Text("使用NumberPlane构建坐标网格", font="Source Han Sans CN Bold", color=DARK_GRAY, size=1, t2c=t2c),
).arrange(RIGHT, buff=0.5, aligned_edge=DOWN)
self.wait()
self.play(DrawBorderThenFill(title))
self.wait(2)
self.play(FadeOutAndShiftDown(title))
def construct(self):
self.start()
CodeLine.CONFIG["t2c"].update(numberline_t2c)
CodeLine.CONFIG["t2c"].update(axes_t2c)
CodeLine.CONFIG["t2c"].update(numberplane_t2c)
CodeLine.CONFIG["size"] = 0.48
captions = [
"NumberPlane构建的坐标系默认带网格,用法和Axes相同,但一般不做更改",
"同样使用add_coordinates添加数字标签,c2p与p2c也同样适用",
"NumberPlane常用于进行变换,可以直接使用apply_function(matrix)进行线性变换",
"在进行非线性变换前,需要调用prepare_for_nonlinear_transform方法",
"它有一个子类,ComplexPlane用于展示复平面,用法相同,但是纵轴标签为b·i的形式",
"使用n2p和p2n来转换坐标与复数(c2p/p2c同时适用)",
"使用apply_complex_function来施加复变换",
]
self.caps = VGroup(
*[
CodeLine(cap, font='Source Han Sans CN Bold', size=0.64).to_edge(DOWN * 1.2)\
.add_background_rectangle(color=WHITE, buff=0.1, opacity=0.85)
for cap in captions
]
)
codes = CodeLines(
">>> grid = NumberPlane(",
"~~~~~~~~axis_config={\"stroke_color\": BLACK}",
"~~~~)",
">>> grid.add_coordinates()",
">>> grid.apply_function(",
"~~~~~~~~lambda p: p+RIGHT*p[1]",
"~~~~)",
">>> grid.prepare_for_nonlinear_transform()",
">>> grid.apply_function(",
"~~~~~~~~lambda p: p + np.array([",
"~~~~~~~~~~~~np.sin(p[1]),",
"~~~~~~~~~~~~np.sin(p[0]),",
"~~~~~~~~~~~~0,",
"~~~~~~~~])",
"~~~~)",
buff=0.13
)
codebg = CodeBackground(codes, buff=0.25)
VGroup(codes, codebg).to_edge(RIGHT, buff=0.7).shift(UP*0.3)
grid = NumberPlane(axis_config={"stroke_color": BLACK}, plot_depth=-5)
self.wait()
self.play(Write(self.caps[0]))
self.wait()
self.play(FadeInFromDown(codebg))
self.play(Write(codes[:3]))
self.wait()
self.play(ShowCreation(grid))
self.wait(3)
self.next_caps()
self.play(Write(codes[3]))
self.wait()
labels = grid.get_coordinate_labels(number_config={"color": BLACK})
labels.set_plot_depth(-5)
self.play(Write(labels))
self.wait(3)
lines = VGroup()
lines.add(Line(codes[3][4:].get_left(), codes[3][4:].get_right(), color=GRAY, stroke_width=2.5))
self.play(
FadeOut(labels),
ShowCreation(lines[-1])
)
self.next_caps()
self.play(Write(VGroup(codes[4], codes[6])))
self.wait(0.5)
self.play(Write(codes[5]))
self.wait(1.5)
self.play(grid.apply_function,
lambda p: p + RIGHT*p[1],
run_time=2
)
self.wait(3)
lines.add(Line(codes[4][4:].get_left(), codes[4][4:].get_right(), color=GRAY, stroke_width=2.5))
lines.add(Line(codes[5][8:].get_left(), codes[5][8:].get_right(), color=GRAY, stroke_width=2.5))
lines.add(Line(codes[6][4:].get_left(), codes[6][4:].get_right(), color=GRAY, stroke_width=2.5))
self.play(
grid.apply_function,
lambda p: p - RIGHT*p[1],
ShowCreation(lines[1]),
ShowCreation(lines[2]),
ShowCreation(lines[3]),
run_time=1
)
self.next_caps()
self.play(Write(codes[7]))
self.play(WiggleOutThenIn(codes[7]))
self.wait(2)
self.play(Write(codes[8:]))
self.wait()
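        # prepare_for_nonlinear_transform() inserts extra anchor points into every grid line,
        # so the apply_function call below bends the lines smoothly instead of only moving their endpoints.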
grid.prepare_for_nonlinear_transform()
self.play(
grid.apply_function,
lambda p: p + np.array([
np.sin(p[1]),
np.sin(p[0]),
0,
]),
run_time=3,
)
self.wait(3)
self.play(FadeOut(grid), FadeOut(codes), FadeOut(lines), codebg.shift, RIGHT*0.3)
codes = CodeLines(
">>> grid = ComplexPlane(",
"~~~~~~~~axis_config={\"stroke_color\": BLACK}",
"~~~~)",
">>> grid.add_coordinates()",
">>> dot = Dot(grid.n2p(-3+2j))",
">>> grid.p2n(dot.get_center())",
"(-3+2j)",
">>> grid.prepare_for_nonlinear_transform()",
">>> grid.apply_complex_function(",
"~~~~~~~~lambda z: np.exp(z)",
"~~~~)",
buff=0.13
).next_to(codebg.get_corner(UL), DR, aligned_edge=UL, buff=0.25)
self.next_caps()
self.play(Write(codes[:3]))
self.wait()
grid = ComplexPlane(axis_config={"stroke_color": BLACK}, plot_depth=-5)
self.play(ShowCreation(grid))
self.wait(2)
self.play(Write(codes[3]))
self.wait()
labels = grid.get_coordinate_labels(number_config={"color": BLACK})
labels.set_plot_depth(-5)
self.play(Write(labels))
self.wait(3)
self.play(
FadeOut(labels),
ShowCreation(Line(codes[3][4:].get_left(), codes[3][4:].get_right(), color=GRAY, stroke_width=2.5))
)
self.next_caps()
self.play(Write(codes[4]))
self.wait()
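        # n2p converts a complex number into a point on the plane; p2n is the inverse (c2p/p2c behave the same way).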
dot = Dot(grid.n2p(-3+2j), radius=0.1, color=BLUE_D)
self.play(Write(dot))
self.wait(2)
self.play(Write(codes[5]))
self.wait()
self.play(Write(codes[6]), FadeOut(dot))
self.wait(3)
self.next_caps()
self.play(Write(codes[7:]))
self.wait()
grid.prepare_for_nonlinear_transform()
self.play(
grid.apply_complex_function,
lambda z: np.exp(z),
run_time=5
)
self.wait(4)
class ParametricFunctionTutorial(Scene_):
CONFIG = {
# "fade_all": False,
}
def start(self):
t2c = {"manim": GOLD,
"ParametricFunction": GREEN}
title = VGroup(
Text("Chapter IV.", font="Monaco for Powerline", color=BLUE_D, size=1, t2c=t2c),
Text("使用ParametricFunction绘制参数方程图像", font="Source Han Sans CN Bold", color=DARK_GRAY, size=1, t2c=t2c),
).arrange(RIGHT, buff=0.5, aligned_edge=DOWN)
self.wait()
self.play(DrawBorderThenFill(title))
self.wait(2)
self.play(FadeOutAndShiftDown(title))
def construct(self):
self.start()
CodeLine.CONFIG["t2c"].update(numberline_t2c)
CodeLine.CONFIG["t2c"].update(axes_t2c)
CodeLine.CONFIG["t2c"].update(numberplane_t2c)
CodeLine.CONFIG["t2c"].update(pf_t2c)
CodeLine.CONFIG["size"] = 0.55
captions = [
"绘制函数图像可以使用ParametricFunction",
"传入一个参数方程,自变量为参数,返回值为一个点,可以使用def定义函数或者lambda语句",
"传入t_min和t_max表示参数范围",
"它有一个子类FunctionGraph,传入一个函数,给出x返回y,并且默认x范围为画面宽度",
]
self.caps = VGroup(
*[
CodeLine(cap, font='Source Han Sans CN Bold', size=0.64).to_edge(DOWN * 1.2)
for cap in captions
]
)
codes = CodeLines(
">>> func = ParametricFunction(",
"~~~~~~~~lambda t: np.array([",
"~~~~~~~~~~~~2*np.sin(3*t)*np.cos(t),",
"~~~~~~~~~~~~2*np.sin(3*t)*np.sin(t),",
"~~~~~~~~~~~~0,",
"~~~~~~~~]),",
"~~~~~~~~t_min=0, t_max=2*PI",
"~~~~).shift(LEFT*3)",
">>> func2 = FunctionGraph(",
"~~~~~~~~lambda x: x**2",
"~~~~)",
buff=0.16
)
codebg = CodeBackground(codes, buff=0.25)
VGroup(codes, codebg).to_edge(RIGHT, buff=0.5).shift(UP*0.3)
# self.add(codebg, codes)
self.wait()
self.play(Write(self.caps[0]))
self.wait()
self.play(FadeInFromDown(codebg))
self.play(Write(VGroup(codes[0], codes[7])))
self.wait(2)
self.next_caps()
self.play(Write(codes[1:6]))
self.wait(3)
self.next_caps()
self.play(Write(codes[6]))
self.wait()
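        # A ValueTracker drives the parameter t: the Dot's updater re-evaluates the parametric point
        # every frame, and TracedPath records the dot's trail so the rose curve is drawn progressively.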
t = ValueTracker(0)
dot = Dot(color=BLACK, background_stroke_color=WHITE, background_stroke_width=2, radius=0.05)
dot.add_updater(lambda m: m.move_to(
np.array([
2*np.sin(3*t.get_value())*np.cos(t.get_value()),
2*np.sin(3*t.get_value())*np.sin(t.get_value()),
0
])+LEFT*3
))
path = TracedPath(dot.get_center, stroke_color=BLACK, stroke_width=4, plot_depth=-2)
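        # Progress readout: the triangle tick marks t/PI on a 0-2 number line, while the DecimalNumber shows the raw value of t.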
progress = NumberLine(x_min=0, x_max=2, unit_size=3, tick_frequency=1, color=BLACK).move_to(LEFT*3+DOWN*2.6)
tick = Triangle(fill_opacity=1).scale(0.2).rotate(PI)
tick.add_updater(lambda m: m.move_to(progress.n2p(t.get_value() / PI), aligned_edge=DOWN))
label = VGroup(
TexMobject("t=", color=BLACK),
DecimalNumber(0, color=BLACK),
).arrange(RIGHT).next_to(progress, RIGHT)
label[1].add_updater(lambda m: m.set_value(t.get_value()))
self.add(path)
self.play(Write(dot))
self.play(ShowCreation(progress), Write(tick), Write(label))
self.wait()
self.play(t.set_value, 2*PI, run_time=10, rate_func=linear)
self.wait(3)
self.play(FadeOut(VGroup(dot, progress, tick, label)))
self.next_caps()
self.play(Write(codes[8:]))
self.wait()
func2 = FunctionGraph(lambda x: x**2, color=GOLD, plot_depth=-5)
self.play(ShowCreation(func2))
self.wait(4)
class DownProgressBar(Scene_):
CONFIG = {
"fade_all": False,
}
def construct(self):
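        # Chapter name -> start timestamp ('MMSS'); each entry becomes a segment boundary on the bottom progress bar.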
methods_dict = {
'NumberLine': '0022',
'Axes': '0216',
'NumberPlane': '0359',
'ParametricFunction': '0547',
'a': '0643'
}
total_time = '0655'
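        # func_time converts an 'MMSS' string to seconds; func_loc maps that time to an
        # x-coordinate spanning the full frame width, measured from the left edge of the frame.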
func_time = lambda t: int(t[0:2]) * 60 + int(t[2:])
func_loc = lambda t: func_time(t)/func_time(total_time) * FRAME_WIDTH * RIGHT + FRAME_WIDTH * LEFT / 2
p_list = [FRAME_WIDTH * LEFT / 2]
for v in methods_dict.values():
p_list.append(func_loc(v))
p_list.append(func_loc(total_time))
colors = color_gradient([BLUE, PINK, RED, ORANGE, GREEN], len(methods_dict)+1)
lines = VGroup(*[Line(p_list[i], p_list[i+1]-0.02*RIGHT, color=colors[i], stroke_width=20) for i in range(len(methods_dict)+1)])
lines.to_edge(DOWN * 0.22, buff=1)
texts = VGroup(*[Text(t, color=WHITE, font='Consolas', size=0.33) for t in methods_dict.keys()], plot_depth=1)
texts[-1].set_color(colors[-1])
text = Text('空降', color=WHITE, font='庞门正道标题体', size=0.44).to_edge(DOWN * 0.132, buff=1).to_edge(LEFT, buff=0.53)
text[1].shift(RIGHT*0.03)
text[0].shift(LEFT*0.01)
for i in range(len(methods_dict)):
texts[i].move_to(lines[i+1])
self.add(lines, texts, text)
class VideoCover(Scene):
def construct(self):
background = Polygon(
LEFT_SIDE * 2 + BOTTOM, BOTTOM, LEFT_SIDE / 2 + TOP, LEFT_SIDE * 2 + TOP,
fill_opacity=0.7, fill_color=BLACK, stroke_width=0
).shift(RIGHT)
text = VGroup(
Text("manim教程", font="庞门正道标题体", color=BLUE, size=2).scale(0.9),
Text("第五讲", font="庞门正道标题体", color=BLUE, size=2).scale(1.1),
Text("坐标系统与图像", font="庞门正道标题体", color=ORANGE, size=2).scale(1.5)
).arrange(DOWN, aligned_edge=LEFT, buff=0.4)
text[2].shift(DOWN*0.4)
text.center().to_edge(LEFT, buff=0.8).shift(UP*0.5)
text2 = VGroup(
Text("manim教程", font="庞门正道标题体", color=BLUE, size=2).scale(0.9).set_stroke(width=12, opacity=0.4),
Text("第五讲", font="庞门正道标题体", color=BLUE, size=2).scale(1.1).set_stroke(width=12, opacity=0.4),
Text("坐标系统与图像", font="庞门正道标题体", color=ORANGE, size=2).scale(1.5).set_stroke(width=13, opacity=0.4)
).arrange(DOWN, aligned_edge=LEFT, buff=0.4)
text2[2].shift(DOWN*0.4)
text2.center().to_edge(LEFT, buff=0.8).shift(UP*0.5)
self.add(background, text2, text)
class PreView(Scene_):
CONFIG = {
"fade_all": False
}
def construct(self):
grid = NumberPlane(plot_depth=-5)
self.wait(0.5)
self.play(ShowCreation(grid, run_time=3, lag_ratio=0.1))
t = ValueTracker(0)
dot = Dot(color=BLACK, background_stroke_color=WHITE, background_stroke_width=2, radius=0.06)
dot.add_updater(lambda m: m.move_to(
np.array([
2*np.sin(3*t.get_value())*np.cos(t.get_value()),
2*np.sin(3*t.get_value())*np.sin(t.get_value()),
0
])
))
path = TracedPath(dot.get_center, stroke_color=BLACK, stroke_width=6, plot_depth=-2)
progress = NumberLine(x_min=0, x_max=2, unit_size=3, tick_frequency=1, color=BLACK).move_to(DOWN*2.6)
tick = Triangle(fill_opacity=1).scale(0.2).rotate(PI)
tick.add_updater(lambda m: m.move_to(progress.n2p(t.get_value() * 2 / PI), aligned_edge=DOWN))
label = VGroup(
TexMobject("t=", color=BLACK),
DecimalNumber(0, color=BLACK),
).arrange(RIGHT).next_to(progress, RIGHT)
label[1].add_updater(lambda m: m.set_value(t.get_value()))
self.add(path)
self.play(Write(dot))
self.play(ShowCreation(progress), Write(tick), Write(label))
self.wait(0.5)
self.play(t.set_value, PI, run_time=6, rate_func=linear)
self.wait()
self.play(FadeOut(VGroup(dot, progress, tick, label)))
func = FunctionGraph(lambda x: x**2-4, stroke_width=6, color=GOLD)
func2 = FunctionGraph(lambda x: 2*np.exp(1)**(-0.25*x**2), stroke_width=6, color=RED)
self.play(ShowCreation(func), run_time=2)
self.wait(0.5)
self.play(ShowCreation(func2), run_time=2)
self.wait()
title = VGroup(
Text("NumberLine()", font="Consolas", color=BLUE_D, t2c={"()": DARK_GRAY}, size=2),
Text("Axes()", font="Consolas", color=BLUE_D, t2c={"()": DARK_GRAY}, size=2),
Text("NumberPlane()", font="Consolas", color=BLUE_D, t2c={"()": DARK_GRAY}, size=2),
Text("ParametricFunction()", font="Consolas", color=BLUE_D, t2c={"()": DARK_GRAY}, size=2),
).arrange(DOWN, aligned_edge=LEFT).center()
bg = BackgroundRectangle(title, color=WHITE, fill_opacity=0.85, buff=0.25)
self.play(
FadeInFromDown(bg),
*[
FadeInFromDown(each) for each in title
],
run_time=2, lag_ratio=0.5
)
self.wait(3)
class NPBG(Scene_):
CONFIG = {
"fade_all": False,
}
def construct(self):
grid = NumberPlane(axis_config={"stroke_color": BLACK})
func = FunctionGraph(lambda x: x**2-4, stroke_width=6, color=GOLD)
func2 = FunctionGraph(lambda x: 2*np.exp(1)**(-0.25*x**2), stroke_width=6, color=RED)
self.add(grid, func2)
class NPBG2(Scene):
CONFIG = {
"camera_config": {
"background_color": "#EBEBEB"
}
}
def construct(self):
grid = NumberPlane(axis_config={"stroke_color": BLACK})
grid.prepare_for_nonlinear_transform()
grid.apply_function(
lambda p: p + np.array([
np.sin(p[1]),
np.sin(p[0]),
0,
])
)
self.add(grid)
class NPBG3(Scene):
CONFIG = {
"camera_config": {
"background_color": "#EBEBEB"
}
}
def construct(self):
grid = NumberPlane(axis_config={"stroke_color": BLACK})
grid.prepare_for_nonlinear_transform()
grid.apply_complex_function(
lambda z: np.exp(z)
)
self.add(grid)
class NPBG4(Scene):
CONFIG = {
"camera_config": {
"background_color": "#EBEBEB"
}
}
def construct(self):
rec = ScreenRectangle(color=DARK_GRAY, height=6)
self.add(rec)
| 18,475 |
4,812 | //===- NewGVN.h - Global Value Numbering Pass -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for LLVM's Global Value Numbering pass.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_NEWGVN_H
#define LLVM_TRANSFORMS_SCALAR_NEWGVN_H
#include "llvm/IR/PassManager.h"
namespace llvm {
class Function;
class NewGVNPass : public PassInfoMixin<NewGVNPass> {
public:
/// Run the pass over the function.
PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};
} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_NEWGVN_H
| 287 |
16,989 | /*
* Copyright 2020 The Bazel Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.devtools.build.android.desugar.typehierarchy;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.devtools.build.android.desugar.langmodel.MethodDeclInfo;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.annotation.Nullable;
/** An archive for class inheritance and overridable methods and memoizers of query results. */
@AutoValue
public abstract class TypeHierarchy {
private final ConcurrentMap<HierarchicalTypeKey, HierarchicalTypeQuery> typeQueryResults =
new ConcurrentHashMap<>();
private final ConcurrentMap<HierarchicalMethodKey, HierarchicalMethodQuery> methodQueryResults =
new ConcurrentHashMap<>();
abstract ImmutableMap<HierarchicalTypeKey, HierarchicalTypeKey> directSuperClassByType();
abstract ImmutableSetMultimap<HierarchicalTypeKey, HierarchicalTypeKey> directInterfacesByType();
public abstract ImmutableSetMultimap<HierarchicalTypeKey, HeadlessMethodKey>
headlessMethodKeysByType();
public abstract ImmutableMap<HierarchicalMethodKey, MethodDeclInfo> methodMetadata();
abstract boolean requireTypeResolutionComplete();
public static TypeHierarchyBuilder builder() {
return new AutoValue_TypeHierarchy.Builder();
}
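  // Returns the direct superclass, or null when the stored value is the sentinel key (no recorded superclass).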
@Nullable
final HierarchicalTypeKey getDirectSuperClass(HierarchicalTypeKey type) {
HierarchicalTypeKey superClass = directSuperClassByType().get(type);
return HierarchicalTypeKey.SENTINEL.equals(superClass) ? null : superClass;
}
final ImmutableSet<HierarchicalTypeKey> getDirectSuperInterfaces(HierarchicalTypeKey type) {
return directInterfacesByType().get(type);
}
final ImmutableSet<HeadlessMethodKey> getMethods(HierarchicalTypeKey type) {
return headlessMethodKeysByType().get(type);
}
final MethodDeclInfo getMethodMetadata(HierarchicalMethodKey method) {
MethodDeclInfo methodMetadata = methodMetadata().get(method);
if (requireTypeResolutionComplete()) {
      checkNotNull(
          methodMetadata,
          "Expected method data present under type-resolution-complete mode for %s.",
          method);
}
return methodMetadata;
}
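  // Query wrappers are memoized per key, so repeated hierarchy walks over the same type or method reuse cached results.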
final HierarchicalTypeQuery query(HierarchicalTypeKey type) {
return typeQueryResults.computeIfAbsent(type, this::createQuery);
}
final HierarchicalMethodQuery query(HierarchicalMethodKey method) {
return methodQueryResults.computeIfAbsent(method, this::createQuery);
}
private HierarchicalTypeQuery createQuery(HierarchicalTypeKey type) {
return HierarchicalTypeQuery.create(type, this);
}
private HierarchicalMethodQuery createQuery(HierarchicalMethodKey method) {
return HierarchicalMethodQuery.create(method, this);
}
@AutoValue.Builder
abstract static class TypeHierarchyBuilder {
abstract ImmutableMap.Builder<HierarchicalTypeKey, HierarchicalTypeKey>
directSuperClassByTypeBuilder();
abstract ImmutableSetMultimap.Builder<HierarchicalTypeKey, HierarchicalTypeKey>
directInterfacesByTypeBuilder();
abstract ImmutableSetMultimap.Builder<HierarchicalTypeKey, HeadlessMethodKey>
headlessMethodKeysByTypeBuilder();
abstract ImmutableMap.Builder<HierarchicalMethodKey, MethodDeclInfo> methodMetadataBuilder();
final TypeHierarchyBuilder putDirectSuperClass(
HierarchicalTypeKey declaredType, HierarchicalTypeKey superclass) {
directSuperClassByTypeBuilder().put(declaredType, superclass);
return this;
}
final TypeHierarchyBuilder putDirectInterfaces(
HierarchicalTypeKey declaredType, ImmutableSet<HierarchicalTypeKey> directInterfaces) {
directInterfacesByTypeBuilder().putAll(declaredType, directInterfaces);
return this;
}
final TypeHierarchyBuilder putMethod(MethodDeclInfo methodDecl) {
checkState(!methodDecl.isPrivateAccess());
HierarchicalTypeKey typeKey = HierarchicalTypeKey.create(methodDecl.owner());
HierarchicalMethodKey methodKey = HierarchicalMethodKey.from(methodDecl.methodKey());
headlessMethodKeysByTypeBuilder().put(typeKey, methodKey.headlessMethod());
methodMetadataBuilder().put(methodKey, methodDecl);
return this;
}
abstract TypeHierarchyBuilder setRequireTypeResolutionComplete(boolean value);
abstract TypeHierarchy build();
}
}
| 1,583 |