max_stars_count (int64, 301-224k) | text (string, lengths 6-1.05M) | token_count (int64, 3-727k) |
---|---|---|
852 | <reponame>lesreaper/jetson-reinforcement
/*
* http://github.com/dusty-nv/jetson-reinforcement
*/
#ifndef __GAZEBO_ROVER_PLUGIN_H__
#define __GAZEBO_ROVER_PLUGIN_H__
#include "deepRL.h"
#include <boost/bind.hpp>
#include <gazebo/gazebo.hh>
#include <gazebo/transport/transport.hh>
#include <gazebo/msgs/msgs.hh>
#include <gazebo/physics/physics.hh>
#include <gazebo/common/common.hh>
#include <stdio.h>
#include <iostream>
#include <gazebo/transport/TransportTypes.hh>
#include <gazebo/msgs/MessageTypes.hh>
#include <gazebo/common/Time.hh>
#include <errno.h>
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <pthread.h>
#include <ctype.h>
#include <stdbool.h>
#include <math.h>
#include <inttypes.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include "devInput.h"
namespace gazebo
{
/**
* RoverPlugin
*/
class RoverPlugin : public ModelPlugin
{
public:
RoverPlugin();
virtual void Load(physics::ModelPtr _parent, sdf::ElementPtr /*_sdf*/);
virtual void OnUpdate(const common::UpdateInfo & /*_info*/);
float resetPosition( uint32_t dof ); // center servo positions
bool createAgent();
bool updateAgent();
bool configJoint( const char* name );
bool updateJoints();
void onCameraMsg(ConstImageStampedPtr &_msg);
void onCollisionMsg(ConstContactsPtr &contacts);
static const uint32_t DOF = 2; // FWD/BACK, LEFT/RIGHT
private:
float vel[DOF]; // joint velocity control
float dT[3]; // IK delta theta
enum OperatingMode
{
USER_MANUAL,
/*USER_TRAIN,*/
AGENT_LEARN,
AGENT_RESET
/*,AGENT_AUTO*/
} opMode;
rlAgent* agent; // AI learning agent instance
//OpMode opMode; // robot operating mode
bool newState; // true if a new frame needs to be processed
bool newReward; // true if a new reward's been issued
bool endEpisode; // true if this episode is over
float rewardHistory; // value of the last reward issued
Tensor* inputState; // pyTorch input object to the agent
void* inputBuffer[2]; // [0] for CPU and [1] for GPU
size_t inputBufferSize;
size_t inputRawWidth;
size_t inputRawHeight;
float actionVelDelta; // amount of velocity offset caused to a joint by an action
int maxEpisodeLength; // maximum number of frames to win episode (or <= 0 for unlimited)
int episodeFrames; // frame counter for the current episode
int episodesCompleted;
int episodesWon;
int lastAction;
float lastGoalDistance;
float avgGoalDelta;
int runHistoryIdx;
int runHistoryMax;
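// circular record of recent episode outcomes, indexed by runHistoryIdx with up to runHistoryMax entries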
bool runHistory[20];
physics::ModelPtr model;
math::Pose originalPose;
InputDevices* HID;
event::ConnectionPtr updateConnection;
std::vector<physics::JointPtr> joints;
physics::JointController* j2_controller;
gazebo::transport::NodePtr cameraNode;
gazebo::transport::SubscriberPtr cameraSub;
gazebo::transport::NodePtr collisionNode;
gazebo::transport::SubscriberPtr collisionSub;
};
}
#endif
| 1,153 |
311 | package com.datadog.appsec.event.data;
import java.util.concurrent.atomic.AtomicInteger;
/** @param <T> the type of data associated with the address */
public final class Address<T> {
private static final int MAX_SERIAL = 0x3FFF;
private static final AtomicInteger NEXT_SERIAL = new AtomicInteger();
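// serials are assigned monotonically at construction; the assert in the
// constructor guards against exceeding the 14-bit MAX_SERIAL cap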
private final String key;
private final int serial;
// instances are created in KnownAddresses
Address(String key) {
this.key = key;
this.serial = NEXT_SERIAL.getAndIncrement();
assert (this.serial <= MAX_SERIAL);
}
public static int instanceCount() {
return NEXT_SERIAL.get();
}
public String getKey() {
return key;
}
public int getSerial() {
return serial;
}
// do not replace equals/hashcode
// equality is identity
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Address{");
sb.append("key='").append(key).append('\'');
sb.append('}');
return sb.toString();
}
}
| 321 |
1,936 | #include <memory>
#include <unordered_map>
#include <vector>
#include <Eigen/Core>
#include <aslam/cameras/camera-pinhole.h>
#include <aslam/cameras/distortion-fisheye.h>
#include <aslam/common/hash-id.h>
#include <aslam/frames/visual-frame.h>
#include <ceres-error-terms/parameterization/pose-param-jpl.h>
#include <ceres-error-terms/parameterization/quaternion-param-jpl.h>
#include <ceres-error-terms/visual-error-term.h>
#include <maplab-common/pose_types.h>
#include <maplab-common/quaternion-math.h>
#include <maplab-common/test/testing-entrypoint.h>
#include <maplab-common/test/testing-predicates.h>
#include <vi-map/landmark.h>
#include <vi-map/mission.h>
#include <vi-map/pose-graph.h>
#include <vi-map/vertex.h>
#include <vi-map/viwls-edge.h>
namespace map_optimization_legacy {
class ViwlsGraph : public testing::Test {
protected:
typedef aslam::FisheyeDistortion DistortionType;
typedef aslam::PinholeCamera CameraType;
virtual void SetUp() {
distortion_param_ = 1.0;
fu_ = 10;
fv_ = 10;
res_u_ = 640;
res_v_ = 480;
cu_ = res_u_ / 2.0;
cv_ = res_v_ / 2.0;
dummy_7d_0_.setZero();
dummy_7d_1_.setZero();
base_frame_.setZero();
G_q_B_ << 1, 0, 0, 0;
G_p_B_.setZero();
}
void solve();
void copyDataFromPosegraph();
void copyDataToPosegraph();
void constructCamera() {
Eigen::VectorXd distortion_parameters(1);
distortion_parameters << distortion_param_;
aslam::Distortion::UniquePtr distortion(
new DistortionType(distortion_parameters));
Eigen::VectorXd intrinsics(4);
intrinsics << fu_, fv_, cu_, cv_;
aslam::Camera::Ptr camera = std::shared_ptr<CameraType>(
new CameraType(intrinsics, res_u_, res_v_, distortion));
aslam::CameraId camera_id;
common::generateId(&camera_id);
camera->setId(camera_id);
std::vector<aslam::Camera::Ptr> camera_vector;
camera_vector.push_back(camera);
aslam::TransformationVector T_C_B_vector;
// We use the identity transformation for T_C_B from the default constructor.
T_C_B_vector.resize(kNumCameras);
aslam::NCameraId n_camera_id;
common::generateId(&n_camera_id);
cameras_.reset(
new aslam::NCamera(
n_camera_id, T_C_B_vector, camera_vector, "Test camera rig"));
}
vi_map::PoseGraph posegraph_;
std::vector<pose_graph::VertexId> vertex_ids_;
std::unordered_map<vi_map::LandmarkId, vi_map::Landmark::Ptr> landmarks_;
std::vector<vi_map::LandmarkId> landmark_ids_;
std::unordered_map<vi_map::MissionId, std::shared_ptr<vi_map::VIMission> >
missions_;
aslam::NCamera::Ptr cameras_;
double distortion_param_;
double fu_, fv_;
double cu_, cv_;
double res_u_, res_v_;
Eigen::Vector4d G_q_B_;
Eigen::Vector3d G_p_B_;
// Containers for 7-element pose objects.
Eigen::Matrix<double, 7, 1> base_frame_;
typedef std::unordered_map<pose_graph::VertexId, int> VertexIdRotationMap;
VertexIdRotationMap vertex_id_to_pose_idx_;
Eigen::Matrix<double, 7, Eigen::Dynamic> keyframe_poses_;
// Ordering is [orientation position] -> [xyzw xyz].
Eigen::Matrix<double, 7, 1> dummy_7d_0_;
Eigen::Matrix<double, 7, 1> dummy_7d_1_;
static constexpr int kNumCameras = 1;
static constexpr int kVisualFrameIndex = 0;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
void ViwlsGraph::copyDataFromPosegraph() {
pose_graph::VertexIdList all_vertex_ids;
posegraph_.getAllVertexIds(&all_vertex_ids);
keyframe_poses_.resize(Eigen::NoChange, all_vertex_ids.size());
unsigned int col_idx = 0;
for (pose_graph::VertexId vertex_id : all_vertex_ids) {
vi_map::Vertex* ba_vertex = dynamic_cast<vi_map::Vertex*>(
posegraph_.getVertexPtrMutable(vertex_id));
CHECK(ba_vertex) << "Couldn't cast to BA edge type.";
vertex_id_to_pose_idx_.emplace(vertex_id, col_idx);
keyframe_poses_.col(col_idx) << ba_vertex->get_q_M_I().coeffs(),
ba_vertex->get_p_M_I();
++col_idx;
}
base_frame_ << G_q_B_, G_p_B_;
}
void ViwlsGraph::copyDataToPosegraph() {
for (const VertexIdRotationMap::value_type& vertex_id_to_idx :
vertex_id_to_pose_idx_) {
vi_map::Vertex* ba_vertex = dynamic_cast<vi_map::Vertex*>(
posegraph_.getVertexPtrMutable(vertex_id_to_idx.first));
CHECK(ba_vertex) << "Couldn't cast to BA edge type.";
Eigen::Map<Eigen::Vector3d> position(ba_vertex->get_p_M_I_Mutable());
Eigen::Map<Eigen::Vector4d> rotation(ba_vertex->get_q_M_I_Mutable());
position = keyframe_poses_.col(vertex_id_to_idx.second).tail(3);
rotation = keyframe_poses_.col(vertex_id_to_idx.second).head(4);
}
G_q_B_ = base_frame_.head(4);
G_p_B_ = base_frame_.tail(3);
}
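// Builds a Ceres problem with one visual reprojection residual per keypoint
// observation. Landmarks, camera intrinsics/extrinsics, distortion, the base
// frame and the dummy blocks are held constant, so effectively only the
// keyframe poses are optimized.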
void ViwlsGraph::solve() {
ceres::Problem problem;
ceres::LocalParameterization* quaternion_parameterization =
new ceres_error_terms::JplQuaternionParameterization;
ceres::LocalParameterization* pose_parameterization =
new ceres_error_terms::JplPoseParameterization;
pose_graph::VertexIdList all_vertex_ids;
posegraph_.getAllVertexIds(&all_vertex_ids);
CHECK(!all_vertex_ids.empty()) << "No vertices on the posegraph";
copyDataFromPosegraph();
for (const pose_graph::VertexId& vertex_id : all_vertex_ids) {
vi_map::Vertex* ba_vertex = dynamic_cast<vi_map::Vertex*>(
posegraph_.getVertexPtrMutable(vertex_id));
CHECK(ba_vertex) << "Couldn't cast to BA edge type.";
const Eigen::Matrix2Xd& image_points_distorted =
ba_vertex->getVisualFrame(kVisualFrameIndex).getKeypointMeasurements();
const Eigen::VectorXd& image_points_uncertainties =
ba_vertex->getVisualFrame(kVisualFrameIndex)
.getKeypointMeasurementUncertainties();
const std::shared_ptr<CameraType> camera_ptr =
std::dynamic_pointer_cast<CameraType>(
ba_vertex->getCamera(kVisualFrameIndex));
CHECK(camera_ptr != nullptr);
// Retrieve keyframe pose idx.
VertexIdRotationMap::const_iterator it;
it = vertex_id_to_pose_idx_.find(vertex_id);
CHECK(it != vertex_id_to_pose_idx_.end());
int pose_idx = it->second;
for (int i = 0; i < image_points_distorted.cols(); ++i) {
ceres::CostFunction* visual_term_cost =
new ceres_error_terms::VisualReprojectionError<CameraType,
DistortionType>(
image_points_distorted.col(i), image_points_uncertainties(i),
ceres_error_terms::visual::VisualErrorType::kLocalMission,
camera_ptr.get());
problem.AddResidualBlock(
visual_term_cost, NULL,
landmarks_
.find(ba_vertex->getObservedLandmarkId(kVisualFrameIndex, i))
->second->get_p_B_Mutable(),
base_frame_.data(), dummy_7d_0_.data(), dummy_7d_1_.data(),
keyframe_poses_.col(pose_idx).data(),
cameras_->get_T_C_B_Mutable(kVisualFrameIndex)
.getRotation()
.toImplementation()
.coeffs()
.data(),
cameras_->get_T_C_B_Mutable(kVisualFrameIndex).getPosition().data(),
camera_ptr->getParametersMutable(),
camera_ptr->getDistortionMutable()->getParametersMutable());
problem.SetParameterBlockConstant(
landmarks_
.find(ba_vertex->getObservedLandmarkId(kVisualFrameIndex, i))
->second->get_p_B_Mutable());
}
// We fix the dummy parameter blocks because
// they have no meaning.
problem.SetParameterBlockConstant(dummy_7d_0_.data());
problem.SetParameterBlockConstant(dummy_7d_1_.data());
problem.SetParameterBlockConstant(
cameras_->get_T_C_B_Mutable(kVisualFrameIndex)
.getRotation()
.toImplementation()
.coeffs()
.data());
problem.SetParameterBlockConstant(
cameras_->get_T_C_B_Mutable(kVisualFrameIndex).getPosition().data());
problem.SetParameterBlockConstant(camera_ptr->getParametersMutable());
problem.SetParameterBlockConstant(
camera_ptr->getDistortionMutable()->getParametersMutable());
problem.SetParameterization(
keyframe_poses_.col(pose_idx).data(), pose_parameterization);
problem.SetParameterization(
cameras_->get_T_C_B_Mutable(kVisualFrameIndex)
.getRotation()
.toImplementation()
.coeffs()
.data(),
quaternion_parameterization);
}
problem.SetParameterBlockConstant(base_frame_.data());
problem.SetParameterization(base_frame_.data(), pose_parameterization);
ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
options.minimizer_progress_to_stdout = false;
options.max_num_iterations = 200;
options.gradient_tolerance = 1e-20;
options.function_tolerance = 1e-20;
options.parameter_tolerance = 1e-20;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
LOG(INFO) << summary.BriefReport();
copyDataToPosegraph();
}
TEST_F(ViwlsGraph, VisualBundleAdjTestCamPositionOptimization) {
constructCamera();
vi_map::MissionId mission_id;
mission_id.fromHexString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa0");
std::shared_ptr<vi_map::VIMission> mission_ptr(new vi_map::VIMission);
mission_ptr->setId(mission_id);
missions_.insert(std::make_pair(mission_id, mission_ptr));
vi_map::Landmark::Ptr landmark_ptr(new vi_map::Landmark);
vi_map::LandmarkId landmark_id;
landmark_id.fromHexString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1");
landmark_ptr->setId(landmark_id);
landmark_ptr->set_p_B(pose::Position3D(0, 0, 1));
landmark_ptr->set_p_B_Covariance(Eigen::Matrix<double, 3, 3>::Identity());
landmarks_.insert(std::make_pair(landmark_ptr->id(), landmark_ptr));
landmark_ids_.push_back(landmark_ptr->id());
Eigen::Matrix<double, 6, 1> imu_bias;
imu_bias << 1, 2, 3, 4, 5, 6;
Eigen::Matrix<double, 2, 1> points;
points << cu_, cv_;
Eigen::VectorXd uncertainties;
uncertainties.resize(1);
uncertainties << 0.5;
aslam::VisualFrame::DescriptorsT descriptors;
descriptors.resize(48, 1);
descriptors.setRandom();
pose_graph::VertexId vertex0;
vertex0.fromHexString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2");
vertex_ids_.push_back(vertex0);
aslam::FrameId frame_id;
frame_id.fromHexString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3");
int64_t frame_timestamp = 0;
// Base position and rotation.
G_q_B_ << 0, 0, 0, 1;
G_p_B_ << 0, 0, 0;
posegraph_.addVIVertex(
vertex0, imu_bias, points, uncertainties, descriptors, landmark_ids_,
mission_ptr->id(), frame_id, frame_timestamp, cameras_);
EXPECT_TRUE(posegraph_.vertexExists(vertex0));
Eigen::Vector3d vertex_position(0.2, -0.3, 0.2);
Eigen::Quaterniond vertex_orientation(1, 0, 0, 0);
vi_map::Vertex* ba_vertex =
dynamic_cast<vi_map::Vertex*>(posegraph_.getVertexPtrMutable(vertex0));
CHECK_NOTNULL(ba_vertex);
ba_vertex->set_q_M_I(vertex_orientation);
ba_vertex->set_p_M_I(vertex_position);
solve();
const Eigen::Quaterniond& G_q_I = ba_vertex->get_q_M_I();
const Eigen::Vector3d& G_p_I = ba_vertex->get_p_M_I();
Eigen::Matrix3d I_R_G = G_q_I.toRotationMatrix().transpose();
const Eigen::Vector3d C_p_fi = I_R_G * (landmark_ptr->get_p_B() - G_p_I);
// We don't really care about exact keyframe pose, but we do care
// about landmark location in the camera coordinate frame: it needs to be
// located on the optical axis.
EXPECT_NEAR_EIGEN(C_p_fi.normalized(), Eigen::Vector3d(0, 0, 1), 1e-9);
}
} // namespace map_optimization_legacy
MAPLAB_UNITTEST_ENTRYPOINT
| 4,813 |
492 | import click
from flask.cli import with_appcontext
from .extensions import guard, db
from .models import User
@click.command(name='create_database')
@with_appcontext
def create_database():
db.create_all()
@click.command(name='create_users')
@with_appcontext
def create_users():
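# seed three demo users; guard.hash_password stores password hashes rather than plaintext
# (guard is presumably a flask-praetorian Praetorian instance configured in extensions)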
one = User(username='One', password=guard.hash_password('<PASSWORD>'))
two = User(username='Two', password=guard.hash_password('<PASSWORD>'))
three = User(username='Three', password=guard.hash_password('<PASSWORD>'))
db.session.add_all([one, two, three])
db.session.commit() | 219 |
474 | <reponame>chenqian-dev/QNShortVideo-TuTu<filename>QNShortVideo-With-TuTu-iOS/PLShortVideoKit-master/Example/PLShortVideoKitDemo/UI+Tools/PLSClipAudioView.h
//
// PLSClipAudioView.h
// PLShortVideoKitDemo
//
// Created by suntongmian on 2017/6/21.
// Copyright © 2017 Pili Engineering, Qiniu Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#import <CoreMedia/CoreMedia.h>
@class PLSClipAudioView;
@protocol PLSClipAudioViewDelegate <NSObject>
@optional
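/// Notifies the delegate that the selected music time range has changed.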
- (void)clipAudioView:(PLSClipAudioView *)clipAudioView musicTimeRangeChangedTo:(CMTimeRange)musicTimeRange;
@end
@interface PLSClipAudioView : UIView
@property (weak, nonatomic) id<PLSClipAudioViewDelegate> delegate;
- (id)initWithMuiscURL:(NSURL *)url timeRange:(CMTimeRange)currentMusicTimeRange;
- (void)showAtView:(UIView *)view;
@end
| 316 |
1,143 | from typing import Dict, Optional, List, Any
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("text_classifier")
class TextClassifier(Model):
"""
Implements a basic text classifier:
1) Embed tokens using `text_field_embedder`
2) Seq2SeqEncoder, e.g. BiLSTM
3) Append the first and last encoder states
4) Final feedforward layer
Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
text_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_encoder = text_encoder
self.classifier_feedforward = classifier_feedforward
self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
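# pool by concatenating the final forward and backward encoder states
# (assumes the Seq2SeqEncoder is bidirectional)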
self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
initializer(self)
@overrides
def forward(self,
text: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
text : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self.text_field_embedder(text)
mask = util.get_text_field_mask(text)
encoded_text = self.text_encoder(embedded_text, mask)
pooled = self.pool(encoded_text, mask)
ff_hidden = self.classifier_feedforward(pooled)
logits = self.prediction_layer(ff_hidden)
class_probs = F.softmax(logits, dim=1)
output_dict = {"logits": logits}
if label is not None:
loss = self.loss(logits, label)
output_dict["loss"] = loss
# compute F1 per label
for i in range(self.num_classes):
metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
metric(class_probs, label)
self.label_accuracy(logits, label)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
class_probabilities = F.softmax(output_dict['logits'], dim=-1)
output_dict['class_probs'] = class_probabilities
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metric_dict = {}
sum_f1 = 0.0
for name, metric in self.label_f1_metrics.items():
metric_val = metric.get_metric(reset)
if self.verbose_metrics:
metric_dict[name + '_P'] = metric_val[0]
metric_dict[name + '_R'] = metric_val[1]
metric_dict[name + '_F1'] = metric_val[2]
sum_f1 += metric_val[2]
names = list(self.label_f1_metrics.keys())
total_len = len(names)
average_f1 = sum_f1 / total_len
metric_dict['average_F1'] = average_f1
metric_dict['accuracy'] = self.label_accuracy.get_metric(reset)
return metric_dict
| 2,203 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _DBAUI_DBADMINIMPL_HXX_
#define _DBAUI_DBADMINIMPL_HXX_
#ifndef _COM_SUN_STAR_LANG_XMULTISERVICEFACTORY_HPP_
#include <com/sun/star/lang/XMultiServiceFactory.hpp>
#endif
#ifndef _COM_SUN_STAR_CONTAINER_XNAMEACCESS_HPP_
#include <com/sun/star/container/XNameAccess.hpp>
#endif
#ifndef _COM_SUN_STAR_UNO_XNAMINGSERVICE_HPP_
#include <com/sun/star/uno/XNamingService.hpp>
#endif
#ifndef _COM_SUN_STAR_BEANS_XPROPERTYSET_HPP_
#include <com/sun/star/beans/XPropertySet.hpp>
#endif
#ifndef _COM_SUN_STAR_BEANS_PROPERTYVALUE_HPP_
#include <com/sun/star/beans/PropertyValue.hpp>
#endif
#ifndef _COM_SUN_STAR_SDBC_XCONNECTION_HPP_
#include <com/sun/star/sdbc/XConnection.hpp>
#endif
#ifndef _COM_SUN_STAR_SDBC_XDRIVER_HPP_
#include <com/sun/star/sdbc/XDriver.hpp>
#endif
#ifndef _COMPHELPER_STLTYPES_HXX_
#include <comphelper/stl_types.hxx>
#endif
#ifndef _DBAUI_DSNTYPES_HXX_
#include "dsntypes.hxx"
#endif
#ifndef _SFXITEMSET_HXX
#include <svl/itemset.hxx>
#endif
#ifndef _COM_SUN_STAR_FRAME_XMODEL_HPP_
#include <com/sun/star/frame/XModel.hpp>
#endif
#include <svl/poolitem.hxx>
class Window;
//.........................................................................
namespace dbaui
{
//.........................................................................
class DataSourceInfoConverter
{
::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > m_xFactory;
public:
DataSourceInfoConverter(const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >& _xFactory)
:m_xFactory(_xFactory)
{
}
void convert(const ::dbaccess::ODsnTypeCollection* _pCollection,const ::rtl::OUString& _sOldURLPrefix,const ::rtl::OUString& _sNewURLPrefix,const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >& _xDatasource);
};
class IItemSetHelper;
//========================================================================
//= ODbDataSourceAdministrationHelper
//========================================================================
class ODbDataSourceAdministrationHelper
{
public:
DECLARE_STL_MAP(sal_Int32, ::rtl::OUString, ::std::less< sal_Int32 >, MapInt2String);
private:
::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >
m_xORB; /// service factory
::com::sun::star::uno::Reference< ::com::sun::star::container::XNameAccess >
m_xDatabaseContext; /// database context we're working in
::com::sun::star::uno::Reference< ::com::sun::star::uno::XNamingService >
m_xDynamicContext; /// just another interface of the context ...
::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > m_xDatasource;
::com::sun::star::uno::Reference< ::com::sun::star::frame::XModel > m_xModel;
::com::sun::star::uno::Any m_aDataSourceOrName;
typedef ::std::set< ::rtl::OUString > StringSet;
typedef StringSet::const_iterator ConstStringSetIterator;
MapInt2String m_aDirectPropTranslator; /// translating property id's into names (direct properties of a data source)
MapInt2String m_aIndirectPropTranslator; /// translating property id's into names (indirect properties of a data source)
Window* m_pParent;
IItemSetHelper* m_pItemSetHelper;
public:
ODbDataSourceAdministrationHelper(const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >& _xORB
,Window* _pParent
,IItemSetHelper* _pItemSetHelper);
/** translate the current dialog SfxItems into driver relevant PropertyValues
@see successfullyConnected
*/
sal_Bool getCurrentSettings(::com::sun::star::uno::Sequence< ::com::sun::star::beans::PropertyValue >& _rDriverParams);
/** to be called if the settings got from getCurrentSettings have been used for successfully connecting
@see getCurrentSettings
*/
void successfullyConnected();
/// clear the password in the current data source's item set
void clearPassword();
inline ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > getORB() const { return m_xORB; }
::com::sun::star::uno::Reference< ::com::sun::star::container::XNameAccess > getDatabaseContext() const { return m_xDatabaseContext; }
::com::sun::star::uno::Reference< ::com::sun::star::uno::XNamingService > getDynamicContext() const { return m_xDynamicContext; }
/** creates a new connection. The caller is responsible to dispose it !!!!
*/
::std::pair< ::com::sun::star::uno::Reference< ::com::sun::star::sdbc::XConnection >,sal_Bool> createConnection();
/** return the corresponding driver for the selected URL
*/
::com::sun::star::uno::Reference< ::com::sun::star::sdbc::XDriver > getDriver();
::com::sun::star::uno::Reference< ::com::sun::star::sdbc::XDriver > getDriver(const ::rtl::OUString& _sURL);
/** returns the data source the dialog is currently working with
*/
::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > getCurrentDataSource();
// returns the Url of a database document
String getDocumentUrl(SfxItemSet& _rDest);
void setDataSourceOrName( const ::com::sun::star::uno::Any& _rDataSourceOrName );
/** extracts the connection type from the given set<p/>
The connection type is determined by the value of the DSN item, analyzed by the TypeCollection item.
*/
static ::rtl::OUString getDatasourceType( const SfxItemSet& _rSet );
/** returns the connection URL
@return
The connection URL
*/
String getConnectionURL() const;
/// fill the necessary information from the url line
void convertUrl(SfxItemSet& _rDest);
const MapInt2String& getIndirectProperties() const { return m_aIndirectPropTranslator; }
/** translates properties of an UNO data source into SfxItems
@param _rxSource
The data source
@param _rDest
The item set to fill.
*/
void translateProperties(
const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >& _rxSource,
SfxItemSet& _rDest);
/** translate SfxItems into properties of an UNO data source
@param _rSource
The item set to read from.
@param _rxDest
The data source to fill.
*/
void translateProperties(
const SfxItemSet& _rSource,
const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >& _rxDest);
sal_Bool saveChanges(const SfxItemSet& _rSource);
protected:
/** fill a data source info array with the settings from a given item set
*/
void fillDatasourceInfo(const SfxItemSet& _rSource, ::com::sun::star::uno::Sequence< ::com::sun::star::beans::PropertyValue >& _rInfo);
/// translate the given value into an SfxPoolItem, put this into the given set under the given id
void implTranslateProperty(SfxItemSet& _rSet, sal_Int32 _nId, const ::com::sun::star::uno::Any& _rValue);
/// translate the given SfxPoolItem into an <type scope="com.sun.star.Any">uno</type>
::com::sun::star::uno::Any implTranslateProperty(const SfxPoolItem* _pItem);
/// translate the given SfxPoolItem into an <type scope="com.sun.star.Any">uno</type>, set it (under the given name) on the given property set
void implTranslateProperty(const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >& _rxSet, const ::rtl::OUString& _rName, const SfxPoolItem* _pItem);
/** check if the data source described by the given set needs authentication<p/>
The return value depends on the data source type only.
*/
sal_Bool hasAuthentication(const SfxItemSet& _rSet) const;
#ifdef DBG_UTIL
::rtl::OString translatePropertyId( sal_Int32 _nId );
#endif
};
//.........................................................................
} // namespace dbaui
//.........................................................................
#endif // _DBAUI_DBADMINIMPL_HXX_
| 3,016 |
3,276 | <gh_stars>1000+
#!/usr/bin/env python3
# coding: utf-8
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import dlib
from utils.ddfa import ToTensorGjz, NormalizeGjz
import scipy.io as sio
from utils.inference import (
parse_roi_box_from_landmark,
crop_img,
predict_68pts,
predict_dense,
)
from utils.cv_plot import plot_kpt
from utils.render import get_depths_image, cget_depths_image, cpncc
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
# 0. open video
# vc = cv2.VideoCapture(str(args.video) if len(args.video) == 1 else args.video)
vc = cv2.VideoCapture(int(args.video) if args.video.isdigit() else args.video)
# 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)[
'state_dict'
]
model = getattr(mobilenet_v1, arch)(
num_classes=62
) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
# because the model is trained by multiple gpus, prefix module should be removed
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# 2. load dlib model for face detection and landmark used for face cropping
dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
face_regressor = dlib.shape_predictor(dlib_landmark_model)
face_detector = dlib.get_frontal_face_detector()
# 3. forward
success, frame = vc.read()
last_frame_pts = []
while success:
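# run the dlib face detector only while no landmarks are tracked;
# otherwise the landmarks predicted on the previous frame define the crop ROI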
if len(last_frame_pts) == 0:
rects = face_detector(frame, 1)
for rect in rects:
pts = face_regressor(frame, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
last_frame_pts.append(pts)
vertices_lst = []
for lmk in last_frame_pts:
roi_box = parse_roi_box_from_landmark(lmk)
img = crop_img(frame, roi_box)
img = cv2.resize(
img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR
)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
vertex = predict_dense(param, roi_box)
lmk[:] = pts68[:2]
vertices_lst.append(vertex)
pncc = cpncc(frame, vertices_lst, tri - 1) / 255.0
frame = frame / 255.0 * (1.0 - pncc)
cv2.imshow('3ddfa', frame)
cv2.waitKey(1)
success, frame = vc.read()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument(
'-v',
'--video',
default='0',
type=str,
help='video file path or opencv cam index',
)
parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
args = parser.parse_args()
main(args)
| 1,643 |
8,747 | <reponame>lovyan03/esp-idf
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/gpio_sig_map.h"
#include "soc/dedic_gpio_periph.h"
const dedic_gpio_signal_conn_t dedic_gpio_periph_signals = {
.module = -1,
.irq = -1,
.cores = {
[0] = {
.in_sig_per_channel = {
[0] = CPU_GPIO_IN0_IDX,
[1] = CPU_GPIO_IN1_IDX,
[2] = CPU_GPIO_IN2_IDX,
[3] = CPU_GPIO_IN3_IDX,
[4] = CPU_GPIO_IN4_IDX,
[5] = CPU_GPIO_IN5_IDX,
[6] = CPU_GPIO_IN6_IDX,
[7] = CPU_GPIO_IN7_IDX,
},
.out_sig_per_channel = {
[0] = CPU_GPIO_OUT0_IDX,
[1] = CPU_GPIO_OUT1_IDX,
[2] = CPU_GPIO_OUT2_IDX,
[3] = CPU_GPIO_OUT3_IDX,
[4] = CPU_GPIO_OUT4_IDX,
[5] = CPU_GPIO_OUT5_IDX,
[6] = CPU_GPIO_OUT6_IDX,
[7] = CPU_GPIO_OUT7_IDX,
}
},
},
};
| 748 |
688 | <filename>Java/libraries/recognizers-text-date-time/src/main/java/com/microsoft/recognizers/text/datetime/parsers/config/BaseDateParserConfiguration.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.microsoft.recognizers.text.datetime.parsers.config;
import com.google.common.collect.ImmutableMap;
import com.microsoft.recognizers.text.datetime.DateTimeOptions;
import com.microsoft.recognizers.text.datetime.config.BaseOptionsConfiguration;
import com.microsoft.recognizers.text.datetime.resources.BaseDateTime;
public abstract class BaseDateParserConfiguration extends BaseOptionsConfiguration implements ICommonDateTimeParserConfiguration {
protected BaseDateParserConfiguration(DateTimeOptions options) {
super(options);
}
@Override
public ImmutableMap<String, Integer> getDayOfMonth() {
return BaseDateTime.DayOfMonthDictionary;
}
}
| 268 |
1,555 | <reponame>dinithiravi/c
int
main()
{
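/* regression test: a value stored into an array element must read back unchanged */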
int arr[2];
arr[1] = 2;
if(arr[1] != 2)
return 1;
return 0;
}
| 61 |
841 | /*
* Tencent is pleased to support the open source community by making Pebble available.
* Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*
*/
#ifndef PEBBLE_DR_INTERNAL_FIELDPACK_H
#define PEBBLE_DR_INTERNAL_FIELDPACK_H
#include "framework/dr/protocol/binary_protocol.h"
#include "framework/dr/transport/virtual_transport.h"
namespace pebble { namespace dr { namespace internal {
class ArrayOutOfBoundsException : public std::exception {
};
// For internal use only
class PackerBuffer : public pebble::dr::transport::TVirtualTransport<PackerBuffer> {
public:
PackerBuffer()
: m_buf(NULL),
m_buf_bound(NULL),
m_buf_pos(NULL) {
}
uint32_t read(uint8_t* buf, uint32_t len);
uint32_t readAll(uint8_t* buf, uint32_t len);
void write(const uint8_t* buf, uint32_t len);
void reset(uint8_t *buf, uint32_t buf_len);
int32_t used();
private:
uint8_t *m_buf;
uint8_t *m_buf_bound;
uint8_t *m_buf_pos;
};
// For internal use only
class FieldPackGlobal {
protected:
FieldPackGlobal();
FieldPackGlobal(const FieldPackGlobal& rhs) {}
public:
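// Meyers-singleton accessor: one process-wide packer buffer and binary protocol instance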
static FieldPackGlobal* Instance() {
static FieldPackGlobal s_field_pack_instance;
return &s_field_pack_instance;
}
cxx::shared_ptr<pebble::dr::protocol::TBinaryProtocol> Protocol() {
return protocol;
}
void reset(uint8_t *buf, uint32_t buf_len);
int32_t used();
private:
cxx::shared_ptr<PackerBuffer> packer_buffer;
cxx::shared_ptr<pebble::dr::protocol::TBinaryProtocol> protocol;
};
} // namespace internal
} // namespace dr
} // namespace pebble
#endif
| 915 |
416 | <gh_stars>100-1000
/* NSOrderedCollectionDifference.h
Copyright (c) 2017-2019, Apple Inc. All rights reserved.
*/
@class NSArray<ObjectType>;
#import <Foundation/NSOrderedCollectionChange.h>
#import <Foundation/NSIndexSet.h>
#import <Foundation/NSEnumerator.h>
NS_ASSUME_NONNULL_BEGIN
/// Options supported by methods that produce difference objects.
typedef NS_OPTIONS(NSUInteger, NSOrderedCollectionDifferenceCalculationOptions) {
/// Insertion changes do not store a reference to the inserted object.
NSOrderedCollectionDifferenceCalculationOmitInsertedObjects = (1 << 0UL),
/// Insertion changes do not store a reference to the removed object.
NSOrderedCollectionDifferenceCalculationOmitRemovedObjects = (1 << 1UL),
/// Assume objects that were uniquely removed and inserted were moved.
/// This is useful when diffing based on identity instead of equality.
NSOrderedCollectionDifferenceCalculationInferMoves = (1 << 2UL)
} API_AVAILABLE(macosx(10.15), ios(13.0), watchos(6.0), tvos(13.0));
API_AVAILABLE(macosx(10.15), ios(13.0), watchos(6.0), tvos(13.0))
@interface NSOrderedCollectionDifference<ObjectType> : NSObject <NSFastEnumeration>
#ifndef __OBJC2__
{
@private
id _removeIndexes;
id _removeObjects;
id _insertIndexes;
id _insertObjects;
id _moves;
}
#endif // !__OBJC2__
/// Creates a new difference representing the changes in the parameter.
///
/// For clients interested in the difference between two collections, the
/// collection's differenceFrom method should be used instead.
///
/// To guarantee that instances are unambiguous and safe for compatible base
/// states, this method requires that its parameter conform to the following
/// requirements:
///
/// 1) All insertion offsets are unique
/// 2) All removal offsets are unique
/// 3) All associated indexes match a change with the opposite parity.
- (instancetype)initWithChanges:(NSArray<NSOrderedCollectionChange<ObjectType> *> *)changes;
- (instancetype)initWithInsertIndexes:(NSIndexSet *)inserts
insertedObjects:(nullable NSArray<ObjectType> *)insertedObjects
removeIndexes:(NSIndexSet *)removes
removedObjects:(nullable NSArray<ObjectType> *)removedObjects
additionalChanges:(NSArray<NSOrderedCollectionChange<ObjectType> *> *)changes NS_DESIGNATED_INITIALIZER;
- (instancetype)initWithInsertIndexes:(NSIndexSet *)inserts
insertedObjects:(nullable NSArray<ObjectType> *)insertedObjects
removeIndexes:(NSIndexSet *)removes
removedObjects:(nullable NSArray<ObjectType> *)removedObjects;
@property (strong, readonly) NSArray<NSOrderedCollectionChange<ObjectType> *> *insertions API_AVAILABLE(macosx(10.15), ios(13.0), watchos(6.0), tvos(13.0));
@property (strong, readonly) NSArray<NSOrderedCollectionChange<ObjectType> *> *removals API_AVAILABLE(macosx(10.15), ios(13.0), watchos(6.0), tvos(13.0));
@property (assign, readonly) BOOL hasChanges;
// Create a new difference by mapping over this difference's members
- (NSOrderedCollectionDifference<id> *)differenceByTransformingChangesWithBlock:(NSOrderedCollectionChange<id> *(NS_NOESCAPE ^)(NSOrderedCollectionChange<ObjectType> *))block;
// Returns a difference that is the inverse of the receiver.
//
// In other words, given a valid difference `diff` the array `a` is equal to
// [[a arrayByApplyingDifference:diff] arrayByApplyingDifference:diff.inverseDifference]
//
// To revert a chronological sequence of diffs, apply their inverses in reverse order.
- (instancetype)inverseDifference API_AVAILABLE(macosx(10.15), ios(13.0), watchos(6.0), tvos(13.0));
@end
NS_ASSUME_NONNULL_END
| 1,280 |
4,013 | <gh_stars>1000+
'''Configuration file'''
import os
'''Some values for the game UI'''
SCREENWIDTH = 640
SCREENHEIGHT = 480
BRICKWIDTH = 10
BRICKHEIGHT = 10
PADDLEWIDTH = 60
PADDLEHEIGHT = 12
BALLRADIUS = 8
'''Game asset paths'''
FONTPATH = os.path.join(os.getcwd(), 'resources/font/font.TTF')
HITSOUNDPATH = os.path.join(os.getcwd(), 'resources/audios/hit.wav')
BGMPATH = os.path.join(os.getcwd(), 'resources/audios/bgm.mp3')
LEVELROOTPATH = os.path.join(os.getcwd(), 'resources/levels')
LEVELPATHS = [os.path.join(LEVELROOTPATH, '%s.level' % str(i+1)) for i in range(len(os.listdir(LEVELROOTPATH)))]
'''Some colors'''
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
PINK = (212, 149, 174)
PURPLE = (168, 152, 191)
YELLOW = (245, 237, 162)
BLUE = (51, 170, 230)
AQUA = (182, 225, 225) | 384 |
1,921 | #pragma once
enum class confirm_prompt_type
{
cancel, // automatically answer 'no', i.e. disallow
prompt, // prompt
none, // automatically answer 'yes'
};
| 96 |
407 | package com.alibaba.tesla.appmanager.server.addon.req;
import com.alibaba.tesla.appmanager.domain.schema.CustomAddonSchema;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
import java.util.Map;
/**
* @ClassName: ApplyCustomAddonInstanceReq
* @Author: dyj
* @DATE: 2020-12-23
* @Description:
**/
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ApplyCustomAddonInstanceReq implements Serializable {
private static final long serialVersionUID = 2637914125447355377L;
/**
* Namespace ID
*/
private String namespaceId;
/**
* Addon ID
*/
private String addonId;
/**
* Creator
*/
private String creator;
/**
* Addon Version
*/
private String addonVersion;
/**
* Addon name (unique within a given namespaceId, addonId and addonVersion)
*/
private String addonName;
/**
* Addon attribute dictionary
*/
private Map<String, String> addonAttrs;
/**
* Custom Addon Schema
*/
private CustomAddonSchema customAddonSchema;
}
| 446 |
372 | <filename>dcerpc/ncklib/cnassoc.h<gh_stars>100-1000
/*
*
* (c) Copyright 1990 OPEN SOFTWARE FOUNDATION, INC.
* (c) Copyright 1990 HEWLETT-PACKARD COMPANY
* (c) Copyright 1990 DIGITAL EQUIPMENT CORPORATION
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this
* file for any purpose is hereby granted without fee, provided that
* the above copyright notices and this notice appears in all source
* code copies, and that none of the names of Open Software
* Foundation, Inc., Hewlett-Packard Company, or Digital Equipment
* Corporation be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Neither Open Software Foundation, Inc., Hewlett-
* Packard Company, nor Digital Equipment Corporation makes any
* representations about the suitability of this software for any
* purpose.
*
*/
/*
*/
#ifndef _CNASSOC_H
#define _CNASSOC_H 1
/*
**
** NAME
**
** cnassoc.h
**
** FACILITY:
**
** Remote Procedure Call (RPC)
**
** ABSTRACT:
**
** Interface to the NCA Connection Protocol Service's Association Service.
**
**
*/
/******************************************************************************/
/*
* R P C _ C N _ A S S O C _ A C B _ I N C _ R E F
*/
#ifdef RPC_CN_DEBUG_REFCNT
static void RPC_CN_ASSOC_ACB_INC_REF(rpc_cn_assoc_t *assoc)
{
(assoc)->assoc_acb_ref_count++;
RPC_DBG_PRINTF (rpc_e_dbg_general, RPC_C_CN_DBG_GENERAL,
("(RPC_CN_ASSOC_ACB_INC_REF) assoc->%x new refcnt->%d\n",
assoc, assoc->assoc_acb_ref_count));
}
#else
#define RPC_CN_ASSOC_ACB_INC_REF(assoc) (assoc)->assoc_acb_ref_count++;
#endif /* RPC_CN_DEBUG_REFCNT */
/*
* R P C _ C N _ A S S O C _ A C B _ D E C _ R E F
*/
#ifdef RPC_CN_DEBUG_REFCNT
static void RPC_CN_ASSOC_ACB_DEC_REF(rpc_cn_assoc_t *assoc)
{
(assoc)->assoc_acb_ref_count--;
RPC_DBG_PRINTF (rpc_e_dbg_general, RPC_C_CN_DBG_GENERAL,
("(RPC_CN_ASSOC_ACB_DEC_REF) assoc->%x new refcnt->%d\n",
assoc, assoc->assoc_acb_ref_count));
}
#else
#define RPC_CN_ASSOC_ACB_DEC_REF(assoc) (assoc)->assoc_acb_ref_count--;
#endif /* RPC_CN_DEBUG_REFCNT */
/*
* R P C _ C N _ A S S O C _ G R P
*/
#define RPC_CN_ASSOC_GRP(grp_id)\
(RPC_CN_LOCAL_ID_VALID (grp_id)) ?\
&rpc_g_cn_assoc_grp_tbl.assoc_grp_vector[(grp_id).parts.id_index] : NULL;
/*
* R P C _ C N _ A S S O C _ S Y N T A X _ E Q U A L
*/
#if (uuid_c_version == 1)
#define RPC_CN_ASSOC_SYNTAX_EQUAL(s1, s2, st)\
((memcmp (&((s1)->id), &((s2)->id), sizeof (dce_uuid_t)) == 0) &&\
(((s1)->version & 0xFFFF) == ((s2)->version & 0xFFFF)) &&\
(((s1)->version >> 16) == ((s2)->version >> 16)))
#else
error "***Make sure memcmp works on this version of UUIDs***"
#endif
/*
* R P C _ C N _ A S S O C _ C A L L
*/
#define RPC_CN_ASSOC_CALL(assoc) (assoc)->call_rep
/*
* R P C _ C N _ A S S O C _ M A X _ X M I T _ F R A G
*/
#define RPC_CN_ASSOC_MAX_XMIT_FRAG(assoc) (assoc)->assoc_max_xmit_frag
/*
* R P C _ C N _ A S S O C _ M A X _ R E C V _ F R A G
*/
#define RPC_CN_ASSOC_MAX_RECV_FRAG(assoc) (assoc)->assoc_max_recv_frag
/*
* R P C _ C N _ A S S O C _ C O N T E X T _ I D
*/
#define RPC_CN_ASSOC_CONTEXT_ID(assoc) (assoc)->assoc_pres_context_id
/*
* R P C _ C N _ A S S O C _ N D R _ F O R M A T
*/
#define RPC_CN_ASSOC_NDR_FORMAT(assoc) (assoc)->assoc_remote_ndr_format
/*
* R P C _ C N _ A S S O C _ S E C U R I T Y
*/
#define RPC_CN_ASSOC_SECURITY(assoc) &(assoc)->security
/*
* R P C _ C N _ A S S O C _ W A K E U P
*/
#define RPC_CN_ASSOC_WAKEUP(assoc) rpc__cn_assoc_queue_dummy_frag(assoc);
/*
* R P C _ C N _ A S S O C _ C A N C E L _ A N D _ W A K E U P
*/
#define RPC_CN_ASSOC_CANCEL_AND_WAKEUP(assoc)\
{\
RPC_CALL_LOCK (((rpc_call_rep_t *) assoc->call_rep));\
rpc__cthread_cancel (((rpc_call_rep_t *) assoc->call_rep));\
rpc__cn_assoc_queue_dummy_frag(assoc);\
RPC_CALL_UNLOCK (((rpc_call_rep_t *) assoc->call_rep));\
}
/******************************************************************************/
/*
* R P C _ _ C N _ A S S O C _ R E Q U E S T
*/
rpc_cn_assoc_t *rpc__cn_assoc_request (
rpc_cn_call_rep_p_t /* call_r */,
rpc_cn_binding_rep_p_t /* binding_r */,
rpc_if_rep_p_t /* if_r */,
rpc_transfer_syntax_t * /* syntax */,
unsigned16 * /* context_id */,
rpc_cn_sec_context_p_t * /* sec */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ L I S T E N
*/
rpc_cn_assoc_t *rpc__cn_assoc_listen (
rpc_socket_t /* newsock */,
unsigned_char_p_t /* endpoint */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ A L L O C
*/
PRIVATE void rpc__cn_assoc_alloc (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ D E A L L O C
*/
PRIVATE void rpc__cn_assoc_dealloc (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_call_rep_p_t /* call_rep */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ A B O R T
*/
void rpc__cn_assoc_abort (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ P O P _ C A L L
*/
PRIVATE rpc_cn_call_rep_t *rpc__cn_assoc_pop_call (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_call_rep_p_t /* call_rep */ );
/*
* R P C _ _ C N _ A S S O C _ P U S H _ C A L L
*/
PRIVATE void rpc__cn_assoc_push_call (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_call_rep_p_t /* call_r */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ Q U E U E _ F R A G
*/
PRIVATE void rpc__cn_assoc_queue_frag (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_fragbuf_p_t /* fragbuf */,
boolean32 /* signal */ );
/*
* R P C _ _ C N _ A S S O C _ Q U E U E _ D U M M Y _ F R A G
*/
PRIVATE void rpc__cn_assoc_queue_dummy_frag (
rpc_cn_assoc_p_t /* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ R E C E I V E _ F R A G
*/
PRIVATE void rpc__cn_assoc_receive_frag (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_fragbuf_p_t * /* frag_buf */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E N D _ F R A G
*/
PRIVATE void rpc__cn_assoc_send_frag (
rpc_cn_assoc_p_t /* assoc */,
rpc_iovector_p_t /* iovector */,
rpc_cn_sec_context_p_t /* sec */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E N D _ F R A G B U F
*/
PRIVATE void rpc__cn_assoc_send_fragbuf (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_fragbuf_p_t /* fragbuf */,
rpc_cn_sec_context_p_t /* sec */,
boolean32 /* freebuf */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S Y N T A X _ F R E E
*/
PRIVATE void rpc__cn_assoc_syntax_free (
rpc_cn_syntax_p_t */* syntax */ );
/*
* R P C _ _ C N _ A S S O C _ S Y N T A X _ N E G O T I A T E
*/
PRIVATE void rpc__cn_assoc_syntax_negotiate (
rpc_cn_assoc_p_t /* assoc */,
rpc_cn_pres_cont_list_p_t /* pres_cont_list */,
unsigned32 * /* size */,
rpc_cn_pres_result_list_t * /* pres_result_list */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S Y N T A X _ L K U P _ B Y _ I D
*/
PRIVATE void rpc__cn_assoc_syntax_lkup_by_id (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 /* context_id */,
rpc_cn_syntax_p_t * /* pres_context */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S Y N T A X _ L K U P _ B Y _ C L
*/
PRIVATE void rpc__cn_assoc_syntax_lkup_by_cl (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 /* call_id */,
rpc_cn_syntax_p_t * /* pres_context */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E C _ L K U P _ B Y _ I D
*/
PRIVATE void rpc__cn_assoc_sec_lkup_by_id (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 /* key_id */,
rpc_cn_sec_context_p_t * /* sec */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E C _ L K U P _ B Y _ CL
*/
PRIVATE void rpc__cn_assoc_sec_lkup_by_cl (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 /* call_id */,
rpc_cn_sec_context_p_t * /* sec */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E C _ A L L O C
*/
PRIVATE rpc_cn_sec_context_t *rpc__cn_assoc_sec_alloc (
rpc_auth_info_p_t /* info */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S E C _ F R E E
*/
PRIVATE void rpc__cn_assoc_sec_free (
rpc_cn_sec_context_p_t */* sec */ );
/*
* R P C _ _ C N _ A S S O C _ P O S T _ E R R O R
*/
PRIVATE void rpc__cn_assoc_post_error (
rpc_cn_assoc_p_t /* assoc */,
unsigned32 /* st */ );
/*
* R P C _ _ C N _ A S S O C _ S M _ P R O T O C O L _ E R R O R
*/
PRIVATE unsigned32 rpc__cn_assoc_sm_protocol_error (
pointer_t /* spc_struct */,
pointer_t /* event_param */,
pointer_t /* sm */ );
/*
* R P C _ _ C N _ A S S O C _ S T A T U S _ T O _ P R E J
*/
PRIVATE unsigned32 rpc__cn_assoc_status_to_prej (
unsigned32 /* prej */ );
/*
* R P C _ _ C N _ A S S O C _ P R E J _ T O _ S T A T U S
*/
PRIVATE unsigned32 rpc__cn_assoc_prej_to_status (
unsigned32 /* prej */ );
/*
* R P C _ _ C N _ A S S O C _ P P R O V _ T O _ S T A T U S
*/
PRIVATE unsigned32 rpc__cn_assoc_pprov_to_status (
unsigned32 /* pprov */ );
/*
* R P C _ _ C N _ A S S O C _ A C B _ C R E A T E
*/
void rpc__cn_assoc_acb_create ( rpc_cn_assoc_p_t/* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ A C B _ F R E E
*/
void rpc__cn_assoc_acb_free ( rpc_cn_assoc_p_t /* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ A C B _ D E A L L O C
*/
PRIVATE void rpc__cn_assoc_acb_dealloc (rpc_cn_assoc_p_t/* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ A L L O C
*/
PRIVATE rpc_cn_local_id_t rpc__cn_assoc_grp_alloc (
rpc_addr_p_t /* rpc_addr */,
rpc_transport_info_p_t /* prot_info */,
unsigned32 /* type */,
unsigned32 /* rem_id */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ D E A L L O C
*/
PRIVATE void rpc__cn_assoc_grp_dealloc (
rpc_cn_local_id_t /* grp_id */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ A D D _ A S S O C
*/
PRIVATE void rpc__cn_assoc_grp_add_assoc (
rpc_cn_local_id_t /* grp_id */,
rpc_cn_assoc_p_t /* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ R E M _ A S S O C
*/
PRIVATE void rpc__cn_assoc_grp_rem_assoc (
rpc_cn_local_id_t /* grp_id */,
rpc_cn_assoc_p_t /* assoc */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ L K U P _ B Y _ A D D R
*/
PRIVATE rpc_cn_local_id_t rpc__cn_assoc_grp_lkup_by_addr (
rpc_addr_p_t /* rpc_addr */,
rpc_transport_info_p_t /* transport_info */,
unsigned32 /* type */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ L K U P _ B Y _ R E M I D
*/
PRIVATE rpc_cn_local_id_t rpc__cn_assoc_grp_lkup_by_remid (
unsigned32 /* rem_id */,
unsigned32 /* type */,
rpc_addr_p_t /* rpc_addr */,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ L K U P _ B Y _ I D
*/
PRIVATE rpc_cn_local_id_t rpc__cn_assoc_grp_lkup_by_id (
rpc_cn_local_id_t /* grp_id */,
unsigned32 /* type */,
rpc_transport_info_p_t transport_info,
unsigned32 * /* st */ );
/*
* R P C _ _ C N _ A S S O C _ G R P _ T B L _ I N I T
*/
PRIVATE void rpc__cn_assoc_grp_tbl_init (void);
/*
* R P C _ _ C N _ G R P _ S M _ P R O T O C O L _ E R R O R
*/
PRIVATE unsigned32 rpc__cn_grp_sm_protocol_error (
pointer_t /* spc_struct */,
pointer_t /* event_param */,
pointer_t /* sm */);
#endif
| 6,843 |
304 | /**
* Copyright (c) 2015 by Contributors
*/
#include <gtest/gtest.h>
#include "common/spmt.h"
#include "loss/logit_loss_delta.h"
#include "loss/logit_loss.h"
#include "./utils.h"
using namespace difacto;
TEST(LogitLossDelta, Grad) {
// load and transpose data
dmlc::data::RowBlockContainer<unsigned> rowblk, transposed;
std::vector<feaid_t> uidx;
load_data(&rowblk, &uidx);
SpMT::Transpose(rowblk.GetBlock(), &transposed, uidx.size());
// init loss
KWArgs args = {{"compute_hession", "0"}};
LogitLossDelta loss; loss.Init(args);
LogitLoss ref_loss;
for (int i = 0; i < 10; ++i) {
SArray<real_t> w;
gen_vals(uidx.size(), -10, 10, &w);
SArray<real_t> ref_pred(100), ref_grad(w.size());
ref_loss.Predict(rowblk.GetBlock(), {SArray<char>(w)}, &ref_pred);
ref_loss.CalcGrad(rowblk.GetBlock(), {SArray<char>(ref_pred)}, &ref_grad);
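// recompute predictions and gradients block-by-block on the transposed
// (feature-major) data and check the norms match the reference row-major computation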
int nblk = 10;
SArray<real_t> pred(100), grad(w.size());
for (int b = 0; b < nblk; ++b) {
auto rg = Range(0, w.size()).Segment(b, nblk);
auto data = transposed.GetBlock().Slice(rg.begin, rg.end);
data.label = rowblk.GetBlock().label;
loss.Predict(data, {SArray<char>(w.segment(rg.begin, rg.end))}, &pred);
}
for (int b = 0; b < nblk; ++b) {
auto rg = Range(0, w.size()).Segment(b, nblk);
auto data = transposed.GetBlock().Slice(rg.begin, rg.end);
data.label = rowblk.GetBlock().label;
auto grad_seg = grad.segment(rg.begin, rg.end);
auto param = {SArray<char>(pred), {}, SArray<char>(w.segment(rg.begin, rg.end))};
loss.CalcGrad(data, param, &grad_seg);
}
EXPECT_LE(fabs(norm2(pred) - norm2(ref_pred)) / norm2(ref_pred), 1e-6);
EXPECT_LE(fabs(norm2(grad) - norm2(ref_grad)) / norm2(ref_grad), 1e-6);
}
}
TEST(LogitLossDelta, Hessien) {
// load and transpose data
dmlc::data::RowBlockContainer<unsigned> rowblk, transposed;
std::vector<feaid_t> uidx;
load_data(&rowblk, &uidx);
SpMT::Transpose(rowblk.GetBlock(), &transposed, uidx.size());
// init loss
KWArgs args = {{"compute_hession", "1"}};
LogitLossDelta loss; loss.Init(args);
// init weight
SArray<real_t> weight(47149);
for (size_t i = 0; i < weight.size(); ++i) {
weight[i] = i / 5e4;
}
SArray<real_t> w(uidx.size());
for (size_t i = 0; i < uidx.size(); ++i) {
w[i] = weight[uidx[i]];
}
int nblk = 10;
SArray<real_t> pred(100), grad(w.size()*2);
for (int b = 0; b < nblk; ++b) {
auto rg = Range(0, w.size()).Segment(b, nblk);
auto data = transposed.GetBlock().Slice(rg.begin, rg.end);
data.label = rowblk.GetBlock().label;
loss.Predict(data, {SArray<char>(w.segment(rg.begin, rg.end))}, &pred);
}
for (int b = 0; b < nblk; ++b) {
auto rg = Range(0, w.size()).Segment(b, nblk);
auto data = transposed.GetBlock().Slice(rg.begin, rg.end);
data.label = rowblk.GetBlock().label;
auto w_seg = w.segment(rg.begin, rg.end);
auto grad_seg = grad.segment(rg.begin*2, rg.end*2);
SArray<int> grad_pos(w_seg.size());
for (size_t i = 0; i < w_seg.size(); ++i) grad_pos[i] = 2*i;
auto param = {SArray<char>(pred), SArray<char>(grad_pos), SArray<char>(w_seg)};
loss.CalcGrad(data, param, &grad_seg);
}
SArray<real_t> H(w.size()), G(w.size());
for (size_t i = 0; i < w.size(); ++i) {
G[i] = grad[i*2];
H[i] = grad[i*2+1];
}
EXPECT_LT(fabs(norm2(G) - 90.5817), 1e-4);
EXPECT_LT(fabs(norm2(H) - 0.0424518), 1e-6);
}
| 1,597 |
709 | <filename>runtime/platform/s390.h
#define MLton_Platform_Arch_host "s390"
| 27 |
432 | <reponame>pvonmoradi/bl_iot_sdk
/* Tree inlining hooks and declarations.
Copyright (C) 2001-2018 Free Software Foundation, Inc.
Contributed by <NAME> <<EMAIL>>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_TREE_INLINE_H
#define GCC_TREE_INLINE_H
struct cgraph_edge;
/* Indicate the desired behavior wrt call graph edges. We can either
duplicate the edge (inlining, cloning), move the edge (versioning,
parallelization), or move the edges of the clones (saving). */
enum copy_body_cge_which
{
CB_CGE_DUPLICATE,
CB_CGE_MOVE,
CB_CGE_MOVE_CLONES
};
typedef int_hash <unsigned short, 0> dependence_hash;
/* Data required for function body duplication. */
struct copy_body_data
{
/* FUNCTION_DECL for function being inlined, or in general the
source function providing the original trees. */
tree src_fn;
/* FUNCTION_DECL for function being inlined into, or in general
the destination function receiving the new trees. */
tree dst_fn;
/* Callgraph node of the source function. */
struct cgraph_node *src_node;
/* Callgraph node of the destination function. */
struct cgraph_node *dst_node;
/* struct function for function being inlined. Usually this is the same
as DECL_STRUCT_FUNCTION (src_fn), but can be different if saved_cfg
and saved_eh are in use. */
struct function *src_cfun;
/* The VAR_DECL for the return value. */
tree retvar;
/* The VAR_DECL for the return bounds. */
tree retbnd;
/* Assign statements that need bounds copy. */
vec<gimple *> assign_stmts;
/* The map from local declarations in the inlined function to
equivalents in the function into which it is being inlined. */
hash_map<tree, tree> *decl_map;
/* Create a new decl to replace DECL in the destination function. */
tree (*copy_decl) (tree, struct copy_body_data *);
/* Current BLOCK. */
tree block;
  /* GIMPLE_CALL if va arg parameter packs should be expanded, or NULL
     if they should not be.  */
gcall *call_stmt;
/* Exception landing pad the inlined call lies in. */
int eh_lp_nr;
/* Maps region and landing pad structures from the function being copied
to duplicates created within the function we inline into. */
hash_map<void *, void *> *eh_map;
  /* We use the same mechanism to do all sorts of different things.  Rather
than enumerating the different cases, we categorize the behavior
in the various situations. */
/* What to do with call graph edges. */
enum copy_body_cge_which transform_call_graph_edges;
/* True if a new CFG should be created. False for inlining, true for
everything else. */
bool transform_new_cfg;
/* True if RETURN_EXPRs should be transformed to just the contained
MODIFY_EXPR. The branch semantics of the return will be handled
by manipulating the CFG rather than a statement. */
bool transform_return_to_modify;
/* True if the parameters of the source function are transformed.
Only true for inlining. */
bool transform_parameter;
/* True if this statement will need to be regimplified. */
bool regimplify;
/* True if trees should not be unshared. */
bool do_not_unshare;
/* > 0 if we are remapping a type currently. */
int remapping_type_depth;
/* A function to be called when duplicating BLOCK nodes. */
void (*transform_lang_insert_block) (tree);
/* Statements that might be possibly folded. */
hash_set<gimple *> *statements_to_fold;
/* Entry basic block to currently copied body. */
basic_block entry_bb;
/* For partial function versioning, bitmap of bbs to be copied,
otherwise NULL. */
bitmap blocks_to_copy;
/* Debug statements that need processing. */
vec<gdebug *> debug_stmts;
/* A map from local declarations in the inlined function to
equivalents in the function into which it is being inlined, where
the originals have been mapped to a value rather than to a
variable. */
hash_map<tree, tree> *debug_map;
/* A map from the inlined functions dependence info cliques to
equivalents in the function into which it is being inlined. */
hash_map<dependence_hash, unsigned short> *dependence_map;
/* A list of addressable local variables remapped into the caller
when inlining a call within an OpenMP SIMD-on-SIMT loop. */
vec<tree> *dst_simt_vars;
/* Do not create new declarations when within type remapping. */
bool prevent_decl_creation_for_types;
};
/* Weights of constructions for estimate_num_insns. */
struct eni_weights
{
/* Cost per call. */
unsigned call_cost;
/* Cost per indirect call. */
unsigned indirect_call_cost;
/* Cost per call to a target specific builtin */
unsigned target_builtin_call_cost;
/* Cost of "expensive" div and mod operations. */
unsigned div_mod_cost;
/* Cost for omp construct. */
unsigned omp_cost;
/* Cost for tm transaction. */
unsigned tm_cost;
/* Cost of return. */
unsigned return_cost;
/* True when time of statement should be estimated. Thus, the
cost of a switch statement is logarithmic rather than linear in number
of cases. */
bool time_based;
};
/* Weights that estimate_num_insns uses for heuristics in inlining. */
extern eni_weights eni_inlining_weights;
/* Weights that estimate_num_insns uses to estimate the size of the
produced code. */
extern eni_weights eni_size_weights;
/* Weights that estimate_num_insns uses to estimate the time necessary
to execute the produced code. */
extern eni_weights eni_time_weights;
/* Function prototypes. */
void init_inline_once (void);
extern tree copy_tree_body_r (tree *, int *, void *);
extern void insert_decl_map (copy_body_data *, tree, tree);
unsigned int optimize_inline_calls (tree);
tree maybe_inline_call_in_expr (tree);
bool tree_inlinable_function_p (tree);
tree copy_tree_r (tree *, int *, void *);
tree copy_decl_no_change (tree decl, copy_body_data *id);
int estimate_move_cost (tree type, bool);
int estimate_num_insns (gimple *, eni_weights *);
int estimate_num_insns_fn (tree, eni_weights *);
int estimate_num_insns_seq (gimple_seq, eni_weights *);
bool tree_versionable_function_p (tree);
extern tree remap_decl (tree decl, copy_body_data *id);
extern tree remap_type (tree type, copy_body_data *id);
extern gimple_seq copy_gimple_seq_and_replace_locals (gimple_seq seq);
extern bool debug_find_tree (tree, tree);
extern tree copy_fn (tree, tree&, tree&);
extern const char *copy_forbidden (struct function *fun);
extern tree copy_decl_for_dup_finish (copy_body_data *id, tree decl, tree copy);
/* This is in tree-inline.c since the routine uses
data structures from the inliner. */
extern tree build_duplicate_type (tree);
#endif /* GCC_TREE_INLINE_H */
| 2,317 |
2,921 | <reponame>m21917/assets
{
"name": "<NAME>",
"symbol": "MOMA",
"type": "ERC20",
"decimals": 18,
"description": "The multi-chain decentralized exchange ecosystem for non-fungible tokens",
"website": "https://mochi.market",
"explorer": "https://etherscan.io/token/0xbd1848e1491d4308Ad18287A745DD4DB2A4BD55B",
"status": "active",
"id": "0xbd1848e1491d4308Ad18287A745DD4DB2A4BD55B"
} | 188 |
922 | <reponame>jacektl/calamari
import logging
from tfaip.model.graphbase import GenericGraphBase
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.ops import ctc_ops as ctc
from calamari_ocr.ocr.model.graph import CalamariGraph
from calamari_ocr.ocr.model.params import ModelParams
logger = logging.getLogger(__name__)
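# EnsembleGraph combines several CalamariGraph "folds" into one voting model:
# prediction averages the blank-last softmax over all folds; training can mask
# out the fold assigned to a sample (masking_mode 0) or a randomly chosen fold
# (masking_mode 2), averaging the remaining n - 1 voters; validation runs each
# sample only through the fold it was assigned to.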
class EnsembleGraph(GenericGraphBase[ModelParams]):
def __init__(self, params: ModelParams, name="CalamariGraph", **kwargs):
super(EnsembleGraph, self).__init__(params, name=name, **kwargs)
self.fold_graphs = [CalamariGraph(params, f"voter_{i}") for i in range(params.ensemble)]
if self._params.masking_mode > 0:
logger.warning("Changed masking during training. This should only be used for evaluation!")
def make_outputs(self, blank_last_softmax, lstm_seq_len, complete_outputs):
softmax = tf.roll(blank_last_softmax, shift=1, axis=-1)
greedy_decoded = ctc.ctc_greedy_decoder(
inputs=tf.transpose(blank_last_softmax, perm=[1, 0, 2]),
sequence_length=tf.cast(K.flatten(lstm_seq_len), "int32"),
)[0][0]
outputs = {
"blank_last_logits": tf.math.log(blank_last_softmax),
"blank_last_softmax": blank_last_softmax,
"logits": tf.math.log(softmax),
"softmax": softmax,
"out_len": lstm_seq_len,
"decoded": tf.sparse.to_dense(greedy_decoded, default_value=-1) + 1,
}
for i, voter_output in enumerate(complete_outputs):
for k, v in voter_output.items():
outputs[f"{k}_{i}"] = v
return outputs
def build_prediction_graph(self, inputs, training=None):
# Prediction Graph: standard voting
complete_outputs = [self.fold_graphs[i].predict(inputs) for i in range(len(self.fold_graphs))]
lstm_seq_len = complete_outputs[0]["out_len"] # is the same for all children
softmax_outputs = tf.stack([out["blank_last_softmax"] for out in complete_outputs], axis=0)
blank_last_softmax = tf.reduce_mean(softmax_outputs, axis=0)
return self.make_outputs(blank_last_softmax, lstm_seq_len, complete_outputs)
def build_train_graph(self, inputs, targets, training=None):
if training is None:
training = K.learning_phase()
batch_size = tf.shape(inputs["img_len"])[0]
max_lstm_seq_len = self._params.compute_downscaled((tf.shape(inputs["img"])[1], 1))[0]
# Training/Validation graph
def training_step():
tf.debugging.assert_greater_equal(targets["fold_id"], 0)
complete_outputs = [self.fold_graphs[i].train(inputs, targets) for i in range(len(self.fold_graphs))]
lstm_seq_len = complete_outputs[0]["out_len"] # is the same for all children
softmax_outputs = tf.stack([out["blank_last_softmax"] for out in complete_outputs], axis=0)
# Training: Mask out network that does not contribute to a sample to generate strong voters
if self._params.masking_mode == 0:
# Fixed fold ID
mask = [tf.not_equal(i, targets["fold_id"]) for i in range(len(self.fold_graphs))]
softmax_outputs *= tf.cast(tf.expand_dims(mask, axis=-1), dtype="float32")
blank_last_softmax = tf.reduce_sum(softmax_outputs, axis=0) / (
len(self.fold_graphs) - 1
) # only n - 1 since one voter is 0
elif self._params.masking_mode == 1:
# No fold ID
# In this case, training behaves similar to prediction
blank_last_softmax = tf.reduce_mean(softmax_outputs, axis=0)
elif self._params.masking_mode == 2:
# Random fold ID
fold_id = tf.random.uniform(
minval=0,
maxval=len(self.fold_graphs),
dtype="int32",
shape=[batch_size, 1],
)
mask = [tf.not_equal(i, fold_id) for i in range(len(self.fold_graphs))]
softmax_outputs *= tf.cast(tf.expand_dims(mask, axis=-1), dtype="float32")
blank_last_softmax = tf.reduce_sum(softmax_outputs, axis=0) / (
len(self.fold_graphs) - 1
) # only n - 1 since one voter is 0
else:
raise NotImplementedError
return blank_last_softmax, lstm_seq_len, complete_outputs
def validation_step():
            # any dummy output is max length; to get the actual output length use reduce_min
def gen_empty_output(bs):
empty = tf.zeros(shape=[bs, max_lstm_seq_len, self._params.classes], dtype="float32")
return {
"blank_last_logits": empty,
"blank_last_softmax": empty,
"out_len": tf.repeat(max_lstm_seq_len, repeats=bs),
"logits": empty,
"softmax": empty,
"decoded": tf.zeros(shape=[bs, max_lstm_seq_len], dtype="int64"),
}
empty_output = gen_empty_output(1)
# Validation: Compute output for each graph but only for its own partition
# Per sample this is one CER which is then used e. g. for early stopping
def apply_single_model(batch):
batch = batch["out_len"] # Take any, all are batch id as input
single_batch_data = {k: [tf.gather(v, batch)] for k, v in inputs.items()}
complete_outputs = [
tf.cond(
tf.equal(i, targets["fold_id"][batch]),
lambda: self.fold_graphs[i].train(single_batch_data, None),
lambda: empty_output,
)
for i in range(len(self.fold_graphs))
]
outputs = {
k: tf.gather(
tf.stack([out[k] for out in complete_outputs]),
targets["fold_id"][batch][0],
)[0]
for k in empty_output.keys()
if k != "decoded"
}
paddings = [([0, 0], [0, max_lstm_seq_len - tf.shape(out["decoded"])[1]]) for out in complete_outputs]
outputs["decoded"] = tf.gather(
tf.stack(
[
tf.pad(out["decoded"], padding, "CONSTANT", constant_values=0)
for out, padding in zip(complete_outputs, paddings)
]
),
targets["fold_id"][batch][0],
)[0]
return outputs
complete_outputs = tf.map_fn(
apply_single_model,
{k: tf.range(batch_size, dtype=v.dtype) for k, v in empty_output.items()},
parallel_iterations=len(self.fold_graphs),
back_prop=False,
)
return (
complete_outputs["blank_last_softmax"],
complete_outputs["out_len"],
[complete_outputs] * len(self.fold_graphs),
)
if isinstance(training, bool) or isinstance(training, int):
blank_last_softmax, lstm_seq_len, complete_outputs = training_step() if training else validation_step()
else:
blank_last_softmax, lstm_seq_len, complete_outputs = tf.cond(training, training_step, validation_step)
return self.make_outputs(blank_last_softmax, lstm_seq_len, complete_outputs)
| 3,874 |
2,151 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ios/chrome/browser/ui/webui/signin_internals_ui_ios.h"
#include "base/hash.h"
#include "components/grit/components_resources.h"
#include "components/signin/core/browser/about_signin_internals.h"
#include "components/signin/core/browser/gaia_cookie_manager_service.h"
#include "ios/chrome/browser/browser_state/chrome_browser_state.h"
#include "ios/chrome/browser/chrome_url_constants.h"
#include "ios/chrome/browser/signin/about_signin_internals_factory.h"
#include "ios/chrome/browser/signin/gaia_cookie_manager_service_factory.h"
#include "ios/web/public/web_ui_ios_data_source.h"
#include "ios/web/public/webui/web_ui_ios.h"
namespace {
web::WebUIIOSDataSource* CreateSignInInternalsHTMLSource() {
web::WebUIIOSDataSource* source =
web::WebUIIOSDataSource::Create(kChromeUISignInInternalsHost);
source->SetJsonPath("strings.js");
source->AddResourcePath("signin_internals.js", IDR_SIGNIN_INTERNALS_INDEX_JS);
source->SetDefaultResource(IDR_SIGNIN_INTERNALS_INDEX_HTML);
source->UseGzip();
return source;
}
} // namespace
SignInInternalsUIIOS::SignInInternalsUIIOS(web::WebUIIOS* web_ui)
: WebUIIOSController(web_ui) {
ios::ChromeBrowserState* browser_state =
ios::ChromeBrowserState::FromWebUIIOS(web_ui);
DCHECK(browser_state);
web::WebUIIOSDataSource::Add(browser_state,
CreateSignInInternalsHTMLSource());
AboutSigninInternals* about_signin_internals =
ios::AboutSigninInternalsFactory::GetForBrowserState(browser_state);
if (about_signin_internals)
about_signin_internals->AddSigninObserver(this);
}
SignInInternalsUIIOS::~SignInInternalsUIIOS() {
ios::ChromeBrowserState* browser_state =
ios::ChromeBrowserState::FromWebUIIOS(web_ui());
DCHECK(browser_state);
AboutSigninInternals* about_signin_internals =
ios::AboutSigninInternalsFactory::GetForBrowserState(browser_state);
if (about_signin_internals)
about_signin_internals->RemoveSigninObserver(this);
}
bool SignInInternalsUIIOS::OverrideHandleWebUIIOSMessage(
const GURL& source_url,
const std::string& name,
const base::ListValue& content) {
if (name == "getSigninInfo") {
ios::ChromeBrowserState* browser_state =
ios::ChromeBrowserState::FromWebUIIOS(web_ui());
DCHECK(browser_state);
AboutSigninInternals* about_signin_internals =
ios::AboutSigninInternalsFactory::GetForBrowserState(browser_state);
// TODO(vishwath): The UI would look better if we passed in a dict with some
// reasonable defaults, so the about:signin-internals page doesn't look
// empty in incognito mode. Alternatively, we could force about:signin to
// open in non-incognito mode always (like about:settings for ex.).
if (about_signin_internals) {
const std::string& reply_handler =
"chrome.signin.getSigninInfo.handleReply";
web_ui()->CallJavascriptFunction(
reply_handler, *about_signin_internals->GetSigninStatus());
std::vector<gaia::ListedAccount> cookie_accounts;
GaiaCookieManagerService* cookie_manager_service =
ios::GaiaCookieManagerServiceFactory::GetForBrowserState(
browser_state);
std::vector<gaia::ListedAccount> signed_out_accounts;
if (cookie_manager_service->ListAccounts(
&cookie_accounts, &signed_out_accounts,
"ChromiumSignInInternalsUIIOS")) {
about_signin_internals->OnGaiaAccountsInCookieUpdated(
cookie_accounts, signed_out_accounts,
GoogleServiceAuthError(GoogleServiceAuthError::NONE));
}
return true;
}
}
return false;
}
void SignInInternalsUIIOS::OnSigninStateChanged(
const base::DictionaryValue* info) {
const std::string& event_handler = "chrome.signin.onSigninInfoChanged.fire";
web_ui()->CallJavascriptFunction(event_handler, *info);
}
void SignInInternalsUIIOS::OnCookieAccountsFetched(
const base::DictionaryValue* info) {
web_ui()->CallJavascriptFunction("chrome.signin.onCookieAccountsFetched.fire",
*info);
}
| 1,624 |
711 | <gh_stars>100-1000
package io.apiman.gateway.platforms.servlet.auth.tls;
import io.apiman.common.config.options.TLSOptions;
import io.apiman.gateway.engine.IApiConnection;
import io.apiman.gateway.engine.IApiConnectionResponse;
import io.apiman.gateway.engine.IApiConnector;
import io.apiman.gateway.engine.async.IAsyncResult;
import io.apiman.gateway.engine.auth.RequiredAuthType;
import io.apiman.gateway.engine.beans.Api;
import io.apiman.gateway.engine.beans.ApiRequest;
import io.apiman.gateway.platforms.servlet.connectors.ConnectorConfigImpl;
import io.apiman.gateway.platforms.servlet.connectors.HttpConnectorFactory;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Paths;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class AliasedCertTest {
private Server server;
private HttpConfiguration http_config;
private Map<String, String> config = new HashMap<>();
//private java.security.cert.X509Certificate clientCertUsed;
protected BigInteger clientSerial;
/**
     * With thanks to the assistance of http://stackoverflow.com/b/20056601/2766538
* @throws Exception any exception
*/
@Before
public void setupJetty() throws Exception {
server = new Server();
server.setStopAtShutdown(true);
http_config = new HttpConfiguration();
http_config.setSecureScheme("https");
SslContextFactory.Server sslContextFactory = new SslContextFactory.Server();
sslContextFactory.setKeyStorePath(getResourcePath("2waytest/aliased_keys/service_ks.jks"));
sslContextFactory.setKeyStorePassword("<PASSWORD>");
sslContextFactory.setKeyManagerPassword("<PASSWORD>");
sslContextFactory.setTrustStorePath(getResourcePath("2waytest/aliased_keys/service_ts.jks"));
sslContextFactory.setTrustStorePassword("<PASSWORD>");
sslContextFactory.setNeedClientAuth(true);
HttpConfiguration https_config = new HttpConfiguration(http_config);
https_config.addCustomizer(new SecureRequestCustomizer());
ServerConnector sslConnector = new ServerConnector(server,
new SslConnectionFactory(sslContextFactory,"http/1.1"),
new HttpConnectionFactory(https_config));
sslConnector.setPort(8008);
server.addConnector(sslConnector);
// Thanks to Jetty getting started guide.
server.setHandler(new AbstractHandler() {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
Enumeration<String> z = request.getAttributeNames();
while (z.hasMoreElements()) {
String elem = z.nextElement();
System.out.println(elem + " - " + request.getAttribute(elem));
}
if (request.getAttribute("javax.servlet.request.X509Certificate") != null) {
clientSerial = ((java.security.cert.X509Certificate[]) request
.getAttribute("javax.servlet.request.X509Certificate"))[0].getSerialNumber();
}
response.setStatus(HttpServletResponse.SC_OK);
baseRequest.setHandled(true);
response.getWriter().println("apiman");
}
});
server.start();
}
@After
public void destroyJetty() throws Exception {
server.stop();
server.destroy();
config.clear();
}
ApiRequest request = new ApiRequest();
Api api = new Api();
{
request.setApiKey("12345");
request.setDestination("/");
request.getHeaders().put("test", "it-worked");
request.setTransportSecure(true);
request.setRemoteAddr("https://localhost:8008/");
request.setType("GET");
api.setEndpoint("https://localhost:8008/");
api.getEndpointProperties().put(RequiredAuthType.ENDPOINT_AUTHORIZATION_TYPE, "mtls");
}
/**
* Scenario:
     * - Select client key alias `gatewayalias`.
     * - Mutual trust exists between gateway and API.
     * - We must use the `gatewayalias` cert, NOT `gateway`.
* @throws CertificateException the certificate exception
* @throws IOException the IO exception
*/
@Test
public void shouldSucceedWhenValidKeyAlias() throws CertificateException, IOException {
config.put(TLSOptions.TLS_TRUSTSTORE, getResourcePath("2waytest/aliased_keys/gateway_ts.jks"));
config.put(TLSOptions.TLS_TRUSTSTOREPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_KEYSTORE, getResourcePath("2waytest/aliased_keys/gateway_ks.jks"));
config.put(TLSOptions.TLS_KEYSTOREPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_KEYPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_ALLOWANYHOST, "true");
config.put(TLSOptions.TLS_ALLOWSELFSIGNED, "true");
config.put(TLSOptions.TLS_KEYALIASES, "gatewayalias");
X509Certificate expectedCert;
try(InputStream inStream = new FileInputStream(getResourcePath("2waytest/aliased_keys/gatewayalias.cer"))) {
expectedCert = (X509Certificate) CertificateFactory.getInstance("X.509").generateCertificate(inStream);
}
HttpConnectorFactory factory = new HttpConnectorFactory(config);
IApiConnector connector = factory.createConnector(request, api, RequiredAuthType.MTLS, false, new ConnectorConfigImpl());
IApiConnection connection = connector.connect(request,
(IAsyncResult<IApiConnectionResponse> result) -> {
if (result.isError())
throw new RuntimeException(result.getError());
Assert.assertTrue(result.isSuccess());
// Assert that the expected certificate (associated with the private key by virtue)
// was the one used.
Assert.assertEquals(expectedCert.getSerialNumber(), clientSerial);
});
connection.end();
}
/**
* Scenario:
     * - Several aliases are invalid or unknown; only one (`gateway`) is valid.
* - Mutual trust exists between gateway and API.
* - We must fall back to the valid alias.
* @throws CertificateException the certificate exception
* @throws IOException the IO exception
*/
@Test
public void shouldFallbackWhenMultipleAliasesAvailable() throws CertificateException, IOException {
config.put(TLSOptions.TLS_TRUSTSTORE, getResourcePath("2waytest/aliased_keys/gateway_ts.jks"));
config.put(TLSOptions.TLS_TRUSTSTOREPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_KEYSTORE, getResourcePath("2waytest/aliased_keys/gateway_ks.jks"));
config.put(TLSOptions.TLS_KEYSTOREPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_KEYPASSWORD, "<PASSWORD>");
config.put(TLSOptions.TLS_ALLOWANYHOST, "true");
config.put(TLSOptions.TLS_ALLOWSELFSIGNED, "true");
        // Only `gateway` is valid; `unrelated` is real but not trusted by the API, and the others don't exist.
config.put(TLSOptions.TLS_KEYALIASES, "unrelated, owt, or, nowt, gateway, sonorous, unrelated");
X509Certificate expectedCert;
try(InputStream inStream = new FileInputStream(getResourcePath("2waytest/aliased_keys/gateway.cer"))) {
expectedCert = (X509Certificate) CertificateFactory.getInstance("X.509").generateCertificate(inStream);
}
HttpConnectorFactory factory = new HttpConnectorFactory(config);
IApiConnector connector = factory.createConnector(request, api, RequiredAuthType.MTLS, false, new ConnectorConfigImpl());
IApiConnection connection = connector.connect(request,
(IAsyncResult<IApiConnectionResponse> result) -> {
if (result.isError())
throw new RuntimeException(result.getError());
Assert.assertTrue(result.isSuccess());
// Assert that the expected certificate (associated with the private key by virtue)
// was the one used.
Assert.assertEquals(expectedCert.getSerialNumber(), clientSerial);
});
connection.end();
}
private String getResourcePath(String res) {
URL resource = CAMutualAuthTest.class.getResource(res);
try {
System.out.println(res);
return Paths.get(resource.toURI()).toFile().getAbsolutePath();
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
}
| 3,740 |
348 | <gh_stars>100-1000
{"nom":"Dambach","circ":"8ème circonscription","dpt":"Bas-Rhin","inscrits":631,"abs":378,"votants":253,"blancs":11,"nuls":4,"exp":238,"res":[{"nuance":"LR","nom":"<NAME>","voix":127},{"nuance":"REM","nom":"<NAME>","voix":111}]} | 100 |
2,151 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .extended_attribute import ExtendedAttributeList
from .idl_types import RecordType
from .idl_types import SequenceType
from .utilities import assert_no_extra_args
# https://heycam.github.io/webidl/#idl-attributes
class Attribute(object):
_INVALID_TYPES = frozenset([SequenceType, RecordType])
def __init__(self, **kwargs):
self._identifier = kwargs.pop('identifier')
self._type = kwargs.pop('type')
self._is_static = kwargs.pop('is_static', False)
self._is_readonly = kwargs.pop('is_readonly', False)
self._extended_attribute_list = kwargs.pop('extended_attribute_list', ExtendedAttributeList())
assert_no_extra_args(kwargs)
if type(self.type) in Attribute._INVALID_TYPES:
            raise ValueError('The type of an attribute must not be sequence<T> or record<K,V>.')
@property
def identifier(self):
return self._identifier
@property
def type(self):
return self._type
@property
def is_static(self):
return self._is_static
@property
def is_readonly(self):
return self._is_readonly
@property
def extended_attribute_list(self):
return self._extended_attribute_list
| 523 |
1,433 | //******************************************************************
//
// Copyright 2015 Samsung Electronics All Rights Reserved.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#ifndef SOFTSENSORRESOURCE_H_
#define SOFTSENSORRESOURCE_H_
#include "BundleResource.h"
namespace OIC
{
namespace Service
{
/**
* @class SoftSensorResource
* @brief This class represents bundle resource for Soft Sensor
* to be registered in the container and make resource server
*
*/
class SoftSensorResource: public BundleResource
{
public:
/**
* Constructor for SoftSensorResource
*/
SoftSensorResource();
/**
* Virtual destructor for SoftSensorResource
*/
virtual ~SoftSensorResource();
/**
* Initialize input and output attributes for the resource
*
* @return void
*/
virtual void initAttributes();
/**
             * This function should be implemented by the concrete bundle resource
             * to execute the corresponding business logic (e.g., a light switch or
             * sensor resource) and retrieve a sensor value. If a new sensor value is
             * retrieved, setAttribute should be called to update the value.
             * The implementor of the function can decide whether to notify OIC clients
             * about the changed state or not.
*
* @return Value of all attributes
*/
virtual RCSResourceAttributes &handleGetAttributesRequest() = 0;
/**
             * This function should be implemented by the concrete bundle resource
             * to execute the corresponding business logic (e.g., a light switch or
             * sensor resource) and write either to soft sensor values or to external
             * bridged devices.
             *
             * The call of this method could, for example, trigger an HTTP PUT request
             * to an external API. This method is responsible for updating the resource's
             * internal data and calling the setAttribute method.
             *
             * The implementor of the function can decide whether to notify OIC clients
             * about the changed state or not.
*
* @param attrs Attributes to set
*
* @return void
*/
virtual void handleSetAttributesRequest(RCSResourceAttributes &attrs) = 0;
/**
* SoftSensor logic. Has to be provided by the soft sensor developer.
* This function will be executed if an input attribute is updated.
*
* @return void
*/
virtual void executeLogic() = 0;
/**
* Callback from the client module in the container.
* This function will be called if input data from remote resources are updated.
             * A soft sensor resource can receive a vector of input data from multiple
             * input resources that provide the attributeName it needs to execute its logic.
*
* @param attributeName Attribute key of input data
*
* @param values Vector of input data value
*
* @return void
*/
virtual void onUpdatedInputResource(const std::string attributeName,
std::vector<RCSResourceAttributes::Value> values) = 0;
public:
std::list<std::string> m_inputList;
};
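        /*
         * Illustrative sketch only (not part of the bundle API): a concrete soft
         * sensor would typically subclass SoftSensorResource along these lines.
         * The class name, attribute names and the m_attrs member are hypothetical;
         * RCSResourceAttributes is assumed to be default-constructible.
         *
         *   class DiscomfortIndexSensor : public SoftSensorResource
         *   {
         *       RCSResourceAttributes m_attrs;
         *
         *       void initAttributes()
         *       {
         *           m_inputList.push_back("temperature");   // inputs this sensor consumes
         *           m_inputList.push_back("humidity");
         *       }
         *
         *       RCSResourceAttributes &handleGetAttributesRequest()
         *       {
         *           return m_attrs;
         *       }
         *
         *       void handleSetAttributesRequest(RCSResourceAttributes &attrs)
         *       {
         *           m_attrs = attrs;
         *       }
         *
         *       void executeLogic()
         *       {
         *           // combine the cached input values and update m_attrs
         *       }
         *
         *       void onUpdatedInputResource(const std::string attributeName,
         *                                   std::vector<RCSResourceAttributes::Value> values)
         *       {
         *           // cache `values` for attributeName, then re-run the logic
         *           executeLogic();
         *       }
         *   };
         */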
}
}
#endif
| 1,948 |
794 | <gh_stars>100-1000
package com.stardust.autojs.apkbuilder;
import com.stardust.autojs.apkbuilder.util.StreamUtils;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import zhao.arsceditor.ResDecoder.ARSCDecoder;
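/**
 * Repackages an existing APK: unzip it into a workspace, optionally edit the manifest
 * and resources, then re-zip and sign. An illustrative call sequence using only the
 * methods defined below (file paths and the package name are placeholders):
 *
 * <pre>{@code
 * ApkBuilder builder = new ApkBuilder(templateApk, outApk, workspaceDir);
 * builder.prepare()
 *        .setArscPackageName("com.example.app")
 *        .replaceFile("assets/project/main.js", scriptPath)
 *        .build()
 *        .sign()
 *        .cleanWorkspace();
 * }</pre>
 *
 * {@link #editManifest()} returns a {@link ManifestEditor} for AndroidManifest.xml
 * changes, which are written back when {@link #build()} is called.
 */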
public class ApkBuilder {
private File mOutApkFile;
private ApkPackager mApkPackager;
private ManifestEditor mManifestEditor;
private String mWorkspacePath;
private String mArscPackageName;
public ApkBuilder(InputStream apkInputStream, File outApkFile, String workspacePath) {
mOutApkFile = outApkFile;
mWorkspacePath = workspacePath;
mApkPackager = new ApkPackager(apkInputStream, mWorkspacePath);
}
public ApkBuilder(File inFile, File outFile, String workspacePath) throws FileNotFoundException {
this(new FileInputStream(inFile), outFile, workspacePath);
}
public ApkBuilder prepare() throws IOException {
new File(mWorkspacePath).mkdirs();
mApkPackager.unzip();
return this;
}
private File getManifestFile() {
return new File(mWorkspacePath, "AndroidManifest.xml");
}
public ManifestEditor editManifest() throws FileNotFoundException {
mManifestEditor = new ManifestEditor(new FileInputStream(getManifestFile()));
return mManifestEditor;
}
public ApkBuilder setArscPackageName(String packageName) throws IOException {
mArscPackageName = packageName;
return this;
}
public ApkBuilder replaceFile(String relativePath, String newFilePath) throws IOException {
StreamUtils.write(new FileInputStream(newFilePath),
new FileOutputStream(new File(mWorkspacePath, relativePath)));
return this;
}
public ApkBuilder build() throws IOException {
if (mManifestEditor != null) {
mManifestEditor.writeTo(new FileOutputStream(getManifestFile()));
}
if (mArscPackageName != null) {
buildArsc();
}
return this;
}
private void buildArsc() throws IOException {
File oldArsc = new File(mWorkspacePath, "resources.arsc");
File newArsc = new File(mWorkspacePath, "resources.arsc.new");
ARSCDecoder decoder = new ARSCDecoder(new BufferedInputStream(new FileInputStream(oldArsc)), null, false);
FileOutputStream fos = new FileOutputStream(newArsc);
decoder.CloneArsc(fos, mArscPackageName, true);
oldArsc.delete();
newArsc.renameTo(oldArsc);
}
public ApkBuilder sign() throws Exception {
mApkPackager.repackage(mOutApkFile.getPath());
return this;
}
public ApkBuilder cleanWorkspace() {
delete(new File(mWorkspacePath));
return this;
}
private void delete(File file) {
if (file.isFile()) {
file.delete();
return;
}
for (File child : file.listFiles()) {
delete(child);
}
file.delete();
}
}
| 1,246 |
474 | package org.javacord.api.listener.user;
import org.javacord.api.event.user.UserChangeStatusEvent;
import org.javacord.api.listener.GloballyAttachableListener;
import org.javacord.api.listener.ObjectAttachableListener;
import org.javacord.api.listener.server.ServerAttachableListener;
/**
* This listener listens to user status changes.
*/
@FunctionalInterface
public interface UserChangeStatusListener extends ServerAttachableListener, UserAttachableListener,
GloballyAttachableListener, ObjectAttachableListener {
/**
* This method is called every time a user changed their status.
*
* @param event The event.
*/
void onUserChangeStatus(UserChangeStatusEvent event);
}
| 262 |
432 | <reponame>Reno-Greenleaf/bsd-games
/* $NetBSD: def.objects.h,v 1.6 2003/04/02 18:36:34 jsm Exp $ */
/*
* Copyright (c) 1985, Stichting Centrum voor Wiskunde en Informatica,
* Amsterdam
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of the Stichting Centrum voor Wiskunde en
* Informatica, nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1982 <NAME> <<EMAIL>>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DEF_OBJECTS_H_
#define _DEF_OBJECTS_H_
/* objects have letter " % ) ( 0 _ ` [ ! ? / = * */
#include "config.h"
#include "def.objclass.h"
struct objclass objects[] = {
{ "strange object", NULL, NULL, 1, 0,
ILLOBJ_SYM, 0, 0, 0, 0, 0, 0 },
{ "amulet of Yendor", NULL, NULL, 1, 0,
AMULET_SYM, 100, 0, 2, 0, 0, 0 },
#define FOOD(name,prob,delay,weight,nutrition) { name, NULL, NULL, 1, 1,\
FOOD_SYM, prob, delay, weight, 0, 0, nutrition }
/* dog eats foods 0-4 but prefers 1 above 0,2,3,4 */
/* food 4 can be read */
/* food 5 improves your vision */
/* food 6 makes you stronger (like Popeye) */
/* foods CORPSE up to CORPSE+52 are cadavers */
FOOD("food ration", 50, 5, 4, 800),
FOOD("tripe ration", 20, 1, 2, 200),
FOOD("pancake", 3, 1, 1, 200),
FOOD("dead lizard", 3, 0, 1, 40),
FOOD("fortune cookie", 7, 0, 1, 40),
FOOD("carrot", 2, 0, 1, 50),
FOOD("tin", 7, 0, 1, 0),
FOOD("orange", 1, 0, 1, 80),
FOOD("apple", 1, 0, 1, 50),
FOOD("pear", 1, 0, 1, 50),
FOOD("melon", 1, 0, 1, 100),
FOOD("banana", 1, 0, 1, 80),
FOOD("candy bar", 1, 0, 1, 100),
FOOD("egg", 1, 0, 1, 80),
FOOD("clove of garlic", 1, 0, 1, 40),
FOOD("lump of royal jelly", 0, 0, 1, 200),
FOOD("dead human", 0, 4, 40, 400),
FOOD("dead giant ant", 0, 1, 3, 30),
FOOD("dead giant bat", 0, 1, 3, 30),
FOOD("dead centaur", 0, 5, 50, 500),
FOOD("dead dragon", 0, 15, 150, 1500),
FOOD("dead floating eye", 0, 1, 1, 10),
FOOD("dead freezing sphere", 0, 1, 1, 10),
FOOD("dead gnome", 0, 1, 10, 100),
FOOD("dead hobgoblin", 0, 2, 20, 200),
FOOD("dead stalker", 0, 4, 40, 400),
FOOD("dead jackal", 0, 1, 10, 100),
FOOD("dead kobold", 0, 1, 10, 100),
FOOD("dead leprechaun", 0, 4, 40, 400),
FOOD("dead mimic", 0, 4, 40, 400),
FOOD("dead nymph", 0, 4, 40, 400),
FOOD("dead orc", 0, 2, 20, 200),
FOOD("dead purple worm", 0, 7, 70, 700),
FOOD("dead quasit", 0, 2, 20, 200),
FOOD("dead rust monster", 0, 5, 50, 500),
FOOD("dead snake", 0, 1, 10, 100),
FOOD("dead troll", 0, 4, 40, 400),
FOOD("dead umber hulk", 0, 5, 50, 500),
FOOD("dead vampire", 0, 4, 40, 400),
FOOD("dead wraith", 0, 1, 1, 10),
FOOD("dead xorn", 0, 7, 70, 700),
FOOD("dead yeti", 0, 7, 70, 700),
FOOD("dead zombie", 0, 1, 3, 30),
FOOD("dead acid blob", 0, 1, 3, 30),
FOOD("dead giant beetle", 0, 1, 1, 10),
FOOD("dead cockatrice", 0, 1, 3, 30),
FOOD("dead dog", 0, 2, 20, 200),
FOOD("dead ettin", 0, 1, 3, 30),
FOOD("dead fog cloud", 0, 1, 1, 10),
FOOD("dead gelatinous cube", 0, 1, 10, 100),
FOOD("dead homunculus", 0, 2, 20, 200),
FOOD("dead imp", 0, 1, 1, 10),
FOOD("dead jaguar", 0, 3, 30, 300),
FOOD("dead killer bee", 0, 1, 1, 10),
FOOD("dead leocrotta", 0, 5, 50, 500),
FOOD("dead minotaur", 0, 7, 70, 700),
FOOD("dead nurse", 0, 4, 40, 400),
FOOD("dead owlbear", 0, 7, 70, 700),
FOOD("dead piercer", 0, 2, 20, 200),
FOOD("dead quivering blob", 0, 1, 10, 100),
FOOD("dead giant rat", 0, 1, 3, 30),
FOOD("dead giant scorpion", 0, 1, 10, 100),
FOOD("dead tengu", 0, 3, 30, 300),
FOOD("dead unicorn", 0, 3, 30, 300),
FOOD("dead violet fungi", 0, 1, 10, 100),
FOOD("dead long worm", 0, 5, 50, 500),
/* %% wt of long worm should be proportional to its length */
FOOD("dead xan", 0, 3, 30, 300),
FOOD("dead yellow light", 0, 1, 1, 10),
FOOD("dead zruty", 0, 6, 60, 600),
/* weapons ... - ROCK come several at a time */
/* weapons ... - (ROCK-1) are shot using idem+(BOW-ARROW) */
/* weapons AXE, SWORD, THSWORD are good for worm-cutting */
/* weapons (PICK-)AXE, DAGGER, CRYSKNIFE are good for tin-opening */
#define WEAPON(name,prob,wt,ldam,sdam) { name, NULL, NULL, 1, 0 /*%%*/,\
WEAPON_SYM, prob, 0, wt, ldam, sdam, 0 }
WEAPON("arrow", 7, 0, 6, 6),
WEAPON("sling bullet", 7, 0, 4, 6),
WEAPON("crossbow bolt", 7, 0, 4, 6),
WEAPON("dart", 7, 0, 3, 2),
WEAPON("rock", 6, 1, 3, 3),
WEAPON("boomerang", 2, 3, 9, 9),
WEAPON("mace", 9, 3, 6, 7),
WEAPON("axe", 6, 3, 6, 4),
WEAPON("flail", 6, 3, 6, 5),
WEAPON("long sword", 8, 3, 8, 12),
WEAPON("two handed sword", 6, 4, 12, 6),
WEAPON("dagger", 6, 3, 4, 3),
WEAPON("worm tooth", 0, 4, 2, 2),
WEAPON("crysknife", 0, 3, 10, 10),
WEAPON("spear", 6, 3, 6, 8),
WEAPON("bow", 6, 3, 4, 6),
WEAPON("sling", 5, 3, 6, 6),
WEAPON("crossbow", 6, 3, 4, 6),
{ "whistle", "whistle", NULL, 0, 0,
TOOL_SYM, 90, 0, 2, 0, 0, 0 },
{ "magic whistle", "whistle", NULL, 0, 0,
TOOL_SYM, 10, 0, 2, 0, 0, 0 },
{ "expensive camera", NULL, NULL, 1, 1,
TOOL_SYM, 0, 0, 3, 0, 0, 0 },
{ "ice box", "large box", NULL, 0, 0,
TOOL_SYM, 0, 0, 40, 0, 0, 0 },
{ "pick-axe", NULL, NULL, 1, 1,
TOOL_SYM, 0, 0, 5, 6, 3, 0 },
{ "can opener", NULL, NULL, 1, 1,
TOOL_SYM, 0, 0, 1, 0, 0, 0 },
{ "heavy iron ball", NULL, NULL, 1, 0,
BALL_SYM, 100, 0, 20, 0, 0, 0 },
{ "iron chain", NULL, NULL, 1, 0,
CHAIN_SYM, 100, 0, 20, 0, 0, 0 },
{ "enormous rock", NULL, NULL, 1, 0,
ROCK_SYM, 100, 0, 200 /* > MAX_CARR_CAP */, 0, 0, 0 },
#define ARMOR(name,prob,delay,ac,can) { name, NULL, NULL, 1, 0,\
ARMOR_SYM, prob, delay, 8, ac, can, 0 }
ARMOR("helmet", 3, 1, 9, 0),
ARMOR("plate mail", 5, 5, 3, 2),
ARMOR("splint mail", 8, 5, 4, 1),
ARMOR("banded mail", 10, 5, 4, 0),
ARMOR("chain mail", 10, 5, 5, 1),
ARMOR("scale mail", 10, 5, 6, 0),
ARMOR("ring mail", 15, 5, 7, 0),
/* the armors below do not rust */
ARMOR("studded leather armor", 13, 3, 7, 1),
ARMOR("leather armor", 17, 3, 8, 0),
ARMOR("elven cloak", 5, 0, 9, 3),
ARMOR("shield", 3, 0, 9, 0),
ARMOR("pair of gloves", 1, 1, 9, 0),
#define POTION(name,color) { name, color, NULL, 0, 1,\
POTION_SYM, 0, 0, 2, 0, 0, 0 }
POTION("restore strength", "orange"),
POTION("booze", "bubbly"),
POTION("invisibility", "glowing"),
POTION("fruit juice", "smoky"),
POTION("healing", "pink"),
POTION("paralysis", "puce"),
POTION("monster detection", "purple"),
POTION("object detection", "yellow"),
POTION("sickness", "white"),
POTION("confusion", "swirly"),
POTION("gain strength", "purple-red"),
POTION("speed", "ruby"),
POTION("blindness", "dark green"),
POTION("gain level", "emerald"),
POTION("extra healing", "sky blue"),
POTION("levitation", "brown"),
POTION(NULL, "brilliant blue"),
POTION(NULL, "clear"),
POTION(NULL, "magenta"),
POTION(NULL, "ebony"),
#define SCROLL(name,text,prob) { name, text, NULL, 0, 1,\
SCROLL_SYM, prob, 0, 3, 0, 0, 0 }
SCROLL("mail", "KIRJE", 0),
SCROLL("enchant armor", "<NAME>", 6),
SCROLL("destroy armor", "JUYED AWK YACC", 5),
SCROLL("confuse monster", "NR 9", 5),
SCROLL("scare monster", "XIXAXA XOXAXA XUXAXA", 4),
SCROLL("blank paper", "READ ME", 3),
SCROLL("remove curse", "PRATYAVAYAH", 6),
SCROLL("enchant weapon", "<NAME>", 6),
SCROLL("damage weapon", "<NAME>", 5),
SCROLL("create monster", "<NAME>", 5),
SCROLL("taming", "PRIRUTSENIE", 1),
SCROLL("genocide", "ELBIB YLOH",2),
SCROLL("light", "<NAME>", 10),
SCROLL("teleportation", "<NAME>", 5),
SCROLL("gold detection", "THARR", 4),
SCROLL("food detection", "<NAME>", 1),
SCROLL("identify", "<NAME>", 18),
SCROLL("magic mapping", "<NAME>", 5),
SCROLL("amnesia", "<NAME>", 3),
SCROLL("fire", "<NAME>", 5),
SCROLL("punishment", "<NAME>", 1),
SCROLL(NULL, "<NAME>", 0),
SCROLL(NULL, "<NAME>", 0),
SCROLL(NULL, "TEMOV", 0),
SCROLL(NULL, "<NAME>", 0),
#define WAND(name,metal,prob,flags) { name, metal, NULL, 0, 0,\
WAND_SYM, prob, 0, 3, flags, 0, 0 }
WAND("light", "iridium", 10, NODIR),
WAND("secret door detection", "tin", 5, NODIR),
WAND("create monster", "platinum", 5, NODIR),
WAND("wishing", "glass", 1, NODIR),
WAND("striking", "zinc", 9, IMMEDIATE),
WAND("slow monster", "balsa", 5, IMMEDIATE),
WAND("speed monster", "copper", 5, IMMEDIATE),
WAND("undead turning", "silver", 5, IMMEDIATE),
WAND("polymorph", "brass", 5, IMMEDIATE),
WAND("cancellation", "maple", 5, IMMEDIATE),
WAND("teleportation", "pine", 5, IMMEDIATE),
WAND("make invisible", "marble", 9, IMMEDIATE),
WAND("digging", "iron", 5, RAY),
WAND("magic missile", "aluminium", 10, RAY),
WAND("fire", "steel", 5, RAY),
WAND("sleep", "curved", 5, RAY),
WAND("cold", "short", 5, RAY),
WAND("death", "long", 1, RAY),
WAND(NULL, "oak", 0, 0),
WAND(NULL, "ebony", 0, 0),
WAND(NULL, "runed", 0, 0),
#define RING(name,stone,spec) { name, stone, NULL, 0, 0,\
RING_SYM, 0, 0, 1, spec, 0, 0 }
RING("adornment", "engagement", 0),
RING("teleportation", "wooden", 0),
RING("regeneration", "black onyx", 0),
RING("searching", "topaz", 0),
RING("see invisible", "pearl", 0),
RING("stealth", "sapphire", 0),
RING("levitation", "moonstone", 0),
RING("poison resistance", "agate", 0),
RING("aggravate monster", "tiger eye", 0),
RING("hunger", "shining", 0),
RING("fire resistance", "gold", 0),
RING("cold resistance", "copper", 0),
RING("protection from shape changers", "diamond", 0),
RING("conflict", "jade", 0),
RING("gain strength", "ruby", SPEC),
RING("increase damage", "silver", SPEC),
RING("protection", "granite", SPEC),
RING("warning", "wire", 0),
RING("teleport control", "iron", 0),
RING(NULL, "ivory", 0),
RING(NULL, "blackened", 0),
/* gems ************************************************************/
#define GEM(name,color,prob,gval) { name, color, NULL, 0, 1,\
GEM_SYM, prob, 0, 1, 0, 0, gval }
GEM("diamond", "blue", 1, 4000),
GEM("ruby", "red", 1, 3500),
GEM("sapphire", "blue", 1, 3000),
GEM("emerald", "green", 1, 2500),
GEM("turquoise", "green", 1, 2000),
GEM("aquamarine", "blue", 1, 1500),
GEM("tourmaline", "green", 1, 1000),
GEM("topaz", "yellow", 1, 900),
GEM("opal", "yellow", 1, 800),
GEM("garnet", "dark", 1, 700),
GEM("amethyst", "violet", 2, 650),
GEM("agate", "green", 2, 600),
GEM("onyx", "white", 2, 550),
GEM("jasper", "yellowish brown", 2, 500),
GEM("jade", "green", 2, 450),
GEM("worthless piece of blue glass", "blue", 20, 0),
GEM("worthless piece of red glass", "red", 20, 0),
GEM("worthless piece of yellow glass", "yellow", 20, 0),
GEM("worthless piece of green glass", "green", 20, 0),
{ NULL, NULL, NULL, 0, 0, ILLOBJ_SYM, 0, 0, 0, 0, 0, 0 }
};
const char obj_symbols[] = {
ILLOBJ_SYM, AMULET_SYM, FOOD_SYM, WEAPON_SYM, TOOL_SYM,
BALL_SYM, CHAIN_SYM, ROCK_SYM, ARMOR_SYM, POTION_SYM, SCROLL_SYM,
WAND_SYM, RING_SYM, GEM_SYM, 0 };
int bases[sizeof(obj_symbols)];
#endif /* _DEF_OBJECTS_H_ */
| 5,727 |
1,382 | //
// qnsearch_example.c
//
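// Demonstrates liquid-dsp's quasi-Newton search: the Rosenbrock function is
// minimized over `num_parameters` dimensions, the search is stepped manually,
// and the utility of every iteration is written to an Octave/MATLAB script for
// plotting. The Rosenbrock minimum lies at [1, 1, ..., 1], so the logged
// utility should decay toward zero.
//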
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "liquid.h"
#define OUTPUT_FILENAME "qnsearch_example.m"
int main() {
unsigned int num_parameters = 8; // dimensionality of search (minimum 2)
unsigned int num_iterations = 4000; // number of iterations to run
float optimum_vect[num_parameters];
unsigned int i;
for (i=0; i<num_parameters; i++)
optimum_vect[i] = 0.0f;
float optimum_utility;
// open output file
FILE*fid = fopen(OUTPUT_FILENAME,"w");
fprintf(fid,"%% %s : auto-generated file\n", OUTPUT_FILENAME);
fprintf(fid,"clear all;\n");
fprintf(fid,"close all;\n");
// create qnsearch object
qnsearch gs = qnsearch_create(
NULL, optimum_vect, num_parameters, &liquid_rosenbrock, LIQUID_OPTIM_MINIMIZE);
// execute search
//optimum_utility = qnsearch_run(gs, num_iterations, -1e-6f);
// execute search one iteration at a time
fprintf(fid,"u = zeros(1,%u);\n", num_iterations);
for (i=0; i<num_iterations; i++) {
optimum_utility = liquid_rosenbrock(NULL,optimum_vect,num_parameters);
fprintf(fid,"u(%3u) = %12.4e;\n", i+1, optimum_utility);
qnsearch_step(gs);
if (((i+1)%100)==0)
qnsearch_print(gs);
}
// print results
printf("\n");
qnsearch_print(gs);
fprintf(fid,"figure;\n");
fprintf(fid,"semilogy(u);\n");
fprintf(fid,"xlabel('iteration');\n");
fprintf(fid,"ylabel('utility');\n");
fprintf(fid,"title('quasinewton search results');\n");
fprintf(fid,"grid on;\n");
fclose(fid);
printf("results written to %s.\n", OUTPUT_FILENAME);
// test results, optimum at [1, 1, 1, ... 1];
qnsearch_destroy(gs);
return 0;
}
| 789 |
5,169 | {
"name": "iAsync.async",
"version": "0.0.1",
"summary": "iAsync.async lib developed to make async programming easier",
"description": " iAsync is a set of IOS libraries that aims to make asychronous programming easy for for IOS developers. It uses functional programming ideas to solve Callback Hell problem.\n\n * google group : https://groups.google.com/forum/#!forum/iasync-users\n * skype chat (mostly, russian speaking) : skype:?chat&blob=8WfBM4NDRJZwtFEjtCR69UxYie9KVzZqp0pPogEOUHQGBbvMnxo4IxSHdusKsg8dfhFYYb5vKB2PSkJbfb72_bgSDfanudA7xIjsZORHA6FxPUaLhb7JXI1eFOnIo7l8C4pxHdpIeQipTw\n",
"homepage": "https://www.facebook.com/vladimir.gorbenko.9",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"social_media_url": "https://www.facebook.com/vladimir.gorbenko.9",
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/volodg/iAsync.async.git",
"tag": "0.0.1"
},
"source_files": "Lib/**/*.{swift,m,h}",
"dependencies": {
"iAsync.utils": [
"~> 0.0.1"
]
}
}
| 541 |
4,050 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp.event;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.ExecutableNode;
import azkaban.executor.Status;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.log4j.Logger;
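/**
 * Watches the job statuses of another flow execution (identified by execId).
 * Callers block on the {@link BlockingStatus} objects handed out by
 * {@link #getBlockingStatus(String)}; when the watched execution reports a status
 * change, {@link #handleJobStatusChange(String, Status)} unblocks them, and
 * {@link #unblockAllWatches()} releases everything when the watch is cancelled.
 * Concrete subclasses decide how the remote execution is observed.
 */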
public abstract class FlowWatcher {
private final int execId;
private final Map<String, BlockingStatus> map =
new ConcurrentHashMap<>();
private Logger logger;
private ExecutableFlow flow;
private boolean cancelWatch = false;
public FlowWatcher(final int execId) {
this.execId = execId;
}
public void setFlow(final ExecutableFlow flow) {
this.flow = flow;
}
protected Logger getLogger() {
return this.logger;
}
public void setLogger(final Logger logger) {
this.logger = logger;
}
/**
* Called to fire events to the JobRunner listeners
*/
protected synchronized void handleJobStatusChange(final String jobId, final Status status) {
final BlockingStatus block = this.map.get(jobId);
if (block != null) {
block.changeStatus(status);
}
}
public int getExecId() {
return this.execId;
}
public synchronized BlockingStatus getBlockingStatus(final String jobId) {
if (this.cancelWatch) {
return null;
}
final ExecutableNode node = this.flow.getExecutableNodePath(jobId);
if (node == null) {
return null;
}
BlockingStatus blockingStatus = this.map.get(jobId);
if (blockingStatus == null) {
blockingStatus = new BlockingStatus(this.execId, jobId, node.getStatus());
this.map.put(jobId, blockingStatus);
}
return blockingStatus;
}
public Status peekStatus(final String jobId) {
if (Status.isStatusFinished(this.flow.getStatus())) {
return null;
}
final ExecutableNode node = this.flow.getExecutableNodePath(jobId);
if (node != null) {
ExecutableFlowBase parentFlow = node.getParentFlow();
while (parentFlow != null) {
Status parentStatus = parentFlow.getStatus();
if (parentStatus == Status.SKIPPED || parentStatus == Status.DISABLED) {
return Status.SKIPPED;
}
parentFlow = parentFlow.getParentFlow();
}
return node.getStatus();
}
return null;
}
public synchronized void unblockAllWatches() {
this.logger.info("Unblock all watches on " + this.execId);
this.cancelWatch = true;
for (final BlockingStatus status : this.map.values()) {
this.logger.info("Unblocking " + status.getJobId());
status.changeStatus(Status.SKIPPED);
status.unblock();
}
this.logger.info("Successfully unblocked all watches on " + this.execId);
}
public boolean isWatchCancelled() {
return this.cancelWatch;
}
public abstract void stopWatcher();
}
| 1,149 |
345 | <filename>XivAlexander/App_Feature_IpcTypeFinder.h
#pragma once
namespace App {
namespace Network {
class SocketHook;
}
}
namespace App::Feature {
class IpcTypeFinder {
struct Implementation;
const std::unique_ptr<Implementation> m_pImpl;
public:
IpcTypeFinder(Network::SocketHook* socketHook);
~IpcTypeFinder();
};
}
| 129 |
1,945 | from .tqdm import stdout_to_tqdm
from .image import crop_image
from .image import color_jittering_, lighting_, normalize_
| 42 |
448 | <gh_stars>100-1000
/***** includes *****/
#include "lfds711_ringbuffer_internal.h"
/***** private prototypes *****/
static void lfds711_ringbuffer_internal_queue_umm_element_cleanup_callback( struct lfds711_queue_umm_state *qumms,
struct lfds711_queue_umm_element *qumme,
enum lfds711_misc_flag dummy_element_flag );
static void lfds711_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds711_freelist_state *fs,
struct lfds711_freelist_element *fe );
/****************************************************************************/
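/* On cleanup the user-supplied element_cleanup_callback is invoked once per element:
   elements still in the queue (written but never read) are reported with
   unread_flag LFDS711_MISC_FLAG_RAISED, while elements sitting in the freelist are
   reported with LFDS711_MISC_FLAG_LOWERED - see the two internal callbacks below. */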
void lfds711_ringbuffer_cleanup( struct lfds711_ringbuffer_state *rs,
void (*element_cleanup_callback)(struct lfds711_ringbuffer_state *rs, void *key, void *value, enum lfds711_misc_flag unread_flag) )
{
LFDS711_PAL_ASSERT( rs != NULL );
// TRD : element_cleanup_callback can be NULL
if( element_cleanup_callback != NULL )
{
rs->element_cleanup_callback = element_cleanup_callback;
lfds711_queue_umm_cleanup( &rs->qumms, lfds711_ringbuffer_internal_queue_umm_element_cleanup_callback );
lfds711_freelist_cleanup( &rs->fs, lfds711_ringbuffer_internal_freelist_element_cleanup_callback );
}
return;
}
/****************************************************************************/
#pragma warning( disable : 4100 )
static void lfds711_ringbuffer_internal_queue_umm_element_cleanup_callback( struct lfds711_queue_umm_state *qumms,
struct lfds711_queue_umm_element *qumme,
enum lfds711_misc_flag dummy_element_flag )
{
struct lfds711_ringbuffer_element
*re;
struct lfds711_ringbuffer_state
*rs;
LFDS711_PAL_ASSERT( qumms != NULL );
LFDS711_PAL_ASSERT( qumme != NULL );
// TRD : dummy_element can be any value in its range
rs = (struct lfds711_ringbuffer_state *) LFDS711_QUEUE_UMM_GET_USER_STATE_FROM_STATE( *qumms );
re = (struct lfds711_ringbuffer_element *) LFDS711_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qumme );
if( dummy_element_flag == LFDS711_MISC_FLAG_LOWERED )
rs->element_cleanup_callback( rs, re->key, re->value, LFDS711_MISC_FLAG_RAISED );
return;
}
#pragma warning( default : 4100 )
/****************************************************************************/
#pragma warning( disable : 4100 )
static void lfds711_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds711_freelist_state *fs,
struct lfds711_freelist_element *fe )
{
struct lfds711_ringbuffer_element
*re;
struct lfds711_ringbuffer_state
*rs;
LFDS711_PAL_ASSERT( fs != NULL );
LFDS711_PAL_ASSERT( fe != NULL );
rs = (struct lfds711_ringbuffer_state *) LFDS711_FREELIST_GET_USER_STATE_FROM_STATE( *fs );
re = (struct lfds711_ringbuffer_element *) LFDS711_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
rs->element_cleanup_callback( rs, re->key, re->value, LFDS711_MISC_FLAG_LOWERED );
return;
}
#pragma warning( default : 4100 )
| 1,553 |
460 | #include "../../../src/xmlpatterns/data/qschematime_p.h"
| 25 |
852 | import FWCore.ParameterSet.Config as cms
from RecoParticleFlow.Configuration.pfBlockAnalyzer_cfi import *
| 33 |
1,350 | <reponame>Shashi-rk/azure-sdk-for-java<filename>sdk/resourcemanager/azure-resourcemanager-compute/src/main/java/com/azure/resourcemanager/compute/models/DiskEncryptionSetParameters.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.compute.models;
import com.azure.core.annotation.Fluent;
import com.azure.core.management.SubResource;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
/**
* Describes the parameter of customer managed disk encryption set resource id that can be specified for disk.
* <br><br> NOTE: The disk encryption set resource id can only be specified for managed disk. Please refer
* https://aka.ms/mdssewithcmkoverview for more details.
*/
@Fluent
public final class DiskEncryptionSetParameters extends SubResource {
@JsonIgnore private final ClientLogger logger = new ClientLogger(DiskEncryptionSetParameters.class);
/** {@inheritDoc} */
@Override
public DiskEncryptionSetParameters withId(String id) {
super.withId(id);
return this;
}
/**
* Validates the instance.
*
* @throws IllegalArgumentException thrown if the instance is not valid.
*/
public void validate() {
}
}
| 441 |
398 | <filename>Graphics/GraphicsTools/interface/StreamingBuffer.hpp
/*
* Copyright 2019-2021 Diligent Graphics LLC
* Copyright 2015-2019 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* In no event and under no legal theory, whether in tort (including negligence),
* contract, or otherwise, unless required by applicable law (such as deliberate
* and grossly negligent acts) or agreed to in writing, shall any Contributor be
* liable for any damages, including any direct, indirect, special, incidental,
* or consequential damages of any character arising as a result of this License or
* out of the use or inability to use the software (including but not limited to damages
* for loss of goodwill, work stoppage, computer failure or malfunction, or any and
* all other commercial damages or losses), even if such Contributor has been advised
* of the possibility of such damages.
*/
#pragma once
#include <functional>
#include <vector>
#include <string>
#include "../../GraphicsEngine/interface/RenderDevice.h"
#include "../../GraphicsEngine/interface/DeviceContext.h"
#include "../../GraphicsEngine/interface/Buffer.h"
#include "../../../Common/interface/RefCntAutoPtr.hpp"
#include "MapHelper.hpp"
namespace Diligent
{
struct StreamingBufferCreateInfo
{
IRenderDevice* pDevice = nullptr;
BufferDesc BuffDesc;
std::function<void(IBuffer*)> OnBufferResizeCallback = nullptr;
Uint32 NumContexts = 1;
bool AllowPersistentMapping = false;
};
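// Illustrative usage sketch (not part of the library). The device/context pointers,
// buffer parameters and the Constants struct are assumptions made for the example;
// the bind/CPU-access flags shown are the usual ones for a dynamic uniform buffer.
//
//     StreamingBufferCreateInfo CI;
//     CI.pDevice                 = pDevice;
//     CI.BuffDesc.Name           = "Streaming constants";
//     CI.BuffDesc.Usage          = USAGE_DYNAMIC;
//     CI.BuffDesc.BindFlags      = BIND_UNIFORM_BUFFER;
//     CI.BuffDesc.CPUAccessFlags = CPU_ACCESS_WRITE;
//     CI.BuffDesc.Size           = 65536;
//     StreamingBuffer StreamingCB{CI};
//
//     // Per draw call: copy data into the buffer and keep the returned offset
//     // for binding the written region.
//     Uint32 Offset = StreamingCB.Update(pCtx, pDevice, &Constants,
//                                        static_cast<Uint32>(sizeof(Constants)));
//
//     // Once per frame, after the data has been consumed:
//     StreamingCB.Reset();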
class StreamingBuffer
{
public:
StreamingBuffer() noexcept
{}
explicit StreamingBuffer(const StreamingBufferCreateInfo& CI) :
m_UsePersistentMap{CI.AllowPersistentMapping && (CI.pDevice->GetDeviceInfo().Type == RENDER_DEVICE_TYPE_VULKAN || CI.pDevice->GetDeviceInfo().Type == RENDER_DEVICE_TYPE_D3D12)},
m_BufferSize{CI.BuffDesc.Size},
m_OnBufferResizeCallback{CI.OnBufferResizeCallback},
m_MapInfo(CI.NumContexts)
{
VERIFY_EXPR(CI.pDevice != nullptr);
VERIFY_EXPR(CI.BuffDesc.Usage == USAGE_DYNAMIC);
CI.pDevice->CreateBuffer(CI.BuffDesc, nullptr, &m_pBuffer);
VERIFY_EXPR(m_pBuffer);
if (m_OnBufferResizeCallback)
m_OnBufferResizeCallback(m_pBuffer);
}
StreamingBuffer(const StreamingBuffer&) = delete;
StreamingBuffer& operator=(const StreamingBuffer&) = delete;
StreamingBuffer(StreamingBuffer&&) = default;
StreamingBuffer& operator=(StreamingBuffer&&) = default;
~StreamingBuffer()
{
for (const auto& mapInfo : m_MapInfo)
{
VERIFY(!mapInfo.m_MappedData, "Destroying streaming buffer that is still mapped");
}
}
// Returns offset of the allocated region
Uint32 Map(IDeviceContext* pCtx, IRenderDevice* pDevice, Uint32 Size, size_t CtxNum = 0)
{
VERIFY_EXPR(Size > 0);
auto& MapInfo = m_MapInfo[CtxNum];
// Check if there is enough space in the buffer
if (MapInfo.m_CurrOffset + Size > m_BufferSize)
{
// Unmap the buffer
Flush(CtxNum);
VERIFY_EXPR(MapInfo.m_CurrOffset == 0);
if (Size > m_BufferSize)
{
while (m_BufferSize < Size)
m_BufferSize *= 2;
auto BuffDesc = m_pBuffer->GetDesc();
BuffDesc.Size = m_BufferSize;
// BuffDesc.Name becomes invalid after old buffer is released
std::string Name = BuffDesc.Name;
BuffDesc.Name = Name.c_str();
m_pBuffer.Release();
pDevice->CreateBuffer(BuffDesc, nullptr, &m_pBuffer);
if (m_OnBufferResizeCallback)
m_OnBufferResizeCallback(m_pBuffer);
LOG_INFO_MESSAGE("Extended streaming buffer '", BuffDesc.Name, "' to ", m_BufferSize, " bytes");
}
}
if (!m_UsePersistentMap)
{
VERIFY(MapInfo.m_MappedData == nullptr, "Streaming buffer must be unmapped before it can be mapped next time when persistent mapping is not used");
}
if (MapInfo.m_MappedData == nullptr)
{
// If current offset is zero, we are mapping the buffer for the first time after it has been Reset. Use MAP_FLAG_DISCARD flag.
// Otherwise use MAP_FLAG_NO_OVERWRITE flag.
MapInfo.m_MappedData.Map(pCtx, m_pBuffer, MAP_WRITE, MapInfo.m_CurrOffset == 0 ? MAP_FLAG_DISCARD : MAP_FLAG_NO_OVERWRITE);
VERIFY_EXPR(MapInfo.m_MappedData);
}
auto Offset = MapInfo.m_CurrOffset;
// Update offset
MapInfo.m_CurrOffset += Size;
return Offset;
}
Uint32 Update(IDeviceContext* pCtx, IRenderDevice* pDevice, const void* pData, Uint32 Size, size_t CtxNum = 0)
{
VERIFY_EXPR(pData != nullptr);
auto Offset = Map(pCtx, pDevice, Size, CtxNum);
auto* pCPUAddress = reinterpret_cast<Uint8*>(GetMappedCPUAddress(CtxNum)) + Offset;
memcpy(pCPUAddress, pData, Size);
Unmap(CtxNum);
return Offset;
}
void Unmap(size_t CtxNum = 0)
{
if (!m_UsePersistentMap)
{
m_MapInfo[CtxNum].m_MappedData.Unmap();
}
}
void Flush(size_t CtxNum = 0)
{
m_MapInfo[CtxNum].m_MappedData.Unmap();
m_MapInfo[CtxNum].m_CurrOffset = 0;
}
void Reset()
{
for (Uint32 ctx = 0; ctx < m_MapInfo.size(); ++ctx)
Flush(ctx);
}
IBuffer* GetBuffer() const { return m_pBuffer.RawPtr<IBuffer>(); }
void* GetMappedCPUAddress(size_t CtxNum = 0)
{
return m_MapInfo[CtxNum].m_MappedData;
}
private:
bool m_UsePersistentMap = false;
Uint64 m_BufferSize = 0;
RefCntAutoPtr<IBuffer> m_pBuffer;
std::function<void(IBuffer*)> m_OnBufferResizeCallback;
struct MapInfo
{
MapHelper<Uint8> m_MappedData;
Uint32 m_CurrOffset = 0;
};
// We need to keep track of mapped data for every context
std::vector<MapInfo> m_MapInfo;
};
} // namespace Diligent
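// Minimal usage sketch for the class above: create a dynamic streaming buffer once,
// push per-frame data through Update(), and call Reset() at the end of the frame.
// The function name, the vertex data, the bind/CPU-access flags and the 64 KB initial
// size are illustrative assumptions; pDevice/pCtx are expected to come from the
// surrounding application.
namespace Diligent
{

inline void StreamingBufferUsageSketch(IRenderDevice* pDevice, IDeviceContext* pCtx)
{
    StreamingBufferCreateInfo CI;
    CI.pDevice                 = pDevice;
    CI.BuffDesc.Name           = "Streaming vertex buffer";
    CI.BuffDesc.Usage          = USAGE_DYNAMIC;    // required by StreamingBuffer
    CI.BuffDesc.BindFlags      = BIND_VERTEX_BUFFER;
    CI.BuffDesc.CPUAccessFlags = CPU_ACCESS_WRITE; // dynamic buffers are written by the CPU
    CI.BuffDesc.Size           = 64 << 10;         // initial size; the buffer grows on demand

    StreamingBuffer VertexStream{CI};

    // Update() maps the buffer, copies the data, unmaps it (unless persistent mapping
    // is used) and returns the byte offset at which the data was placed.
    const float  TriangleVerts[] = {0.f, 0.f, 1.f, 0.f, 0.f, 1.f};
    const Uint32 Offset          = VertexStream.Update(pCtx, pDevice, TriangleVerts, sizeof(TriangleVerts));
    (void)Offset; // pass the offset to SetVertexBuffers()/draw calls as needed

    // Release all mapped ranges and rewind the internal offsets, typically once per frame.
    VertexStream.Reset();
}

} // namespace Diligent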
| 2,808 |
22,779 | <filename>plugins/org.jkiss.dbeaver.ui.editors.sql/src/org/jkiss/dbeaver/ui/editors/sql/handlers/SQLEditorHandlerSyncConnection.java
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ui.editors.sql.handlers;
import org.eclipse.core.commands.AbstractHandler;
import org.eclipse.core.commands.ExecutionEvent;
import org.eclipse.core.commands.ExecutionException;
import org.eclipse.ui.IEditorPart;
import org.eclipse.ui.handlers.HandlerUtil;
import org.jkiss.dbeaver.ui.navigator.NavigatorUtils;
import org.jkiss.dbeaver.ui.navigator.database.NavigatorViewBase;
public class SQLEditorHandlerSyncConnection extends AbstractHandler {
public SQLEditorHandlerSyncConnection()
{
}
@Override
public Object execute(ExecutionEvent event) throws ExecutionException
{
final NavigatorViewBase navigatorView = NavigatorUtils.getActiveNavigatorView(event);
if (navigatorView == null) {
return null;
}
IEditorPart activeEditor = HandlerUtil.getActiveEditor(event);
if (NavigatorUtils.syncEditorWithNavigator(navigatorView, activeEditor)) {
HandlerUtil.getActiveWorkbenchWindow(event).getActivePage().activate(activeEditor);
}
return null;
}
}
| 616 |
5,053 | import pytest
import torch
from espnet2.tts.feats_extract.energy import Energy
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_forward(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=128,
hop_length=64,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
xs = torch.randn(2, 384)
if not use_token_averaged_energy:
es, elens = layer(xs, torch.LongTensor([384, 128]))
assert es.shape[1] == max(elens)
else:
ds = torch.LongTensor([[3, 3, 1], [3, 0, 0]]) // reduction_factor
dlens = torch.LongTensor([3, 1])
es, _ = layer(
xs, torch.LongTensor([384, 128]), durations=ds, durations_lengths=dlens
)
assert torch.isnan(es).sum() == 0
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_output_size(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=4,
hop_length=1,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
print(layer.output_size())
@pytest.mark.parametrize(
"use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_get_parameters(use_token_averaged_energy, reduction_factor):
layer = Energy(
n_fft=4,
hop_length=1,
fs="16k",
use_token_averaged_energy=use_token_averaged_energy,
reduction_factor=reduction_factor,
)
print(layer.get_parameters())
| 776 |
348 | {"nom":"Châtel-Moron","dpt":"Saône-et-Loire","inscrits":79,"abs":21,"votants":58,"blancs":5,"nuls":2,"exp":51,"res":[{"panneau":"1","voix":27},{"panneau":"2","voix":24}]} | 76 |
1,003 | <filename>Granite/assets/shaders/inc/helper_invocation.h
#ifndef HELPER_INVOCATION_H_
#define HELPER_INVOCATION_H_
#ifdef DEMOTE
#extension GL_EXT_demote_to_helper_invocation : require
#endif
bool is_helper_invocation()
{
#if defined(DEMOTE)
return helperInvocationEXT();
#else
return gl_HelperInvocation;
#endif
}
#endif | 129 |
2,494 | <gh_stars>1000+
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef prunixos_h___
#define prunixos_h___
/*
* If FD_SETSIZE is not defined on the command line, set the default value
* before include select.h
*/
/*
* Linux: FD_SETSIZE is defined in /usr/include/sys/select.h and should
* not be redefined.
*/
#if !defined(LINUX) && !defined(__GNU__) && !defined(__GLIBC__) \
&& !defined(DARWIN)
#ifndef FD_SETSIZE
#define FD_SETSIZE 4096
#endif
#endif
#include <unistd.h>
#include <stddef.h>
#include <sys/stat.h>
#include <dirent.h>
#include <errno.h>
#include "prio.h"
#include "prmem.h"
#include "prclist.h"
/*
* For select(), fd_set, and struct timeval.
*
* In The Single UNIX(R) Specification, Version 2,
* the header file for select() is <sys/time.h>.
* In Version 3, the header file for select() is
* changed to <sys/select.h>.
*
* fd_set is defined in <sys/types.h>. Usually
* <sys/time.h> includes <sys/types.h>, but on some
* older systems <sys/time.h> does not include
* <sys/types.h>, so we include it explicitly.
*/
#include <sys/time.h>
#include <sys/types.h>
#if defined(AIX) || defined(SYMBIAN)
#include <sys/select.h>
#endif
#ifndef SYMBIAN
#define HAVE_NETINET_TCP_H
#endif
#define _PR_HAVE_O_APPEND
#define PR_DIRECTORY_SEPARATOR '/'
#define PR_DIRECTORY_SEPARATOR_STR "/"
#define PR_PATH_SEPARATOR ':'
#define PR_PATH_SEPARATOR_STR ":"
typedef int (*FARPROC)();
/*
* intervals at which GLOBAL threads wakeup to check for pending interrupt
*/
#define _PR_INTERRUPT_CHECK_INTERVAL_SECS 5
extern PRIntervalTime intr_timeout_ticks;
/*
* The bit flags for the in_flags and out_flags fields
* of _PR_UnixPollDesc
*/
#ifdef _PR_USE_POLL
#define _PR_UNIX_POLL_READ POLLIN
#define _PR_UNIX_POLL_WRITE POLLOUT
#define _PR_UNIX_POLL_EXCEPT POLLPRI
#define _PR_UNIX_POLL_ERR POLLERR
#define _PR_UNIX_POLL_NVAL POLLNVAL
#define _PR_UNIX_POLL_HUP POLLHUP
#else /* _PR_USE_POLL */
#define _PR_UNIX_POLL_READ 0x1
#define _PR_UNIX_POLL_WRITE 0x2
#define _PR_UNIX_POLL_EXCEPT 0x4
#define _PR_UNIX_POLL_ERR 0x8
#define _PR_UNIX_POLL_NVAL 0x10
#define _PR_UNIX_POLL_HUP 0x20
#endif /* _PR_USE_POLL */
typedef struct _PRUnixPollDesc {
PRInt32 osfd;
PRInt16 in_flags;
PRInt16 out_flags;
} _PRUnixPollDesc;
typedef struct PRPollQueue {
PRCList links; /* for linking PRPollQueue's together */
_PRUnixPollDesc *pds; /* array of poll descriptors */
PRUintn npds; /* length of the array */
PRPackedBool on_ioq; /* is this on the async i/o work q? */
PRIntervalTime timeout; /* timeout, in ticks */
struct PRThread *thr;
} PRPollQueue;
#define _PR_POLLQUEUE_PTR(_qp) \
((PRPollQueue*) ((char*) (_qp) - offsetof(PRPollQueue,links)))
extern PRInt32 _PR_WaitForMultipleFDs(
_PRUnixPollDesc *unixpds,
PRInt32 pdcnt,
PRIntervalTime timeout);
extern void _PR_Unblock_IO_Wait(struct PRThread *thr);
#if defined(_PR_LOCAL_THREADS_ONLY) || defined(_PR_GLOBAL_THREADS_ONLY)
#define _MD_CHECK_FOR_EXIT()
#endif
extern fd_set _pr_md_read_set, _pr_md_write_set, _pr_md_exception_set;
extern PRInt16 _pr_md_read_cnt[], _pr_md_write_cnt[], _pr_md_exception_cnt[];
extern PRInt32 _pr_md_ioq_max_osfd;
extern PRUint32 _pr_md_ioq_timeout;
struct _MDFileDesc {
int osfd;
#if defined(LINUX) && defined(_PR_PTHREADS)
int tcp_nodelay; /* used by pt_LinuxSendFile */
#endif
};
struct _MDDir {
DIR *d;
};
struct _PRCPU;
extern void _MD_unix_init_running_cpu(struct _PRCPU *cpu);
/*
** Make a redzone at both ends of the stack segment. Disallow access
** to those pages of memory. It's ok if the mprotect call's don't
** work - it just means that we don't really have a functional
** redzone.
*/
#include <sys/mman.h>
#ifndef PROT_NONE
#define PROT_NONE 0x0
#endif
#if defined(DEBUG) && !defined(DARWIN)
#if !defined(SOLARIS)
#include <string.h> /* for memset() */
#define _MD_INIT_STACK(ts,REDZONE) \
PR_BEGIN_MACRO \
(void) mprotect((void*)ts->seg->vaddr, REDZONE, PROT_NONE); \
(void) mprotect((void*) ((char*)ts->seg->vaddr + REDZONE + ts->stackSize),\
REDZONE, PROT_NONE); \
/* \
** Fill stack memory with something that turns into an illegal \
** pointer value. This will sometimes find runtime references to \
** uninitialized pointers. We don't do this for solaris because we \
** can use purify instead. \
*/ \
if (_pr_debugStacks) { \
memset(ts->allocBase + REDZONE, 0xf7, ts->stackSize); \
} \
PR_END_MACRO
#else /* !SOLARIS */
#define _MD_INIT_STACK(ts,REDZONE) \
PR_BEGIN_MACRO \
(void) mprotect((void*)ts->seg->vaddr, REDZONE, PROT_NONE); \
(void) mprotect((void*) ((char*)ts->seg->vaddr + REDZONE + ts->stackSize),\
REDZONE, PROT_NONE); \
PR_END_MACRO
#endif /* !SOLARIS */
/*
* _MD_CLEAR_STACK
* Allow access to the redzone pages; the access was turned off in
* _MD_INIT_STACK.
*/
#define _MD_CLEAR_STACK(ts) \
PR_BEGIN_MACRO \
(void) mprotect((void*)ts->seg->vaddr, REDZONE, PROT_READ|PROT_WRITE);\
(void) mprotect((void*) ((char*)ts->seg->vaddr + REDZONE + ts->stackSize),\
REDZONE, PROT_READ|PROT_WRITE); \
PR_END_MACRO
#else /* DEBUG */
#define _MD_INIT_STACK(ts,REDZONE)
#define _MD_CLEAR_STACK(ts)
#endif /* DEBUG */
#if !defined(SOLARIS)
#define PR_SET_INTSOFF(newval)
#endif
/************************************************************************/
extern void _PR_UnixInit(void);
extern void _PR_UnixCleanup(void);
#define _MD_EARLY_CLEANUP _PR_UnixCleanup
/************************************************************************/
struct _MDProcess {
pid_t pid;
};
struct PRProcess;
struct PRProcessAttr;
/* Create a new process (fork() + exec()) */
#define _MD_CREATE_PROCESS _MD_CreateUnixProcess
extern struct PRProcess * _MD_CreateUnixProcess(
const char *path,
char *const *argv,
char *const *envp,
const struct PRProcessAttr *attr
);
#define _MD_DETACH_PROCESS _MD_DetachUnixProcess
extern PRStatus _MD_DetachUnixProcess(struct PRProcess *process);
/* Wait for a child process to terminate */
#define _MD_WAIT_PROCESS _MD_WaitUnixProcess
extern PRStatus _MD_WaitUnixProcess(struct PRProcess *process,
PRInt32 *exitCode);
#define _MD_KILL_PROCESS _MD_KillUnixProcess
extern PRStatus _MD_KillUnixProcess(struct PRProcess *process);
/************************************************************************/
extern void _MD_EnableClockInterrupts(void);
extern void _MD_DisableClockInterrupts(void);
#define _MD_START_INTERRUPTS _MD_StartInterrupts
#define _MD_STOP_INTERRUPTS _MD_StopInterrupts
#define _MD_DISABLE_CLOCK_INTERRUPTS _MD_DisableClockInterrupts
#define _MD_ENABLE_CLOCK_INTERRUPTS _MD_EnableClockInterrupts
#define _MD_BLOCK_CLOCK_INTERRUPTS _MD_BlockClockInterrupts
#define _MD_UNBLOCK_CLOCK_INTERRUPTS _MD_UnblockClockInterrupts
/************************************************************************/
extern void _MD_InitCPUS(void);
#define _MD_INIT_CPUS _MD_InitCPUS
extern void _MD_Wakeup_CPUs(void);
#define _MD_WAKEUP_CPUS _MD_Wakeup_CPUs
#define _MD_PAUSE_CPU _MD_PauseCPU
#if defined(_PR_LOCAL_THREADS_ONLY) || defined(_PR_GLOBAL_THREADS_ONLY)
#define _MD_CLEANUP_BEFORE_EXIT()
#endif
#ifndef IRIX
#define _MD_EXIT(status) _exit(status)
#endif
/************************************************************************/
#define _MD_GET_ENV getenv
#define _MD_PUT_ENV putenv
/************************************************************************/
#define _MD_INIT_FILEDESC(fd)
extern void _MD_MakeNonblock(PRFileDesc *fd);
#define _MD_MAKE_NONBLOCK _MD_MakeNonblock
/************************************************************************/
#if !defined(_PR_PTHREADS)
extern void _MD_InitSegs(void);
extern PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size,
void *vaddr);
extern void _MD_FreeSegment(PRSegment *seg);
#define _MD_INIT_SEGS _MD_InitSegs
#define _MD_ALLOC_SEGMENT _MD_AllocSegment
#define _MD_FREE_SEGMENT _MD_FreeSegment
#endif /* !defined(_PR_PTHREADS) */
/************************************************************************/
#ifdef _MD_INTERVAL_USE_GTOD
extern PRIntervalTime _PR_UNIX_GetInterval(void);
extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);
#define _MD_INTERVAL_INIT()
#define _MD_GET_INTERVAL _PR_UNIX_GetInterval
#define _MD_INTERVAL_PER_SEC _PR_UNIX_TicksPerSecond
#endif
#ifdef HAVE_CLOCK_MONOTONIC
extern PRIntervalTime _PR_UNIX_GetInterval2(void);
extern PRIntervalTime _PR_UNIX_TicksPerSecond2(void);
#define _MD_INTERVAL_INIT()
#define _MD_GET_INTERVAL _PR_UNIX_GetInterval2
#define _MD_INTERVAL_PER_SEC _PR_UNIX_TicksPerSecond2
#endif
#define _MD_INTERVAL_PER_MILLISEC() (_PR_MD_INTERVAL_PER_SEC() / 1000)
#define _MD_INTERVAL_PER_MICROSEC() (_PR_MD_INTERVAL_PER_SEC() / 1000000)
/************************************************************************/
#define _MD_ERRNO() (errno)
#define _MD_GET_SOCKET_ERROR() (errno)
/************************************************************************/
extern PRInt32 _MD_AvailableSocket(PRInt32 osfd);
extern void _MD_StartInterrupts(void);
extern void _MD_StopInterrupts(void);
extern void _MD_DisableClockInterrupts(void);
extern void _MD_BlockClockInterrupts(void);
extern void _MD_UnblockClockInterrupts(void);
extern void _MD_PauseCPU(PRIntervalTime timeout);
extern PRStatus _MD_open_dir(struct _MDDir *, const char *);
extern PRInt32 _MD_close_dir(struct _MDDir *);
extern char * _MD_read_dir(struct _MDDir *, PRIntn);
extern PRInt32 _MD_open(const char *name, PRIntn osflags, PRIntn mode);
extern PRInt32 _MD_delete(const char *name);
extern PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info);
extern PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info);
extern PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info);
extern PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info);
extern PRInt32 _MD_rename(const char *from, const char *to);
extern PRInt32 _MD_access(const char *name, PRAccessHow how);
extern PRInt32 _MD_mkdir(const char *name, PRIntn mode);
extern PRInt32 _MD_rmdir(const char *name);
extern PRInt32 _MD_accept_read(PRInt32 sock, PRInt32 *newSock,
PRNetAddr **raddr, void *buf, PRInt32 amount);
extern PRInt32 _PR_UnixSendFile(PRFileDesc *sd, PRSendFileData *sfd,
PRTransmitFileFlags flags, PRIntervalTime timeout);
extern PRStatus _MD_LockFile(PRInt32 osfd);
extern PRStatus _MD_TLockFile(PRInt32 osfd);
extern PRStatus _MD_UnlockFile(PRInt32 osfd);
#define _MD_OPEN_DIR(dir, name) _MD_open_dir(dir, name)
#define _MD_CLOSE_DIR(dir) _MD_close_dir(dir)
#define _MD_READ_DIR(dir, flags) _MD_read_dir(dir, flags)
#define _MD_OPEN(name, osflags, mode) _MD_open(name, osflags, mode)
#define _MD_OPEN_FILE(name, osflags, mode) _MD_open(name, osflags, mode)
extern PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount);
#define _MD_READ(fd,buf,amount) _MD_read(fd,buf,amount)
extern PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount);
#define _MD_WRITE(fd,buf,amount) _MD_write(fd,buf,amount)
#define _MD_DELETE(name) _MD_delete(name)
#define _MD_GETFILEINFO(fn, info) _MD_getfileinfo(fn, info)
#define _MD_GETFILEINFO64(fn, info) _MD_getfileinfo64(fn, info)
#define _MD_GETOPENFILEINFO(fd, info) _MD_getopenfileinfo(fd, info)
#define _MD_GETOPENFILEINFO64(fd, info) _MD_getopenfileinfo64(fd, info)
#define _MD_RENAME(from, to) _MD_rename(from, to)
#define _MD_ACCESS(name, how) _MD_access(name, how)
#define _MD_MKDIR(name, mode) _MD_mkdir(name, mode)
#define _MD_MAKE_DIR(name, mode) _MD_mkdir(name, mode)
#define _MD_RMDIR(name) _MD_rmdir(name)
#define _MD_ACCEPT_READ(sock, newSock, raddr, buf, amount) _MD_accept_read(sock, newSock, raddr, buf, amount)
#define _MD_LOCKFILE _MD_LockFile
#define _MD_TLOCKFILE _MD_TLockFile
#define _MD_UNLOCKFILE _MD_UnlockFile
extern PRInt32 _MD_socket(int af, int type, int flags);
#define _MD_SOCKET _MD_socket
extern PRInt32 _MD_connect(PRFileDesc *fd, const PRNetAddr *addr,
PRUint32 addrlen, PRIntervalTime timeout);
#define _MD_CONNECT _MD_connect
extern PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr, PRUint32 *addrlen,
PRIntervalTime timeout);
#define _MD_ACCEPT _MD_accept
extern PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen);
#define _MD_BIND _MD_bind
extern PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog);
#define _MD_LISTEN _MD_listen
extern PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how);
#define _MD_SHUTDOWN _MD_shutdown
extern PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount,
PRIntn flags, PRIntervalTime timeout);
#define _MD_RECV _MD_recv
extern PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
PRIntn flags, PRIntervalTime timeout);
#define _MD_SEND _MD_send
extern PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount,
PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen,
PRIntervalTime timeout);
#define _MD_RECVFROM _MD_recvfrom
extern PRInt32 _MD_sendto(PRFileDesc *fd, const void *buf, PRInt32 amount,
PRIntn flags, const PRNetAddr *addr, PRUint32 addrlen,
PRIntervalTime timeout);
#define _MD_SENDTO _MD_sendto
extern PRInt32 _MD_writev(PRFileDesc *fd, const struct PRIOVec *iov,
PRInt32 iov_size, PRIntervalTime timeout);
#define _MD_WRITEV _MD_writev
extern PRInt32 _MD_socketavailable(PRFileDesc *fd);
#define _MD_SOCKETAVAILABLE _MD_socketavailable
extern PRInt64 _MD_socketavailable64(PRFileDesc *fd);
#define _MD_SOCKETAVAILABLE64 _MD_socketavailable64
#define _MD_PIPEAVAILABLE _MD_socketavailable
extern PRInt32 _MD_pr_poll(PRPollDesc *pds, PRIntn npds,
PRIntervalTime timeout);
#define _MD_PR_POLL _MD_pr_poll
extern PRInt32 _MD_close(PRInt32 osfd);
#define _MD_CLOSE_FILE _MD_close
extern PRInt32 _MD_lseek(PRFileDesc*, PRInt32, PRSeekWhence);
#define _MD_LSEEK _MD_lseek
extern PRInt64 _MD_lseek64(PRFileDesc*, PRInt64, PRSeekWhence);
#define _MD_LSEEK64 _MD_lseek64
extern PRInt32 _MD_fsync(PRFileDesc *fd);
#define _MD_FSYNC _MD_fsync
extern PRInt32 _MD_socketpair(int af, int type, int flags, PRInt32 *osfd);
#define _MD_SOCKETPAIR _MD_socketpair
#define _MD_CLOSE_SOCKET _MD_close
#ifndef NO_NSPR_10_SUPPORT
#define _MD_STAT stat
#endif
extern PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr,
PRUint32 *addrlen);
#define _MD_GETPEERNAME _MD_getpeername
extern PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr,
PRUint32 *addrlen);
#define _MD_GETSOCKNAME _MD_getsockname
extern PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level,
PRInt32 optname, char* optval, PRInt32* optlen);
#define _MD_GETSOCKOPT _MD_getsockopt
extern PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level,
PRInt32 optname, const char* optval, PRInt32 optlen);
#define _MD_SETSOCKOPT _MD_setsockopt
extern PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable);
#define _MD_SET_FD_INHERITABLE _MD_set_fd_inheritable
extern void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported);
#define _MD_INIT_FD_INHERITABLE _MD_init_fd_inheritable
extern void _MD_query_fd_inheritable(PRFileDesc *fd);
#define _MD_QUERY_FD_INHERITABLE _MD_query_fd_inheritable
extern PRStatus _MD_gethostname(char *name, PRUint32 namelen);
#define _MD_GETHOSTNAME _MD_gethostname
extern PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen);
#define _MD_GETSYSINFO _MD_getsysinfo
extern int _MD_unix_get_nonblocking_connect_error(int osfd);
/* Memory-mapped files */
struct _MDFileMap {
PRIntn prot;
PRIntn flags;
PRBool isAnonFM; /* when true, PR_CloseFileMap() must close the related fd */
};
extern PRStatus _MD_CreateFileMap(struct PRFileMap *fmap, PRInt64 size);
#define _MD_CREATE_FILE_MAP _MD_CreateFileMap
#define _MD_GET_MEM_MAP_ALIGNMENT() PR_GetPageSize()
extern void * _MD_MemMap(struct PRFileMap *fmap, PRInt64 offset,
PRUint32 len);
#define _MD_MEM_MAP _MD_MemMap
extern PRStatus _MD_MemUnmap(void *addr, PRUint32 size);
#define _MD_MEM_UNMAP _MD_MemUnmap
extern PRStatus _MD_CloseFileMap(struct PRFileMap *fmap);
#define _MD_CLOSE_FILE_MAP _MD_CloseFileMap
extern PRStatus _MD_SyncMemMap(
PRFileDesc *fd,
void *addr,
PRUint32 len);
#define _MD_SYNC_MEM_MAP _MD_SyncMemMap
/*
* The standard (XPG4) gettimeofday() (from BSD) takes two arguments.
* On some SVR4 derivatives, gettimeofday() takes only one argument.
* The GETTIMEOFDAY macro is intended to hide this difference.
*/
#ifdef HAVE_SVID_GETTOD
#define GETTIMEOFDAY(tp) gettimeofday(tp)
#else
#define GETTIMEOFDAY(tp) gettimeofday((tp), NULL)
#endif
#if defined(_PR_PTHREADS) && !defined(_PR_POLL_AVAILABLE)
#define _PR_NEED_FAKE_POLL
#endif
#if defined(_PR_NEED_FAKE_POLL)
/*
* Some platforms don't have poll(), but our pthreads code calls poll().
* As a temporary measure, I implemented a fake poll() using select().
* Here are the struct and macro definitions copied from sys/poll.h
* on Solaris 2.5.
*/
struct pollfd {
int fd;
short events;
short revents;
};
/* poll events */
#define POLLIN 0x0001 /* fd is readable */
#define POLLPRI 0x0002 /* high priority info at fd */
#define POLLOUT 0x0004 /* fd is writeable (won't block) */
#define POLLRDNORM 0x0040 /* normal data is readable */
#define POLLWRNORM POLLOUT
#define POLLRDBAND 0x0080 /* out-of-band data is readable */
#define POLLWRBAND 0x0100 /* out-of-band data is writeable */
#define POLLNORM POLLRDNORM
#define POLLERR 0x0008 /* fd has error condition */
#define POLLHUP 0x0010 /* fd has been hung up on */
#define POLLNVAL 0x0020 /* invalid pollfd entry */
extern int poll(struct pollfd *, unsigned long, int);
#endif /* _PR_NEED_FAKE_POLL */
/*
** A vector of the UNIX I/O calls we use. These are here to smooth over
** the rough edges needed for large files. All of NSPR's implementations
** go through this vector using syntax of the form
** result = _md_iovector.xxx64(args);
*/
#if defined(SOLARIS2_5)
/*
** Special case: Solaris 2.5.1
** Solaris starts to have 64-bit file I/O in 2.6. We build on Solaris
** 2.5.1 so that we can use the same binaries on both Solaris 2.5.1 and
** 2.6. At run time, we detect whether 64-bit file I/O is available by
** looking up the 64-bit file function symbols in libc. At build time,
** we need to define the 64-bit file I/O datatypes that are compatible
** with their definitions on Solaris 2.6.
*/
typedef PRInt64 off64_t;
typedef PRUint64 ino64_t;
typedef PRInt64 blkcnt64_t;
struct stat64 {
dev_t st_dev;
long st_pad1[3];
ino64_t st_ino;
mode_t st_mode;
nlink_t st_nlink;
uid_t st_uid;
gid_t st_gid;
dev_t st_rdev;
    long st_pad2[2];
off64_t st_size;
timestruc_t st_atim;
timestruc_t st_mtim;
timestruc_t st_ctim;
long st_blksize;
blkcnt64_t st_blocks;
char st_fstype[_ST_FSTYPSZ];
long st_pad4[8];
};
typedef struct stat64 _MDStat64;
typedef off64_t _MDOff64_t;
#elif defined(_PR_HAVE_OFF64_T)
typedef struct stat64 _MDStat64;
typedef off64_t _MDOff64_t;
#elif defined(_PR_HAVE_LARGE_OFF_T)
typedef struct stat _MDStat64;
typedef off_t _MDOff64_t;
#elif defined(_PR_NO_LARGE_FILES)
typedef struct stat _MDStat64;
typedef PRInt64 _MDOff64_t;
#else
#error "I don't know yet"
#endif
typedef PRIntn (*_MD_Fstat64)(PRIntn osfd, _MDStat64 *buf);
typedef PRIntn (*_MD_Open64)(const char *path, int oflag, ...);
typedef PRIntn (*_MD_Stat64)(const char *path, _MDStat64 *buf);
typedef _MDOff64_t (*_MD_Lseek64)(PRIntn osfd, _MDOff64_t, PRIntn whence);
typedef void* (*_MD_Mmap64)(
void *addr, PRSize len, PRIntn prot, PRIntn flags,
PRIntn fildes, _MDOff64_t offset);
struct _MD_IOVector
{
_MD_Open64 _open64;
_MD_Mmap64 _mmap64;
_MD_Stat64 _stat64;
_MD_Fstat64 _fstat64;
_MD_Lseek64 _lseek64;
};
extern struct _MD_IOVector _md_iovector;
#endif /* prunixos_h___ */
| 8,544 |
1,327 | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef ONEAPI_DNNL_DNNL_THREADPOOL_HPP
#define ONEAPI_DNNL_DNNL_THREADPOOL_HPP
#include "oneapi/dnnl/dnnl.hpp"
#include "oneapi/dnnl/dnnl_threadpool.h"
#include "oneapi/dnnl/dnnl_threadpool_iface.hpp"
/// @addtogroup dnnl_api
/// @{
namespace dnnl {
/// @addtogroup dnnl_api_interop
/// @{
/// @addtogroup dnnl_api_threadpool_interop Threadpool interoperability API
/// API extensions to interact with the underlying Threadpool run-time.
/// @{
/// Threadpool interoperability namespace
namespace threadpool_interop {
/// Constructs an execution stream for the specified engine and threadpool.
///
/// @sa @ref dev_guide_threadpool
///
/// @param aengine Engine to create the stream on.
/// @param threadpool Pointer to an instance of a C++ class that implements
/// dnnl::threadpool_iface interface.
/// @returns An execution stream.
inline dnnl::stream make_stream(
const dnnl::engine &aengine, threadpool_iface *threadpool) {
dnnl_stream_t c_stream;
dnnl::error::wrap_c_api(dnnl_threadpool_interop_stream_create(
&c_stream, aengine.get(), threadpool),
"could not create stream");
return dnnl::stream(c_stream);
}
/// Returns the pointer to a threadpool that is used by an execution stream.
///
/// @sa @ref dev_guide_threadpool
///
/// @param astream An execution stream.
/// @returns Output pointer to an instance of a C++ class that implements
/// dnnl::threapdool_iface interface or NULL if the stream was created
/// without threadpool.
inline threadpool_iface *get_threadpool(const dnnl::stream &astream) {
void *tp;
dnnl::error::wrap_c_api(
dnnl_threadpool_interop_stream_get_threadpool(astream.get(), &tp),
"could not get stream threadpool");
return static_cast<threadpool_iface *>(tp);
}
/// @copydoc dnnl_sgemm_tp()
inline status sgemm(char transa, char transb, dnnl_dim_t M, dnnl_dim_t N,
dnnl_dim_t K, float alpha, const float *A, dnnl_dim_t lda,
const float *B, dnnl_dim_t ldb, float beta, float *C, dnnl_dim_t ldc,
threadpool_iface *tp) {
return static_cast<status>(dnnl_threadpool_interop_sgemm(
transa, transb, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc, tp));
}
/// @copydoc dnnl_gemm_u8s8s32_tp()
inline status gemm_u8s8s32(char transa, char transb, char offsetc, dnnl_dim_t M,
dnnl_dim_t N, dnnl_dim_t K, float alpha, const uint8_t *A,
dnnl_dim_t lda, uint8_t ao, const int8_t *B, dnnl_dim_t ldb, int8_t bo,
float beta, int32_t *C, dnnl_dim_t ldc, const int32_t *co,
threadpool_iface *tp) {
return static_cast<status>(
dnnl_threadpool_interop_gemm_u8s8s32(transa, transb, offsetc, M, N,
K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co, tp));
}
/// @copydoc dnnl_gemm_s8s8s32_tp()
inline status gemm_s8s8s32(char transa, char transb, char offsetc, dnnl_dim_t M,
dnnl_dim_t N, dnnl_dim_t K, float alpha, const int8_t *A,
dnnl_dim_t lda, int8_t ao, const int8_t *B, dnnl_dim_t ldb, int8_t bo,
float beta, int32_t *C, dnnl_dim_t ldc, const int32_t *co,
threadpool_iface *tp) {
return static_cast<status>(
dnnl_threadpool_interop_gemm_s8s8s32(transa, transb, offsetc, M, N,
K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co, tp));
}
} // namespace threadpool_interop
/// @} dnnl_api_threadpool_interop
/// @} dnnl_api_interop
} // namespace dnnl
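// Minimal usage sketch for the interop API above. `app_threadpool` stands for an
// application-provided object implementing dnnl::threadpool_interop::threadpool_iface;
// creating such an object is outside the scope of this header.
inline void threadpool_interop_usage_sketch(
        const dnnl::engine &eng,
        dnnl::threadpool_interop::threadpool_iface *app_threadpool) {
    // Attach the application threadpool to a new stream...
    dnnl::stream s = dnnl::threadpool_interop::make_stream(eng, app_threadpool);
    // ...and query it back from that stream later if needed.
    dnnl::threadpool_interop::threadpool_iface *tp
            = dnnl::threadpool_interop::get_threadpool(s);
    (void)tp; // same pointer that was passed to make_stream()
}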
/// @} dnnl_api
#endif
| 1,741 |
2,453 | <filename>XVim2/XcodeHeader/IDEKit/IDESwiftCompletionItem.h
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 30 2020 21:18:12).
//
// Copyright (C) 1997-2019 <NAME>.
//
#import <objc/NSObject.h>
#import <IDEKit/DVTTextCompletionItem-Protocol.h>
@class DVTRangeArray, DVTSourceCodeLanguage, NSArray, NSAttributedString, NSImage, NSString;
@interface IDESwiftCompletionItem : NSObject <DVTTextCompletionItem>
{
CDStruct_4c46f3f5 _obj;
struct _sourcekit_uid_s *_completionsKind;
BOOL _containsOptionalArgumentLabelOnly;
NSString *_briefDisplayText;
NSAttributedString *_attributedDisplayText;
NSAttributedString *_attributedDisplayType;
NSAttributedString *_attributedDisplaySignature;
DVTRangeArray *_nameRanges;
DVTRangeArray *_displayTextRanges;
DVTRangeArray *_briefDisplayTextRanges;
unsigned long long _options;
BOOL _isAnnotatedDescriptionParsed;
BOOL _isAnnotatedDescriptionParsedSuccessfully;
BOOL _isAnnotatedTypenameParsed;
BOOL _isAnnotatedTypenameParsedSuccessfully;
BOOL _isColorLiteral;
BOOL _isImageLiteral;
BOOL _notRecommended;
double _priority;
double _fuzzyMatchingScore;
DVTRangeArray *_fuzzyMatchingRanges;
NSString *_action;
long long _priorityBucket;
NSString *_name;
NSString *_displayText;
DVTSourceCodeLanguage *_language;
}
+ (id)_attributesForElementName:(id)arg1 options:(unsigned long long)arg2;
+ (id)plainTextAttributes;
- (void).cxx_destruct;
@property(readonly) DVTSourceCodeLanguage *language; // @synthesize language=_language;
@property(readonly, copy, nonatomic) NSString *name; // @synthesize name=_name;
@property(readonly) long long priorityBucket; // @synthesize priorityBucket=_priorityBucket;
@property(retain) DVTRangeArray *fuzzyMatchingRanges; // @synthesize fuzzyMatchingRanges=_fuzzyMatchingRanges;
@property double fuzzyMatchingScore; // @synthesize fuzzyMatchingScore=_fuzzyMatchingScore;
@property double priority; // @synthesize priority=_priority;
@property(readonly) NSString *usr;
- (unsigned long long)leadingCharactersToReplaceFromString:(id)arg1 location:(unsigned long long)arg2;
- (void)attributedInfoWithContext:(id)arg1 completionBlock:(CDUnknownBlockType)arg2;
@property(readonly) NSArray *associatedUSRs;
@property(readonly, copy) NSString *description;
@property(readonly, copy) NSString *action; // @synthesize action=_action;
@property(readonly, copy) NSString *accessibilityLabel;
@property(readonly) BOOL notRecommended; // @synthesize notRecommended=_notRecommended;
@property(readonly) unsigned long long priorityComparatorKind;
@property(readonly) NSImage *icon;
- (id)symbolKind;
@property(readonly, copy) NSAttributedString *descriptionText;
@property(readonly, copy) NSString *parentText;
@property(readonly, copy) NSString *completionText;
@property(readonly, copy) NSString *displaySignature;
@property(readonly, copy) NSAttributedString *attributedDisplaySignature;
@property(readonly, copy) DVTRangeArray *displayTextRanges;
@property(readonly, copy) DVTRangeArray *briefDisplayTextRanges;
@property(readonly, copy) DVTRangeArray *nameRanges;
@property(readonly, copy) NSString *briefDisplayText;
@property(readonly, copy) NSString *displayType;
@property(readonly, copy) NSAttributedString *attributedDisplayType;
- (BOOL)_parseAnnotatedTypename;
@property(readonly, copy) NSString *displayText; // @synthesize displayText=_displayText;
- (id)_attributedDisplayText;
- (BOOL)_parseAnnotatedDescription;
- (id)initWithSourceKitDictionary:(CDStruct_4c46f3f5)arg1 completionsKind:(struct _sourcekit_uid_s *)arg2 language:(id)arg3 options:(unsigned long long)arg4;
// Remaining properties
@property(readonly, copy) NSArray *additionalCompletions;
@property(readonly) int completionItemStyle;
@property(readonly, copy) NSString *debugDescription;
@property(readonly) unsigned long long hash;
@property(readonly) NSImage *highlightedStatusIcon;
@property(readonly) NSImage *statusIcon;
@property(readonly) Class superclass;
@end
| 1,357 |
527 | # Generated by Django 2.2.9 on 2020-01-22 04:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drfpasswordless', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='callbacktoken',
name='to_alias',
field=models.CharField(blank=True, max_length=254),
),
]
| 177 |
757 | <reponame>da3dsoul/superpaper
#!/usr/bin/env python3
"""
Superpaper is a cross-platform multi monitor wallpaper manager.
Written by <NAME>.
"""
#__all__ to be set at some point. Defines the APIs of the module(s).
__author__ = "<NAME>"
import sys
from superpaper.cli import cli_logic
from superpaper.tray import tray_loop
from superpaper.spanmode import set_spanmode
def main():
"""Runs tray applet if no command line arguments are passed, CLI parsing otherwise."""
set_spanmode()
if len(sys.argv) <= 1:
tray_loop()
else:
cli_logic()
if __name__ == "__main__":
main()
| 220 |
421 | <reponame>rswinkle/lowrez21
#ifndef CVECTOR_float_H
#define CVECTOR_float_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for float vector. */
typedef struct cvector_float
{
float* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_float;
extern size_t CVEC_float_SZ;
int cvec_float(cvector_float* vec, size_t size, size_t capacity);
int cvec_init_float(cvector_float* vec, float* vals, size_t num);
cvector_float* cvec_float_heap(size_t size, size_t capacity);
cvector_float* cvec_init_float_heap(float* vals, size_t num);
int cvec_copyc_float(void* dest, void* src);
int cvec_copy_float(cvector_float* dest, cvector_float* src);
int cvec_push_float(cvector_float* vec, float a);
float cvec_pop_float(cvector_float* vec);
int cvec_extend_float(cvector_float* vec, size_t num);
int cvec_insert_float(cvector_float* vec, size_t i, float a);
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num);
float cvec_replace_float(cvector_float* vec, size_t i, float a);
void cvec_erase_float(cvector_float* vec, size_t start, size_t end);
int cvec_reserve_float(cvector_float* vec, size_t size);
int cvec_set_cap_float(cvector_float* vec, size_t size);
void cvec_set_val_sz_float(cvector_float* vec, float val);
void cvec_set_val_cap_float(cvector_float* vec, float val);
float* cvec_back_float(cvector_float* vec);
void cvec_clear_float(cvector_float* vec);
void cvec_free_float_heap(void* vec);
void cvec_free_float(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_float_H */
#endif
#ifdef CVECTOR_float_IMPLEMENTATION
size_t CVEC_float_SZ = 50;
#define CVEC_float_ALLOCATOR(x) ((x+1) * 2)
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
cvector_float* cvec_float_heap(size_t size, size_t capacity)
{
cvector_float* vec;
if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
cvector_float* cvec_init_float_heap(float* vals, size_t num)
{
cvector_float* vec;
if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_float_SZ;
vec->size = num;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
return vec;
}
int cvec_float(cvector_float* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
int cvec_init_float(cvector_float* vec, float* vals, size_t num)
{
vec->capacity = num + CVEC_float_SZ;
vec->size = num;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
return 1;
}
int cvec_copyc_float(void* dest, void* src)
{
cvector_float* vec1 = (cvector_float*)dest;
cvector_float* vec2 = (cvector_float*)src;
vec1->a = NULL;
vec1->size = 0;
vec1->capacity = 0;
return cvec_copy_float(vec1, vec2);
}
int cvec_copy_float(cvector_float* dest, cvector_float* src)
{
float* tmp = NULL;
if (!(tmp = (float*)CVEC_REALLOC(dest->a, src->capacity*sizeof(float)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(float));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
int cvec_push_float(cvector_float* vec, float a)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
float cvec_pop_float(cvector_float* vec)
{
return vec->a[--vec->size];
}
float* cvec_back_float(cvector_float* vec)
{
return &vec->a[vec->size-1];
}
int cvec_extend_float(cvector_float* vec, size_t num)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_float_SZ;
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
int cvec_insert_float(cvector_float* vec, size_t i, float a)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
vec->a[i] = a;
} else {
tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_float_SZ;
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(float));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(float));
vec->size += num;
return 1;
}
float cvec_replace_float(cvector_float* vec, size_t i, float a)
{
float tmp = vec->a[i];
vec->a[i] = a;
return tmp;
}
void cvec_erase_float(cvector_float* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(float));
vec->size -= d;
}
int cvec_reserve_float(cvector_float* vec, size_t size)
{
float* tmp;
if (vec->capacity < size) {
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*(size+CVEC_float_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_float_SZ;
}
return 1;
}
int cvec_set_cap_float(cvector_float* vec, size_t size)
{
float* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
void cvec_set_val_sz_float(cvector_float* vec, float val)
{
size_t i;
for (i=0; i<vec->size; i++) {
vec->a[i] = val;
}
}
void cvec_set_val_cap_float(cvector_float* vec, float val)
{
size_t i;
for (i=0; i<vec->capacity; i++) {
vec->a[i] = val;
}
}
void cvec_clear_float(cvector_float* vec) { vec->size = 0; }
void cvec_free_float_heap(void* vec)
{
cvector_float* tmp = (cvector_float*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_float(void* vec)
{
cvector_float* tmp = (cvector_float*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
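/* Minimal usage sketch for the vector API declared above. The guard macro
 * CVECTOR_float_EXAMPLE is not part of the library; it is only an assumption
 * used here so the example is compiled on demand (define it together with
 * CVECTOR_float_IMPLEMENTATION in exactly one translation unit). */
#ifdef CVECTOR_float_EXAMPLE
#include <stdio.h>

int main(void)
{
	cvector_float vec;
	int i;
	size_t j;

	/* size 0, default capacity (CVEC_float_SZ) */
	cvec_float(&vec, 0, 0);

	for (i = 0; i < 5; i++) {
		cvec_push_float(&vec, i * 0.5f);
	}
	printf("size = %u, back = %f\n", (unsigned)vec.size, *cvec_back_float(&vec));

	cvec_pop_float(&vec);         /* drop the last element */
	cvec_erase_float(&vec, 0, 1); /* erase elements 0..1 inclusive */

	for (j = 0; j < vec.size; j++) {
		printf("a[%u] = %f\n", (unsigned)j, vec.a[j]);
	}

	cvec_free_float(&vec); /* frees the array only; the struct itself is on the stack */
	return 0;
}
#endif /* CVECTOR_float_EXAMPLE */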
| 3,644 |
303 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
import paramiko
from slugify import slugify as slugify_function
from django.contrib.auth.models import User
from django.http import HttpResponse
import json
import logging
import subprocess
import signal
import os
import traceback
import sys
from billiard import current_process
from django.utils.module_loading import import_by_path
from dns.resolver import Resolver
from dns.exception import DNSException
import requests
LOG = logging.getLogger(__name__)
# See http://docs.python.org/2/library/subprocess.html#popen-constructor if you
# have questions about this variable
DEFAULT_OUTPUT_BUFFER_SIZE = 16384
PROCESS_TIMEOUT = 4 * 60 * 60 # 4 horas
class AuthRequest(object):
@staticmethod
def _request(credential, action, url, **kw):
auth = (credential.user, credential.password,)
kw.update(**{'auth': auth} if credential.user else {})
return action(url, **kw)
@classmethod
def get(cls, credential, url, **kw):
return cls._request(credential, requests.get, url, **kw)
@classmethod
def post(cls, credential, url, **kw):
return cls._request(credential, requests.post, url, **kw)
@classmethod
def delete(cls, credential, url, **kw):
return cls._request(credential, requests.delete, url, **kw)
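# Illustrative usage sketch for AuthRequest. The URL is a placeholder and
# `credential` stands for any object exposing `user` and `password` attributes
# (e.g. a dbaas_credentials Credential); when `credential.user` is empty no
# HTTP basic auth is sent.
def _auth_request_usage_example(credential):
    response = AuthRequest.get(credential, "https://api.example.com/status", timeout=10)
    response.raise_for_status()
    return response.json()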
class AlarmException(Exception):
pass
class GetCredentialException(Exception):
pass
def alarm_handler(signum, frame):
raise AlarmException
def slugify(string):
return slugify_function(string, separator="_")
def make_db_random_password():
return User.objects.make_random_password()
def as_json(f):
def wrapper(request, *args, **kw):
output = f(request, *args, **kw)
if isinstance(output, HttpResponse):
return output
elif isinstance(output, basestring):
return HttpResponse(output, content_type="text/plain")
output = json.dumps(output, indent=4)
return HttpResponse(output, content_type="application/json")
return wrapper
def call_script(script_name, working_dir=None, split_lines=True, args=[],
envs={}, shell=False, python_bin=None):
args_copy = []
for arg in args:
        if isinstance(arg, basestring) and arg.startswith("PASSWORD"):
args_copy.append("xxx")
else:
args_copy.append(arg)
if not working_dir:
raise RuntimeError("Working dir is null")
logging_cmdline = "%s %s" % (
" ".join(["%s=%s" % (k, "xxx" if k.endswith("_PASSWORD") else v)
for (k, v) in envs.items()]),
" ".join([script_name] + args_copy),
)
return_code = None
output = []
try:
envs_with_path = {'PATH': os.getenv("PATH")}
if envs:
envs_with_path.update(envs)
# For future, if scripts have lot of output can be better
# create a temporary file for stdout. Scripts with lot
# of output and subprocess.PIPE
# can lock because this method not consume stdout without script finish
# execute.
if python_bin:
exec_script = [python_bin, working_dir + script_name] + args
else:
exec_script = [working_dir + script_name] + args
process = subprocess.Popen(
exec_script,
bufsize=DEFAULT_OUTPUT_BUFFER_SIZE,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # stderr and stdout are the same
close_fds=True,
cwd=working_dir,
env=envs_with_path,
universal_newlines=True,
shell=shell)
if not shell:
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(PROCESS_TIMEOUT)
try:
process.wait()
signal.alarm(0) # Disable the alarm
except AlarmException:
LOG.error("Timeout %s exceeded for process id %s" %
(PROCESS_TIMEOUT, process.pid))
process.kill()
output = process.stdout.read()
return_code = process.returncode
LOG.debug("output: {} \n return_code: {}".format(output, return_code))
if split_lines:
return return_code, [s.strip() for s in output.splitlines()]
else:
return return_code, output
except Exception:
        # if any error happens, log cmdline to error
LOG.error("Error running cmdline (exit code %s): %s",
return_code, logging_cmdline, exc_info=True)
if not return_code:
return_code = 1
return return_code, output
def check_dns(dns_to_check, dns_server, retries=90, wait=10, ip_to_check=None):
LOG.info("Cheking dns for {}...".format(dns_to_check))
resolver = Resolver()
resolver.nameservers = [dns_server]
LOG.info("CHECK DNS: dns to check {}".format(dns_to_check))
for attempt in range(0, retries):
LOG.info("Cheking dns for {}... attempt number {}...".format(
dns_to_check,
str(attempt + 1)
))
try:
answer = resolver.query(dns_to_check)
except DNSException:
pass
else:
ips = map(str, answer)
LOG.info("CHECK DNS: ips {}".format(ips))
LOG.info("CHECK DNS: ip to check {}".format(ip_to_check))
if ((ip_to_check and ip_to_check in ips) or
(not ip_to_check and ips)):
return True
sleep(wait)
return False
def scp_file(server, username, password, localpath, remotepath, option):
try:
transport = paramiko.Transport((server, 22))
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
if option == 'PUT':
sftp.put(localpath, remotepath)
elif option == 'GET':
sftp.get(remotepath, localpath)
else:
raise Exception("Invalid option...")
sftp.close()
transport.close()
return True
except Exception as e:
LOG.error("We caught an exception: %s ." % (e))
return False
def scp_put_file(server, username, password, localpath, remotepath):
return scp_file(server, username, password, localpath, remotepath, 'PUT')
def scp_get_file(server, username, password, localpath, remotepath):
return scp_file(server, username, password, localpath, remotepath, 'GET')
def get_remote_file_content(file_path, host):
output = {}
script = 'cat {}'.format(file_path)
output = host.ssh.run_script(script)
return output['stdout'][0].strip()
def get_host_os_description(host):
return get_remote_file_content('/etc/redhat-release', host)
def get_mongodb_key_file(infra):
instance = infra.instances.first()
return get_remote_file_content('/data/mongodb.key', instance.hostname)
def get_vm_name(prefix, sufix, vm_number):
return "{}-{:02d}-{}".format(prefix, vm_number, sufix)
def gen_infra_names(name, qt):
import time
import re
stamp = str(time.time()).replace(".", "")
name = re.compile("[^\w']|_").sub("", name.lower())
name = name[:10]
names = {
"infra": name + stamp,
"vms": [],
"name_prefix": name,
"name_stamp": stamp,
}
for x in range(qt):
vm_name = get_vm_name(name, stamp, x + 1)
names['vms'].append(vm_name)
return names
def get_credentials_in_any_env(credential_type, **kwargs):
from dbaas_credentials.models import Credential
if "environment" in kwargs:
kwargs.pop("environment")
return Credential.objects.filter(
integration_type__type=credential_type, **kwargs
)[0]
def get_credentials_for(environment, credential_type, **kwargs):
from dbaas_credentials.models import Credential
creds = Credential.objects.filter(
integration_type__type=credential_type, environments=environment,
**kwargs
)
if not creds.exists():
raise GetCredentialException(
("Credentials not found for type %s and env %s" %
(credential_type, environment)))
return creds[0]
def get_or_none_credentials_for(environment, credential_type, **kwargs):
try:
return get_credentials_for(environment, credential_type, **kwargs)
except (IndexError, GetCredentialException):
return None
def build_dict(**kwargs):
my_dict = {}
for name, value in kwargs.items():
my_dict[name] = value
LOG.info(my_dict)
return my_dict
def full_stack():
exc = sys.exc_info()[0]
stack = traceback.extract_stack()[:-1] # last one would be full_stack()
if exc is not None: # i.e. if an exception is present
del stack[-1] # remove call of full_stack, the printed exception
# will contain the caught exception caller instead
trc = 'Traceback (most recent call last):\n'
stackstr = trc + ''.join(traceback.format_list(stack))
if exc is not None:
stackstr += ' ' + traceback.format_exc().lstrip(trc)
return stackstr
def dict_to_string(dict):
    return ''.join('{}: {}'.format(key, val) for key, val in sorted(dict.items()))
def retry(ExceptionToCheck, tries=10, delay=3, backoff=2):
import time
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 0:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
print "%s, Retrying in %d seconds..." % (str(e), mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
lastException = e
raise lastException
return f_retry
# true decorator
return deco_retry
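# Illustrative usage sketch for the retry decorator above. The URL and the
# exception type are placeholders; the wrapped call is retried with
# exponentially growing delays and the last exception is re-raised once the
# attempts are exhausted.
def _retry_usage_example(url):
    @retry(requests.RequestException, tries=3, delay=1, backoff=2)
    def fetch_status():
        return requests.get(url, timeout=5).status_code
    return fetch_status()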
def build_context_script(contextdict, script):
from django.template import Context, Template
import re
regex = re.compile(r'[\r]')
script = regex.sub('', str(script))
context = Context(contextdict)
template = Template(script)
return template.render(context)
def get_worker_name():
p = current_process()
return p.initargs[1].split('@')[1]
def get_now():
import datetime
return datetime.datetime.now()
def get_dict_lines(my_dict={}):
final_str = ''
for key in my_dict.keys():
final_str += key.upper() + ': \n\n'
for line in my_dict[key]:
final_str += line
final_str += '\n'
return final_str
def get_replication_topology_instance(class_path):
topology_class = import_by_path(class_path)
return topology_class()
| 4,648 |
2,499 | <gh_stars>1000+
/*
* Copyright 2018 Qunar, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package qunar.tc.qmq.delay.config;
import com.google.common.base.Preconditions;
import qunar.tc.qmq.configuration.DynamicConfig;
import qunar.tc.qmq.constants.BrokerConstants;
import java.io.File;
import java.util.concurrent.TimeUnit;
/**
* @author xufeng.deng <EMAIL>
* @since 2018-07-11 9:54
*/
public class DefaultStoreConfiguration implements StoreConfiguration {
private static final String MESSAGE_LOG = "message_log";
private static final String SCHEDULE_LOG = "schedule_log";
private static final String DISPATCH_LOG = "dispatch_log";
private static final String CHECKPOINT = "checkpoint";
private static final String PER_MESSAGE_SEGMENT_FILE_SIZE = "per.segment.file.size";
private static final String PER_MESSAGE_SILE = "per.message.size";
private static final String LOAD_SEGMENT_DELAY_MIN = "load.segment.delay.min";
private static final String DISPATCH_LOG_KEEP_HOUR = "dispatch.log.keep.hour";
private static final String SCHEDULE_CLEAN_BEFORE_DISPATCH_HOUR = "schedule.clean.before.dispatch.hour";
private static final String LOAD_IN_ADVANCE_MIN = "load.in.advance.min";
private static final String LOAD_BLOCKING_EXIT_SEC = "load.blocking.exit.sec";
private static final String SEGMENT_SCALE_MIN = "segment.scale.minute";
private static final long MS_PER_HOUR = TimeUnit.HOURS.toMillis(1);
private static final long MS_PER_MINUTE = TimeUnit.MINUTES.toMillis(1);
private static final long MS_PER_SECONDS = TimeUnit.SECONDS.toMillis(1);
private static final int SEC_PER_MINUTE = (int) TimeUnit.MINUTES.toSeconds(1);
private static final int MESSAGE_SEGMENT_LOG_FILE_SIZE = 1024 * 1024 * 1024;
private static final int SINGLE_MESSAGE_LIMIT_SIZE = 50 * 1024 * 1024;
private static final int SEGMENT_LOAD_DELAY_TIMES_IN_MIN = 1;
private static final int DISPATCH_LOG_KEEP_TIMES_IN_HOUR = 3 * 24;
private static final int SCHEDULE_CLEAN_BEFORE_DISPATCH_TIMES_IN_HOUR = 24;
private static final int DEFAULT_SEGMENT_SCALE_IN_MIN = 60;
private volatile int segmentScale;
private volatile long inAdvanceLoadMillis;
private volatile long loadBlockingExitMillis;
private final DynamicConfig config;
public DefaultStoreConfiguration(DynamicConfig config) {
setup(config);
this.config = config;
}
private void setup(DynamicConfig config) {
int segmentScale = config.getInt(SEGMENT_SCALE_MIN, DEFAULT_SEGMENT_SCALE_IN_MIN);
validateArguments((segmentScale >= 5) && (segmentScale <= 60), "segment scale in [5, 60] min");
int inAdvanceLoadMin = config.getInt(LOAD_IN_ADVANCE_MIN, (segmentScale + 1) / 2);
validateArguments((inAdvanceLoadMin >= 1) && (inAdvanceLoadMin <= ((segmentScale + 1) / 2)), "load in advance time in [1, segmentScale/2] min");
int loadBlockingExitSec = config.getInt(LOAD_BLOCKING_EXIT_SEC, SEC_PER_MINUTE * (inAdvanceLoadMin + 2) / 3);
int inAdvanceLoadSec = inAdvanceLoadMin * SEC_PER_MINUTE;
int loadBlockExitFront = inAdvanceLoadSec / 3;
int loadBlockExitRear = inAdvanceLoadSec / 2;
validateArguments((loadBlockingExitSec >= loadBlockExitFront) && (loadBlockingExitSec <= loadBlockExitRear), "load exit block exit time in [inAdvanceLoadMin/3,inAdvanceLoadMin/2] sec. note.");
this.segmentScale = segmentScale;
this.inAdvanceLoadMillis = inAdvanceLoadMin * MS_PER_MINUTE;
this.loadBlockingExitMillis = loadBlockingExitSec * MS_PER_SECONDS;
}
private void validateArguments(boolean expression, String errorMessage) {
Preconditions.checkArgument(expression, errorMessage);
}
@Override
public DynamicConfig getConfig() {
return config;
}
@Override
public String getMessageLogStorePath() {
return buildStorePath(MESSAGE_LOG);
}
@Override
public String getScheduleLogStorePath() {
return buildStorePath(SCHEDULE_LOG);
}
@Override
public String getDispatchLogStorePath() {
return buildStorePath(DISPATCH_LOG);
}
@Override
public int getMessageLogSegmentFileSize() {
return config.getInt(PER_MESSAGE_SEGMENT_FILE_SIZE, MESSAGE_SEGMENT_LOG_FILE_SIZE);
}
@Override
public int getSingleMessageLimitSize() {
return config.getInt(PER_MESSAGE_SILE, SINGLE_MESSAGE_LIMIT_SIZE);
}
@Override
public String getCheckpointStorePath() {
return buildStorePath(CHECKPOINT);
}
@Override
public long getMessageLogRetentionMs() {
final int retentionHours = config.getInt(BrokerConstants.MESSAGE_LOG_RETENTION_HOURS, BrokerConstants.DEFAULT_MESSAGE_LOG_RETENTION_HOURS);
return retentionHours * MS_PER_HOUR;
}
@Override
public long getDispatchLogKeepTime() {
return config.getInt(DISPATCH_LOG_KEEP_HOUR, DISPATCH_LOG_KEEP_TIMES_IN_HOUR) * MS_PER_HOUR;
}
@Override
public long getCheckCleanTimeBeforeDispatch() {
return config.getInt(SCHEDULE_CLEAN_BEFORE_DISPATCH_HOUR, SCHEDULE_CLEAN_BEFORE_DISPATCH_TIMES_IN_HOUR) * MS_PER_HOUR;
}
@Override
public long getLogCleanerIntervalSeconds() {
return config.getInt(BrokerConstants.LOG_RETENTION_CHECK_INTERVAL_SECONDS, BrokerConstants.DEFAULT_LOG_RETENTION_CHECK_INTERVAL_SECONDS);
}
@Override
public String getScheduleOffsetCheckpointPath() {
return buildStorePath(CHECKPOINT);
}
@Override
public long getLoadInAdvanceTimesInMillis() {
return inAdvanceLoadMillis;
}
@Override
public long getLoadBlockingExitTimesInMillis() {
return loadBlockingExitMillis;
}
@Override
public boolean isDeleteExpiredLogsEnable() {
return config.getBoolean(BrokerConstants.ENABLE_DELETE_EXPIRED_LOGS, false);
}
@Override
public int getSegmentScale() {
return segmentScale;
}
@Override
public int getLoadSegmentDelayMinutes() {
return config.getInt(LOAD_SEGMENT_DELAY_MIN, SEGMENT_LOAD_DELAY_TIMES_IN_MIN);
}
private String buildStorePath(final String name) {
final String root = config.getString(BrokerConstants.STORE_ROOT, BrokerConstants.LOG_STORE_ROOT);
return new File(root, name).getAbsolutePath();
}
}
| 2,564 |
5,102 | package com.nepxion.discovery.common.entity;
/**
* <p>Title: Nepxion Discovery</p>
* <p>Description: Nepxion Discovery</p>
* <p>Copyright: Copyright (c) 2017-2050</p>
* <p>Company: Nepxion</p>
* @author <NAME>
* @version 1.0
*/
public class WeightEntity extends MapWeightEntity {
private static final long serialVersionUID = 4242297554671632704L;
private String consumerServiceName;
private String providerServiceName;
private WeightType type;
public String getConsumerServiceName() {
return consumerServiceName;
}
public void setConsumerServiceName(String consumerServiceName) {
this.consumerServiceName = consumerServiceName;
}
public String getProviderServiceName() {
return providerServiceName;
}
public void setProviderServiceName(String providerServiceName) {
this.providerServiceName = providerServiceName;
}
public WeightType getType() {
return type;
}
public void setType(WeightType type) {
this.type = type;
}
} | 358 |
335 | <gh_stars>100-1000
{
"word": "Replica",
"definitions": [
"An exact copy or model of something, especially one on a smaller scale.",
"A duplicate of an original artistic work."
],
"parts-of-speech": "Noun"
} | 93 |
392 | <filename>jetbrick-template/src/main/java/jetbrick/template/parser/ast/AstInvokeFieldStatic.java
/**
* Copyright 2013-2019 <NAME>, Shanghai, China. All rights reserved.
*
* Author: <NAME>
* Email: <EMAIL>
* WebURL: https://github.com/subchen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrick.template.parser.ast;
import jetbrick.bean.FieldInfo;
import jetbrick.bean.KlassInfo;
import jetbrick.template.Errors;
import jetbrick.template.JetSecurityManager;
import jetbrick.template.resolver.SignatureUtils;
import jetbrick.template.runtime.InterpretContext;
import jetbrick.template.runtime.InterpretException;
public final class AstInvokeFieldStatic extends AstExpression {
private final Class<?> cls;
private final String name;
private FieldInfo field;
private boolean unsafe;
public AstInvokeFieldStatic(Class<?> cls, String name, Position position) {
super(position);
this.cls = cls;
this.name = name;
this.field = null;
this.unsafe = true;
}
@Override
public Object execute(InterpretContext ctx) throws InterpretException {
if (field == null) {
if ("class".equals(name)) {
return cls;
}
field = KlassInfo.create(cls).getDeclaredField(name);
if (field == null) {
String signature = SignatureUtils.getFieldSignature(cls, name);
throw new InterpretException(Errors.STATIC_FIELD_NOT_FOUND, signature).set(position);
}
}
if (unsafe) {
JetSecurityManager securityManager = ctx.getSecurityManager();
if (securityManager != null) {
try {
securityManager.checkAccess(field.getField());
} catch (RuntimeException e) {
throw new InterpretException(e).set(position);
}
}
unsafe = false;
}
try {
return field.get(null);
} catch (Exception e) {
throw new InterpretException(Errors.STATIC_FIELD_GET_ERROR, cls.getName(), name).cause(e).set(position);
}
}
}
| 1,048 |
1,760 | <filename>Implementations/content/numerical/Polynomials/ChirpZ.h
/**
* Description: Modified Chirp-Z. For $x\neq 0$, computes
* $A(x^0), A(x^1), \ldots, A(x^{n-1})$.
* Time: FFT(2|A|+n)
* Source: https://cp-algorithms.com/algebra/polynomial.html
* https://github.com/e-maxx-eng/e-maxx-eng-aux/blob/master/src/polynomial.cpp
* https://codeforces.com/blog/entry/82165
* Verification:
* https://codeforces.com/contest/1054/problem/H
*/
#include "FFT.h"
poly chirpz(poly A, T x, int n) {
auto gen = [&](T c) -> poly {
poly p(sz(A)+n); p[0] = 1;
T t = 1; FOR(i,1,sz(p)) p[i] = p[i-1]*t, t *= c;
return p;
}; // uses ij = -C(i,2)+C(i+j,2)-C(j,2)
poly ip = gen(1/x); F0R(i,sz(A)) A[i] *= ip[i];
reverse(all(A)); poly res = mul(A,gen(x));
res = poly(sz(A)-1+all(res)); F0R(i,n) res[i] *= ip[i];
return res;
}
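// Usage sketch (illustrative, not part of the original source; assumes the poly/T
// aliases and the mul helper pulled in via FFT.h):
// poly A = {1, 2, 3}; // A(y) = 1 + 2y + 3y^2
// poly vals = chirpz(A, 5, 4); // evaluates {A(1), A(5), A(25), A(125)}
// The identity noted inside chirpz, i*j = C(i+j,2) - C(i,2) - C(j,2), turns all n
// evaluations into a single convolution, which is where the FFT(2|A|+n) bound comes from.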
// poly chirpzNaive(poly A, T x, int n) {
// poly res; F0R(i,n) res.pb(eval(A,pow(x,i)));
// return res; } | 487 |
6,180 | // This file is a stub header file of nccl for Read the Docs.
#ifndef INCLUDE_GUARD_STUB_CUPY_NCCL_H
#define INCLUDE_GUARD_STUB_CUPY_NCCL_H
#define NCCL_MAJOR 0
#define NCCL_MINOR 0
#define NCCL_PATCH 0
extern "C" {
typedef struct ncclComm* ncclComm_t;
enum {
NCCL_UNIQUE_ID_BYTES = 128
};
typedef struct {
char internal[NCCL_UNIQUE_ID_BYTES];
} ncclUniqueId;
typedef enum {
ncclSuccess
} ncclResult_t;
typedef enum {} ncclRedOp_t;
typedef enum {
ncclChar = 0,
ncclInt = 1,
ncclHalf = 2,
ncclFloat = 3,
ncclDouble = 4,
ncclInt64 = 5,
ncclUint64 = 6,
nccl_NUM_TYPES = 7
} ncclDataType_t;
const char* ncclGetErrorString(...) {
return "";
}
ncclResult_t ncclCommGetAsyncError(...) {
return ncclSuccess;
}
ncclResult_t ncclGetUniqueId(...) {
return ncclSuccess;
}
ncclResult_t ncclCommInitRank(...) {
return ncclSuccess;
}
ncclResult_t ncclCommInitAll(...) {
return ncclSuccess;
}
ncclResult_t ncclGroupStart(...) {
return ncclSuccess;
}
ncclResult_t ncclGroupEnd(...) {
return ncclSuccess;
}
void ncclCommDestroy(...) {
}
void ncclCommAbort(...) {
}
ncclResult_t ncclCommCuDevice(...) {
return ncclSuccess;
}
ncclResult_t ncclCommUserRank(...) {
return ncclSuccess;
}
ncclResult_t ncclCommCount(...) {
return ncclSuccess;
}
ncclResult_t ncclAllReduce(...) {
return ncclSuccess;
}
ncclResult_t ncclReduce(...) {
return ncclSuccess;
}
ncclResult_t ncclBroadcast(...) {
return ncclSuccess;
}
ncclResult_t ncclBcast(...) {
return ncclSuccess;
}
ncclResult_t ncclReduceScatter(...) {
return ncclSuccess;
}
ncclResult_t ncclAllGather(...) {
return ncclSuccess;
}
ncclResult_t ncclSend(...) {
return ncclSuccess;
}
ncclResult_t ncclRecv(...) {
return ncclSuccess;
}
typedef struct CUstream_st *cudaStream_t;
} // extern "C"
#endif // INCLUDE_GUARD_STUB_CUPY_NCCL_H
| 963 |
371 | /*
Copyright (c) 2014 ideawu. All rights reserved.
Use of this source code is governed by a license that can be
found in the LICENSE file.
@author: ideawu
@website: http://www.cocoaui.com/
*/
#import <Foundation/Foundation.h>
@class IView;
/*
 Main features of CSS (Cascading Style Sheets):
 1. Cascade (Selector)
 2. Style
 3. Inherit
 Others:
 1. Group Style (e.g. a, b, c{})
*/
// @see http://www.w3.org/TR/2011/REC-CSS2-20110607/selector.html
@interface IStyleSheet : NSObject
@property (nonatomic, readonly) NSArray *rules;
+ (IStyleSheet *)builtin;
- (void)mergeWithStyleSheet:(IStyleSheet *)sheet;
- (void)parseCss:(NSString *)css;
- (void)parseCss:(NSString *)css baseUrl:(NSString *)baseUrl;
@end
| 316 |
3,428 | <filename>lib/node_modules/@stdlib/datasets/spam-assassin/data/easy-ham-2/00168.716b0a26aa6ae53b72d9d3c297390424.json
{"id":"00168","group":"easy-ham-2","checksum":{"type":"MD5","value":"716b0a26aa6ae53b72d9d3c297390424"},"text":"From <EMAIL> Wed Jul 31 11:19:10 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 9F001440C8\n\tfor <jm@localhost>; Wed, 31 Jul 2002 06:19:07 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Wed, 31 Jul 2002 11:19:07 +0100 (IST)\nReceived: from lugh.tuatha.org (<EMAIL> [172.16.17.325]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g6VAEv215553 for\n <<EMAIL>>; Wed, 31 Jul 2002 11:14:57 +0100\nReceived: from lugh (root@localhost [127.0.0.1]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id LAA28005; Wed, 31 Jul 2002 11:13:15 +0100\nReceived: from mail.aculink.net (65-173-158-7.aculink.net [65.173.158.7])\n by lugh.tuatha.org (8.9.3/8.9.3) with ESMTP id LAA27973 for\n <<EMAIL>>; Wed, 31 Jul 2002 11:13:08 +0100\nX-Authentication-Warning: lugh.tuatha.org: Host 65-173-158-7.aculink.net\n [172.16.17.32] claimed to be mail.aculink.net\nReceived: from cdm01.deedsmiscentral.net ([204.118.157.88]) by\n mail.aculink.net (Merak 4.4.2) with ESMTP id EDA37003 for <<EMAIL>>;\n Wed, 31 Jul 2002 04:20:20 -0600\nReceived: (from redneck@localhost) by cdm01.deedsmiscentral.net\n (8.11.6/8.11.6/ver) id g6VAhuq10699; Wed, 31 Jul 2002 04:43:56 -0600\nDate: Wed, 31 Jul 2002 04:43:56 -0600\nMessage-Id: <<EMAIL>>\nX-No-Archive: yes\nFrom: SoloCDM <<EMAIL>>\nReply-To: <<EMAIL>>, <<EMAIL>>\nTo: ILUG (Request) <<EMAIL>>\nSubject: [ILUG] C++ and C Mailing Lists for Beginners and Advanced\nSender: ilug-<EMAIL>\nErrors-To: ilug-admin@<EMAIL>.ie\nX-Mailman-Version: 1.1\nPrecedence: bulk\nList-Id: Irish Linux Users' Group <ilug.linux.ie>\nX-Beenthere: <EMAIL>\n\nAre there any mailing lists (non-newsgroups) for C++ and C Beginners\nand Advanced programmers?\n\nLinks are welcomed!\n\n-- \nNote: When you reply to this message, please include the mailing\n list/newsgroup address and my email address in To:.\n\n*********************************************************************\nSigned,\nSoloCDM\n\n-- \nIrish Linux Users' Group: <EMAIL>\nhttp://www.linux.ie/mailman/listinfo/ilug for (un)subscription information.\nList maintainer: <EMAIL>\n\n\n"} | 1,070 |
335 | {
"word": "Radiochemical",
"definitions": [
"Relating to the branch of chemistry concerned with radioactive substances."
],
"parts-of-speech": "Adjective"
} | 66 |
947 | /*-------------------------------------------------------------------------
*
* oxid.h
 * Declarations for transaction management routines.
*
* Copyright (c) 2021-2022, Oriole DB Inc.
*
* IDENTIFICATION
* contrib/orioledb/include/transam/oxid.h
*
*-------------------------------------------------------------------------
*/
#ifndef __OXID_H__
#define __OXID_H__
typedef struct
{
pg_atomic_uint64 nextXid;
pg_atomic_uint64 lastXidWhenUpdatedGlobalXmin;
pg_atomic_uint64 runXmin;
pg_atomic_uint64 globalXmin;
pg_atomic_uint64 writeInProgressXmin;
pg_atomic_uint64 writtenXmin;
pg_atomic_uint64 checkpointRetainXmin;
pg_atomic_uint64 checkpointRetainXmax;
pg_atomic_uint64 cleanedXmin;
pg_atomic_uint64 cleanedCheckpointXmin;
pg_atomic_uint64 cleanedCheckpointXmax;
slock_t xminMutex;
int xidMapTrancheId;
LWLock xidMapWriteLock;
} XidMeta;
extern XidMeta *xid_meta;
extern Size oxid_shmem_needs(void);
extern void oxid_init_shmem(Pointer ptr, bool found);
extern bool wait_for_oxid(OXid oxid);
extern void oxid_notify(OXid oxid);
extern void advance_oxids(OXid new_xid);
extern OXid get_current_oxid(void);
extern void set_oxid_csn(OXid oxid, CommitSeqNo csn);
extern LocalTransactionId get_current_local_xid(void);
extern void set_current_oxid(OXid oxid);
extern void reset_current_oxid(void);
extern OXid get_current_oxid_if_any(void);
extern void current_oxid_precommit(void);
extern void current_oxid_commit(CommitSeqNo csn);
extern void current_oxid_abort(void);
extern CommitSeqNo oxid_get_csn(OXid oxid);
extern void fill_current_oxid_csn(OXid *oxid, CommitSeqNo *csn);
extern int oxid_get_procnum(OXid oxid);
extern bool xid_is_finished(OXid xid);
extern bool xid_is_finished_for_everybody(OXid xid);
extern void fsync_xidmap_range(OXid xmin, OXid xmax, uint32 wait_event_info);
#endif /* __OXID_H__ */
| 687 |
875 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sqoop.mapreduce.hcat;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hive.hcatalog.mapreduce.HCatSplit;
/**
* An abstraction of a combined HCatSplits.
*
*/
public class SqoopHCatInputSplit extends InputSplit implements Writable {
private List<HCatSplit> hCatSplits;
private String[] hCatLocations;
private long inputLength;
public SqoopHCatInputSplit() {
}
public SqoopHCatInputSplit(List<InputSplit> splits) {
hCatSplits = new ArrayList<HCatSplit>();
Set<String> locations = new HashSet<String>();
for (int i = 0; i < splits.size(); ++i) {
HCatSplit hsSplit = (HCatSplit) splits.get(i);
hCatSplits.add(hsSplit);
this.inputLength += hsSplit.getLength();
locations.addAll(Arrays.asList(hsSplit.getLocations()));
}
this.hCatLocations = locations.toArray(new String[0]);
}
public int length() {
return this.hCatSplits.size();
}
public HCatSplit get(int index) {
return this.hCatSplits.get(index);
}
@Override
public long getLength() throws IOException, InterruptedException {
if (this.inputLength == 0L) {
for (HCatSplit split : this.hCatSplits) {
this.inputLength += split.getLength();
}
}
return this.inputLength;
}
@Override
public String[] getLocations() throws IOException, InterruptedException {
if (this.hCatLocations == null) {
Set<String> locations = new HashSet<String>();
for (HCatSplit split : this.hCatSplits) {
locations.addAll(Arrays.asList(split.getLocations()));
}
this.hCatLocations = locations.toArray(new String[0]);
}
return this.hCatLocations;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(this.inputLength);
out.writeInt(this.hCatSplits.size());
for (HCatSplit split : this.hCatSplits) {
split.write(out);
}
}
@Override
public void readFields(DataInput in) throws IOException {
this.inputLength = in.readLong();
int size = in.readInt();
this.hCatSplits = new ArrayList<HCatSplit>(size);
for (int i = 0; i < size; ++i) {
HCatSplit hs = new HCatSplit();
hs.readFields(in);
hCatSplits.add(hs);
}
}
}
| 1,144 |
683 | /*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.jetty.v11_0;
import com.google.auto.service.AutoService;
import io.opentelemetry.javaagent.extension.instrumentation.InstrumentationModule;
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
import io.opentelemetry.javaagent.instrumentation.jetty.common.JettyHandlerInstrumentation;
import java.util.Collections;
import java.util.List;
@AutoService(InstrumentationModule.class)
public class Jetty11InstrumentationModule extends InstrumentationModule {
public Jetty11InstrumentationModule() {
super("jetty", "jetty-11.0");
}
@Override
public List<TypeInstrumentation> typeInstrumentations() {
return Collections.singletonList(
new JettyHandlerInstrumentation(
"jakarta.servlet",
Jetty11InstrumentationModule.class.getPackage().getName() + ".Jetty11HandlerAdvice"));
}
}
| 329 |
2,420 | from context import Instagram # pylint: disable=no-name-in-module
instagram = Instagram()
instagram.with_credentials('', '', '/pathtofolder')
instagram.login()
# Get media likes by shortcode
likes = instagram.get_media_likes_by_code('BG3Iz-No1IZ', 100)
print("Result count: " + str(len(likes['accounts'])))
for like in likes['accounts']:
print(like)
# ...
| 132 |
1,259 | <filename>src/organisations/migrations/0014_organisation_stop_serving_flags.py<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-26 15:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisations', '0013_organisation_alerted_over_plan_limit'),
]
operations = [
migrations.AddField(
model_name='organisation',
name='stop_serving_flags',
field=models.BooleanField(default=False, help_text='Enable this to cease serving flags for this organisation.'),
),
]
| 255 |
2,868 | #include "ftxui/component/captured_mouse.hpp" // for ftxui
#include "ftxui/component/component.hpp" // for Checkbox, Vertical
#include "ftxui/component/screen_interactive.hpp" // for ScreenInteractive
using namespace ftxui;
int main(int argc, const char* argv[]) {
bool build_examples_state = false;
bool build_tests_state = false;
bool use_webassembly_state = true;
auto component = Container::Vertical({
Checkbox("Build examples", &build_examples_state),
Checkbox("Build tests", &build_tests_state),
Checkbox("Use WebAssembly", &use_webassembly_state),
});
auto screen = ScreenInteractive::TerminalOutput();
screen.Loop(component);
return 0;
}
// Copyright 2020 <NAME>. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
| 283 |
983 | package org.xm.similarity.word.pinyin;
import org.xm.similarity.ISimilarity;
import org.xm.similarity.util.EditDistance;
import org.xm.similarity.util.MathUtil;
import java.util.Set;
/**
 * Computes the similarity of two words from their pinyin; the degree of similarity is expressed by the edit distance between the pinyin strings.
*
* @author xuming
*/
public class PinyinSimilarity implements ISimilarity {
@Override
public double getSimilarity(String word1, String word2) {
double max = 0.0;
Set<String> pinyinSet1 = PinyinDictionary.getInstance().getPinyin(word1);
Set<String> pinyinSet2 = PinyinDictionary.getInstance().getPinyin(word2);
for (String pinyin1 : pinyinSet1) {
for (String pinyin2 : pinyinSet2) {
double distance = new EditDistance().getEditDistance(pinyin1, pinyin2);
double similarity = 1 - distance / (MathUtil.max(pinyin1.length(), pinyin2.length()));
max = (max > similarity) ? max : similarity;
if (max == 1.0) {
return max;
}
}
}
return max;
}
}
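// Hypothetical usage sketch (not part of the original source); the exact score
// depends on the pinyin dictionary bundled with this library.
class PinyinSimilarityUsageExample {
    public static void main(String[] args) {
        ISimilarity similarity = new PinyinSimilarity();
        // Words whose pinyin strings differ by only a small edit distance score close to 1.0.
        double score = similarity.getSimilarity("中国", "中过");
        System.out.println("pinyin similarity = " + score);
    }
}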
| 535 |
764 | <filename>eos-token/[email protected]
{
"symbol": "EDNA",
"account_name": "ednazztokens",
"overview": {
"en": "EDNA is a Blockchain-Bio Tech company who dedicated to insuring Human DNA.And the knowledge about that DNA belongs to humans, not governments or mega-corps. The company operates a secure sequencing service at the direction of and in service to the EDNA Community.EDNA operates using digital stored units of value called EDNA's They are crypto-currency, it is regards as service tokens that can be exchanged for DNA sequencing services . Holding these tokens also entitles a person to membership, voting & participation rights in the EDNA Community .DAC (Decentralized Autonomous Community), which governs the activities of the EDNA sequencing organization.The ultimate aim of EDNA is to end human suffering caused by genetic disease, improve life longevity and to insure the future of DNA and the future affected by DNA is governed by the Humans ."
},
"website": "https://edna.life"
} | 266 |
4,879 | import os
import sys
def total_virtual_memory():
if sys.platform.startswith("linux"):
return os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")
else:
return 0
| 82 |
190,993 | <reponame>yage99/tensorflow
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdint.h>
#include <initializer_list>
#include <iostream>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class UnpackOpModel : public SingleOpModel {
public:
UnpackOpModel(const TensorData& input, int axis) {
if (axis < 0) {
axis += input.shape.size();
}
const int num_outputs = input.shape[axis];
input_ = AddInput(input);
for (int i = 0; i < num_outputs; ++i) {
outputs_.push_back(AddOutput(input.type));
}
SetBuiltinOp(BuiltinOperator_UNPACK, BuiltinOptions_UnpackOptions,
CreateUnpackOptions(builder_, num_outputs, axis).Union());
BuildInterpreter({GetShape(input_)});
}
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
std::vector<std::vector<T>> GetOutputDatas() {
std::vector<std::vector<T>> output_datas;
for (const int output : outputs_) {
std::cerr << "the output is " << output << std::endl;
output_datas.push_back(ExtractVector<T>(output));
}
return output_datas;
}
std::vector<std::vector<int>> GetOutputShapes() {
std::vector<std::vector<int>> output_shapes;
for (const int output : outputs_) {
output_shapes.push_back(GetTensorShape(output));
}
return output_shapes;
}
private:
int input_;
std::vector<int> outputs_;
};
template <typename T>
void Check(int axis, const std::initializer_list<int>& input_shape,
const std::initializer_list<T>& input_data,
const std::vector<std::vector<int>>& exp_output_shape,
const std::vector<std::vector<T>>& exp_output_data,
const TensorType& type = TensorType_FLOAT32) {
UnpackOpModel<T> m({type, input_shape}, axis);
m.SetInput(input_data);
m.Invoke();
// Check outputs shapes.
EXPECT_THAT(m.GetOutputShapes(), ElementsAreArray(exp_output_shape));
// Check outputs values.
EXPECT_THAT(m.GetOutputDatas(), ElementsAreArray(exp_output_data));
}
template <typename InputType>
struct UnpackOpTest : public ::testing::Test {
using TypeToTest = InputType;
TensorType TENSOR_TYPE =
(std::is_same<InputType, int16_t>::value
? TensorType_INT16
: (std::is_same<InputType, uint8_t>::value
? TensorType_UINT8
: (std::is_same<InputType, int8_t>::value
? TensorType_INT8
: (std::is_same<InputType, int32_t>::value
? TensorType_INT32
: TensorType_FLOAT32))));
};
using TestTypes = testing::Types<float, int32_t, int8_t, uint8_t, int16_t>;
TYPED_TEST_CASE(UnpackOpTest, TestTypes);
TYPED_TEST(UnpackOpTest, ThreeOutputs) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/0, /*input_shape=*/{3, 2},
/*input_data=*/{1, 2, 3, 4, 5, 6},
/*exp_output_shape=*/{{2}, {2}, {2}},
/*exp_output_data=*/{{1, 2}, {3, 4}, {5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsAxisOne) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/1, /*input_shape=*/{3, 2},
/*input_data=*/{1, 2, 3, 4, 5, 6},
/*exp_output_shape=*/{{3}, {3}},
/*exp_output_data=*/{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsNegativeAxisOne) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/-1, /*input_shape=*/{3, 2},
/*input_data=*/{1, 2, 3, 4, 5, 6},
/*exp_output_shape=*/{{3}, {3}},
/*exp_output_data=*/{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, OneOutput) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/0, /*input_shape=*/{1, 6},
/*input_data=*/{1, 2, 3, 4, 5, 6},
/*exp_output_shape=*/{{6}},
/*exp_output_data=*/{{1, 2, 3, 4, 5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/2, /*input_shape=*/{2, 2, 2},
/*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8},
/*exp_output_shape=*/{{2, 2}, {2, 2}},
/*exp_output_data=*/{{1, 3, 5, 7}, {2, 4, 6, 8}},
TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, FiveDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/2, /*input_shape=*/{2, 2, 2, 2, 1},
/*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
/*exp_output_shape=*/{{2, 2, 2, 1}, {2, 2, 2, 1}},
/*exp_output_data=*/
{{1, 2, 5, 6, 9, 10, 13, 14}, {3, 4, 7, 8, 11, 12, 15, 16}},
/*type=*/TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, VectorToScalar) {
Check<typename TestFixture::TypeToTest>(
/*axis=*/0, /*input_shape=*/{5},
/*input_data=*/{1, 2, 3, 4, 5},
/*exp_output_shape=*/{{}, {}, {}, {}, {}},
/*exp_output_data=*/{{1}, {2}, {3}, {4}, {5}}, TestFixture::TENSOR_TYPE);
}
// bool tests.
TEST(UnpackOpTestBool, BoolThreeOutputs) {
Check<bool>(
/*axis=*/0, /*input_shape=*/{3, 2},
/*input_data=*/{true, false, true, false, true, false},
/*exp_output_shape=*/{{2}, {2}, {2}},
/*exp_output_data=*/{{true, false}, {true, false}, {true, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsAxisOne) {
Check<bool>(
/*axis=*/1, /*input_shape=*/{3, 2},
/*input_data=*/{true, false, true, false, true, false},
/*exp_output_shape=*/{{3}, {3}},
/*exp_output_data=*/{{true, true, true}, {false, false, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisOne) {
Check<bool>(
/*axis=*/-1, /*input_shape=*/{3, 2},
/*input_data=*/{true, false, true, false, true, false},
/*exp_output_shape=*/{{3}, {3}},
/*exp_output_data=*/{{true, true, true}, {false, false, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisTwo) {
Check<bool>(
/*axis=*/-2, /*input_shape=*/{3, 2},
/*input_data=*/{true, false, true, false, true, false},
/*exp_output_shape=*/{{2}, {2}, {2}},
/*exp_output_data=*/{{true, false}, {true, false}, {true, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolOneOutput) {
Check<bool>(
/*axis=*/0, /*input_shape=*/{1, 6},
/*input_data=*/{true, false, true, false, true, false},
/*exp_output_shape=*/{{6}},
/*exp_output_data=*/{{true, false, true, false, true, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeDimensionsOutputs) {
Check<bool>(
/*axis=*/2, /*input_shape=*/{2, 2, 2},
/*input_data=*/{true, false, true, false, true, false, true, false},
/*exp_output_shape=*/{{2, 2}, {2, 2}},
/*exp_output_data=*/
{{true, true, true, true}, {false, false, false, false}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTest, BoolFiveDimensionsOutputs) {
Check<bool>(
/*axis=*/2, /*input_shape=*/{2, 2, 2, 2, 1},
/*input_data=*/
{true, false, true, false, true, false, true, false, true, true, true,
true, true, true, true, true},
/*exp_output_shape=*/{{2, 2, 2, 1}, {2, 2, 2, 1}},
/*exp_output_data=*/
{{true, false, true, false, true, true, true, true},
{true, false, true, false, true, true, true, true}},
/*type=*/TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolVectorToScalar) {
Check<bool>(/*axis=*/0, /*input_shape=*/{5},
/*input_data=*/{true, false, true, false, true},
/*exp_output_shape=*/{{}, {}, {}, {}, {}},
/*exp_output_data=*/{{true}, {false}, {true}, {false}, {true}},
/*type=*/TensorType_BOOL);
}
} // namespace
} // namespace tflite
| 3,842 |
535 | <filename>sys/config/selftest-fcb2/src/testcases/config_test_custom_compress.c
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "conf_test_fcb2.h"
static int unique_val_cnt;
static int
test_custom_compress_filter1(const char *val, const char *name, void *arg)
{
unique_val_cnt++;
return 0;
}
static int
test_custom_compress_filter2(const char *val, const char *name, void *arg)
{
if (!strcmp(val, "myfoo/mybar")) {
return 0;
}
return 1;
}
TEST_CASE_SELF(config_test_custom_compress)
{
int rc;
struct conf_fcb2 cf;
char test_value[CONF_TEST_FCB_VAL_STR_CNT][CONF_MAX_VAL_LEN];
int elems[4];
int i;
config_wipe_srcs();
config_wipe_fcb2(fcb_range, CONF_TEST_FCB_RANGE_CNT);
cf.cf2_fcb.f_magic = MYNEWT_VAL(CONFIG_FCB_MAGIC);
cf.cf2_fcb.f_range_cnt = CONF_TEST_FCB_RANGE_CNT;
cf.cf2_fcb.f_sector_cnt = fcb_range[0].fsr_sector_count;
cf.cf2_fcb.f_ranges = fcb_range;
rc = conf_fcb2_src(&cf);
TEST_ASSERT(rc == 0);
rc = conf_fcb2_dst(&cf);
TEST_ASSERT(rc == 0);
c2_var_count = 1;
test_export_block = 0;
val8 = 4;
val64 = 8;
memset(elems, 0, sizeof(elems));
for (i = 0; ; i++) {
config_test_fill_area(test_value, i);
memcpy(val_string, test_value, sizeof(val_string));
rc = conf_save();
TEST_ASSERT(rc == 0);
if (cf.cf2_fcb.f_active_id == fcb_range[0].fsr_sector_count - 2) {
/*
* Started using space just before scratch.
*/
break;
}
memset(val_string, 0, sizeof(val_string));
rc = conf_load();
TEST_ASSERT(rc == 0);
TEST_ASSERT(!memcmp(val_string, test_value, CONF_MAX_VAL_LEN));
}
for (i = 0; i < cf.cf2_fcb.f_sector_cnt - 1; i++) {
conf_fcb2_compress(&cf, test_custom_compress_filter1, NULL);
}
TEST_ASSERT(unique_val_cnt == 4); /* c2, c3 and ctest together */
test_export_block = 1;
/*
* Read values back, make sure they were carried over.
*/
memset(val_string, 0, sizeof(val_string));
val8 = 0;
val64 = 0;
rc = conf_load();
TEST_ASSERT(rc == 0);
TEST_ASSERT(!memcmp(val_string, test_value, CONF_MAX_VAL_LEN));
TEST_ASSERT(val8 == 4);
TEST_ASSERT(val64 == 8);
/*
* Only leave one var.
*/
for (i = 0; i < cf.cf2_fcb.f_sector_cnt - 1; i++) {
conf_fcb2_compress(&cf, test_custom_compress_filter2, NULL);
}
memset(val_string, 0, sizeof(val_string));
val8 = 0;
val64 = 0;
rc = conf_load();
TEST_ASSERT(rc == 0);
TEST_ASSERT(val_string[0][0] == 0);
TEST_ASSERT(val8 == 4);
TEST_ASSERT(val64 == 0);
}
| 1,509 |
764 | {"symbol": "GHOST","address": "0x4c327471C44B2dacD6E90525f9D629bd2e4f662C","overview":{"en": ""},"email": "<EMAIL>","website": "https://www.ghostbymcafee.com/","state": "NORMAL","links": {"blog": "","twitter": "https://twitter.com/ghostbymcafee","telegram": "","github": ""}} | 108 |
4,071 | #ifndef PS5_NETWORK_CONTEXT_HH_
#define PS5_NETWORK_CONTEXT_HH_
#include "core/ps_queue_hub/queue_hub.hh"
#include "core/ps_queue_hub/queue_hub_future.hh"
#include "service/session_context.hh"
#include "core/app-template.hh"
#include <vector>
#include <assert.h>
namespace ps
{
namespace network
{
using namespace std;
class NetworkContext
{
public:
NetworkContext(int32_t serverCount, int32_t clientCount, int32_t coreCount, int32_t userThreadCount,
string queueName = "SEASTAR") :
mServerCount(serverCount), mClientCount(clientCount),
mCoreCount(coreCount), mUserThreadCount(userThreadCount),
mSessionsContext(),
mQueueHubPair(ps::network::QueueHubFactory::GetInstance().GetHub<ps::network::Item>(queueName,
userThreadCount, coreCount)),
mRunning(false)
{}
virtual ~NetworkContext() {}
virtual void Reconnect()
{
assert("NetworkContext not impl Reconnect() !");
}
void SetRunning(bool r)
{
mRunning = r;
}
bool GetRunning() const
{
return mRunning;
}
int32_t GetServerCount() const
{
return mServerCount;
}
int32_t GetCoresCount() const
{
return mCoreCount;
}
int32_t GetClientCount() const
{
return mClientCount;
}
int32_t GetUserThreadCount() const
{
return mUserThreadCount;
}
virtual void SetSessionContextOfId(std::unique_ptr<SessionContext>&& sc, int64_t id)
{
assert("NetworkContext not impl SetSessionContextOfId(...) !");
}
virtual SessionContext* GetSessionOfId(int64_t id) const
{
assert("NetworkContext not impl GetSessionOfId(...) !");
return NULL;
}
virtual std::unique_ptr<SessionContext>* GetSessionAddrOfId(int64_t id)
{
assert("NetworkContext not impl GetSessionAddrOfId(...) !");
return NULL;
}
std::pair<ps::network::QueueHub<ps::network::Item>*, ps::network::QueueHub<ps::network::Item>*>
GetQueueHubPair() const
{
return mQueueHubPair;
}
protected:
seastar::app_template mApp;
int32_t mServerCount;
int32_t mClientCount;
int32_t mCoreCount;
int32_t mUserThreadCount;
std::vector<std::unique_ptr<SessionContext>> mSessionsContext;
std::pair<ps::network::QueueHub<ps::network::Item>*,
ps::network::QueueHub<ps::network::Item>*> mQueueHubPair;
bool mRunning;
};
} // namespace network
} // namespace ps
#endif // PS5_NETWORK_CONTEXT_HH_
| 1,051 |
1,425 | <reponame>helpspace-co/tinkerpop<filename>gremlin-tools/gremlin-benchmark/src/main/java/org/apache/tinkerpop/gremlin/driver/SerializationBenchmark.java<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.buffer.UnpooledByteBufAllocator;
import org.apache.tinkerpop.benchmark.util.AbstractBenchmarkBase;
import org.apache.tinkerpop.gremlin.driver.message.RequestMessage;
import org.apache.tinkerpop.gremlin.driver.message.ResponseMessage;
import org.apache.tinkerpop.gremlin.driver.message.ResponseStatusCode;
import org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1;
import org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0;
import org.apache.tinkerpop.gremlin.driver.ser.SerializationException;
import org.apache.tinkerpop.gremlin.process.traversal.Bytecode;
import org.apache.tinkerpop.gremlin.structure.io.binary.DataType;
import org.apache.tinkerpop.gremlin.structure.util.reference.ReferenceVertex;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Warmup;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
@Warmup(time = 200, timeUnit = MILLISECONDS)
public class SerializationBenchmark extends AbstractBenchmarkBase {
private static final UnpooledByteBufAllocator allocator = new UnpooledByteBufAllocator(false);
private static final ByteBuf RequestMessageGraphSONBuffer1 = Unpooled.wrappedBuffer(
("{\"requestId\":{\"@type\":\"g:UUID\",\"@value\":\"9b6d17c0-c5a9-418e-bff6-a25fbb1b175e\"}," +
"\"op\":\"a\",\"processor\":\"b\",\"args\":{}}")
.getBytes(StandardCharsets.UTF_8));
private static final ByteBuf RequestMessageGraphSONBuffer2 = Unpooled.wrappedBuffer(
("{\"requestId\":{\"@type\":\"g:UUID\",\"@value\":\"042b8400-d586-4fcb-b085-2cf2ab2bd5cb\"}," +
"\"op\":\"bytecode\",\"processor\":\"traversal\",\"args\":{\"gremlin\":" +
"{\"@type\":\"g:Bytecode\",\"@value\":{\"step\":[[\"V\"],[\"tail\"]]}},\"aliases\":{\"g\":\"g\"}}}")
.getBytes(StandardCharsets.UTF_8));
private static final ByteBuf RequestMessageBinaryBuffer1 = Unpooled.wrappedBuffer(new byte[]{
// flag
(byte)0x81,
// uuid
(byte) 0xd3, (byte) 0xfd, 0x35, 0x40, 0x67, 0x18, 0x46, (byte) 0x87,(byte) 0x95, 0x6b, (byte) 0xc8, 0x61,
(byte) 0x8a, 0x26, (byte) 0xe3, 0x35,
// string length and string value (a)
0, 0, 0, 0x01, 0x61,
// string length and string value (b)
0, 0, 0, 0x01, 0x62,
// Map (no items)
0, 0, 0, 0
});
private static final ByteBuf RequestMessageBinaryBuffer2 = Unpooled.wrappedBuffer(new byte[]{
// flag
(byte)0x81,
// uuid
(byte) 0xd3, (byte) 0xfd, 0x35, 0x40, 0x67, 0x18, 0x46, (byte) 0x87,(byte) 0x95, 0x6b, (byte) 0xc8, 0x61,
(byte) 0x8a, 0x26, (byte) 0xe3, 0x35,
// string length and string value (a)
0, 0, 0, 0x01, 0x61,
// string length and string value (b)
0, 0, 0, 0x01, 0x62,
// Map (2 items)
0, 0, 0, 0x2,
// "aliases"
DataType.STRING.getCodeByte(), 0, 0, 0, 0, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
// map { g: g }
DataType.MAP.getCodeByte(), 0, 0, 0, 0, 0x1,
DataType.STRING.getCodeByte(), 0, 0, 0, 0, 0x01, 0x67,
DataType.STRING.getCodeByte(), 0, 0, 0, 0, 0x01, 0x67,
// "gremlin"
DataType.STRING.getCodeByte(), 0, 0, 0, 0, 0x07, 0x67, 0x72, 0x65, 0x6d, 0x6c, 0x69, 0x6e,
// Bytecode for ['V', 'tail']
DataType.BYTECODE.getCodeByte(), 0, 0, 0, 0, 0x02,
// "V" (no values)
0, 0, 0, 0x1, 0x56, 0, 0, 0, 0,
// tail (no values)
0, 0, 0, 0x4, 0x74, 0x61, 0x69, 0x6c, 0, 0, 0, 0,
// no sources
0, 0, 0, 0
});
private static final UUID id = UUID.randomUUID();
private static final ResponseMessage response = ResponseMessage
.build(UUID.randomUUID()).code(ResponseStatusCode.SUCCESS).result(new ReferenceVertex(1, "person"))
.create();
private static final Bytecode bytecode = new Bytecode();
private static final RequestMessage request = RequestMessage
.build(Tokens.OPS_BYTECODE).processor("traversal").overrideRequestId(UUID.randomUUID())
.add(Tokens.ARGS_GREMLIN, bytecode)
.create();
private static final GraphBinaryMessageSerializerV1 binarySerializer = new GraphBinaryMessageSerializerV1();
private static final GraphSONMessageSerializerV3d0 graphsonSerializer = new GraphSONMessageSerializerV3d0();
static {
bytecode.addStep("V");
bytecode.addStep("values", "name");
bytecode.addStep("order");
bytecode.addStep("tail", 5);
}
@Benchmark
public RequestMessage testReadMessage1Binary() throws SerializationException {
RequestMessageBinaryBuffer1.readerIndex(0);
return binarySerializer.deserializeRequest(RequestMessageBinaryBuffer1);
}
@Benchmark
public RequestMessage testReadMessage2Binary() throws SerializationException {
RequestMessageBinaryBuffer2.readerIndex(0);
return binarySerializer.deserializeRequest(RequestMessageBinaryBuffer2);
}
@Benchmark
public RequestMessage testReadMessage1GraphSON() throws SerializationException {
RequestMessageGraphSONBuffer1.readerIndex(0);
return graphsonSerializer.deserializeRequest(RequestMessageGraphSONBuffer1);
}
@Benchmark
public RequestMessage testReadMessage2GraphSON() throws SerializationException {
RequestMessageGraphSONBuffer2.readerIndex(0);
return graphsonSerializer.deserializeRequest(RequestMessageGraphSONBuffer2);
}
@Benchmark
public void testWriteResponseBinary() throws SerializationException {
final ByteBuf buffer = binarySerializer.serializeResponseAsBinary(response, allocator);
buffer.release();
}
@Benchmark
public void testWriteResponseGraphSON() throws SerializationException {
final ByteBuf buffer = graphsonSerializer.serializeResponseAsBinary(response, allocator);
buffer.release();
}
@Benchmark
public void testWriteBytecodeBinary() throws SerializationException {
final ByteBuf buffer = binarySerializer.serializeRequestAsBinary(request, allocator);
buffer.release();
}
@Benchmark
public void testWriteBytecodeGraphSON() throws SerializationException {
final ByteBuf buffer = graphsonSerializer.serializeRequestAsBinary(request, allocator);
buffer.release();
}
@Benchmark
public RequestMessage testInstanceCreation() {
return RequestMessage.build("a").overrideRequestId(id).processor("b").create();
}
}
| 3,227 |
1,177 | <filename>exercises/nth-prime/nth_prime.py
def prime(number):
pass
| 30 |
2,151 | <filename>ash/display/mirror_window_controller.cc
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/display/mirror_window_controller.h"
#include <utility>
#include "ash/display/cursor_window_controller.h"
#include "ash/display/root_window_transformers.h"
#include "ash/display/screen_position_controller.h"
#include "ash/display/window_tree_host_manager.h"
#include "ash/host/ash_window_tree_host.h"
#include "ash/host/ash_window_tree_host_init_params.h"
#include "ash/host/root_window_transformer.h"
#include "ash/public/cpp/config.h"
#include "ash/root_window_settings.h"
#include "ash/shell.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "ui/aura/client/capture_client.h"
#include "ui/aura/env.h"
#include "ui/aura/window_delegate.h"
#include "ui/aura/window_event_dispatcher.h"
#include "ui/aura/window_tree_host.h"
#include "ui/base/layout.h"
#include "ui/base/ui_base_features.h"
#include "ui/base/ui_base_switches_util.h"
#include "ui/compositor/reflector.h"
#include "ui/display/display_layout.h"
#include "ui/display/manager/display_manager.h"
#include "ui/display/manager/managed_display_info.h"
#include "ui/display/screen.h"
#include "ui/gfx/canvas.h"
#include "ui/gfx/native_widget_types.h"
namespace ash {
namespace {
// ScreenPositionClient for mirroring windows.
class MirroringScreenPositionClient
: public aura::client::ScreenPositionClient {
public:
explicit MirroringScreenPositionClient(MirrorWindowController* controller)
: controller_(controller) {}
void ConvertPointToScreen(const aura::Window* window,
gfx::PointF* point) override {
const aura::Window* root = window->GetRootWindow();
aura::Window::ConvertPointToTarget(window, root, point);
const display::Display& display =
controller_->GetDisplayForRootWindow(root);
const gfx::Point display_origin = display.bounds().origin();
point->Offset(display_origin.x(), display_origin.y());
}
void ConvertPointFromScreen(const aura::Window* window,
gfx::PointF* point) override {
const aura::Window* root = window->GetRootWindow();
const display::Display& display =
controller_->GetDisplayForRootWindow(root);
const gfx::Point display_origin = display.bounds().origin();
point->Offset(-display_origin.x(), -display_origin.y());
aura::Window::ConvertPointToTarget(root, window, point);
}
void ConvertHostPointToScreen(aura::Window* root_window,
gfx::Point* point) override {
aura::Window* not_used;
ScreenPositionController::ConvertHostPointToRelativeToRootWindow(
root_window, controller_->GetAllRootWindows(), point, ¬_used);
aura::client::ScreenPositionClient::ConvertPointToScreen(root_window,
point);
}
void SetBounds(aura::Window* window,
const gfx::Rect& bounds,
const display::Display& display) override {
NOTREACHED();
}
private:
MirrorWindowController* controller_; // not owned.
DISALLOW_COPY_AND_ASSIGN(MirroringScreenPositionClient);
};
// A trivial CaptureClient that does nothing. That is, calls to set/release
// capture are dropped.
class NoneCaptureClient : public aura::client::CaptureClient {
public:
NoneCaptureClient() = default;
~NoneCaptureClient() override = default;
private:
// aura::client::CaptureClient:
void SetCapture(aura::Window* window) override {}
void ReleaseCapture(aura::Window* window) override {}
aura::Window* GetCaptureWindow() override { return nullptr; }
aura::Window* GetGlobalCaptureWindow() override { return nullptr; }
void AddObserver(aura::client::CaptureClientObserver* observer) override {}
void RemoveObserver(aura::client::CaptureClientObserver* observer) override {}
DISALLOW_COPY_AND_ASSIGN(NoneCaptureClient);
};
display::DisplayManager::MultiDisplayMode GetCurrentMultiDisplayMode() {
display::DisplayManager* display_manager = Shell::Get()->display_manager();
return display_manager->IsInUnifiedMode()
? display::DisplayManager::UNIFIED
: (display_manager->IsInSoftwareMirrorMode()
? display::DisplayManager::MIRRORING
: display::DisplayManager::EXTENDED);
}
int64_t GetCurrentReflectingSourceId() {
display::DisplayManager* display_manager = Shell::Get()->display_manager();
if (display_manager->IsInUnifiedMode())
return display::Screen::GetScreen()->GetPrimaryDisplay().id();
if (display_manager->IsInSoftwareMirrorMode())
return display_manager->mirroring_source_id();
return display::kInvalidDisplayId;
}
} // namespace
struct MirrorWindowController::MirroringHostInfo {
MirroringHostInfo();
~MirroringHostInfo();
std::unique_ptr<AshWindowTreeHost> ash_host;
gfx::Size mirror_window_host_size;
aura::Window* mirror_window = nullptr;
};
MirrorWindowController::MirroringHostInfo::MirroringHostInfo() = default;
MirrorWindowController::MirroringHostInfo::~MirroringHostInfo() = default;
MirrorWindowController::MirrorWindowController()
: current_event_targeter_src_host_(nullptr),
multi_display_mode_(display::DisplayManager::EXTENDED),
screen_position_client_(new MirroringScreenPositionClient(this)) {}
MirrorWindowController::~MirrorWindowController() {
// Make sure the root window gets deleted before cursor_window_delegate.
Close(false);
}
void MirrorWindowController::UpdateWindow(
const std::vector<display::ManagedDisplayInfo>& display_info_list) {
display::DisplayManager* display_manager = Shell::Get()->display_manager();
DCHECK(display_manager->IsInSoftwareMirrorMode() ||
display_manager->IsInUnifiedMode());
static int mirror_host_count = 0;
multi_display_mode_ = GetCurrentMultiDisplayMode();
reflecting_source_id_ = GetCurrentReflectingSourceId();
for (const display::ManagedDisplayInfo& display_info : display_info_list) {
std::unique_ptr<RootWindowTransformer> transformer;
if (display_manager->IsInSoftwareMirrorMode()) {
transformer.reset(CreateRootWindowTransformerForMirroredDisplay(
display_manager->GetDisplayInfo(reflecting_source_id_),
display_info));
} else {
DCHECK(display_manager->IsInUnifiedMode());
display::Display display =
display_manager->GetMirroringDisplayById(display_info.id());
transformer.reset(CreateRootWindowTransformerForUnifiedDesktop(
display::Screen::GetScreen()->GetPrimaryDisplay().bounds(), display));
}
if (mirroring_host_info_map_.find(display_info.id()) ==
mirroring_host_info_map_.end()) {
AshWindowTreeHostInitParams init_params;
init_params.initial_bounds = display_info.bounds_in_native();
init_params.display_id = display_info.id();
init_params.mirroring_delegate = this;
init_params.mirroring_unified = display_manager->IsInUnifiedMode();
init_params.device_scale_factor = display_info.device_scale_factor();
init_params.ui_scale_factor = display_info.configured_ui_scale();
MirroringHostInfo* host_info = new MirroringHostInfo;
host_info->ash_host = AshWindowTreeHost::Create(init_params);
mirroring_host_info_map_[display_info.id()] = host_info;
aura::WindowTreeHost* host = host_info->ash_host->AsWindowTreeHost();
// TODO: Config::MUS should not install an InputMethod.
// http://crbug.com/706913
if (!host->has_input_method()) {
host->SetSharedInputMethod(
Shell::Get()->window_tree_host_manager()->input_method());
}
host->window()->SetName(
base::StringPrintf("MirrorRootWindow-%d", mirror_host_count++));
host->compositor()->SetBackgroundColor(SK_ColorBLACK);
// No need to remove the observer because the WindowTreeHostManager
// outlives the host.
host->AddObserver(Shell::Get()->window_tree_host_manager());
host->AddObserver(this);
// TODO(oshima): TouchHUD is using idkey.
InitRootWindowSettings(host->window())->display_id = display_info.id();
host->InitHost();
host->window()->Show();
if (display_manager->IsInUnifiedMode()) {
host_info->ash_host->ConfineCursorToRootWindow();
AshWindowTreeHost* unified_ash_host =
Shell::Get()
->window_tree_host_manager()
->GetAshWindowTreeHostForDisplayId(reflecting_source_id_);
unified_ash_host->RegisterMirroringHost(host_info->ash_host.get());
aura::client::SetScreenPositionClient(host->window(),
screen_position_client_.get());
}
aura::client::SetCaptureClient(host->window(), new NoneCaptureClient());
host->Show();
aura::Window* mirror_window = host_info->mirror_window =
new aura::Window(nullptr);
mirror_window->Init(ui::LAYER_SOLID_COLOR);
host->window()->AddChild(mirror_window);
host_info->ash_host->SetRootWindowTransformer(std::move(transformer));
mirror_window->SetBounds(host->window()->bounds());
mirror_window->Show();
// The classic config creates the accelerated widget synchronously. Mus
// (without viz) creates the reflector in OnAcceleratedWidgetOverridden.
if (host->GetAcceleratedWidget() != gfx::kNullAcceleratedWidget) {
DCHECK_EQ(Shell::GetAshConfig(), Config::CLASSIC);
if (reflector_) {
reflector_->AddMirroringLayer(mirror_window->layer());
} else if (aura::Env::GetInstance()->context_factory_private()) {
reflector_ =
aura::Env::GetInstance()
->context_factory_private()
->CreateReflector(
Shell::GetRootWindowForDisplayId(reflecting_source_id_)
->GetHost()
->compositor(),
mirror_window->layer());
}
}
} else {
AshWindowTreeHost* ash_host =
mirroring_host_info_map_[display_info.id()]->ash_host.get();
aura::WindowTreeHost* host = ash_host->AsWindowTreeHost();
GetRootWindowSettings(host->window())->display_id = display_info.id();
ash_host->SetRootWindowTransformer(std::move(transformer));
host->SetBoundsInPixels(display_info.bounds_in_native());
}
}
// Deleting WTHs for disconnected displays.
if (mirroring_host_info_map_.size() > display_info_list.size()) {
for (MirroringHostInfoMap::iterator iter = mirroring_host_info_map_.begin();
iter != mirroring_host_info_map_.end();) {
if (std::find_if(display_info_list.begin(), display_info_list.end(),
[iter](const display::ManagedDisplayInfo& info) {
return info.id() == iter->first;
}) == display_info_list.end()) {
CloseAndDeleteHost(iter->second, true);
iter = mirroring_host_info_map_.erase(iter);
} else {
++iter;
}
}
}
if (mirroring_host_info_map_.empty() && reflector_) {
// Close the mirror window if all displays are disconnected.
aura::Env::GetInstance()->context_factory_private()->RemoveReflector(
reflector_.get());
reflector_.reset();
}
}
void MirrorWindowController::UpdateWindow() {
if (mirroring_host_info_map_.empty())
return;
display::DisplayManager* display_manager = Shell::Get()->display_manager();
display::Screen* screen = display::Screen::GetScreen();
std::vector<display::ManagedDisplayInfo> display_info_list;
// Prune the window on the removed displays.
for (auto& pair : mirroring_host_info_map_) {
MirroringHostInfo* info = pair.second;
if (screen
->GetDisplayNearestWindow(
info->ash_host->AsWindowTreeHost()->window())
.is_valid()) {
display_info_list.push_back(display_manager->GetDisplayInfo(pair.first));
}
}
UpdateWindow(display_info_list);
}
void MirrorWindowController::CloseIfNotNecessary() {
display::DisplayManager::MultiDisplayMode new_mode =
GetCurrentMultiDisplayMode();
int64_t new_reflecting_source_id = GetCurrentReflectingSourceId();
if (multi_display_mode_ != new_mode ||
reflecting_source_id_ != new_reflecting_source_id) {
Close(true);
} else {
UpdateWindow();
}
}
void MirrorWindowController::Close(bool delay_host_deletion) {
if (reflector_) {
aura::Env::GetInstance()->context_factory_private()->RemoveReflector(
reflector_.get());
reflector_.reset();
}
for (auto& info : mirroring_host_info_map_)
CloseAndDeleteHost(info.second, delay_host_deletion);
mirroring_host_info_map_.clear();
}
void MirrorWindowController::OnHostResized(aura::WindowTreeHost* host) {
for (auto& pair : mirroring_host_info_map_) {
MirroringHostInfo* info = pair.second;
if (info->ash_host->AsWindowTreeHost() == host) {
if (info->mirror_window_host_size == host->GetBoundsInPixels().size())
return;
info->mirror_window_host_size = host->GetBoundsInPixels().size();
// TODO: |reflector_| should always be non-null here, but isn't in MUS
// yet because of http://crbug.com/601869.
if (reflector_)
reflector_->OnMirroringCompositorResized();
// No need to update the transformer as new transformer is already set
// in UpdateWindow.
Shell::Get()
->window_tree_host_manager()
->cursor_window_controller()
->UpdateLocation();
return;
}
}
}
void MirrorWindowController::OnAcceleratedWidgetOverridden(
aura::WindowTreeHost* host) {
DCHECK_NE(host->GetAcceleratedWidget(), gfx::kNullAcceleratedWidget);
DCHECK_NE(Shell::GetAshConfig(), Config::CLASSIC);
DCHECK(!base::FeatureList::IsEnabled(features::kMash));
MirroringHostInfo* info = mirroring_host_info_map_[host->GetDisplayId()];
if (reflector_) {
reflector_->AddMirroringLayer(info->mirror_window->layer());
} else if (aura::Env::GetInstance()->context_factory_private()) {
reflector_ =
aura::Env::GetInstance()->context_factory_private()->CreateReflector(
Shell::GetPrimaryRootWindow()->GetHost()->compositor(),
info->mirror_window->layer());
}
}
display::Display MirrorWindowController::GetDisplayForRootWindow(
const aura::Window* root) const {
for (const auto& pair : mirroring_host_info_map_) {
if (pair.second->ash_host->AsWindowTreeHost()->window() == root) {
// Sanity check to catch an error early.
const int64_t id = pair.first;
const display::Display* display = GetMirroringDisplayById(id);
DCHECK(display);
if (display)
return *display;
}
}
return display::Display();
}
AshWindowTreeHost* MirrorWindowController::GetAshWindowTreeHostForDisplayId(
int64_t id) {
if (mirroring_host_info_map_.count(id) == 0)
return nullptr;
return mirroring_host_info_map_[id]->ash_host.get();
}
aura::Window::Windows MirrorWindowController::GetAllRootWindows() const {
aura::Window::Windows root_windows;
for (const auto& pair : mirroring_host_info_map_)
root_windows.push_back(pair.second->ash_host->AsWindowTreeHost()->window());
return root_windows;
}
const display::Display* MirrorWindowController::GetMirroringDisplayById(
int64_t display_id) const {
const display::Displays& list =
Shell::Get()->display_manager()->software_mirroring_display_list();
for (const auto& display : list) {
if (display.id() == display_id)
return &display;
}
return nullptr;
}
void MirrorWindowController::SetCurrentEventTargeterSourceHost(
aura::WindowTreeHost* targeter_src_host) {
current_event_targeter_src_host_ = targeter_src_host;
}
void MirrorWindowController::CloseAndDeleteHost(MirroringHostInfo* host_info,
bool delay_host_deletion) {
aura::WindowTreeHost* host = host_info->ash_host->AsWindowTreeHost();
aura::client::SetScreenPositionClient(host->window(), nullptr);
NoneCaptureClient* capture_client = static_cast<NoneCaptureClient*>(
aura::client::GetCaptureClient(host->window()));
aura::client::SetCaptureClient(host->window(), nullptr);
delete capture_client;
host->RemoveObserver(Shell::Get()->window_tree_host_manager());
host->RemoveObserver(this);
host_info->ash_host->PrepareForShutdown();
// TODO: |reflector_| should always be non-null here, but isn't in MUS yet
// because of http://crbug.com/601869.
if (reflector_ && host_info->mirror_window->layer()->GetCompositor())
reflector_->RemoveMirroringLayer(host_info->mirror_window->layer());
// EventProcessor may be accessed after this call if the mirroring window
// was deleted as a result of input event (e.g. shortcut), so don't delete
// now.
if (delay_host_deletion)
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, host_info);
else
delete host_info;
}
} // namespace ash
| 6,378 |
12,278 | // (C) Copyright 2013 <NAME>
// (C) Copyright 2013 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See www.boost.org/libs/thread for documentation.
#define BOOST_THREAD_VERSION 4
#include <boost/detail/lightweight_test.hpp> // BOOST_TEST
#include <boost/thread/mutex.hpp>
#include <boost/thread/with_lock_guard.hpp>
#include <boost/ref.hpp>
void func_with_0_arg() {
}
void func_with_1_arg(int arg_1) {
BOOST_TEST(arg_1 == 3);
}
bool func_with_2_arg(int arg_1, bool arg_2) {
BOOST_TEST(arg_1 == 3);
BOOST_TEST(arg_2 == true);
return !arg_2;
}
int func_with_3_arg(int arg_1, bool arg_2, const char* arg_3) {
BOOST_TEST(arg_1 == 13);
BOOST_TEST(arg_2 == false);
BOOST_TEST(std::string(arg_3) == "message for func with 3 arg");
return 12;
}
const char* func_with_4_arg(int arg_1, bool arg_2, int* arg_3, int& arg_4) {
BOOST_TEST(arg_1 == 23);
BOOST_TEST(arg_2 == false);
*arg_3 = 128;
arg_4 = 456;
return "hello";
}
void test_simple() {
boost::mutex m;
// #0
boost::with_lock_guard(m, func_with_0_arg);
// #1
boost::with_lock_guard(m, func_with_1_arg, 3);
// #2
bool res2 = boost::with_lock_guard(m, func_with_2_arg, 3, true);
BOOST_TEST(res2 == false);
// #3
int arg1 = 13;
const char* mes = "message for func with 3 arg";
int res3 = boost::with_lock_guard(m, func_with_3_arg, arg1, false, mes);
BOOST_TEST(res3 == 12);
// #4
int arg3 = 0;
int arg4 = 0;
const char* res4 = boost::with_lock_guard(
m,
func_with_4_arg,
23,
false,
&arg3,
boost::ref(arg4)
);
BOOST_TEST(arg3 == 128);
BOOST_TEST(arg4 == 456);
BOOST_TEST(std::string(res4) == "hello");
}
#if defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES)
void test_variadic_templates() {
std::cout << "C++11 variadic templates disabled" << std::endl;
}
#else
int func_with_5_args(int a1, char a2, int& a3, bool* a4, bool a5) {
BOOST_TEST(a1 == 12);
BOOST_TEST(a2 == 'x');
BOOST_TEST(a5 == false);
a3 = 135;
*a4 = false;
return 45;
}
int func_with_6_args(int a1, char a2, int& a3, bool* a4, int&& a5, bool a6) {
BOOST_TEST(a1 == 12);
BOOST_TEST(a2 == 'N');
BOOST_TEST(a5 == 2 || a5 == 13);
BOOST_TEST(a6 == false);
a3 = 200;
*a4 = true;
return 888;
}
void test_variadic_templates() {
boost::mutex m;
int a3 = 0;
bool a4 = true;
int res5 = boost::with_lock_guard(
m, func_with_5_args, 12, 'x', a3, &a4, false
);
BOOST_TEST(a3 == 135);
BOOST_TEST(a4 == false);
BOOST_TEST(res5 == 45);
int res6 = boost::with_lock_guard(
m, func_with_6_args, 12, 'N', a3, &a4, 2, false
);
BOOST_TEST(a3 == 200);
BOOST_TEST(a4 == true);
BOOST_TEST(res6 == 888);
a3 = 0;
a4 = false;
int a5 = 13;
int res6_move = boost::with_lock_guard(
m, func_with_6_args, 12, 'N', a3, &a4, boost::move(a5), false
);
BOOST_TEST(a3 == 200);
BOOST_TEST(a4 == true);
BOOST_TEST_EQ(res6_move, 888);
}
#endif
int main() {
test_simple();
test_variadic_templates();
return boost::report_errors();
}
| 1,468 |
3,139 | /*
* Copyright (c) 2009-2012 jMonkeyEngine
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of 'jMonkeyEngine' nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.jme3.scene.plugins.ogre;
import com.jme3.asset.ModelKey;
import com.jme3.material.MaterialList;
/**
* OgreMeshKey is used to load Ogre3D mesh.xml models with a specific
* material file or list. This allows customizing where the materials
* are retrieved from, instead of loading the material file with the same
* name as the model (the default).
*
* @author <NAME>
*/
public class OgreMeshKey extends ModelKey {
private MaterialList materialList;
private String materialName;
public OgreMeshKey(){
super();
}
public OgreMeshKey(String name){
super(name);
}
public OgreMeshKey(String name, MaterialList materialList){
super(name);
this.materialList = materialList;
}
public OgreMeshKey(String name, String materialName){
super(name);
this.materialName = materialName;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final OgreMeshKey other = (OgreMeshKey) obj;
if (!super.equals(other)) {
return false;
}
if (this.materialList != other.materialList && (this.materialList == null || !this.materialList.equals(other.materialList))) {
return false;
}
if ((this.materialName == null) ? (other.materialName != null) : !this.materialName.equals(other.materialName)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = 5;
hash = 31 * hash + (super.hashCode());
hash = 31 * hash + (this.materialList != null ? this.materialList.hashCode() : 0);
hash = 31 * hash + (this.materialName != null ? this.materialName.hashCode() : 0);
return hash;
}
public MaterialList getMaterialList() {
return materialList;
}
public void setMaterialList(MaterialList materialList){
this.materialList = materialList;
}
public String getMaterialName() {
return materialName;
}
public void setMaterialName(String name) {
materialName = name;
}
}
| 1,337 |
348 | <filename>docs/data/leg-t2/054/05405252.json<gh_stars>100-1000
{"nom":"Haroué","circ":"5ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":272,"abs":125,"votants":147,"blancs":16,"nuls":4,"exp":127,"res":[{"nuance":"SOC","nom":"<NAME>","voix":74},{"nuance":"REM","nom":"<NAME>","voix":53}]} | 126 |
393 | <reponame>nFnK/otros-log-viewer
package pl.otros.swing.suggest;
import javax.swing.text.BadLocationException;
import javax.swing.text.Document;
public class StringInsertSuggestionListener implements SelectionListener<BasicSuggestion> {
@Override
public void selected(SuggestionResult<BasicSuggestion> result) {
final Document document = result.getTextComponent().getDocument();
try {
final int caretLocation = result.getSuggestionSource().getCaretLocation();
final String toInsert = result.getValue().getToInsert();
document.insertString(caretLocation, toInsert, null);
} catch (BadLocationException e) {
//TODO
e.printStackTrace();
}
}
}
| 228 |
936 | <filename>source/sqlsrv/stmt.cpp
//---------------------------------------------------------------------------------------------------------------------------------
// File: stmt.cpp
//
// Contents: Routines that use statement handles
//
// Microsoft Drivers 5.10 for PHP for SQL Server
// Copyright(c) Microsoft Corporation
// All rights reserved.
// MIT License
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files(the ""Software""),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and / or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions :
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//---------------------------------------------------------------------------------------------------------------------------------
// *** header files ***
extern "C" {
#include "php_sqlsrv.h"
}
#include "php_sqlsrv_int.h"
#ifdef _WIN32
#include <sal.h>
#endif // _WIN32
//
// *** internal variables and constants ***
//
// our resource descriptor assigned in minit
int ss_sqlsrv_stmt::descriptor = 0;
const char* ss_sqlsrv_stmt::resource_name = "ss_sqlsrv_stmt";
namespace {
// current subsystem, defined for the CHECK_SQL_{ERROR|WARNING} macros
unsigned int current_log_subsystem = LOG_STMT;
// constants used as invalid types for type errors
const zend_uchar PHPTYPE_INVALID = SQLSRV_PHPTYPE_INVALID;
const int SQLSRV_INVALID_PRECISION = -1;
const SQLUINTEGER SQLSRV_INVALID_SIZE = (~1U);
const int SQLSRV_INVALID_SCALE = -1;
const int SQLSRV_SIZE_MAX_TYPE = -1;
// constants for maximums in SQL Server
const int SQL_SERVER_MAX_FIELD_SIZE = 8000;
const int SQL_SERVER_MAX_PRECISION = 38;
// default class used when no class is specified by sqlsrv_fetch_object
const char STDCLASS_NAME[] = "stdclass";
const char STDCLASS_NAME_LEN = sizeof( STDCLASS_NAME ) - 1;
// map a Zend PHP type constant to our constant type
enum SQLSRV_PHPTYPE zend_to_sqlsrv_phptype[] = {
SQLSRV_PHPTYPE_INVALID,
SQLSRV_PHPTYPE_NULL,
SQLSRV_PHPTYPE_INVALID,
SQLSRV_PHPTYPE_INVALID,
SQLSRV_PHPTYPE_INT,
SQLSRV_PHPTYPE_FLOAT,
SQLSRV_PHPTYPE_STRING,
SQLSRV_PHPTYPE_TABLE,
SQLSRV_PHPTYPE_DATETIME,
SQLSRV_PHPTYPE_STREAM,
SQLSRV_PHPTYPE_INVALID,
SQLSRV_PHPTYPE_INVALID,
SQLSRV_PHPTYPE_INVALID
};
// constant strings used for the field metadata results
// (char to avoid having to cast them where they are used)
namespace FieldMetaData {
const char* NAME = "Name";
const char* TYPE = "Type";
const char* SIZE = "Size";
const char* PREC = "Precision";
const char* SCALE = "Scale";
const char* NULLABLE = "Nullable";
}
/* internal functions */
void convert_to_zval( _Inout_ sqlsrv_stmt* stmt, _In_ SQLSRV_PHPTYPE sqlsrv_php_type, _In_opt_ void* in_val, _In_ SQLLEN field_len, _Inout_ zval& out_zval );
SQLSMALLINT get_resultset_meta_data(_Inout_ sqlsrv_stmt* stmt);
void fetch_fields_common( _Inout_ ss_sqlsrv_stmt* stmt, _In_ zend_long fetch_type, _Out_ zval& fields, _In_ bool allow_empty_field_names );
bool determine_column_size_or_precision( sqlsrv_stmt const* stmt, _In_ sqlsrv_sqltype sqlsrv_type, _Inout_ SQLULEN* column_size,
_Out_ SQLSMALLINT* decimal_digits );
sqlsrv_phptype determine_sqlsrv_php_type( sqlsrv_stmt const* stmt, SQLINTEGER sql_type, SQLUINTEGER size, bool prefer_string );
void determine_stmt_has_rows( _Inout_ ss_sqlsrv_stmt* stmt );
bool is_valid_sqlsrv_phptype( _In_ sqlsrv_phptype type );
bool is_valid_sqlsrv_sqltype( _In_ sqlsrv_sqltype type );
void type_and_encoding( INTERNAL_FUNCTION_PARAMETERS, _In_ int type );
void type_and_size_calc( INTERNAL_FUNCTION_PARAMETERS, _In_ int type );
void type_and_precision_calc( INTERNAL_FUNCTION_PARAMETERS, _In_ int type );
bool verify_and_set_encoding( _In_ const char* encoding_string, _Inout_ sqlsrv_phptype& phptype_encoding );
zval* parse_param_array(_Inout_ ss_sqlsrv_stmt* stmt, _Inout_ HashTable* param_ht, zend_ulong index,
_Out_ SQLSMALLINT& direction, _Out_ SQLSRV_PHPTYPE& php_out_type,
_Out_ SQLSRV_ENCODING& encoding, _Out_ SQLSMALLINT& sql_type,
_Out_ SQLULEN& column_size, _Out_ SQLSMALLINT& decimal_digits);
}
// query options for cursor types
namespace SSCursorTypes {
const char QUERY_OPTION_SCROLLABLE_STATIC[] = "static";
const char QUERY_OPTION_SCROLLABLE_DYNAMIC[] = "dynamic";
const char QUERY_OPTION_SCROLLABLE_KEYSET[] = "keyset";
const char QUERY_OPTION_SCROLLABLE_FORWARD[] = "forward";
const char QUERY_OPTION_SCROLLABLE_BUFFERED[] = "buffered";
}
ss_sqlsrv_stmt::ss_sqlsrv_stmt( _In_ sqlsrv_conn* c, _In_ SQLHANDLE handle, _In_ error_callback e, _In_ void* drv ) :
sqlsrv_stmt( c, handle, e, drv ),
prepared( false ),
conn_index( -1 ),
params_z( NULL ),
fetch_field_names( NULL ),
fetch_fields_count ( 0 )
{
core_sqlsrv_set_buffered_query_limit( this, SQLSRV_G( buffered_query_limit ) );
// inherit other values based on the corresponding connection options
ss_sqlsrv_conn* ss_conn = static_cast<ss_sqlsrv_conn*>(conn);
date_as_string = ss_conn->date_as_string;
format_decimals = ss_conn->format_decimals;
decimal_places = ss_conn->decimal_places;
}
ss_sqlsrv_stmt::~ss_sqlsrv_stmt( void )
{
if( fetch_field_names != NULL ) {
for( int i=0; i < fetch_fields_count; ++i ) {
sqlsrv_free( fetch_field_names[i].name );
}
sqlsrv_free( fetch_field_names );
}
if( params_z ) {
zval_ptr_dtor( params_z );
sqlsrv_free(params_z);
}
}
// to be called whenever a new result set is created, such as after an
// execute or next_result. Resets the state variables and calls the subclass.
void ss_sqlsrv_stmt::new_result_set( void )
{
if( fetch_field_names != NULL ) {
for( int i=0; i < fetch_fields_count; ++i ) {
sqlsrv_free( fetch_field_names[i].name );
}
sqlsrv_free( fetch_field_names );
}
fetch_field_names = NULL;
fetch_fields_count = 0;
sqlsrv_stmt::new_result_set();
}
// Returns a php type for a given sql type. Also sets the encoding wherever applicable.
sqlsrv_phptype ss_sqlsrv_stmt::sql_type_to_php_type( _In_ SQLINTEGER sql_type, _In_ SQLUINTEGER size, _In_ bool prefer_string_to_stream )
{
sqlsrv_phptype ss_phptype;
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_INVALID;
ss_phptype.typeinfo.encoding = SQLSRV_ENCODING_INVALID;
switch( sql_type ) {
case SQL_BIGINT:
case SQL_CHAR:
case SQL_DECIMAL:
case SQL_GUID:
case SQL_NUMERIC:
case SQL_WCHAR:
case SQL_SS_VARIANT:
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
ss_phptype.typeinfo.encoding = this->conn->encoding();
break;
case SQL_VARCHAR:
case SQL_WVARCHAR:
case SQL_LONGVARCHAR:
case SQL_WLONGVARCHAR:
case SQL_SS_XML:
if( prefer_string_to_stream || size != SQL_SS_LENGTH_UNLIMITED ) {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
ss_phptype.typeinfo.encoding = this->conn->encoding();
}
else {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STREAM;
ss_phptype.typeinfo.encoding = this->conn->encoding();
}
break;
case SQL_BIT:
case SQL_INTEGER:
case SQL_SMALLINT:
case SQL_TINYINT:
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_INT;
break;
case SQL_BINARY:
case SQL_LONGVARBINARY:
case SQL_VARBINARY:
case SQL_SS_UDT:
if( prefer_string_to_stream ) {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
ss_phptype.typeinfo.encoding = SQLSRV_ENCODING_BINARY;
}
else {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STREAM;
ss_phptype.typeinfo.encoding = SQLSRV_ENCODING_BINARY;
}
break;
case SQL_FLOAT:
case SQL_REAL:
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_FLOAT;
break;
case SQL_SS_TABLE:
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_TABLE;
break;
case SQL_TYPE_DATE:
case SQL_SS_TIMESTAMPOFFSET:
case SQL_SS_TIME2:
case SQL_TYPE_TIMESTAMP:
if (this->date_as_string) {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
ss_phptype.typeinfo.encoding = this->conn->encoding();
}
else {
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_DATETIME;
}
break;
default:
ss_phptype.typeinfo.type = SQLSRV_PHPTYPE_INVALID;
break;
}
return ss_phptype;
}
// statement-specific parameter processing. Uses the generic function specialised to return a statement
// resource.
#define PROCESS_PARAMS( rsrc, param_spec, calling_func, param_count, ... ) \
rsrc = process_params<ss_sqlsrv_stmt>( INTERNAL_FUNCTION_PARAM_PASSTHRU, param_spec, calling_func, param_count, ## __VA_ARGS__ );\
if( rsrc == NULL ) { \
RETURN_FALSE; \
}
// sqlsrv_execute( resource $stmt )
//
// Executes a previously prepared statement. See sqlsrv_prepare for information
// on preparing a statement for execution.
//
// This function is ideal for executing a prepared statement multiple times with
// different parameter values. See the MSDN documentation
//
// Parameters
// $stmt: A resource specifying the statement to be executed. For more
// information about how to create a statement resource, see sqlsrv_prepare.
//
// Return Value
// A Boolean value: true if the statement was successfully executed. Otherwise, false.
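// Illustrative usage sketch (not part of the original file; $conn is assumed to come
// from sqlsrv_connect(), and the table/column/variable names are hypothetical):
//   $stmt = sqlsrv_prepare($conn, "UPDATE t SET val = ? WHERE id = ?", array(&$val, &$id));
//   foreach ($updates as list($val, $id)) {
//       if (sqlsrv_execute($stmt) === false) {
//           die(print_r(sqlsrv_errors(), true));
//       }
//   }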
PHP_FUNCTION( sqlsrv_execute )
{
LOG_FUNCTION( "sqlsrv_execute" );
ss_sqlsrv_stmt* stmt = NULL;
try {
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
CHECK_CUSTOM_ERROR(( !stmt->prepared ), stmt, SS_SQLSRV_ERROR_STATEMENT_NOT_PREPARED ) {
throw ss::SSException();
}
// prepare for the next execution by flushing anything remaining in the result set
if( stmt->executed ) {
// to prepare to execute the next statement, we skip any remaining results (and skip parameter finalization too)
while( stmt->past_next_result_end == false ) {
core_sqlsrv_next_result( stmt, false, false );
}
}
// bind parameters before executing
bind_params( stmt );
core_sqlsrv_execute( stmt );
RETURN_TRUE;
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_execute: Unknown exception caught." );
}
}
// sqlsrv_fetch( resource $stmt )
//
// Makes the next row of a result set available for reading. Use
// sqlsrv_get_field to read fields of the row.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement. A
// statement must be executed before results can be retrieved. For information
// on executing a statement, see sqlsrv_query and sqlsrv_execute.
//
// Return Value
// If the next row of the result set was successfully retrieved, true is
// returned. If there are no more rows in the result set, null is
// returned. If an error occurred, false is returned.
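// Illustrative usage sketch (not part of the original file; $stmt is assumed to come
// from sqlsrv_query() on an existing connection):
//   while (sqlsrv_fetch($stmt) === true) {
//       $first = sqlsrv_get_field($stmt, 0);   // fields must be read in order
//   }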
PHP_FUNCTION( sqlsrv_fetch )
{
LOG_FUNCTION( "sqlsrv_fetch" );
ss_sqlsrv_stmt* stmt = NULL;
// NOTE: zend_parse_parameters expects zend_long when the type spec is 'l', and core_sqlsrv_fetch expects a short int
zend_long fetch_style = SQL_FETCH_NEXT; // default value for parameter if one isn't supplied
zend_long fetch_offset = 0; // default value for parameter if one isn't supplied
// take only the statement resource
PROCESS_PARAMS( stmt, "r|ll", _FN_, 2, &fetch_style, &fetch_offset );
try {
CHECK_CUSTOM_ERROR(( fetch_style < SQL_FETCH_NEXT || fetch_style > SQL_FETCH_RELATIVE ), stmt,
SS_SQLSRV_ERROR_INVALID_FETCH_STYLE ) {
throw ss::SSException();
}
bool result = core_sqlsrv_fetch( stmt, static_cast<SQLSMALLINT>(fetch_style), fetch_offset );
if( !result ) {
RETURN_NULL();
}
RETURN_TRUE;
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_fetch: Unknown exception caught." );
}
}
// sqlsrv_fetch_array( resource $stmt [, int $fetchType] )
//
// Retrieves the next row of data as an array.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement.
// $fetchType [OPTIONAL]: A predefined constant. See SQLSRV_FETCH_TYPE in php_sqlsrv.h
//
// Return Value
// If a row of data is retrieved, an array is returned. If there are no more
// rows to retrieve, null is returned. If an error occurs, false is returned.
// Based on the value of the $fetchType parameter, the returned array can be a
// numerically indexed array, an associative array, or both. By default, an
// array with both numeric and associative keys is returned. The data type of a
// value in the returned array will be the default PHP data type. For
// information about default PHP data types, see Default PHP Data Types.
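// Illustrative usage sketch (not part of the original file; the column name is hypothetical):
//   while ($row = sqlsrv_fetch_array($stmt, SQLSRV_FETCH_ASSOC)) {
//       echo $row['name'], PHP_EOL;
//   }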
PHP_FUNCTION( sqlsrv_fetch_array )
{
LOG_FUNCTION( "sqlsrv_fetch_array" );
ss_sqlsrv_stmt* stmt = NULL;
zend_long fetch_type = SQLSRV_FETCH_BOTH; // default value for parameter if one isn't supplied
zend_long fetch_style = SQL_FETCH_NEXT; // default value for parameter if one isn't supplied
zend_long fetch_offset = 0; // default value for parameter if one isn't supplied
// retrieve the statement resource and optional fetch type (see enum SQLSRV_FETCH_TYPE),
// fetch style (see SQLSRV_SCROLL_* constants) and fetch offset
PROCESS_PARAMS( stmt, "r|lll", _FN_, 3, &fetch_type, &fetch_style, &fetch_offset );
try {
CHECK_CUSTOM_ERROR(( fetch_type < MIN_SQLSRV_FETCH || fetch_type > MAX_SQLSRV_FETCH ), stmt,
SS_SQLSRV_ERROR_INVALID_FETCH_TYPE ) {
throw ss::SSException();
}
CHECK_CUSTOM_ERROR(( fetch_style < SQL_FETCH_NEXT || fetch_style > SQL_FETCH_RELATIVE ), stmt,
SS_SQLSRV_ERROR_INVALID_FETCH_STYLE ) {
throw ss::SSException();
}
bool result = core_sqlsrv_fetch( stmt, static_cast<SQLSMALLINT>(fetch_style), fetch_offset );
if( !result ) {
RETURN_NULL();
}
zval fields;
ZVAL_UNDEF( &fields );
fetch_fields_common( stmt, fetch_type, fields, true /*allow_empty_field_names*/ );
RETURN_ARR( Z_ARRVAL( fields ));
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_fetch_array: Unknown exception caught." );
}
}
// sqlsrv_field_metadata( resource $stmt )
//
// Retrieves metadata for the fields of a prepared statement. For information
// about preparing a statement, see sqlsrv_query or sqlsrv_prepare. Note that
// sqlsrv_field_metadata can be called on any prepared statement, pre- or
// post-execution.
//
// Parameters
// $stmt: A statement resource for which field metadata is sought.
//
// Return Value
// Retrieves an array of metadata for the current result set on a statement. Each element of the
// array is a sub-array containing six elements accessed by key:
// name - name of the field.
// type - integer of the type. Can be compared against the SQLSRV_SQLTYPE constants.
// size - length of the field. null if the field uses precision and scale instead.
// precision - number of digits in a numeric field. null if the field uses size.
// scale - number of decimal digits in a numeric field. null if the field uses size.
// is_nullable - if the field may contain a NULL instead of a value
// false is returned if an error occurs retrieving the metadata
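// Illustrative usage sketch (not part of the original file):
//   foreach (sqlsrv_field_metadata($stmt) as $field) {
//       echo $field['Name'], ' => SQL type ', $field['Type'], PHP_EOL;
//   }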
PHP_FUNCTION( sqlsrv_field_metadata )
{
sqlsrv_stmt* stmt = NULL;
LOG_FUNCTION( "sqlsrv_field_metadata" );
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// get the number of fields in the result set, and its metadata if it does not yet exist
SQLSMALLINT num_cols = get_resultset_meta_data(stmt);
if (stmt->data_classification) {
core_sqlsrv_sensitivity_metadata(stmt);
}
zval result_meta_data;
ZVAL_UNDEF(&result_meta_data);
array_init(&result_meta_data);
for( SQLSMALLINT f = 0; f < num_cols; ++f ) {
field_meta_data* core_meta_data = stmt->current_meta_data[f];
// initialize the array
zval field_array;
ZVAL_UNDEF( &field_array );
array_init(&field_array );
// add the field name to the associative array but keep a copy
add_assoc_string(&field_array, FieldMetaData::NAME, reinterpret_cast<char*>(core_meta_data->field_name.get()));
//core::sqlsrv_add_assoc_long( *stmt, &field_array, FieldMetaData::TYPE, core_meta_data->field_type );
add_assoc_long(&field_array, FieldMetaData::TYPE, core_meta_data->field_type);
switch( core_meta_data->field_type ) {
case SQL_DECIMAL:
case SQL_NUMERIC:
case SQL_TYPE_TIMESTAMP:
case SQL_TYPE_DATE:
case SQL_SS_TIME2:
case SQL_SS_TIMESTAMPOFFSET:
add_assoc_null(&field_array, FieldMetaData::SIZE);
add_assoc_long(&field_array, FieldMetaData::PREC, core_meta_data->field_precision);
add_assoc_long(&field_array, FieldMetaData::SCALE, core_meta_data->field_scale);
break;
case SQL_BIT:
case SQL_TINYINT:
case SQL_SMALLINT:
case SQL_INTEGER:
case SQL_BIGINT:
case SQL_REAL:
case SQL_FLOAT:
case SQL_DOUBLE:
add_assoc_null(&field_array, FieldMetaData::SIZE);
add_assoc_long(&field_array, FieldMetaData::PREC, core_meta_data->field_precision);
add_assoc_null(&field_array, FieldMetaData::SCALE);
break;
default:
add_assoc_long(&field_array, FieldMetaData::SIZE, core_meta_data->field_size);
add_assoc_null(&field_array, FieldMetaData::PREC);
add_assoc_null(&field_array, FieldMetaData::SCALE);
break;
}
// add the nullability to the array
add_assoc_long(&field_array, FieldMetaData::NULLABLE, core_meta_data->field_is_nullable);
if (stmt->data_classification) {
data_classification::fill_column_sensitivity_array(stmt, f, &field_array);
}
// add this field's meta data to the result set meta data
add_next_index_zval(&result_meta_data, &field_array);
}
// return our built collection and transfer ownership
RETURN_ZVAL(&result_meta_data, 1, 1);
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_field_metadata: Unknown exception caught." );
}
}
// sqlsrv_next_result( resource $stmt )
//
// Makes the next result (result set, row count, or output parameter) of the
// specified statement active. The first (or only) result returned by a batch
// query or stored procedure is active without a call to sqlsrv_next_result.
// Any output parameters bound are only available after sqlsrv_next_result returns
// null as per ODBC Driver 11 for SQL Server specs: http://msdn.microsoft.com/en-us/library/ms403283.aspx
//
// Parameters
// $stmt: The executed statement on which the next result is made active.
//
// Return Value
// If the next result was successfully made active, the Boolean value true is
// returned. If an error occurred in making the next result active, false is
// returned. If no more results are available, null is returned.
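// Illustrative usage sketch (not part of the original file) for a batch or stored
// procedure that returns two result sets before its output parameters:
//   while (sqlsrv_fetch($stmt)) { /* rows of the first result set */ }
//   if (sqlsrv_next_result($stmt) === true) {
//       while (sqlsrv_fetch($stmt)) { /* rows of the second result set */ }
//   }
//   // output parameters are only populated once sqlsrv_next_result() has returned null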
PHP_FUNCTION( sqlsrv_next_result )
{
LOG_FUNCTION( "sqlsrv_next_result" );
ss_sqlsrv_stmt* stmt = NULL;
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
core_sqlsrv_next_result( stmt, true );
// clear the current meta data since the new result will generate new meta data
stmt->clean_up_results_metadata();
if( stmt->past_next_result_end ) {
RETURN_NULL();
}
RETURN_TRUE;
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_next_result: Unknown exception caught." );
}
}
// sqlsrv_rows_affected( resource $stmt )
//
// Returns the number of rows modified by the last statement executed. This
// function does not return the number of rows returned by a SELECT statement.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement.
//
// Return Value
// An integer indicating the number of rows modified by the last executed
// statement. If no rows were modified, zero (0) is returned. If no information
// about the number of modified rows is available, negative one (-1) is
// returned. If an error occurred in retrieving the number of modified rows,
// false is returned. See SQLRowCount in the MSDN ODBC documentation.
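// Illustrative usage sketch (not part of the original file; table/column names hypothetical):
//   $stmt = sqlsrv_query($conn, "UPDATE t SET flag = 1 WHERE id < 10");
//   $modified = sqlsrv_rows_affected($stmt);   // e.g. 0, a positive count, or -1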
PHP_FUNCTION( sqlsrv_rows_affected )
{
LOG_FUNCTION( "sqlsrv_rows_affected" );
ss_sqlsrv_stmt* stmt = NULL;
SQLLEN rows = -1;
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// make sure that the statement has already been executed.
CHECK_CUSTOM_ERROR( !stmt->executed, stmt, SQLSRV_ERROR_STATEMENT_NOT_EXECUTED ) {
throw ss::SSException();
}
// make sure it is not scrollable. This function should only work for inserts, updates, and deletes,
// but this is the best we can do to enforce that.
CHECK_CUSTOM_ERROR( stmt->cursor_type != SQL_CURSOR_FORWARD_ONLY, stmt, SS_SQLSRV_ERROR_STATEMENT_SCROLLABLE ) {
throw ss::SSException();
}
rows = stmt->current_results->row_count();
RETURN_LONG( rows );
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_rows_affected: Unknown exception caught." );
}
}
// sqlsrv_num_rows( resource $stmt )
//
// Retrieves the number of rows in an active result set. The statement must
// have been created with the Scrollable attribute set to 'static'.
//
// Parameters
// $stmt: The statement on which the targeted result set is active.
//
// Return Value
// An integer value that represents the number of rows in the active result
// set. If an error occurs, the boolean value false is returned.
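// Illustrative usage sketch (not part of the original file; table name hypothetical) --
// note the scrollable (static) cursor that this function requires:
//   $stmt = sqlsrv_query($conn, "SELECT * FROM t", array(),
//                        array("Scrollable" => SQLSRV_CURSOR_STATIC));
//   $count = sqlsrv_num_rows($stmt);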
PHP_FUNCTION( sqlsrv_num_rows )
{
LOG_FUNCTION( "sqlsrv_num_rows" );
ss_sqlsrv_stmt* stmt = NULL;
SQLLEN rows = -1;
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// make sure that the statement has already been executed.
CHECK_CUSTOM_ERROR( !stmt->executed, stmt, SQLSRV_ERROR_STATEMENT_NOT_EXECUTED ) {
throw ss::SSException();
}
// make sure that the statement is scrollable and the cursor is not dynamic.
// if the cursor is dynamic, then the number of rows returned is always -1.
CHECK_CUSTOM_ERROR( stmt->cursor_type == SQL_CURSOR_FORWARD_ONLY || stmt->cursor_type == SQL_CURSOR_DYNAMIC, stmt,
SS_SQLSRV_ERROR_STATEMENT_NOT_SCROLLABLE ) {
throw ss::SSException();
}
rows = stmt->current_results->row_count();
RETURN_LONG( rows );
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_num_rows: Unknown exception caught." );
}
}
// sqlsrv_num_fields( resource $stmt )
//
// Retrieves the number of fields in an active result set. Note that
// sqlsrv_num_fields can be called on any prepared statement, before or after
// execution.
//
// Parameters
// $stmt: The statement on which the targeted result set is active.
//
// Return Value
// An integer value that represents the number of fields in the active result
// set. If an error occurs, the boolean value false is returned.
PHP_FUNCTION( sqlsrv_num_fields )
{
LOG_FUNCTION( "sqlsrv_num_fields" );
ss_sqlsrv_stmt* stmt = NULL;
SQLSMALLINT fields = -1;
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// retrieve the number of columns from ODBC
fields = core::SQLNumResultCols( stmt );
RETURN_LONG( fields );
}
catch( ss::SSException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_num_fields: Unknown exception caught." );
}
}
// sqlsrv_fetch_object( resource $stmt [, string $className [, array $ctorParams]])
//
// Retrieves the next row of data as a PHP object.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement.
//
// $className [OPTIONAL]: A string specifying the name of the class to
// instantiate. If a value for the $className parameter is not specified, an
// instance of the PHP stdClass is instantiated.
//
// $ctorParams [OPTIONAL]: An array that contains values passed to the
// constructor of the class specified with the $className parameter. If the
// constructor of the specified class accepts parameter values, the $ctorParams
// parameter must be used when calling sqlsrv_fetch_object.
//
// Return Value
// A PHP object with properties that correspond to result set field
// names. Property values are populated with the corresponding result set field
// values. If the class specified with the optional $className parameter does
// not exist or if there is no active result set associated with the specified
// statement, false is returned.
// The data type of a value in the returned object will be the default PHP data
// type. For information on default PHP data types, see Default PHP Data Types.
//
// Remarks
// If a class name is specified with the optional $className parameter, an
// object of this class type is instantiated. If the class has properties whose
// names match the result set field names, the corresponding result set values
// are applied to the properties. If a result set field name does not match a
// class property, a property with the result set field name is added to the
// object and the result set value is applied to the property. For more
// information about calling sqlsrv_fetch_object with the $className parameter,
// see How to: Retrieve Data as an Object (Microsoft Drivers for PHP for SQL Server).
//
// If a field with no name is returned, sqlsrv_fetch_object will discard the
// field value and issue a warning.
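// Illustrative usage sketch (not part of the original file; the Product class and its
// constructor argument are hypothetical):
//   while ($obj = sqlsrv_fetch_object($stmt, 'Product', array($categoryId))) {
//       echo $obj->name, PHP_EOL;
//   }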
PHP_FUNCTION( sqlsrv_fetch_object )
{
LOG_FUNCTION( "sqlsrv_fetch_object" );
ss_sqlsrv_stmt* stmt = NULL;
zval* class_name_z = NULL;
zval* ctor_params_z = NULL;
zend_long fetch_style = SQL_FETCH_NEXT; // default value for parameter if one isn't supplied
zend_long fetch_offset = 0; // default value for parameter if one isn't supplied
// stdClass is the name of the system's default base class in PHP
char* class_name = const_cast<char*>( STDCLASS_NAME );
std::size_t class_name_len = STDCLASS_NAME_LEN;
HashTable* properties_ht = NULL;
zval retval_z;
ZVAL_UNDEF( &retval_z );
// retrieve the statement resource and optional fetch type (see enum SQLSRV_FETCH_TYPE),
// fetch style (see SQLSRV_SCROLL_* constants) and fetch offset
// we also use z! instead of s and a so that null may be passed in as valid values for
// the class name and ctor params
PROCESS_PARAMS( stmt, "r|z!z!ll", _FN_, 4, &class_name_z, &ctor_params_z, &fetch_style, &fetch_offset );
try {
CHECK_CUSTOM_ERROR(( fetch_style < SQL_FETCH_NEXT || fetch_style > SQL_FETCH_RELATIVE ), stmt,
SS_SQLSRV_ERROR_INVALID_FETCH_STYLE ) {
throw ss::SSException();
}
if( class_name_z ) {
CHECK_CUSTOM_ERROR(( Z_TYPE_P( class_name_z ) != IS_STRING ), stmt, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ ) {
throw ss::SSException();
}
class_name = Z_STRVAL( *class_name_z );
class_name_len = Z_STRLEN( *class_name_z );
}
if( ctor_params_z && Z_TYPE_P( ctor_params_z ) != IS_ARRAY ) {
THROW_SS_ERROR( stmt, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ );
}
// fetch the data
bool result = core_sqlsrv_fetch( stmt, static_cast<SQLSMALLINT>(fetch_style), fetch_offset );
if( !result ) {
RETURN_NULL();
}
fetch_fields_common( stmt, SQLSRV_FETCH_ASSOC, retval_z, false /*allow_empty_field_names*/ );
properties_ht = Z_ARRVAL( retval_z );
// find the zend_class_entry of the class the user requested (stdClass by default) for use below
zend_class_entry* class_entry = NULL;
zend_string* class_name_str_z = zend_string_init( class_name, class_name_len, 0 );
int zr = ( NULL != ( class_entry = zend_lookup_class( class_name_str_z ))) ? SUCCESS : FAILURE;
zend_string_release( class_name_str_z );
CHECK_ZEND_ERROR( zr, stmt, SS_SQLSRV_ERROR_ZEND_BAD_CLASS, class_name ) {
throw ss::SSException();
}
// create an instance of the object with its default properties
// we pass NULL for the properties so that the object will be populated by its default properties
zr = object_and_properties_init( &retval_z, class_entry, NULL /*properties*/ );
CHECK_ZEND_ERROR( zr, stmt, SS_SQLSRV_ERROR_ZEND_OBJECT_FAILED, class_name ) {
throw ss::SSException();
}
// merge in the "properties" (associative array) returned from the fetch doing this vice versa
// since putting properties_ht into object_and_properties_init and merging the default properties
// causes duplicate properties when the visibilities are different and also references the
// default parameters directly in the object, meaning the default property value is changed when
// the object's property is changed.
zend_merge_properties( &retval_z, properties_ht );
zend_hash_destroy( properties_ht );
FREE_HASHTABLE( properties_ht );
// find and call the object's constructor
// The header files (zend.h and zend_API.h) declare
// these functions and structures, so by working with those, we were able to
// develop this as a suitable snippet for calling constructors. Some observations:
// params must be an array of zval**, not a zval** to an array as we originally
// thought. Also, a constructor doesn't show up in the function table, but
// is put into the "magic methods" section of the class entry.
//
// The default values of the fci and fcic structures were determined by
// calling zend_fcall_info_init with a test callable.
// if there is a constructor (e.g., stdClass doesn't have one)
if( class_entry->constructor ) {
// take the parameters given as our last argument and put them into a sequential array
sqlsrv_malloc_auto_ptr<zval> params_m;
zval ctor_retval_z;
ZVAL_UNDEF( &ctor_retval_z );
int num_params = 0;
if ( ctor_params_z ) {
HashTable* ctor_params_ht = Z_ARRVAL( *ctor_params_z );
num_params = zend_hash_num_elements( ctor_params_ht );
params_m = reinterpret_cast<zval*>( sqlsrv_malloc( num_params * sizeof( zval ) ));
int i = 0;
zval* value_z = NULL;
ZEND_HASH_FOREACH_VAL( ctor_params_ht, value_z ) {
zr = ( value_z ) ? SUCCESS : FAILURE;
CHECK_ZEND_ERROR( zr, stmt, SS_SQLSRV_ERROR_ZEND_OBJECT_FAILED, class_name ) {
throw ss::SSException();
}
ZVAL_COPY_VALUE(¶ms_m[i], value_z);
i++;
} ZEND_HASH_FOREACH_END();
} //if( !Z_ISUNDEF( ctor_params_z ))
// call the constructor function itself.
zend_fcall_info fci;
zend_fcall_info_cache fcic;
memset( &fci, 0, sizeof( fci ));
fci.size = sizeof( fci );
#if PHP_VERSION_ID < 70100
fci.function_table = &( class_entry )->function_table;
#endif
ZVAL_UNDEF( &( fci.function_name ) );
fci.retval = &ctor_retval_z;
fci.param_count = num_params;
fci.params = params_m; // purposefully not transferred since ownership isn't actually transferred.
fci.object = Z_OBJ_P( &retval_z );
memset( &fcic, 0, sizeof( fcic ));
#if PHP_VERSION_ID < 70300
fcic.initialized = 1;
#endif
fcic.function_handler = class_entry->constructor;
fcic.calling_scope = class_entry;
fcic.object = Z_OBJ_P( &retval_z );
zr = zend_call_function( &fci, &fcic );
CHECK_ZEND_ERROR( zr, stmt, SS_SQLSRV_ERROR_ZEND_OBJECT_FAILED, class_name ) {
throw ss::SSException();
}
} //if( class_entry->constructor )
RETURN_ZVAL( &retval_z, 1, 1 );
}
catch( core::CoreException& ) {
if( properties_ht != NULL ) {
zend_hash_destroy( properties_ht );
FREE_HASHTABLE( properties_ht );
}
else if ( Z_TYPE( retval_z ) == IS_ARRAY ) {
zend_hash_destroy( Z_ARRVAL( retval_z ));
FREE_HASHTABLE( Z_ARRVAL( retval_z ));
}
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_fetch_object: Unknown exception caught." );
}
}
// sqlsrv_has_rows( resource $stmt )
//
// Parameters
// $stmt: The statement on which the targeted result set is active.
//
// Return Value
// Returns whether or not there are rows waiting to be processed. There are two scenarios
// for using a function like this:
// 1) To know if there are any actual rows, not just a result set (empty or not). Use sqlsrv_has_rows to determine this.
// The guarantee is that if sqlsrv_has_rows returns true immediately after a query, sqlsrv_fetch_* will return at least
// one row of data.
// 2) To know if there is any sort of result set, empty or not, that has to be bypassed to get to something else, such as
// output parameters being returned. Use sqlsrv_num_fields > 0 to check if there is any result set that must be bypassed
// until sqlsrv_fetch returns NULL.
// The last caveat is that this function can still return FALSE if there is an error, which is fine since an error
// most likely means that there is no result data anyway.
// If this function returns true once, it will keep returning true even after the result set is exhausted
// (sqlsrv_fetch returns null).
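// Illustrative usage sketch (not part of the original file; query and table hypothetical):
//   $stmt = sqlsrv_query($conn, "SELECT * FROM t WHERE id = ?", array($id));
//   if (sqlsrv_has_rows($stmt)) {
//       // safe to assume at least one row can be fetched
//   }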
PHP_FUNCTION( sqlsrv_has_rows )
{
LOG_FUNCTION( "sqlsrv_has_rows" );
ss_sqlsrv_stmt* stmt = NULL;
try {
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
CHECK_CUSTOM_ERROR( !stmt->executed, stmt, SQLSRV_ERROR_STATEMENT_NOT_EXECUTED ) {
throw ss::SSException();
}
if( !stmt->has_rows && !stmt->fetch_called ) {
determine_stmt_has_rows( stmt );
}
if( stmt->has_rows ) {
RETURN_TRUE;
}
}
catch( core::CoreException& ) {
}
catch( ... ) {
DIE( "sqlsrv_has_rows: Unknown exception caught." );
}
RETURN_FALSE;
}
// sqlsrv_send_stream_data( resource $stmt )
//
// Sends data from parameter streams to the server. Up to eight kilobytes (8K)
// of data is sent with each call to sqlsrv_send_stream_data.
// By default, all stream data is sent to the server when a query is
// executed. If this default behavior is not changed, you do not have to use
// sqlsrv_send_stream_data to send stream data to the server. For information
// about changing the default behavior, see the Parameters section of
// sqlsrv_query or sqlsrv_prepare.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement.
//
// Return Value
// true if there is more data to be sent, null if all the data has been sent,
// and false if an error occurred.
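// Illustrative usage sketch (not part of the original file; file, table and column names
// hypothetical). Manual streaming only happens when SendStreamParamsAtExec is turned off:
//   $img  = fopen('photo.jpg', 'rb');
//   $stmt = sqlsrv_query($conn, "INSERT INTO pics (data) VALUES (?)",
//                        array(array($img, SQLSRV_PARAM_IN,
//                                    SQLSRV_PHPTYPE_STREAM(SQLSRV_ENC_BINARY))),
//                        array("SendStreamParamsAtExec" => 0));
//   while (sqlsrv_send_stream_data($stmt)) {
//       // each call sends up to 8K; the loop ends on null (done) or false (error)
//   }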
PHP_FUNCTION( sqlsrv_send_stream_data )
{
sqlsrv_stmt* stmt = NULL;
LOG_FUNCTION( "sqlsrv_send_stream_data" );
// get the statement resource that we've bound streams to
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// if everything was sent at execute time, just return that there is nothing more to send.
if( stmt->send_streams_at_exec ) {
RETURN_NULL();
}
// send the next packet
bool more = core_sqlsrv_send_stream_packet( stmt );
// if more to send, return true
if( more ) {
RETURN_TRUE;
}
// otherwise we're done, so return null
else {
RETURN_NULL();
}
}
catch( core::CoreException& ) {
// return false if an error occurred
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_send_stream_data: Unknown exception caught." );
}
}
// sqlsrv_get_field( resource $stmt, int $fieldIndex [, int $getAsType] )
//
// Retrieves data from the specified field of the current row. Field data must
// be accessed in order. For example, data from the first field cannot be
// accessed after data from the second field has been accessed.
//
// Parameters
// $stmt: A statement resource corresponding to an executed statement.
// $fieldIndex: The index of the field to be retrieved. Indexes begin at zero.
// $getAsType [OPTIONAL]: A SQLSRV constant (SQLSRV_PHPTYPE) that determines
// the PHP data type for the returned data. For information about supported data
// types, see SQLSRV Constants (Microsoft Drivers for PHP for SQL Server). If no return
// type is specified, a default PHP type will be returned. For information about
// default PHP types, see Default PHP Data Types. For information about
// specifying PHP data types, see How to: Specify PHP Data Types.
//
// Return Value
// The field data. You can specify the PHP data type of the returned data by
// using the $getAsType parameter. If no return data type is specified, the
// default PHP data type will be returned. For information about default PHP
// types, see Default PHP Data Types. For information about specifying PHP data
// types, see How to: Specify PHP Data Types.
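// Illustrative usage sketch (not part of the original file):
//   sqlsrv_fetch($stmt);
//   $id   = sqlsrv_get_field($stmt, 0);                                  // default PHP type
//   $name = sqlsrv_get_field($stmt, 1, SQLSRV_PHPTYPE_STRING("UTF-8"));  // forced to a UTF-8 string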
PHP_FUNCTION( sqlsrv_get_field )
{
LOG_FUNCTION( "sqlsrv_get_field" );
ss_sqlsrv_stmt* stmt = NULL;
sqlsrv_phptype sqlsrv_php_type;
sqlsrv_php_type.typeinfo.type = SQLSRV_PHPTYPE_INVALID;
SQLSRV_PHPTYPE sqlsrv_php_type_out = SQLSRV_PHPTYPE_INVALID;
void* field_value = NULL;
zend_long field_index = -1;
SQLLEN field_len = -1;
zval retval_z;
ZVAL_UNDEF(&retval_z);
// get the statement, the field index and the optional type
PROCESS_PARAMS( stmt, "rl|l", _FN_, 2, &field_index, &sqlsrv_php_type );
try {
// validate that the field index is within range
SQLSMALLINT num_cols = get_resultset_meta_data(stmt);
if( field_index < 0 || field_index >= num_cols ) {
THROW_SS_ERROR( stmt, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ );
}
core_sqlsrv_get_field( stmt, static_cast<SQLUSMALLINT>( field_index ), sqlsrv_php_type, false, field_value, &field_len, false/*cache_field*/,
&sqlsrv_php_type_out );
convert_to_zval( stmt, sqlsrv_php_type_out, field_value, field_len, retval_z );
sqlsrv_free( field_value );
RETURN_ZVAL( &retval_z, 1, 1 );
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_get_field: Unknown exception caught." );
}
}
// ** type functions. **
// When specifying PHP and SQL Server types that take parameters, such as VARCHAR(2000), we use functions
// to match that notation and return a specially encoded integer that tells us what type and size/precision
// are. For PHP types specifically we munge the type and encoding into the integer.
// As is easily seen, since they are so similar, we delegate the actual encoding to helper methods defined
// below.
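// Illustrative usage sketch (not part of the original file; table and variable names
// hypothetical) -- the encoded integers these functions return are meant to be used in the
// parameter arrays passed to sqlsrv_query()/sqlsrv_prepare():
//   $params = array(
//       array($price, SQLSRV_PARAM_IN, null, SQLSRV_SQLTYPE_DECIMAL(10, 2)),
//       array($name,  SQLSRV_PARAM_IN, null, SQLSRV_SQLTYPE_NVARCHAR(50)),
//   );
//   $stmt = sqlsrv_query($conn, "INSERT INTO t (price, name) VALUES (?, ?)", $params);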
// takes an encoding of the stream
PHP_FUNCTION( SQLSRV_PHPTYPE_STREAM )
{
type_and_encoding( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQLSRV_PHPTYPE_STREAM );
}
// takes an encoding of the string
PHP_FUNCTION( SQLSRV_PHPTYPE_STRING )
{
type_and_encoding( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQLSRV_PHPTYPE_STRING );
}
// takes the size of the binary field
PHP_FUNCTION(SQLSRV_SQLTYPE_BINARY)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_BINARY );
}
// takes the size of the char field
PHP_FUNCTION(SQLSRV_SQLTYPE_CHAR)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_CHAR );
}
// takes the precision and scale of the decimal field
PHP_FUNCTION(SQLSRV_SQLTYPE_DECIMAL)
{
type_and_precision_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_DECIMAL );
}
// takes the size of the nchar field
PHP_FUNCTION(SQLSRV_SQLTYPE_NCHAR)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_WCHAR );
}
// takes the precision and scale of the numeric field
PHP_FUNCTION(SQLSRV_SQLTYPE_NUMERIC)
{
type_and_precision_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_NUMERIC );
}
// takes the size (in characters, not bytes) of the nvarchar field
PHP_FUNCTION(SQLSRV_SQLTYPE_NVARCHAR)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_WVARCHAR );
}
// takes the size of the varbinary field
PHP_FUNCTION(SQLSRV_SQLTYPE_VARBINARY)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_VARBINARY );
}
// takes the size of the varchar field
PHP_FUNCTION(SQLSRV_SQLTYPE_VARCHAR)
{
type_and_size_calc( INTERNAL_FUNCTION_PARAM_PASSTHRU, SQL_VARCHAR );
}
void bind_params( _Inout_ ss_sqlsrv_stmt* stmt )
{
// if there's nothing to do, just return
if( stmt->params_z == NULL ) {
return;
}
try {
stmt->executed = false;
zval* params_z = stmt->params_z;
HashTable* params_ht = Z_ARRVAL_P( params_z );
zend_ulong index = -1;
zend_string *key = NULL;
zval* param_z = NULL;
ZEND_HASH_FOREACH_KEY_VAL( params_ht, index, key, param_z ) {
// make sure it's an integer index
int type = key ? HASH_KEY_IS_STRING : HASH_KEY_IS_LONG;
CHECK_CUSTOM_ERROR(type != HASH_KEY_IS_LONG, stmt, SS_SQLSRV_ERROR_PARAM_INVALID_INDEX) {
throw ss::SSException();
}
zval* value_z = NULL;
SQLSMALLINT direction = SQL_PARAM_INPUT;
SQLSRV_ENCODING encoding = stmt->encoding();
if( stmt->encoding() == SQLSRV_ENCODING_DEFAULT ) {
encoding = stmt->conn->encoding();
}
SQLSMALLINT sql_type = SQL_UNKNOWN_TYPE;
SQLULEN column_size = SQLSRV_UNKNOWN_SIZE;
SQLSMALLINT decimal_digits = 0;
SQLSRV_PHPTYPE php_out_type = SQLSRV_PHPTYPE_INVALID;
// if it's a parameter array
if (Z_TYPE_P(param_z) == IS_ARRAY) {
try {
HashTable* param_ht = Z_ARRVAL_P(param_z);
// Check the number of elements in the array
int num_elems = zend_hash_num_elements(param_ht);
if (num_elems > 1) {
value_z = parse_param_array(stmt, param_ht, index, direction, php_out_type, encoding, sql_type, column_size, decimal_digits);
} else {
// Simply get the first variable and use the defaults
value_z = zend_hash_index_find(param_ht, 0);
if (value_z == NULL) {
THROW_SS_ERROR(stmt, SS_SQLSRV_ERROR_VAR_REQUIRED, index + 1);
}
}
} catch (core::CoreException&) {
SQLFreeStmt(stmt->handle(), SQL_RESET_PARAMS);
throw;
}
}
else {
CHECK_CUSTOM_ERROR(!stmt->prepared && stmt->conn->ce_option.enabled, stmt, SS_SQLSRV_ERROR_AE_QUERY_SQLTYPE_REQUIRED) {
throw ss::SSException();
}
value_z = param_z;
}
// If the user specifies a certain type for an output parameter, we have to convert the zval
// to that type so that when the buffer is filled, the type is correct. But first,
// should check if a LOB type is specified.
CHECK_CUSTOM_ERROR(direction != SQL_PARAM_INPUT && (sql_type == SQL_LONGVARCHAR
|| sql_type == SQL_WLONGVARCHAR || sql_type == SQL_LONGVARBINARY),
stmt, SQLSRV_ERROR_OUTPUT_PARAM_TYPES_NOT_SUPPORTED) {
throw core::CoreException();
}
// Table-valued parameters are input-only
CHECK_CUSTOM_ERROR(direction != SQL_PARAM_INPUT && (sql_type == SQL_SS_TABLE || php_out_type == SQLSRV_PHPTYPE_TABLE), stmt, SQLSRV_ERROR_TVP_INPUT_PARAM_ONLY) {
throw ss::SSException();
}
// bind the parameter
core_sqlsrv_bind_param( stmt, static_cast<SQLUSMALLINT>( index ), direction, value_z, php_out_type, encoding, sql_type, column_size,
decimal_digits );
} ZEND_HASH_FOREACH_END();
}
catch( core::CoreException& ) {
stmt->free_param_data();
SQLFreeStmt( stmt->handle(), SQL_RESET_PARAMS );
zval_ptr_dtor( stmt->params_z );
sqlsrv_free( stmt->params_z );
stmt->params_z = NULL;
throw;
}
}
// sqlsrv_cancel( resource $stmt )
//
// Cancels a statement. This means that any pending results for the statement
// are discarded. After this function is called, the statement can be
// re-executed if it was prepared with sqlsrv_prepare. Calling this function is
// not necessary if all the results associated with the statement have been
// consumed.
//
// Parameters
// $stmt: The statement to be canceled.
//
// Return Value
// A Boolean value: true if the operation was successful. Otherwise, false.
PHP_FUNCTION( sqlsrv_cancel )
{
LOG_FUNCTION( "sqlsrv_cancel" );
ss_sqlsrv_stmt* stmt = NULL;
PROCESS_PARAMS( stmt, "r", _FN_, 0 );
try {
// close the stream to release the resource
close_active_stream( stmt );
SQLRETURN r = SQLCancel( stmt->handle() );
CHECK_SQL_ERROR_OR_WARNING( r, stmt ) {
throw ss::SSException();
}
RETURN_TRUE;
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_cancel: Unknown exception caught." );
}
}
void __cdecl sqlsrv_stmt_dtor( _Inout_ zend_resource *rsrc )
{
LOG_FUNCTION( "sqlsrv_stmt_dtor" );
// get the structure
ss_sqlsrv_stmt *stmt = static_cast<ss_sqlsrv_stmt*>( rsrc->ptr );
if( stmt->conn ) {
int zr = zend_hash_index_del( static_cast<ss_sqlsrv_conn*>( stmt->conn )->stmts, stmt->conn_index );
if( zr == FAILURE ) {
LOG( SEV_ERROR, "Failed to remove statement reference from the connection" );
}
}
stmt->~ss_sqlsrv_stmt();
sqlsrv_free( stmt );
rsrc->ptr = NULL;
}
// sqlsrv_free_stmt( resource $stmt )
//
// Frees all resources associated with the specified statement. The statement
// cannot be used again after this function has been called.
//
// Parameters
// $stmt: The statement to be closed.
//
// Return Value
// The Boolean value true unless the function is called with an invalid
// parameter. If the function is called with an invalid parameter, false is
// returned.
//
// Null is a valid parameter for this function. This allows the function to be
// called multiple times in a script. For example, if you free a statement in an
// error condition and free it again at the end of the script, the second call
// to sqlsrv_free_stmt will return true because the first call to
// sqlsrv_free_stmt (in the error condition) sets the statement resource to
// null.
PHP_FUNCTION( sqlsrv_free_stmt )
{
LOG_FUNCTION( "sqlsrv_free_stmt" );
zval* stmt_r = NULL;
ss_sqlsrv_stmt* stmt = NULL;
sqlsrv_context_auto_ptr error_ctx;
reset_errors();
try {
// dummy context to pass to the error handler
error_ctx = new (sqlsrv_malloc( sizeof( sqlsrv_context ))) sqlsrv_context( 0, ss_error_handler, NULL );
error_ctx->set_func(_FN_);
// take only the statement resource
if( zend_parse_parameters( ZEND_NUM_ARGS(), "r", &stmt_r ) == FAILURE ) {
// Check if it was a zval
int zr = zend_parse_parameters( ZEND_NUM_ARGS(), "z", &stmt_r );
CHECK_CUSTOM_ERROR(( zr == FAILURE ), error_ctx, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ ) {
throw ss::SSException();
}
if( Z_TYPE_P( stmt_r ) == IS_NULL ) {
RETURN_TRUE;
}
else {
THROW_CORE_ERROR( error_ctx, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ );
}
}
// verify the resource so we know we're deleting a statement
stmt = static_cast<ss_sqlsrv_stmt*>(zend_fetch_resource_ex(stmt_r, ss_sqlsrv_stmt::resource_name, ss_sqlsrv_stmt::descriptor));
// if sqlsrv_free_stmt was called on an already closed statement then we just return success.
// zend_list_close sets the type of the closed statement to -1.
SQLSRV_ASSERT( stmt_r != NULL, "sqlsrv_free_stmt: stmt_r is null." );
if ( Z_RES_TYPE_P( stmt_r ) == RSRC_INVALID_TYPE ) {
RETURN_TRUE;
}
if( stmt == NULL ) {
THROW_CORE_ERROR( error_ctx, SS_SQLSRV_ERROR_INVALID_FUNCTION_PARAMETER, _FN_ );
}
// delete the resource from Zend's master list, which will trigger the statement's destructor
#if PHP_VERSION_ID < 80000
if (zend_list_close(Z_RES_P(stmt_r)) == FAILURE) {
LOG(SEV_ERROR, "Failed to remove stmt resource %1!d!", Z_RES_P(stmt_r)->handle);
}
#else
zend_list_close(Z_RES_P(stmt_r));
#endif
// when stmt_r is first parsed in zend_parse_parameters, stmt_r becomes a zval that points to a zend_resource with a refcount of 2
// need to DELREF here so the refcount becomes 1 and stmt_r can be appropriately destroyed by the garbage collector when it goes out of scope
// zend_list_close only destroys the resource pointed to by Z_RES_P( stmt_r ), not the zend_resource itself
Z_TRY_DELREF_P(stmt_r);
ZVAL_NULL( stmt_r );
RETURN_TRUE;
}
catch( core::CoreException& ) {
RETURN_FALSE;
}
catch( ... ) {
DIE( "sqlsrv_free_stmt: Unknown exception caught." );
}
}
void stmt_option_ss_scrollable:: operator()( _Inout_ sqlsrv_stmt* stmt, stmt_option const* /*opt*/, _In_ zval* value_z )
{
CHECK_CUSTOM_ERROR(( Z_TYPE_P( value_z ) != IS_STRING ), stmt, SQLSRV_ERROR_INVALID_OPTION_SCROLLABLE ) {
throw ss::SSException();
}
const char* scroll_type = Z_STRVAL_P( value_z );
unsigned long cursor_type = -1;
// find which cursor type they would like and set the ODBC statement attribute as such
if( !stricmp( scroll_type, SSCursorTypes::QUERY_OPTION_SCROLLABLE_STATIC )) {
cursor_type = SQL_CURSOR_STATIC;
}
else if( !stricmp( scroll_type, SSCursorTypes::QUERY_OPTION_SCROLLABLE_DYNAMIC )) {
cursor_type = SQL_CURSOR_DYNAMIC;
}
else if( !stricmp( scroll_type, SSCursorTypes::QUERY_OPTION_SCROLLABLE_KEYSET )) {
cursor_type = SQL_CURSOR_KEYSET_DRIVEN;
}
else if( !stricmp( scroll_type, SSCursorTypes::QUERY_OPTION_SCROLLABLE_FORWARD )) {
cursor_type = SQL_CURSOR_FORWARD_ONLY;
}
else if( !stricmp( scroll_type, SSCursorTypes::QUERY_OPTION_SCROLLABLE_BUFFERED )) {
cursor_type = SQLSRV_CURSOR_BUFFERED;
}
else {
THROW_SS_ERROR( stmt, SQLSRV_ERROR_INVALID_OPTION_SCROLLABLE );
}
core_sqlsrv_set_scrollable( stmt, cursor_type );
}
namespace {
void convert_to_zval( _Inout_ sqlsrv_stmt* stmt, _In_ SQLSRV_PHPTYPE sqlsrv_php_type, _In_opt_ void* in_val, _In_ SQLLEN field_len, _Inout_ zval& out_zval)
{
if ( in_val == NULL ) {
ZVAL_NULL( &out_zval);
return;
}
switch (sqlsrv_php_type) {
case SQLSRV_PHPTYPE_INT:
case SQLSRV_PHPTYPE_FLOAT:
{
if (sqlsrv_php_type == SQLSRV_PHPTYPE_INT) {
ZVAL_LONG( &out_zval, *(static_cast<int*>( in_val )));
}
else {
ZVAL_DOUBLE( &out_zval, *(static_cast<double*>( in_val )));
}
break;
}
case SQLSRV_PHPTYPE_STRING:
{
ZVAL_STRINGL( &out_zval, static_cast<const char*>( in_val ), field_len);
break;
}
case SQLSRV_PHPTYPE_STREAM:
{
out_zval = *( static_cast<zval*>( in_val ));
stmt->active_stream = out_zval;
//addref here because deleting out_zval later will decrement the refcount
Z_TRY_ADDREF( out_zval );
break;
}
case SQLSRV_PHPTYPE_DATETIME:
{
convert_datetime_string_to_zval(stmt, static_cast<char*>(in_val), field_len, out_zval);
break;
}
case SQLSRV_PHPTYPE_NULL:
ZVAL_NULL(&out_zval);
break;
default:
DIE("Unknown php type");
break;
}
return;
}
// put in the column size and scale/decimal digits of the sql server type
// these values are taken from the MSDN page at http://msdn2.microsoft.com/en-us/library/ms711786(VS.85).aspx
// for SQL_VARBINARY, SQL_VARCHAR, and SQL_WLONGVARCHAR types, see https://msdn.microsoft.com/en-CA/library/ms187993.aspx
bool determine_column_size_or_precision( sqlsrv_stmt const* stmt, _In_ sqlsrv_sqltype sqlsrv_type, _Inout_ SQLULEN* column_size,
_Out_ SQLSMALLINT* decimal_digits )
{
*decimal_digits = 0;
switch( sqlsrv_type.typeinfo.type ) {
case SQL_BIGINT:
*column_size = 19;
break;
case SQL_BIT:
*column_size = 1;
break;
case SQL_INTEGER:
*column_size = 10;
break;
case SQL_SMALLINT:
*column_size = 5;
break;
case SQL_TINYINT:
*column_size = 3;
break;
case SQL_GUID:
*column_size = 36;
break;
case SQL_FLOAT:
*column_size = 53;
break;
case SQL_REAL:
*column_size = 24;
break;
case SQL_LONGVARBINARY:
case SQL_LONGVARCHAR:
*column_size = INT_MAX;
break;
case SQL_WLONGVARCHAR:
*column_size = INT_MAX >> 1;
break;
case SQL_SS_XML:
case SQL_SS_TABLE:
*column_size = SQL_SS_LENGTH_UNLIMITED;
break;
case SQL_BINARY:
case SQL_CHAR:
case SQL_VARBINARY:
case SQL_VARCHAR:
case SQL_SS_VARIANT:
*column_size = sqlsrv_type.typeinfo.size;
if( *column_size == SQLSRV_SIZE_MAX_TYPE ) {
*column_size = SQL_SS_LENGTH_UNLIMITED;
}
else if( *column_size > SQL_SERVER_MAX_FIELD_SIZE || *column_size == SQLSRV_INVALID_SIZE ) {
*column_size = SQLSRV_INVALID_SIZE;
return false;
}
break;
case SQL_WCHAR:
case SQL_WVARCHAR:
*column_size = sqlsrv_type.typeinfo.size;
if( *column_size == SQLSRV_SIZE_MAX_TYPE ) {
*column_size = SQL_SS_LENGTH_UNLIMITED;
break;
}
if( *column_size > SQL_SERVER_MAX_FIELD_SIZE || *column_size == SQLSRV_INVALID_SIZE ) {
*column_size = SQLSRV_INVALID_SIZE;
return false;
}
break;
case SQL_DECIMAL:
case SQL_NUMERIC:
*column_size = sqlsrv_type.typeinfo.size;
*decimal_digits = sqlsrv_type.typeinfo.scale;
// if there was something wrong with the values given to type_and_precision_calc, these are set to invalid precision
if( *column_size == SQLSRV_INVALID_PRECISION || *decimal_digits == SQLSRV_INVALID_PRECISION ) {
*column_size = SQLSRV_INVALID_SIZE;
return false;
}
break;
// this can represent one of three data types: smalldatetime, datetime, and datetime2
// we present the largest for the version and let SQL Server downsize it
case SQL_TYPE_TIMESTAMP:
*column_size = sqlsrv_type.typeinfo.size;
*decimal_digits = sqlsrv_type.typeinfo.scale;
break;
case SQL_SS_TIMESTAMPOFFSET:
*column_size = 34;
*decimal_digits = 7;
break;
case SQL_TYPE_DATE:
*column_size = 10;
*decimal_digits = 0;
break;
case SQL_SS_TIME2:
*column_size = 16;
*decimal_digits = 7;
break;
default:
// an invalid sql type should have already been dealt with, so we assert here.
DIE( "Trying to determine column size for an invalid type. Type should have already been verified." );
return false;
}
return true;
}
// given a SQL Server type, return a sqlsrv php type
sqlsrv_phptype determine_sqlsrv_php_type( _In_ ss_sqlsrv_stmt const* stmt, _In_ SQLINTEGER sql_type, _In_ SQLUINTEGER size, _In_ bool prefer_string )
{
sqlsrv_phptype sqlsrv_phptype;
sqlsrv_phptype.typeinfo.type = PHPTYPE_INVALID;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_INVALID;
switch( sql_type ) {
case SQL_BIGINT:
case SQL_DECIMAL:
case SQL_NUMERIC:
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_CHAR;
break;
case SQL_CHAR:
case SQL_GUID:
case SQL_WCHAR:
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
break;
case SQL_VARCHAR:
case SQL_WVARCHAR:
case SQL_SS_VARIANT:
if( prefer_string || size != SQL_SS_LENGTH_UNLIMITED ) {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
}
else {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STREAM;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
}
break;
case SQL_BIT:
case SQL_INTEGER:
case SQL_SMALLINT:
case SQL_TINYINT:
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_INT;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_CHAR;
break;
case SQL_BINARY:
case SQL_LONGVARBINARY:
case SQL_VARBINARY:
case SQL_SS_UDT:
if( prefer_string ) {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_BINARY;
}
else {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STREAM;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_BINARY;
}
break;
case SQL_LONGVARCHAR:
case SQL_WLONGVARCHAR:
case SQL_SS_XML:
if( prefer_string ) {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
}
else {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STREAM;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
}
break;
case SQL_FLOAT:
case SQL_REAL:
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_FLOAT;
sqlsrv_phptype.typeinfo.encoding = SQLSRV_ENCODING_CHAR;
break;
case SQL_TYPE_DATE:
case SQL_SS_TIMESTAMPOFFSET:
case SQL_SS_TIME2:
case SQL_TYPE_TIMESTAMP:
{
if (stmt->date_as_string) {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_STRING;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
}
else {
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_DATETIME;
}
break;
}
case SQL_SS_TABLE:
sqlsrv_phptype.typeinfo.type = SQLSRV_PHPTYPE_TABLE;
sqlsrv_phptype.typeinfo.encoding = stmt->encoding();
break;
default:
sqlsrv_phptype.typeinfo.type = PHPTYPE_INVALID;
SQLSRV_ASSERT(false, "An invalid php type was returned with (supposedly) validated sql type and column_size");
break;
}
// if an encoding hasn't been set for the statement, then use the connection's encoding
if( sqlsrv_phptype.typeinfo.encoding == SQLSRV_ENCODING_DEFAULT ) {
sqlsrv_phptype.typeinfo.encoding = stmt->conn->encoding();
}
return sqlsrv_phptype;
}
// determine if a query returned any rows of data. It does this by actually fetching the first row
// (though not retrieving the data) and setting the has_rows flag in the stmt if the fetch was successful.
// The function returns nothing; any error that occurs during the determination is posted here before returning.
void determine_stmt_has_rows( _Inout_ ss_sqlsrv_stmt* stmt )
{
SQLRETURN r = SQL_SUCCESS;
if( stmt->fetch_called ) {
return;
}
// default condition
stmt->has_rows = false;
// if there are no columns then there are no rows
if( core::SQLNumResultCols( stmt ) == 0 ) {
return;
}
// if the statement is scrollable, our work is easier though less performant. We simply
// fetch the first row, and then roll the cursor back to be prior to the first row
if( stmt->cursor_type != SQL_CURSOR_FORWARD_ONLY ) {
r = stmt->current_results->fetch( SQL_FETCH_FIRST, 0 );
if( SQL_SUCCEEDED( r )) {
stmt->has_rows = true;
CHECK_SQL_WARNING( r, stmt );
// restore the cursor to its original position.
r = stmt->current_results->fetch( SQL_FETCH_ABSOLUTE, 0 );
SQLSRV_ASSERT(( r == SQL_NO_DATA ), "core_sqlsrv_has_rows: Should have scrolled the cursor to the beginning "
"of the result set." );
}
}
else {
// otherwise, we fetch the first row, but record that we did. sqlsrv_fetch checks this
// flag and simply skips the first fetch, knowing it was already done. It records its own
// flags to know if it should fetch on subsequent calls.
r = core::SQLFetchScroll( stmt, SQL_FETCH_NEXT, 0 );
if( SQL_SUCCEEDED( r )) {
stmt->has_rows = true;
CHECK_SQL_WARNING( r, stmt );
return;
}
}
}
SQLSMALLINT get_resultset_meta_data(_Inout_ sqlsrv_stmt * stmt)
{
    // get the number of columns in the result set
SQLSMALLINT num_cols = -1;
num_cols = stmt->current_meta_data.size();
bool getMetaData = false;
if (num_cols == 0) {
getMetaData = true;
if (stmt->column_count == ACTIVE_NUM_COLS_INVALID) {
num_cols = core::SQLNumResultCols(stmt);
stmt->column_count = num_cols;
} else {
num_cols = stmt->column_count;
}
}
try {
if (getMetaData) {
for (int i = 0; i < num_cols; i++) {
sqlsrv_malloc_auto_ptr<field_meta_data> core_meta_data;
core_meta_data = core_sqlsrv_field_metadata(stmt, i);
stmt->current_meta_data.push_back(core_meta_data.get());
core_meta_data.transferred();
}
}
} catch( core::CoreException& ) {
throw;
}
SQLSRV_ASSERT(stmt->current_meta_data.size() == num_cols, "Meta data vector out of sync" );
return num_cols;
}
void fetch_fields_common( _Inout_ ss_sqlsrv_stmt* stmt, _In_ zend_long fetch_type, _Out_ zval& fields, _In_ bool allow_empty_field_names )
{
void* field_value = NULL;
sqlsrv_phptype sqlsrv_php_type;
sqlsrv_php_type.typeinfo.type = SQLSRV_PHPTYPE_INVALID;
SQLSRV_PHPTYPE sqlsrv_php_type_out = SQLSRV_PHPTYPE_INVALID;
// make sure that the fetch type is legal
CHECK_CUSTOM_ERROR((fetch_type < MIN_SQLSRV_FETCH || fetch_type > MAX_SQLSRV_FETCH), stmt, SS_SQLSRV_ERROR_INVALID_FETCH_TYPE, stmt->func()) {
throw ss::SSException();
}
    // get the number of columns in the result set and its metadata if they do not already exist
SQLSMALLINT num_cols = get_resultset_meta_data(stmt);
// if this is the first fetch in a new result set, then get the field names and
// store them off for successive fetches.
if ((fetch_type & SQLSRV_FETCH_ASSOC) && stmt->fetch_field_names == NULL) {
SQLLEN field_name_len = 0;
sqlsrv_malloc_auto_ptr<sqlsrv_fetch_field_name> field_names;
field_names = static_cast<sqlsrv_fetch_field_name*>(sqlsrv_malloc(num_cols * sizeof(sqlsrv_fetch_field_name)));
for (int i = 0; i < num_cols; ++i) {
// The meta data field name is already null-terminated, and the field name len is correct.
field_name_len = stmt->current_meta_data[i]->field_name_len;
field_names[i].name = static_cast<char*>(sqlsrv_malloc(field_name_len, sizeof(char), 1));
memcpy_s((void*)field_names[i].name, (field_name_len * sizeof(char)), (void*)stmt->current_meta_data[i]->field_name, field_name_len);
field_names[i].name[field_name_len] = '\0'; // null terminate the field name after the memcpy
field_names[i].len = field_name_len; // field_name_len should not need to include the null char
}
stmt->fetch_field_names = field_names;
stmt->fetch_fields_count = num_cols;
field_names.transferred();
}
int zr = SUCCESS;
array_init(&fields);
for( int i = 0; i < num_cols; ++i ) {
SQLLEN field_len = -1;
core_sqlsrv_get_field( stmt, i, sqlsrv_php_type, true /*prefer string*/,
field_value, &field_len, false /*cache_field*/, &sqlsrv_php_type_out );
zval field;
ZVAL_UNDEF( &field );
convert_to_zval( stmt, sqlsrv_php_type_out, field_value, field_len, field );
sqlsrv_free( field_value );
if( fetch_type & SQLSRV_FETCH_NUMERIC ) {
zr = add_next_index_zval( &fields, &field );
CHECK_ZEND_ERROR( zr, stmt, SQLSRV_ERROR_ZEND_HASH ) {
throw ss::SSException();
}
}
if( fetch_type & SQLSRV_FETCH_ASSOC ) {
CHECK_CUSTOM_WARNING_AS_ERROR(( stmt->fetch_field_names[i].len == 0 && !allow_empty_field_names ), stmt,
SS_SQLSRV_WARNING_FIELD_NAME_EMPTY) {
throw ss::SSException();
}
if( stmt->fetch_field_names[i].len > 0 || allow_empty_field_names ) {
add_assoc_zval(&fields, stmt->fetch_field_names[i].name, &field);
}
}
//only addref when the fetch_type is BOTH because this is the only case when fields(hashtable)
//has 2 elements pointing to field. Do not addref if the type is NUMERIC or ASSOC because
//fields now only has 1 element pointing to field and we want the ref count to be only 1
if (fetch_type == SQLSRV_FETCH_BOTH) {
Z_TRY_ADDREF(field);
}
} //for loop
}
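// parse a single bound parameter array. The index lookups below assume the following layout
// (a sketch inferred from this function): [0] => variable or value, [1] => direction (SQL_PARAM_*),
// [2] => SQLSRV_PHPTYPE_* constant, [3] => SQLSRV_SQLTYPE_* constant, where [2] and [3] may be absent or null.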
zval* parse_param_array(_Inout_ ss_sqlsrv_stmt* stmt, _Inout_ HashTable* param_ht, zend_ulong index, _Out_ SQLSMALLINT& direction,
_Out_ SQLSRV_PHPTYPE& php_out_type, _Out_ SQLSRV_ENCODING& encoding, _Out_ SQLSMALLINT& sql_type,
_Out_ SQLULEN& column_size, _Out_ SQLSMALLINT& decimal_digits)
{
zval* var_or_val = zend_hash_index_find(param_ht, 0);
bool php_type_param_is_null = true;
bool sql_type_param_is_null = true;
    // Assumption: the array contains more than just the variable, so parse the rest of it
zval* dir = zend_hash_index_find(param_ht, 1);
if (Z_TYPE_P(dir) != IS_NULL) {
// if param direction is specified, make sure it's valid
CHECK_CUSTOM_ERROR(Z_TYPE_P(dir) != IS_LONG, stmt, SS_SQLSRV_ERROR_INVALID_PARAMETER_DIRECTION, index + 1) {
throw ss::SSException();
}
direction = static_cast<SQLSMALLINT>(Z_LVAL_P(dir));
CHECK_CUSTOM_ERROR(direction != SQL_PARAM_INPUT && direction != SQL_PARAM_OUTPUT && direction != SQL_PARAM_INPUT_OUTPUT,
stmt, SS_SQLSRV_ERROR_INVALID_PARAMETER_DIRECTION, index + 1) {
throw ss::SSException();
}
CHECK_CUSTOM_ERROR(direction != SQL_PARAM_INPUT && !Z_ISREF_P(var_or_val), stmt, SS_SQLSRV_ERROR_PARAM_VAR_NOT_REF, index + 1) {
throw ss::SSException();
}
}
// Check if the user provides php type or sql type or both
zval* phptype_z = zend_hash_index_find(param_ht, 2);
zval* sqltype_z = zend_hash_index_find(param_ht, 3);
php_type_param_is_null = (phptype_z == NULL || Z_TYPE_P(phptype_z) == IS_NULL);
sql_type_param_is_null = (sqltype_z == NULL || Z_TYPE_P(sqltype_z) == IS_NULL);
if (php_type_param_is_null) {
// so set default for php type based on the variable
if (Z_ISREF_P(var_or_val)) {
php_out_type = zend_to_sqlsrv_phptype[Z_TYPE_P(Z_REFVAL_P(var_or_val))];
} else {
php_out_type = zend_to_sqlsrv_phptype[Z_TYPE_P(var_or_val)];
}
} else {
CHECK_CUSTOM_ERROR(Z_TYPE_P(phptype_z) != IS_LONG, stmt, SQLSRV_ERROR_INVALID_PARAMETER_PHPTYPE, index + 1) {
throw ss::SSException();
}
sqlsrv_phptype srv_phptype;
srv_phptype.value = Z_LVAL_P(phptype_z);
CHECK_CUSTOM_ERROR(!is_valid_sqlsrv_phptype(srv_phptype), stmt, SQLSRV_ERROR_INVALID_PARAMETER_PHPTYPE, index + 1) {
throw ss::SSException();
}
php_out_type = static_cast<SQLSRV_PHPTYPE>(srv_phptype.typeinfo.type);
encoding = (SQLSRV_ENCODING)srv_phptype.typeinfo.encoding;
// if the call has a SQLSRV_PHPTYPE_STRING/STREAM('default'), then the stream is in the encoding established
// by the connection
if (encoding == SQLSRV_ENCODING_DEFAULT) {
encoding = stmt->conn->encoding();
}
}
if (sql_type_param_is_null) {
        // the sql type is not specified, but it is required by Always Encrypted for non-prepared statements
CHECK_CUSTOM_ERROR(stmt->conn->ce_option.enabled && !stmt->prepared, stmt, SS_SQLSRV_ERROR_AE_QUERY_SQLTYPE_REQUIRED) {
throw ss::SSException();
}
} else {
CHECK_CUSTOM_ERROR(Z_TYPE_P(sqltype_z) != IS_LONG, stmt, SQLSRV_ERROR_INVALID_PARAMETER_SQLTYPE, index + 1) {
throw ss::SSException();
}
// since the user supplied this type, make sure it's valid
sqlsrv_sqltype sqlsrv_sql_type;
sqlsrv_sql_type.value = Z_LVAL_P(sqltype_z);
CHECK_CUSTOM_ERROR(!is_valid_sqlsrv_sqltype(sqlsrv_sql_type), stmt, SQLSRV_ERROR_INVALID_PARAMETER_SQLTYPE, index + 1) {
throw ss::SSException();
}
bool size_okay = determine_column_size_or_precision(stmt, sqlsrv_sql_type, &column_size, &decimal_digits);
CHECK_CUSTOM_ERROR(!size_okay, stmt, SS_SQLSRV_ERROR_INVALID_PARAMETER_PRECISION, index + 1) {
throw ss::SSException();
}
sql_type = sqlsrv_sql_type.typeinfo.type;
if (direction != SQL_PARAM_INPUT && php_type_param_is_null) {
sqlsrv_phptype srv_phptype;
srv_phptype = determine_sqlsrv_php_type(stmt, sql_type, (SQLUINTEGER)column_size, true);
php_out_type = static_cast<SQLSRV_PHPTYPE>(srv_phptype.typeinfo.type);
encoding = static_cast<SQLSRV_ENCODING>(srv_phptype.typeinfo.encoding);
}
}
if (direction == SQL_PARAM_OUTPUT) {
if (php_out_type == SQLSRV_PHPTYPE_NULL || php_out_type == SQLSRV_PHPTYPE_DATETIME || php_out_type == SQLSRV_PHPTYPE_STREAM) {
THROW_CORE_ERROR(stmt, SS_SQLSRV_ERROR_INVALID_OUTPUT_PARAM_TYPE);
}
}
return var_or_val;
}
bool is_valid_sqlsrv_phptype( _In_ sqlsrv_phptype type )
{
switch( type.typeinfo.type ) {
case SQLSRV_PHPTYPE_NULL:
case SQLSRV_PHPTYPE_INT:
case SQLSRV_PHPTYPE_FLOAT:
case SQLSRV_PHPTYPE_DATETIME:
case SQLSRV_PHPTYPE_TABLE:
return true;
case SQLSRV_PHPTYPE_STRING:
case SQLSRV_PHPTYPE_STREAM:
{
if( type.typeinfo.encoding == SQLSRV_ENCODING_BINARY || type.typeinfo.encoding == SQLSRV_ENCODING_CHAR
|| type.typeinfo.encoding == CP_UTF8 || type.typeinfo.encoding == SQLSRV_ENCODING_DEFAULT ) {
return true;
}
break;
}
}
return false;
}
// return whether the type is a valid sql server type, not including
// size, precision or scale. Use determine_column_size_or_precision for that.
bool is_valid_sqlsrv_sqltype( _In_ sqlsrv_sqltype sql_type )
{
switch( sql_type.typeinfo.type ) {
case SQL_BIGINT:
case SQL_BIT:
case SQL_INTEGER:
case SQL_SMALLINT:
case SQL_TINYINT:
case SQL_GUID:
case SQL_FLOAT:
case SQL_REAL:
case SQL_LONGVARBINARY:
case SQL_LONGVARCHAR:
case SQL_WLONGVARCHAR:
case SQL_SS_XML:
case SQL_BINARY:
case SQL_CHAR:
case SQL_WCHAR:
case SQL_WVARCHAR:
case SQL_VARBINARY:
case SQL_VARCHAR:
case SQL_DECIMAL:
case SQL_NUMERIC:
case SQL_TYPE_TIMESTAMP:
case SQL_TYPE_DATE:
case SQL_SS_TIME2:
case SQL_SS_TIMESTAMPOFFSET:
case SQL_SS_TABLE:
break;
default:
return false;
}
return true;
}
// verify an encoding given to type_and_encoding by looking through the list
// of standard encodings created at module initialization time
bool verify_and_set_encoding( _In_ const char* encoding_string, _Inout_ sqlsrv_phptype& phptype_encoding )
{
void* encoding_temp = NULL;
zend_ulong index = -1;
zend_string* key = NULL;
ZEND_HASH_FOREACH_KEY_PTR( g_ss_encodings_ht, index, key, encoding_temp ) {
if (encoding_temp) {
sqlsrv_encoding* encoding = reinterpret_cast<sqlsrv_encoding*>(encoding_temp);
encoding_temp = NULL;
if (!stricmp(encoding_string, encoding->iana)) {
phptype_encoding.typeinfo.encoding = encoding->code_page;
return true;
}
}
else {
DIE("Fatal: Error retrieving encoding from encoding hash table.");
}
} ZEND_HASH_FOREACH_END();
return false;
}
// called when one of the SQLSRV_SQLTYPE type functions is called. Encodes the type and size
// into a sqlsrv_sqltype bit fields (see php_sqlsrv.h).
void type_and_size_calc( INTERNAL_FUNCTION_PARAMETERS, _In_ int type )
{
char* size_p = NULL;
size_t size_len = 0;
int size = 0;
if( zend_parse_parameters( ZEND_NUM_ARGS(), "s", &size_p, &size_len ) == FAILURE ) {
return;
}
if (size_p) {
if (!strnicmp("max", size_p, sizeof("max") / sizeof(char))) {
size = SQLSRV_SIZE_MAX_TYPE;
}
else {
#ifndef _WIN32
errno = 0;
#else
_set_errno(0); // reset errno for atol
#endif // !_WIN32
size = atol(size_p);
if (errno != 0) {
size = SQLSRV_INVALID_SIZE;
}
}
}
else {
DIE("type_and_size_calc: size_p is null.");
}
int max_size = SQL_SERVER_MAX_FIELD_SIZE;
// size is actually the number of characters, not the number of bytes, so if they ask for a
    // 2 byte per character type, then we halve the maximum size allowed.
if( type == SQL_WVARCHAR || type == SQL_WCHAR ) {
max_size >>= 1;
}
if( size > max_size || size < SQLSRV_SIZE_MAX_TYPE || size == 0 ) {
LOG( SEV_ERROR, "invalid size. size must be > 0 and <= %1!d! characters or 'max'", max_size );
size = SQLSRV_INVALID_SIZE;
}
sqlsrv_sqltype sql_type;
sql_type.typeinfo.type = type;
sql_type.typeinfo.size = size;
sql_type.typeinfo.scale = SQLSRV_INVALID_SCALE;
ZVAL_LONG( return_value, sql_type.value );
}
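// Illustrative PHP usage (assuming the standard sqlsrv type constants): SQLSRV_SQLTYPE_VARCHAR(512) or
// SQLSRV_SQLTYPE_VARBINARY('max'); the size argument parsed above is either a character count or 'max'.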
// called when the user gives SQLSRV_SQLTYPE_DECIMAL or SQLSRV_SQLTYPE_NUMERIC sql types as the type of the
// field. encodes these into a sqlsrv_sqltype structure (see php_sqlsrv.h)
void type_and_precision_calc( INTERNAL_FUNCTION_PARAMETERS, _In_ int type )
{
zend_long prec = SQLSRV_INVALID_PRECISION;
zend_long scale = SQLSRV_INVALID_SCALE;
if( zend_parse_parameters( ZEND_NUM_ARGS(), "|ll", &prec, &scale ) == FAILURE ) {
return;
}
if( prec > SQL_SERVER_MAX_PRECISION ) {
LOG( SEV_ERROR, "Invalid precision. Precision can't be > 38" );
prec = SQLSRV_INVALID_PRECISION;
}
if( prec < 0 ) {
LOG( SEV_ERROR, "Invalid precision. Precision can't be negative" );
prec = SQLSRV_INVALID_PRECISION;
}
if( scale > prec ) {
LOG( SEV_ERROR, "Invalid scale. Scale can't be > precision" );
scale = SQLSRV_INVALID_SCALE;
}
sqlsrv_sqltype sql_type;
sql_type.typeinfo.type = type;
sql_type.typeinfo.size = prec;
sql_type.typeinfo.scale = scale;
ZVAL_LONG( return_value, sql_type.value );
}
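// Illustrative PHP usage: SQLSRV_SQLTYPE_DECIMAL(18, 4) encodes precision 18 and scale 4 into the returned value.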
// common code for SQLSRV_PHPTYPE_STREAM and SQLSRV_PHPTYPE_STRING php types given as parameters.
// encodes the type and encoding into a sqlsrv_phptype structure (see php_sqlsrv.h)
void type_and_encoding( INTERNAL_FUNCTION_PARAMETERS, _In_ int type )
{
SQLSRV_ASSERT(( type == SQLSRV_PHPTYPE_STREAM || type == SQLSRV_PHPTYPE_STRING ), "type_and_encoding: Invalid type passed." );
char* encoding_param;
size_t encoding_param_len = 0;
// set the default encoding values to invalid so that
// if the encoding isn't validated, it will return the invalid setting.
sqlsrv_phptype sqlsrv_php_type;
sqlsrv_php_type.typeinfo.type = type;
sqlsrv_php_type.typeinfo.encoding = SQLSRV_ENCODING_INVALID;
    if( zend_parse_parameters( ZEND_NUM_ARGS(), "s", &encoding_param, &encoding_param_len ) == FAILURE ) {
        ZVAL_LONG( return_value, sqlsrv_php_type.value );
        return;
    }
if( !verify_and_set_encoding( encoding_param, sqlsrv_php_type )) {
LOG( SEV_ERROR, "Invalid encoding for php type." );
}
ZVAL_LONG( return_value, sqlsrv_php_type.value );
}
}
| 35,434 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/079/07902121.json
{"nom":"Fomperron","circ":"2ème circonscription","dpt":"Deux-Sèvres","inscrits":290,"abs":153,"votants":137,"blancs":0,"nuls":11,"exp":126,"res":[{"nuance":"SOC","nom":"<NAME>","voix":63},{"nuance":"REM","nom":"<NAME>","voix":63}]} | 135 |
1,094 | /***********************************************************************************************************************************
Fork Handler
***********************************************************************************************************************************/
#ifndef COMMON_FORK_H
#define COMMON_FORK_H
/***********************************************************************************************************************************
Functions
***********************************************************************************************************************************/
// Fork a new process and throw an error if it fails
int forkSafe(void);
// Detach a forked process so it can continue running after the parent process has exited. This is not a typical daemon startup
// because the parent process may continue to run and perform work for some time.
void forkDetach(void);
#endif
| 135 |
1,163 | <gh_stars>1000+
// Copyright 2020 The IREE Authors
//
// Licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef IREE_BINDINGS_TFLITE_SHIM_H_
#define IREE_BINDINGS_TFLITE_SHIM_H_
#include "iree/base/api.h"
#include "iree/hal/api.h"
#include "iree/vm/api.h"
// NOTE: we pull in our own copy here in case the tflite API changes upstream.
#define TFL_COMPILE_LIBRARY 1
#include "bindings/tflite/include/tensorflow/lite/c/c_api.h"
#include "bindings/tflite/include/tensorflow/lite/c/c_api_experimental.h"
TfLiteStatus _TfLiteStatusFromIREEStatus(iree_status_t status);
#endif // IREE_BINDINGS_TFLITE_SHIM_H_
| 297 |
471 | package com.dtflys.forest.backend.okhttp3.body;
import com.dtflys.forest.backend.ContentType;
import com.dtflys.forest.backend.body.AbstractBodyBuilder;
import com.dtflys.forest.converter.json.ForestJsonConverter;
import com.dtflys.forest.exceptions.ForestRuntimeException;
import com.dtflys.forest.handler.LifeCycleHandler;
import com.dtflys.forest.http.ForestRequest;
import com.dtflys.forest.http.ForestRequestBody;
import com.dtflys.forest.http.body.NameValueRequestBody;
import com.dtflys.forest.http.body.ObjectRequestBody;
import com.dtflys.forest.mapping.MappingTemplate;
import com.dtflys.forest.multipart.ForestMultipart;
import com.dtflys.forest.utils.ReflectUtils;
import com.dtflys.forest.utils.RequestNameValue;
import com.dtflys.forest.utils.StringUtils;
import okhttp3.*;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
/**
* @author gongjun[<EMAIL>]
* @since 2018-02-27 18:18
*/
public class OkHttp3BodyBuilder extends AbstractBodyBuilder<Request.Builder> {
@Override
protected void setStringBody(Request.Builder builder, ForestRequest request, String text, Charset charset, String contentType, boolean mergeCharset) {
MediaType mediaType = MediaType.parse(contentType);
Charset cs = StandardCharsets.UTF_8;
if (charset != null) {
cs = charset;
}
if (contentType != null) {
if (mediaType == null) {
throw new ForestRuntimeException("[Forest] '" + contentType + "' is not a valid content type");
}
Charset mtcs = mediaType.charset();
if (mtcs == null) {
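                // e.g. "application/json" with a UTF-8 charset becomes "application/json; charset=utf-8"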
if (charset != null && mergeCharset) {
mediaType = MediaType.parse(contentType + "; charset=" + charset.name().toLowerCase());
}
}
}
byte[] bytes = text.getBytes(cs);
RequestBody body = RequestBody.create(mediaType, bytes);
builder.method(request.getType().getName(), body);
}
private void addMultipart(MultipartBody.Builder bodyBuilder,
String name, Object value, String contentType,
Charset charset, ForestJsonConverter jsonConverter) {
if (StringUtils.isEmpty(contentType)) {
contentType = "text/plain";
}
MediaType partMediaType = MediaType.parse(contentType);
if (partMediaType.charset() == null) {
partMediaType.charset(charset);
}
RequestBody requestBody = RequestBody.create(partMediaType, MappingTemplate.getParameterValue(jsonConverter, value));
MultipartBody.Part part = MultipartBody.Part.createFormData(name, null, requestBody);
bodyBuilder.addPart(part);
}
@Override
protected void setFileBody(Request.Builder builder,
ForestRequest request,
Charset charset, String contentType,
LifeCycleHandler lifeCycleHandler) {
String boundary = request.getBoundary();
MultipartBody.Builder bodyBuilder = null;
if (StringUtils.isNotEmpty(boundary)) {
bodyBuilder = new MultipartBody.Builder(boundary);
} else {
bodyBuilder = new MultipartBody.Builder();
}
ContentType objContentType = new ContentType(contentType);
MediaType mediaType = MediaType.parse(objContentType.toStringWithoutParameters());
if ("multipart".equals(mediaType.type())) {
bodyBuilder.setType(mediaType);
}
ForestJsonConverter jsonConverter = request.getConfiguration().getJsonConverter();
List<ForestMultipart> multiparts = request.getMultiparts();
for (ForestRequestBody item : request.body()) {
if (item instanceof NameValueRequestBody) {
NameValueRequestBody nameValueItem = (NameValueRequestBody) item;
String name = nameValueItem.getName();
Object value = nameValueItem.getValue();
String partContentType = nameValueItem.getContentType();
addMultipart(bodyBuilder, name, value, partContentType, charset, jsonConverter);
} else if (item instanceof ObjectRequestBody) {
Object obj = ((ObjectRequestBody) item).getObject();
if (obj == null) {
continue;
}
Map<String, Object> attrs = jsonConverter.convertObjectToMap(obj);
for (Map.Entry<String, Object> entry : attrs.entrySet()) {
String name = entry.getKey();
Object value = entry.getValue();
addMultipart(bodyBuilder, name, value, null, charset, jsonConverter);
}
}
}
for (ForestMultipart multipart : multiparts) {
RequestBody fileBody = createFileBody(request, multipart, charset, lifeCycleHandler);
bodyBuilder.addFormDataPart(multipart.getName(), multipart.getOriginalFileName(), fileBody);
}
MultipartBody body = bodyBuilder.build();
builder.method(request.getType().getName(), body);
}
private RequestBody createFileBody(ForestRequest request, ForestMultipart multipart, Charset charset, LifeCycleHandler lifeCycleHandler) {
RequestBody wrappedBody, requestBody;
String partContentType = multipart.getContentType();
MediaType fileMediaType = null;
if (StringUtils.isNotEmpty(partContentType)) {
fileMediaType = MediaType.parse(partContentType);
}
if (fileMediaType == null) {
String mimeType = URLConnection.guessContentTypeFromName(multipart.getOriginalFileName());
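            // e.g. "photo.png" typically resolves to "image/png"; unknown extensions fall back to multipart/form-data below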
if (mimeType == null) {
// guess this is a video uploading
fileMediaType = MediaType.parse(com.dtflys.forest.backend.ContentType.MULTIPART_FORM_DATA);
} else {
fileMediaType = MediaType.parse(mimeType);
}
}
if (fileMediaType.charset() == null) {
fileMediaType.charset(charset);
}
if (multipart.isFile()) {
requestBody = RequestBody.create(fileMediaType, multipart.getFile());
} else {
requestBody = RequestBody.create(fileMediaType, multipart.getBytes());
}
wrappedBody = new OkHttpMultipartBody(request, requestBody, lifeCycleHandler);
return wrappedBody;
}
@Override
protected void setBinaryBody(
Request.Builder builder,
ForestRequest request,
Charset charset,
String contentType,
byte[] bytes,
boolean mergeCharset) {
if (StringUtils.isBlank(contentType)) {
contentType = ContentType.APPLICATION_OCTET_STREAM;
}
MediaType mediaType = MediaType.parse(contentType);
Charset mtcs = mediaType.charset();
if (mtcs == null) {
if (charset != null && mergeCharset) {
mediaType = MediaType.parse(contentType + "; charset=" + charset.name().toLowerCase());
}
}
RequestBody body = RequestBody.create(mediaType, bytes);
builder.method(request.getType().getName(), body);
}
}
| 3,178 |
310 | <gh_stars>100-1000
/*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jitsi.android.gui.call;
import android.content.*;
import android.os.*;
import android.view.*;
import org.jitsi.*;
import org.jitsi.android.*;
import org.jitsi.android.gui.util.*;
import org.jitsi.service.osgi.*;
/**
* Fragment displayed in <tt>VideoCallActivity</tt> when the call has ended.
*
* @author <NAME>
*/
public class CallEnded
extends OSGiFragment
{
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState)
{
View v = inflater.inflate(R.layout.call_ended, container, false);
ViewUtil.setTextViewValue(v, R.id.callTime,
VideoCallActivity.callState.callDuration);
String errorReason = VideoCallActivity.callState.errorReason;
if(!errorReason.isEmpty())
{
ViewUtil.setTextViewValue(v, R.id.callErrorReason, errorReason);
}
else
{
ViewUtil.ensureVisible(v, R.id.callErrorReason, false);
}
v.findViewById(R.id.callHangupButton)
.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
Context ctx = getActivity();
getActivity().finish();
ctx.startActivity(JitsiApplication.getHomeIntent());
}
});
return v;
}
}
| 898 |
406 | <filename>gallery/media_player.py
"""
Author: <NAME>
Modified: 2021-12-11
Adapted from: https://github.com/israel-dryer/Mini-VLC-Player
"""
from pathlib import Path
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap.icons import Emoji
class MediaPlayer(ttk.Frame):
def __init__(self, master):
super().__init__(master)
self.pack(fill=BOTH, expand=YES)
self.hdr_var = ttk.StringVar()
self.elapsed_var = ttk.DoubleVar(value=0)
self.remain_var = ttk.DoubleVar(value=190)
self.create_header()
self.create_media_window()
self.create_progress_meter()
self.create_buttonbox()
def create_header(self):
"""The application header to display user messages"""
self.hdr_var.set("Open a file to begin playback")
lbl = ttk.Label(
master=self,
textvariable=self.hdr_var,
bootstyle=(LIGHT, INVERSE),
padding=10
)
lbl.pack(fill=X, expand=YES)
def create_media_window(self):
"""Create frame to contain media"""
img_path = Path(__file__).parent / 'assets/mp_background.png'
self.demo_media = ttk.PhotoImage(file=img_path)
self.media = ttk.Label(self, image=self.demo_media)
self.media.pack(fill=BOTH, expand=YES)
def create_progress_meter(self):
"""Create frame with progress meter with lables"""
container = ttk.Frame(self)
container.pack(fill=X, expand=YES, pady=10)
self.elapse = ttk.Label(container, text='00:00')
self.elapse.pack(side=LEFT, padx=10)
self.scale = ttk.Scale(
master=container,
command=self.on_progress,
bootstyle=SECONDARY
)
self.scale.pack(side=LEFT, fill=X, expand=YES)
self.remain = ttk.Label(container, text='03:10')
self.remain.pack(side=LEFT, fill=X, padx=10)
def create_buttonbox(self):
"""Create buttonbox with media controls"""
container = ttk.Frame(self)
container.pack(fill=X, expand=YES)
ttk.Style().configure('TButton', font="-size 14")
rev_btn = ttk.Button(
master=container,
text=Emoji.get('black left-pointing double triangle with vertical bar'),
padding=10,
)
rev_btn.pack(side=LEFT, fill=X, expand=YES)
play_btn = ttk.Button(
master=container,
text=Emoji.get('black right-pointing triangle'),
padding=10,
)
play_btn.pack(side=LEFT, fill=X, expand=YES)
fwd_btn = ttk.Button(
master=container,
text=Emoji.get('black right-pointing double triangle with vertical bar'),
padding=10,
)
fwd_btn.pack(side=LEFT, fill=X, expand=YES)
pause_btn = ttk.Button(
master=container,
text=Emoji.get('double vertical bar'),
padding=10,
)
pause_btn.pack(side=LEFT, fill=X, expand=YES)
stop_btn = ttk.Button(
master=container,
text=Emoji.get('black square for stop'),
padding=10,
)
stop_btn.pack(side=LEFT, fill=X, expand=YES)
        open_btn = ttk.Button(
            master=container,
            text=Emoji.get('open file folder'),
            bootstyle=SECONDARY,
            padding=10
        )
        open_btn.pack(side=LEFT, fill=X, expand=YES)
def on_progress(self, val: float):
"""Update progress labels when the scale is updated."""
elapsed = self.elapsed_var.get()
remaining = self.remain_var.get()
total = int(elapsed + remaining)
elapse = int(float(val) * total)
elapse_min = elapse // 60
elapse_sec = elapse % 60
remain_tot = total - elapse
remain_min = remain_tot // 60
remain_sec = remain_tot % 60
self.elapsed_var.set(elapse)
self.remain_var.set(remain_tot)
self.elapse.configure(text=f'{elapse_min:02d}:{elapse_sec:02d}')
self.remain.configure(text=f'{remain_min:02d}:{remain_sec:02d}')
if __name__ == '__main__':
app = ttk.Window("Media Player", "yeti")
mp = MediaPlayer(app)
mp.scale.set(0.35) # set default
app.mainloop() | 2,137 |
362 | <reponame>gaganbedimsg/galaxy
// Copyright (c) 2016, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#pragma once
#include "protocol/galaxy.pb.h"
#include "util/error_code.h"
#include <map>
#include <string>
#include <vector>
namespace baidu {
namespace galaxy {
namespace resource {
class VolumResource {
public:
class Volum {
public:
Volum() :
total_(0),
assigned_(0),
medium_(baidu::galaxy::proto::kDisk) {}
int64_t total_;
int64_t assigned_;
std::string filesystem_;
std::string mount_point_;
baidu::galaxy::proto::VolumMedium medium_;
};
public:
VolumResource();
~VolumResource();
int Load();
baidu::galaxy::util::ErrorCode Allocat(const baidu::galaxy::proto::VolumRequired& require);
baidu::galaxy::util::ErrorCode Release(const baidu::galaxy::proto::VolumRequired& require);
void Resource(std::map<std::string, Volum>& r);
private:
baidu::galaxy::util::ErrorCode LoadVolum(const std::string& config, Volum& volum);
std::map<std::string, Volum> resource_;
};
}
}
}
| 497 |
537 | #pragma once
#include <turbodbc/field.h>
#include <turbodbc/parameter.h>
#include <cpp_odbc/multi_value_buffer.h>
namespace turbodbc {
/**
* @brief Check whether a parameter can hold the given value
*/
bool parameter_is_suitable_for(parameter const ¶m, field const &value);
/**
* @brief Set the destination's buffer element to a value corresponding to the
* input field
* @param value The input value
* @param destination The target which value shall be changed
*/
void set_field(field const & value, cpp_odbc::writable_buffer_element & destination);
/**
* @brief Set the destination's buffer element to NULL
*/
void set_null(cpp_odbc::writable_buffer_element & destination);
} | 219 |
6,684 | <filename>botocore/data/iotfleethub/2020-11-03/paginators-1.json
{
"pagination": {
"ListApplications": {
"input_token": "nextToken",
"output_token": "nextToken",
"result_key": "applicationSummaries"
}
}
}
| 104 |
852 | #include "CondFormats/DataRecord/interface/L1GctChannelMaskRcd.h"
#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h"
EVENTSETUP_RECORD_REG(L1GctChannelMaskRcd);
| 70 |
2,072 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any
from pyhocon import ConfigFactory, ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_utils import ModeDashboardUtils
from databuilder.extractor.restapi.rest_api_extractor import MODEL_CLASS
from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery
LOGGER = logging.getLogger(__name__)
class ModeDashboardOwnerExtractor(Extractor):
"""
An Extractor that extracts Dashboard owner.
"""
def init(self, conf: ConfigTree) -> None:
self._conf = conf
restapi_query = self._build_restapi_query()
self._extractor = ModeDashboardUtils.create_mode_rest_api_extractor(
restapi_query=restapi_query,
conf=self._conf.with_fallback(
ConfigFactory.from_dict(
{MODEL_CLASS: 'databuilder.models.dashboard.dashboard_owner.DashboardOwner', }
)
)
)
def extract(self) -> Any:
return self._extractor.extract()
def get_scope(self) -> str:
return 'extractor.mode_dashboard_owner'
def _build_restapi_query(self) -> ModePaginatedRestApiQuery:
"""
Build REST API Query to get Mode Dashboard owner
:return: A RestApiQuery that provides Mode Dashboard owner
"""
seed_query = ModeDashboardUtils.get_seed_query(conf=self._conf)
params = ModeDashboardUtils.get_auth_params(conf=self._conf, discover_auth=True)
# Reports
# https://mode.com/developer/discovery-api/analytics/reports/
url = 'https://app.mode.com/batch/{organization}/reports'
json_path = 'reports[*].[token, space_token, creator_email]'
field_names = ['dashboard_id', 'dashboard_group_id', 'email']
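        # positional mapping: token -> dashboard_id, space_token -> dashboard_group_id, creator_email -> email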
max_record_size = 1000
pagination_json_path = 'reports[*]'
creator_query = ModePaginatedRestApiQuery(query_to_join=seed_query, url=url, params=params,
json_path=json_path, field_names=field_names,
skip_no_result=True, max_record_size=max_record_size,
pagination_json_path=pagination_json_path)
return creator_query
| 1,063 |
348 | <gh_stars>100-1000
{"nom":"Pluvet","circ":"3ème circonscription","dpt":"Côte-d'Or","inscrits":303,"abs":179,"votants":124,"blancs":12,"nuls":9,"exp":103,"res":[{"nuance":"REM","nom":"<NAME>","voix":59},{"nuance":"FN","nom":"<NAME>","voix":44}]} | 102 |
432 | <reponame>mfkiwl/riscv_vhdl-64bit-fault-tolerant
/*
* Copyright 2019 <NAME>, <EMAIL>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <api_core.h>
#include "dsu.h"
#include <generic-isa.h>
#include "cmd_dsu_busutil.h"
#include "cmd_dsu_isrunning.h"
#include "cmd_dsu_run.h"
#include "cmd_dsu_halt.h"
#include "cmd_dsu_status.h"
namespace debugger {
DSU::DSU(const char *name) :
RegMemBankGeneric(name),
DsuRegisters(static_cast<IService *>(this)) {
registerInterface(static_cast<IMemoryOperation *>(this));
registerInterface(static_cast<IDsuGeneric *>(this));
registerInterface(static_cast<IDebug *>(this));
registerAttribute("CPU", &cpu_);
registerAttribute("CmdExecutor", &cmdexec_);
registerAttribute("Tap", &tap_);
icpulist_.make_list(0);
    icmdlist_.make_list(0);
RISCV_event_create(&nb_event_, "DSU_event_nb");
}
DSU::~DSU() {
RISCV_event_close(&nb_event_);
}
void DSU::postinitService() {
RegMemBankGeneric::postinitService();
itap_ = static_cast<ITap *>
(RISCV_get_service_iface(tap_.to_string(), IFACE_TAP));
if (!itap_) {
RISCV_error("Can't find ITap interface %s", tap_.to_string());
}
iexec_ = static_cast<ICmdExecutor *>(
RISCV_get_service_iface(cmdexec_.to_string(), IFACE_CMD_EXECUTOR));
if (!iexec_) {
RISCV_error("Can't find ICmdExecutor interface %s", cmdexec_.to_string());
} else {
int cnt = 0;
icmdlist_.new_list_item().make_iface(new CmdDsuStatus(itap_));
icmdlist_.new_list_item().make_iface(new CmdDsuBusUtil(itap_));
icmdlist_.new_list_item().make_iface(new CmdDsuIsRunning(itap_));
        icmdlist_.new_list_item().make_iface(new CmdDsuHalt(itap_));
icmdlist_.new_list_item().make_iface(new CmdDsuRun(itap_));
for (unsigned i = 0; i < icmdlist_.size(); i++) {
iexec_->registerCommand(static_cast<ICommand *>(icmdlist_[i].to_iface()));
}
}
ICpuGeneric *icpu;
for (unsigned i = 0; i < cpu_.size(); i++) {
icpu = static_cast<ICpuGeneric *>(
RISCV_get_service_iface(cpu_[i].to_string(), IFACE_CPU_GENERIC));
if (!icpu) {
RISCV_error("Can't find ICpuGeneric interface %s",
cpu_[i].to_string());
} else {
AttributeType item;
item.make_iface(icpu);
icpulist_.add_to_list(&item);
}
}
// Set default context
hartSelect(0);
}
void DSU::predeleteService() {
for (unsigned i = 0; i < icmdlist_.size(); i++) {
iexec_->unregisterCommand(static_cast<ICommand *>(icmdlist_[i].to_iface()));
}
}
void DSU::nb_response_debug_port(DebugPortTransactionType *trans) {
RISCV_event_set(&nb_event_);
}
void DSU::nb_debug_write(unsigned hartid, uint16_t addr, uint64_t wdata) {
if (static_cast<int>(hartid) >= hartTotal()) {
RISCV_error("Debug Access index out of range %d", hartid);
return;
}
ICpuGeneric *icpu = static_cast<ICpuGeneric *>(icpulist_[hartid].to_iface());
nb_trans_.addr = addr;
nb_trans_.wdata = wdata;
nb_trans_.write = 1;
nb_trans_.bytes = 8;
RISCV_event_clear(&nb_event_);
icpu->nb_transport_debug_port(&nb_trans_, static_cast<IDbgNbResponse *>(this));
RISCV_event_wait(&nb_event_);
}
void DSU::incrementRdAccess(int mst_id) {
bus_util_.getp()[2*mst_id + 1].val++;
}
void DSU::incrementWrAccess(int mst_id) {
bus_util_.getp()[2*mst_id].val++;
}
void DSU::setResetPin(bool val) {
IResetListener *irst;
for (unsigned i = 0; i < cpu_.size(); i++) {
irst = static_cast<IResetListener *>(
RISCV_get_service_iface(cpu_[i].to_string(),
IFACE_RESET_LISTENER));
if (!irst) {
RISCV_error("Can't find IResetListener interface %s",
cpu_[i].to_string());
} else {
irst->reset(static_cast<IService *>(this));
}
}
}
void DSU::hartSelect(int hartidx) {
if (hartidx >= static_cast<int>(icpulist_.size())) {
hartsel_ = icpulist_.size();
RISCV_error("Context index out of range %d", hartidx);
return;
}
ICpuGeneric *pcpu = static_cast<ICpuGeneric *>(icpulist_[hartidx].to_iface());
hartsel_ = hartidx;
dport_region_.setCpu(pcpu);
}
bool DSU::isHalted(int hartidx) {
if (hartidx >= static_cast<int>(icpulist_.size())) {
return false;
}
ICpuGeneric *pcpu = static_cast<ICpuGeneric *>(icpulist_[hartidx].to_iface());
if (!pcpu) {
return false;
}
return pcpu->isHalt();
}
void DSU::reqResume(int hartidx) {
DMCONTROL_TYPE::ValueType t;
t.val = dmcontrol_.getValue().val;
t.bits.hartsello = hartidx;
t.bits.hartselhi = hartidx >> 10;
t.bits.resumereq = 1;
nb_debug_write(static_cast<uint32_t>(hartidx),
CSR_runcontrol,
t.val);
}
void DSU::reqHalt(int hartidx) {
DMCONTROL_TYPE::ValueType t;
t.val = dmcontrol_.getValue().val;
t.bits.hartsello = hartidx;
t.bits.hartselhi = hartidx >> 10;
t.bits.haltreq = 1;
nb_debug_write(static_cast<uint32_t>(hartidx),
CSR_runcontrol,
t.val);
}
} // namespace debugger
| 2,666 |
1,350 | <filename>sdk/resourcegraph/azure-resourcemanager-resourcegraph/src/main/java/com/azure/resourcemanager/resourcegraph/models/Column.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.resourcegraph.models;
import com.azure.core.annotation.Fluent;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
/** Query result column descriptor. */
@Fluent
public final class Column {
@JsonIgnore private final ClientLogger logger = new ClientLogger(Column.class);
/*
* Column name.
*/
@JsonProperty(value = "name", required = true)
private String name;
/*
* Column data type.
*/
@JsonProperty(value = "type", required = true)
private ColumnDataType type;
/**
* Get the name property: Column name.
*
* @return the name value.
*/
public String name() {
return this.name;
}
/**
* Set the name property: Column name.
*
* @param name the name value to set.
* @return the Column object itself.
*/
public Column withName(String name) {
this.name = name;
return this;
}
/**
* Get the type property: Column data type.
*
* @return the type value.
*/
public ColumnDataType type() {
return this.type;
}
/**
* Set the type property: Column data type.
*
* @param type the type value to set.
* @return the Column object itself.
*/
public Column withType(ColumnDataType type) {
this.type = type;
return this;
}
/**
* Validates the instance.
*
* @throws IllegalArgumentException thrown if the instance is not valid.
*/
public void validate() {
if (name() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException("Missing required property name in model Column"));
}
if (type() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException("Missing required property type in model Column"));
}
}
}
| 869 |
2,813 | package org.jabref.logic.exporter;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.jabref.logic.layout.LayoutFormatterPreferences;
import org.jabref.logic.xmp.XmpPreferences;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.Answers;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
public class YamlExporterTest {
private static Charset charset;
private static Exporter yamlExporter;
private static BibDatabaseContext databaseContext;
@BeforeAll
static void setUp() {
List<TemplateExporter> customFormats = new ArrayList<>();
LayoutFormatterPreferences layoutPreferences = mock(LayoutFormatterPreferences.class, Answers.RETURNS_DEEP_STUBS);
SavePreferences savePreferences = mock(SavePreferences.class);
XmpPreferences xmpPreferences = mock(XmpPreferences.class);
BibEntryTypesManager entryTypesManager = mock(BibEntryTypesManager.class);
ExporterFactory exporterFactory = ExporterFactory.create(customFormats, layoutPreferences, savePreferences, xmpPreferences, BibDatabaseMode.BIBTEX, entryTypesManager);
databaseContext = new BibDatabaseContext();
charset = StandardCharsets.UTF_8;
yamlExporter = exporterFactory.getExporterByName("yaml").get();
}
@Test
public final void exportForNoEntriesWritesNothing(@TempDir Path tempFile) throws Exception {
Path file = tempFile.resolve("ThisIsARandomlyNamedFile");
Files.createFile(file);
yamlExporter.export(databaseContext, tempFile, charset, Collections.emptyList());
assertEquals(Collections.emptyList(), Files.readAllLines(file));
}
@Test
public final void exportsCorrectContent(@TempDir Path tempFile) throws Exception {
BibEntry entry = new BibEntry(StandardEntryType.Article)
.withCitationKey("test")
.withField(StandardField.AUTHOR, "Test Author")
.withField(StandardField.TITLE, "Test Title")
.withField(StandardField.URL, "http://example.com")
.withField(StandardField.DATE, "2020-10-14");
Path file = tempFile.resolve("RandomFileName");
Files.createFile(file);
yamlExporter.export(databaseContext, file, charset, Collections.singletonList(entry));
List<String> expected = List.of(
"---",
"references:",
"- id: test",
" type: article",
" author:",
" - literal: \"Test Author\"",
" title: \"Test Title\"",
" issued: 2020-10-14",
" url: http://example.com",
"---");
assertEquals(expected, Files.readAllLines(file));
}
@Test
public final void formatsContentCorrect(@TempDir Path tempFile) throws Exception {
BibEntry entry = new BibEntry(StandardEntryType.Misc)
.withCitationKey("test")
.withField(StandardField.AUTHOR, "Test Author")
.withField(StandardField.TITLE, "Test Title")
.withField(StandardField.URL, "http://example.com")
.withField(StandardField.DATE, "2020-10-14");
Path file = tempFile.resolve("RandomFileName");
Files.createFile(file);
yamlExporter.export(databaseContext, file, charset, Collections.singletonList(entry));
List<String> expected = List.of(
"---",
"references:",
"- id: test",
" type: no-type",
" author:",
" - literal: \"Test Author\"",
" title: \"Test Title\"",
" issued: 2020-10-14",
" url: http://example.com",
"---");
assertEquals(expected, Files.readAllLines(file));
}
@Test
void passesModifiedCharset(@TempDir Path tempFile) throws Exception {
BibEntry entry = new BibEntry(StandardEntryType.Article)
.withCitationKey("test")
.withField(StandardField.AUTHOR, "<NAME>")
.withField(StandardField.TITLE, "細雪")
.withField(StandardField.URL, "http://example.com")
.withField(StandardField.DATE, "2020-10-14");
Path file = tempFile.resolve("RandomFileName");
Files.createFile(file);
yamlExporter.export(databaseContext, file, StandardCharsets.UTF_8, Collections.singletonList(entry));
List<String> expected = List.of(
"---",
"references:",
"- id: test",
" type: article",
" author:",
" - literal: \"谷崎 潤一郎\"",
" title: \"細雪\"",
" issued: 2020-10-14",
" url: http://example.com",
"---");
assertEquals(expected, Files.readAllLines(file));
}
@Test
void passesModifiedCharsetNull(@TempDir Path tempFile) throws Exception {
BibEntry entry = new BibEntry(StandardEntryType.Article)
.withCitationKey("test")
.withField(StandardField.AUTHOR, "<NAME>")
.withField(StandardField.TITLE, "細雪")
.withField(StandardField.URL, "http://example.com")
.withField(StandardField.DATE, "2020-10-14");
Path file = tempFile.resolve("RandomFileName");
Files.createFile(file);
yamlExporter.export(databaseContext, file, null, Collections.singletonList(entry));
List<String> expected = List.of(
"---",
"references:",
"- id: test",
" type: article",
" author:",
" - literal: \"谷崎 潤一郎\"",
" title: \"細雪\"",
" issued: 2020-10-14",
" url: http://example.com",
"---");
assertEquals(expected, Files.readAllLines(file));
}
@Test
void passesModifiedCharsetASCII(@TempDir Path tempFile) throws Exception {
BibEntry entry = new BibEntry(StandardEntryType.Article)
.withCitationKey("test")
.withField(StandardField.AUTHOR, "<NAME>")
.withField(StandardField.TITLE, "細雪")
.withField(StandardField.URL, "http://example.com")
.withField(StandardField.DATE, "2020-10-14");
Path file = tempFile.resolve("RandomFileName");
Files.createFile(file);
yamlExporter.export(databaseContext, file, StandardCharsets.US_ASCII, Collections.singletonList(entry));
List<String> expected = List.of(
"---",
"references:",
"- id: test",
" type: article",
" author:",
" - literal: \"?? ???\"",
" title: \"??\"",
" issued: 2020-10-14",
" url: http://example.com",
"---");
assertEquals(expected, Files.readAllLines(file));
}
}
| 3,466 |
778 | <filename>applications/ExaquteSandboxApplication/python_scripts/WindGenerator/RandomMaterial.py
import pyfftw
from math import *
import numpy as np
from scipy.special import erfinv
from scipy import misc
from scipy.signal import convolve2d
# from scipy.special import kv as Kv
# from itertools import product
import os, sys, csv
from time import time
from multiprocessing import Pool, Process
# from joblib import Parallel, delayed
from tqdm import tqdm
import matplotlib.pyplot as plt
# from pyevtk.hl import imageToVTK
from RandomFieldModule.GaussianRandomField import GaussianRandomField
from RandomFieldModule.utilities.common import *
from RandomFieldModule.utilities.ErrorMessages import *
from RandomFieldModule.utilities.Exports import exportVTK
#######################################################################################################
def levelcut(field, level=0):
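    """Binary phase indicator: 1 where the field exceeds the level, 0 elsewhere."""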
phase = np.where(field > level, 1, 0)
return phase.astype(np.intc)
def get_vf(phase):
return np.mean(phase)
#######################################################################################################
# Random material generator class
#######################################################################################################
class RandomMaterial:
def __init__(self, grid_level, ndim=2, verbose=0, levelcut_strategy="abs", **kwargs):
self.verbose = verbose
self.ndim = int(ndim) # dimension 2D or 3D
if not (self.ndim==2 or self.ndim==3): msgDimError(self.ndim)
self.N = int(2**grid_level)
self.Nd = self.N * np.ones(self.ndim, dtype=np.intc)
self.nvoxels = np.prod(self.Nd)
### Level-cut strategy:
### 'abs' - level-cut of the abs(field)
### 'sym' - standard level-cut
self.levelcut_strategy = levelcut_strategy
vf = kwargs['vf']
self.set_level(vf)
### Gaussian Random Field (Intensity)
if self.verbose: print('\nBuilding GRF...\n')
self.GRF = GaussianRandomField(grid_level=grid_level, ndim=ndim, verbose=verbose, **kwargs)
#--------------------------------------------------------------------------
# Updates
#--------------------------------------------------------------------------
def set_level(self, vf=None, tau=None):
if vf is not None:
self.vf = vf
self.tau = vf2tau(vf, strategy=self.levelcut_strategy)
elif tau is not None:
self.tau = tau
self.vf = tau2vf(tau, strategy=self.levelcut_strategy)
else:
print('Either expected volume fraction or level set value must be given.')
def reseed(self, seed=None):
self.GRF.reseed(seed)
#--------------------------------------------------------------------------
# Sampling
#--------------------------------------------------------------------------
### Generate a realization
def sample(self, noise=None):
field = self.GRF.sample(noise=noise)
        if self.levelcut_strategy == 'abs': field = np.abs(field)
field = levelcut(field, self.tau)
return field
### Generate a family of realizations
def generate_samples(self, nsamples=1, path=None, output_format="png", append=False):
output = False if path is None else True
if output:
if not append or not hasattr(self, 'sample_count'):
os.system('rm -f ' + path + 'sample_*')
self.sample_count = 0
time_start = time()
expected_vf = 0
for isample in tqdm(range(nsamples)):
phase = self.sample()
expected_vf += phase.mean()
if output:
self.sample_count += 1
filename = path + 'sample_{0:d}'.format(self.sample_count)
                if self.ndim==2 and output_format == "png": self.save_png(phase, filename)
                elif self.ndim==3 or output_format == "vtk": self.save_vtk(phase, filename)
expected_vf /= nsamples
print('All samples generation time: {0} s'.format(time()-time_start))
print('Volume fraction: {0}'.format(expected_vf))
#--------------------------------------------------------------------------
# EXPORTS
#--------------------------------------------------------------------------
def save_png(self, phase, filename):
misc.imsave(filename, (1-phase))
def save_vtk(self, phase, filename):
exportVTK(filename, cellData = {'phase' : phase})
#--------------------------------------------------------------------------
# TESTS
#--------------------------------------------------------------------------
def test_Covariance(self, nsamples=1000):
return self.GRF.test_Covariance(nsamples)
def test_VolumeFraction(self, nsamples=1000):
from RandomMaterial.utilities import compute_ProbaDist, fit_ProbaDist, vf2tau, tau2vf
data = []
for isample in tqdm(range(nsamples)):
phase = self.sample()
vf = phase.mean()
data.append(vf)
p, x = compute_ProbaDist(data)
m, sigma, p_fit = fit_ProbaDist(x, p, type='LogNormal')
print('m={0}, s={1}'.format(exp(m), sigma))
print('expected vf = ', np.mean(data))
plt.plot(x,p)
plt.plot(x,p_fit)
plt.axvline(x=self.vf, color='black')
plt.legend(['vf pdf', 'Fit'])
plt.show()
return 0
def test_TwoPointProbability(self, nsamples=1000):
from RandomMaterial.utilities import autocorrelation
from RandomMaterial.reconstruction import RadialAverage
from utilities.image_statistics import LevelSetGaussian as LSG
C = np.zeros(self.Nd)
for isample in tqdm(range(nsamples)):
phase = self.sample()
C += autocorrelation(phase)
C /= nsamples
S2, Var = RadialAverage(C)
h = 1/self.N
Slope = (S2[1]-S2[0])/h
nu = self.GRF.Covariance.nu
rho = self.GRF.Covariance.corr_len[0]
Slope0 = LSG.S2_slope_at_zero_Matern(self.tau, nu, rho)
print("nu = ", nu)
print("Slope = ", Slope)
print("Slope (analytic) = ", Slope0)
return 0
#######################################################################################################
#######################################################################################################
# Run as main (for testing)
#######################################################################################################
if __name__ == "__main__":
import importlib
config = importlib.import_module(sys.argv[1])
RM = RandomMaterial(config)
| 2,622 |
48,021 | <gh_stars>1000+
{
"expect": {
"25": -1,
"103": "The cow jumped over the moon.",
"125": "There is some text missing from this line.",
"126": "There is some text missing from this line.",
"145": "There is some text missing from this line.",
"146": "There is some text missing from this line.",
"147": "There is also some text missing here.",
"148": "There is also some text missing here.",
"216": "There are some words that don't belong in this sentence.",
"232": "Somebody typed the end of this line twice.",
"271": -1,
"290": "This line of words is cleaned up.",
"304": -1,
"305": -1,
"306": -1,
"307": -1,
"308": -1,
"309": -1,
"310": -1,
"324": "Fix the errors on this line and replace them with undo.",
"367": -1,
"368": -1,
"369": -1,
"370": -1,
"384": "When this line was typed in, someone pressed some wrong keys!",
"385": "When this line was typed in, someone pressed some wrong keys!",
"405": "This line has a few words that need changing using the change operator.",
"406": "This line has a few words that need changing using the change operator.",
"426": "The end of this line needs to be corrected using the `c$` command.",
"427": "The end of this line needs to be corrected using the `c$` command.",
"490": -1,
"509": -1,
"532": "Usually the best time to see the flowers is in the spring.",
"725": -1,
"730": -1,
"746": "This line will allow you to practice appending text to a line.",
"747": "This line will allow you to practice appending text to a line.",
"767": "Adding 123 to 456 gives you 579.",
"768": "Adding 123 to 456 gives you 579.",
"794": "a) This is the first item.",
"795": "b) This is the second item."
}
}
| 652 |
348 | {"nom":"Asnières-en-Poitou","dpt":"Deux-Sèvres","inscrits":144,"abs":38,"votants":106,"blancs":13,"nuls":4,"exp":89,"res":[{"panneau":"1","voix":65},{"panneau":"2","voix":24}]} | 79 |
575 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An implementation of PLS (Partial Least Squares)
using numpy. Written to apply to neural network activations.
The setup is very similar to the setup in cca_core.py.
See:
https://arxiv.org/abs/1706.05806
https://arxiv.org/abs/1806.05759
for more background on the methods.
"""
import numpy as np
def get_pls_similarity(acts1, acts2):
"""
This function computes Partial Least Squares between two sets of activations.
Args:
acts1: (num_neurons1, data_points) a 2d numpy array of neurons by
datapoints where entry (i,j) is the output of neuron i on
datapoint j.
acts2: (num_neurons2, data_points) same as above, but (potentially)
for a different set of neurons. Note that acts1 and acts2
can have different numbers of neurons, but must agree on the
number of datapoints
threshold: float between 0, 1 used to get rid of trailing zeros in
the cca correlation coefficients to output more accurate
summary statistics of correlations.
compute_dirns: boolean value determining whether actual cca
directions are computed. (For very large neurons and
datasets, may be better to compute these on the fly
instead of store in memory.)
verbose: Boolean, whether info about intermediate outputs printed
Returns:
return_dict: A dictionary with outputs from the cca computations.
Contains neuron coefficients (combinations of neurons
that correspond to cca directions), the cca correlation
coefficients (how well aligned directions correlate),
x and y idxs (for computing cca directions on the fly
if compute_dirns=False), and summary statistics. If
compute_dirns=True, the cca directions are also
computed.
"""
# assert dimensionality equal
assert acts1.shape[1] == acts2.shape[1], "dimensions don't match"
# check that acts1, acts2 are transposition
assert acts1.shape[0] < acts1.shape[1], ("input must be number of neurons"
"by datapoints")
# compute covariance with numpy function for extra stability
numx = acts1.shape[0]
covariance = np.cov(acts1, acts2)
sigmaxx = covariance[:numx, :numx]
sigmaxy = covariance[:numx, numx:]
sigmayx = covariance[numx:, :numx]
sigmayy = covariance[numx:, numx:]
# compute Partial Least Squares of cross covariance using
# SVD. Columns of U are coefficients for acts1, rows of V
# are coefficients for acts2.
U, S, V = np.linalg.svd(sigmaxy, full_matrices=False)
S = np.abs(S)
# compute means
neuron_means1 = np.mean(acts1, axis=1, keepdims=True)
neuron_means2 = np.mean(acts2, axis=1, keepdims=True)
# collect return values
return_dict = {}
return_dict["eigenvals"] = S
return_dict["neuron_coeffs1"] = U.T
return_dict["neuron_coeffs2"] = V
pls_dirns1 = np.dot(U.T, (acts1 - neuron_means1)) + neuron_means1
pls_dirns2 = np.dot(V, (acts2 - neuron_means2)) + neuron_means2
return_dict["pls_dirns1"] = pls_dirns1
return_dict["pls_dirns2"] = pls_dirns2
return return_dict
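# Minimal usage sketch (illustrative only, not part of the original module): compare two random
# activation matrices. The shapes and fixed seed are arbitrary choices; equal neuron counts are
# used so the mean re-centering of pls_dirns2 broadcasts cleanly.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    acts1 = rng.randn(64, 1000)  # (num_neurons1, data_points)
    acts2 = rng.randn(64, 1000)  # (num_neurons2, data_points)
    result = get_pls_similarity(acts1, acts2)
    print("top 5 singular values:", result["eigenvals"][:5])
    print("pls_dirns1 shape:", result["pls_dirns1"].shape)
    print("pls_dirns2 shape:", result["pls_dirns2"].shape)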
| 1,599 |