text
stringlengths 2
99.9k
| meta
dict |
---|---|
/*
* Copyright (c) 1995, 1999
* Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
*/
#ifndef _IFADDRS_H_
#define _IFADDRS_H_
struct ifaddrs {
struct ifaddrs *ifa_next;
char *ifa_name;
unsigned int ifa_flags;
struct sockaddr *ifa_addr;
struct sockaddr *ifa_netmask;
struct sockaddr *ifa_dstaddr;
void *ifa_data;
};
/*
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
* to be included it must be included before this header file.
*/
#ifndef ifa_broadaddr
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
#endif
#include <sys/cdefs.h>
__BEGIN_DECLS
extern int getifaddrs(struct ifaddrs **ifap);
extern void freeifaddrs(struct ifaddrs *ifa);
__END_DECLS
#endif
| {
"pile_set_name": "Github"
} |
/*
---------------------------------------------------------------------------
Copyright (c) 2003, Dr Brian Gladman < >, Worcester, UK.
All rights reserved.
LICENSE TERMS
The free distribution and use of this software in both source and binary
form is allowed (with or without changes) provided that:
1. distributions of this source code include the above copyright
notice, this list of conditions and the following disclaimer;
2. distributions in binary form include the above copyright
notice, this list of conditions and the following disclaimer
in the documentation and/or other associated materials;
3. the copyright holder's name is not used to endorse products
built using this software without specific written permission.
ALTERNATIVELY, provided that this notice is retained in full, this product
may be distributed under the terms of the GNU General Public License (GPL),
in which case the provisions of the GPL apply INSTEAD OF those given above.
DISCLAIMER
This software is provided 'as is' with no explicit or implied warranties
in respect of its properties, including, but not limited to, correctness
and/or fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 26/08/2003
This file contains the code for implementing encryption and decryption
for AES (Rijndael) for block and key sizes of 16, 24 and 32 bytes. It
can optionally be replaced by code written in assembler using NASM. For
further details see the file aesopt.h
*/
#include "aesopt.h"
/* load one 32-bit input column c into state y, xoring in the round key */
#define si(y,x,k,c) (s(y,c) = word_in(x, c) ^ (k)[c])
/* store state column c of x into output block y */
#define so(y,x,c) word_out(y, c, s(x,c))

/* the four 32-bit state columns are either a real array or four discrete
   locals, depending on the ARRAYS setting in aesopt.h; s(y,c) adapts */
#if defined(ARRAYS)
#define locals(y,x) x[4],y[4]
#else
#define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3
#endif

/* copy all four state columns of x into y */
#define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \
                     s(y,2) = s(x,2); s(y,3) = s(x,3);

/* whole-state load/store, and one full round: rm applied to each column */
#define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3)
#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
#define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3)
#if defined(ENCRYPTION) && !defined(AES_ASM)
/* Visual C++ .Net v7.1 provides the fastest encryption code when using
Pentium optimization with small code but this is poor for decryption
so we need to control this with the following VC++ pragmas
*/
#if defined(_MSC_VER)
#pragma optimize( "s", on )
#endif
/* Given the column (c) of the output state variable, the following
macros give the input state variables which are needed in its
computation for each row (r) of the state. All the alternative
macros give the same end values but expand into different ways
of calculating these values. In particular the complex macro
used for dynamically variable block sizes is designed to expand
to a compile time constant whenever possible but will expand to
conditional clauses on some branches (I am grateful to Frank
Yellin for this construction)
*/
/* fwd_var(x,r,c): the state column of x feeding row r of output column c
   during encryption, i.e. the ShiftRows offsets (row r reads column
   (c + r) mod 4). The conditional chain folds to a compile-time constant
   whenever r and c are literals (Frank Yellin's construction, see above). */
#define fwd_var(x,r,c)\
 ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
 : r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
 : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
 :          ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))

/* fwd_rnd: one normal encryption round for a single column; the body is
   selected by the table configuration in aesopt.h. When lookup tables
   provide the MixColumns step, fwd_mcol() is not called, so its scratch
   variables (dec_fmvars) are #undef'ed and never declared. */
#if defined(FT4_SET)
#undef  dec_fmvars
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
#elif defined(FT1_SET)
#undef  dec_fmvars
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(f,n),fwd_var,rf1,c))
#else
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ fwd_mcol(no_table(x,t_use(s,box),fwd_var,rf1,c)))
#endif

/* fwd_lrnd: the final encryption round (SubBytes/ShiftRows, no MixColumns) */
#if defined(FL4_SET)
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,l),fwd_var,rf1,c))
#elif defined(FL1_SET)
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(f,l),fwd_var,rf1,c))
#else
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ no_table(x,t_use(s,box),fwd_var,rf1,c))
#endif
/* Encrypt one 16-byte block from in_blk into out_blk using the expanded
 * key schedule held in cx. Returns aes_good/aes_error when AES_ERR_CHK
 * is enabled; otherwise aes_rval carries no value (see aesopt.h).
 */
aes_rval aes_encrypt(const void *in_blk, void *out_blk, const aes_encrypt_ctx cx[1])
{   aes_32t locals(b0, b1);
    const aes_32t *kp = cx->ks;
#ifdef dec_fmvars
    dec_fmvars; /* declare variables for fwd_mcol() if needed */
#endif

    /* recover the round count (10/12/14) from sentinel words that the key
       setup leaves in ks[] -- NOTE(review): the exact encoding is defined
       by the matching key schedule code; confirm against aeskey.c */
    aes_32t nr = (kp[45] ^ kp[52] ^ kp[53] ? kp[52] : 14);

#ifdef AES_ERR_CHK
    /* reject a context whose schedule is inconsistent with nr */
    if( (nr != 10 || !(kp[0] | kp[3] | kp[4]))
     && (nr != 12 || !(kp[0] | kp[5] | kp[6]))
     && (nr != 14 || !(kp[0] | kp[7] | kp[8])) )
        return aes_error;
#endif

    /* load the input block, xoring in the initial round key */
    state_in(b0, in_blk, kp);

#if (ENC_UNROLL == FULL)
    /* fully unrolled: deliberate case fall-through executes 14, 12 or 10
       rounds, ping-ponging the state between b0 and b1 */
    switch(nr)
    {
    case 14:
        round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
        kp += 2 * N_COLS;
        /* fall through */
    case 12:
        round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
        kp += 2 * N_COLS;
        /* fall through */
    case 10:
        round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
        round(fwd_rnd, b1, b0, kp + 3 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 4 * N_COLS);
        round(fwd_rnd, b1, b0, kp + 5 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 6 * N_COLS);
        round(fwd_rnd, b1, b0, kp + 7 * N_COLS);
        round(fwd_rnd, b0, b1, kp + 8 * N_COLS);
        round(fwd_rnd, b1, b0, kp + 9 * N_COLS);
        round(fwd_lrnd, b0, b1, kp +10 * N_COLS);
    }
#else
#if (ENC_UNROLL == PARTIAL)
    /* two rounds per loop iteration; the odd round and the final round
       are performed after the loop */
    {   aes_32t rnd;
        for(rnd = 0; rnd < (nr >> 1) - 1; ++rnd)
        {
            kp += N_COLS;
            round(fwd_rnd, b1, b0, kp);
            kp += N_COLS;
            round(fwd_rnd, b0, b1, kp);
        }
        kp += N_COLS;
        round(fwd_rnd, b1, b0, kp);
#else
    /* no unrolling: one round per iteration, copying b1 back into b0 */
    {   aes_32t rnd;
        for(rnd = 0; rnd < nr - 1; ++rnd)
        {
            kp += N_COLS;
            round(fwd_rnd, b1, b0, kp);
            l_copy(b0, b1);
        }
#endif
        kp += N_COLS;
        round(fwd_lrnd, b0, b1, kp);
    }
#endif

    state_out(out_blk, b0);
#ifdef AES_ERR_CHK
    return aes_good;
#endif
}
#endif
#if defined(DECRYPTION) && !defined(AES_ASM)
/* Visual C++ .Net v7.1 provides the fastest encryption code when using
Pentium optimization with small code but this is poor for decryption
so we need to control this with the following VC++ pragmas
*/
#if defined(_MSC_VER)
#pragma optimize( "t", on )
#endif
/* Given the column (c) of the output state variable, the following
macros give the input state variables which are needed in its
computation for each row (r) of the state. All the alternative
macros give the same end values but expand into different ways
of calculating these values. In particular the complex macro
used for dynamically variable block sizes is designed to expand
to a compile time constant whenever possible but will expand to
conditional clauses on some branches (I am grateful to Frank
Yellin for this construction)
*/
/* inv_var(x,r,c): the state column of x feeding row r of output column c
   during decryption, i.e. the inverse ShiftRows offsets (row r reads
   column (c - r) mod 4). Same constant-folding construction as fwd_var. */
#define inv_var(x,r,c)\
 ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
 : r == 1 ? ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2))\
 : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
 :          ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0)))

/* inv_rnd: one normal decryption round for a single column. In the
   table-free variant the round key is xored BEFORE inv_mcol() -- this is
   the equivalent-inverse-cipher ordering and differs from fwd_rnd. When
   tables supply InvMixColumns, the inv_mcol() scratch variables
   (dec_imvars) are #undef'ed and never declared. */
#if defined(IT4_SET)
#undef  dec_imvars
#define inv_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,n),inv_var,rf1,c))
#elif defined(IT1_SET)
#undef  dec_imvars
#define inv_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(i,n),inv_var,rf1,c))
#else
#define inv_rnd(y,x,k,c)    (s(y,c) = inv_mcol((k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c)))
#endif

/* inv_lrnd: the final decryption round (no InvMixColumns) */
#if defined(IL4_SET)
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,l),inv_var,rf1,c))
#elif defined(IL1_SET)
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(i,l),inv_var,rf1,c))
#else
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c))
#endif
/* Decrypt one 16-byte block from in_blk into out_blk using the expanded
 * key schedule held in cx. The schedule is consumed from the END toward
 * the start (kp walks backwards). Returns aes_good/aes_error when
 * AES_ERR_CHK is enabled; otherwise aes_rval carries no value.
 */
aes_rval aes_decrypt(const void *in_blk, void *out_blk, const aes_decrypt_ctx cx[1])
{   aes_32t locals(b0, b1);
#ifdef dec_imvars
    dec_imvars; /* declare variables for inv_mcol() if needed */
#endif

    /* recover the round count (10/12/14) from sentinel words in ks[] --
       NOTE(review): encoding defined by the key setup; confirm in aeskey.c */
    aes_32t nr = (cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] ? cx->ks[52] : 14);

    /* start at the last round key and work backwards */
    const aes_32t *kp = cx->ks + nr * N_COLS;

#ifdef AES_ERR_CHK
    /* reject a context whose schedule is inconsistent with nr */
    if( (nr != 10 || !(cx->ks[0] | cx->ks[3] | cx->ks[4]))
     && (nr != 12 || !(cx->ks[0] | cx->ks[5] | cx->ks[6]))
     && (nr != 14 || !(cx->ks[0] | cx->ks[7] | cx->ks[8])) )
        return aes_error;
#endif

    /* load the input block, xoring in the last round key */
    state_in(b0, in_blk, kp);

#if (DEC_UNROLL == FULL)
    /* fully unrolled: deliberate case fall-through executes 14, 12 or 10
       rounds, ping-ponging the state between b0 and b1 */
    switch(nr)
    {
    case 14:
        round(inv_rnd, b1, b0, kp - 1 * N_COLS);
        round(inv_rnd, b0, b1, kp - 2 * N_COLS);
        kp -= 2 * N_COLS;
        /* fall through */
    case 12:
        round(inv_rnd, b1, b0, kp - 1 * N_COLS);
        round(inv_rnd, b0, b1, kp - 2 * N_COLS);
        kp -= 2 * N_COLS;
        /* fall through */
    case 10:
        round(inv_rnd, b1, b0, kp - 1 * N_COLS);
        round(inv_rnd, b0, b1, kp - 2 * N_COLS);
        round(inv_rnd, b1, b0, kp - 3 * N_COLS);
        round(inv_rnd, b0, b1, kp - 4 * N_COLS);
        round(inv_rnd, b1, b0, kp - 5 * N_COLS);
        round(inv_rnd, b0, b1, kp - 6 * N_COLS);
        round(inv_rnd, b1, b0, kp - 7 * N_COLS);
        round(inv_rnd, b0, b1, kp - 8 * N_COLS);
        round(inv_rnd, b1, b0, kp - 9 * N_COLS);
        round(inv_lrnd, b0, b1, kp - 10 * N_COLS);
    }
#else
#if (DEC_UNROLL == PARTIAL)
    /* two rounds per loop iteration; the odd round and the final round
       are performed after the loop */
    {   aes_32t rnd;
        for(rnd = 0; rnd < (nr >> 1) - 1; ++rnd)
        {
            kp -= N_COLS;
            round(inv_rnd, b1, b0, kp);
            kp -= N_COLS;
            round(inv_rnd, b0, b1, kp);
        }
        kp -= N_COLS;
        round(inv_rnd, b1, b0, kp);
#else
    /* no unrolling: one round per iteration, copying b1 back into b0 */
    {   aes_32t rnd;
        for(rnd = 0; rnd < nr - 1; ++rnd)
        {
            kp -= N_COLS;
            round(inv_rnd, b1, b0, kp);
            l_copy(b0, b1);
        }
#endif
        kp -= N_COLS;
        round(inv_lrnd, b0, b1, kp);
    }
#endif

    state_out(out_blk, b0);
#ifdef AES_ERR_CHK
    return aes_good;
#endif
}
#endif
| {
"pile_set_name": "Github"
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# scheduler class for distributing the topology for execution
heron.class.scheduler: org.apache.heron.scheduler.marathon.MarathonScheduler
# launcher class for submitting and launching the topology
heron.class.launcher: org.apache.heron.scheduler.marathon.MarathonLauncher
# location of java - pick it up from shell environment
heron.directory.sandbox.java.home: ${JAVA_HOME}
# The URI of marathon scheduler
heron.marathon.scheduler.uri: "http://127.0.0.1:8080"
# Invoke the IScheduler as a library directly
heron.scheduler.is.service: False
# location of the core package
heron.package.core.uri: file://${HOME}/.heron/dist/heron-core.tar.gz | {
"pile_set_name": "Github"
} |
// Copyright David Abrahams 2002.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef MAKE_INSTANCE_DWA200296_HPP
# define MAKE_INSTANCE_DWA200296_HPP
# include <boost/python/detail/prefix.hpp>
# include <boost/python/object/instance.hpp>
# include <boost/python/converter/registered.hpp>
# include <boost/python/detail/decref_guard.hpp>
# include <boost/python/detail/none.hpp>
# include <boost/mpl/assert.hpp>
# include <boost/mpl/or.hpp>
# include <boost/type_traits/is_union.hpp>
namespace boost { namespace python { namespace objects {
// CRTP base for creating a Python instance object wrapping a C++ object.
// Derived must supply get_class_object(x) (the extension type to
// instantiate) and construct(storage, self, x) (placement-construct a
// Holder in the instance's embedded storage).
template <class T, class Holder, class Derived>
struct make_instance_impl
{
    typedef objects::instance<Holder> instance_t;

    // Allocate a Python object of the registered extension type and build
    // a Holder for x inside it. Returns a NEW reference, or None when no
    // class object is registered for T.
    template <class Arg>
    static inline PyObject* execute(Arg& x)
    {
        // only class/union types can be wrapped this way
        BOOST_MPL_ASSERT((mpl::or_<is_class<T>, is_union<T> >));

        PyTypeObject* type = Derived::get_class_object(x);

        if (type == 0)
            return python::detail::none();

        // extra bytes for the in-place Holder storage
        PyObject* raw_result = type->tp_alloc(
            type, objects::additional_instance_size<Holder>::value);

        if (raw_result != 0)
        {
            // decref the half-built object if construction throws
            python::detail::decref_guard protect(raw_result);

            instance_t* instance = (instance_t*)raw_result;

            // construct the new C++ object and install the pointer
            // in the Python object.
            Derived::construct(&instance->storage, (PyObject*)instance, x)->install(raw_result);

            // Note the position of the internally-stored Holder,
            // for the sake of destruction.
            // NOTE(review): Py_SIZE used as an lvalue; recent CPython
            // requires Py_SET_SIZE - confirm the targeted Python versions.
            Py_SIZE(instance) = offsetof(instance_t, storage);

            // Release ownership of the python object
            protect.cancel();
        }
        return raw_result;
    }
};
// Default make_instance: resolves the class object through the converter
// registry for T and placement-constructs Holder(instance, x) into the
// instance's embedded storage.
template <class T, class Holder>
struct make_instance
    : make_instance_impl<T, Holder, make_instance<T,Holder> >
{
    // the registered Python extension type for T (argument value unused)
    template <class U>
    static inline PyTypeObject* get_class_object(U&)
    {
        return converter::registered<T>::converters.get_class_object();
    }

    // placement-new the Holder into the instance's storage; the returned
    // pointer is owned by the Python object (destroyed via tp_dealloc)
    static inline Holder* construct(void* storage, PyObject* instance, reference_wrapper<T const> x)
    {
        return new (storage) Holder(instance, x);
    }
};
}}} // namespace boost::python::object
#endif // MAKE_INSTANCE_DWA200296_HPP
| {
"pile_set_name": "Github"
} |
#ifndef CAFFE_UTIL_GPU_UTIL_H_
#define CAFFE_UTIL_GPU_UTIL_H_
namespace caffe {
// Type-dispatched atomic add into device memory; returns the OLD value
// at *address, matching the semantics of CUDA's atomicAdd.
template <typename Dtype>
inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address);

// float: hardware atomicAdd is available directly.
template <>
inline __device__
float caffe_gpu_atomic_add(const float val, float* address) {
  return atomicAdd(address, val);
}

// double atomicAdd implementation taken from:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG
// Emulates the atomic add with a compare-and-swap loop on the 64-bit bit
// pattern: re-read, add, and attempt to publish until no other thread has
// modified *address between the read and the CAS.
template <>
inline __device__
double caffe_gpu_atomic_add(const double val, double* address) {
  unsigned long long int* address_as_ull =  // NOLINT(runtime/int)
      // NOLINT_NEXT_LINE(runtime/int)
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull;  // NOLINT(runtime/int)
  unsigned long long int assumed;  // NOLINT(runtime/int)
  do {
    assumed = old;
    // atomicCAS compares raw 64-bit patterns, so the loop terminates even
    // when the stored value is NaN (bitwise comparison, not ==)
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
} // namespace caffe
#endif // CAFFE_UTIL_GPU_UTIL_H_
| {
"pile_set_name": "Github"
} |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=certificates.k8s.io
package certificates // import "k8s.io/kubernetes/pkg/apis/certificates"
| {
"pile_set_name": "Github"
} |
import os
import atexit
import datetime
import random
import signal
import time
from .. import utils
from ..api import API
from .bot_archive import archive, archive_medias, unarchive_medias
from .bot_block import block, block_bots, block_users, unblock, unblock_users
from .bot_checkpoint import load_checkpoint, save_checkpoint
from .bot_comment import (
comment, comment_geotag, comment_hashtag,
comment_medias, comment_user, comment_users,
is_commented, reply_to_comment
)
from .bot_delete import delete_comment, delete_media, delete_medias
from .bot_direct import (
send_hashtag, send_like, send_media, send_medias,
send_message, send_messages, send_profile, send_photo,
approve_pending_thread_requests
)
from .bot_filter import (
check_media, check_not_bot, check_user, filter_medias
)
from .bot_follow import (
follow, follow_followers, follow_following,
follow_users, approve_pending_follow_requests, reject_pending_follow_requests
)
from .bot_get import (
convert_to_user_id, get_archived_medias, get_comment,
get_comment_likers, get_geotag_medias, get_geotag_users,
get_hashtag_medias, get_hashtag_users,
get_last_user_medias, get_locations_from_coordinates,
get_media_commenters, get_media_comments,
get_media_comments_all, get_media_id_from_link,
get_link_from_media_id, get_media_info, get_media_likers,
get_media_owner, get_messages, get_popular_medias,
get_timeline_medias, get_timeline_users,
get_total_hashtag_medias, get_total_user_medias,
get_user_followers, get_user_following,
get_user_id_from_username, get_user_info,
get_user_likers, get_user_medias, get_user_tags_medias,
get_username_from_user_id, get_your_medias, search_users,
get_user_stories, get_user_reel, get_self_story_viewers,
get_pending_follow_requests, get_pending_thread_requests
)
from .bot_like import (
like, like_comment, like_followers, like_following,
like_geotag, like_hashtag, like_media_comments,
like_medias, like_timeline, like_user, like_users, like_location_feed
)
from .bot_photo import download_photo, download_photos, upload_photo
from .bot_stats import save_user_stats
from .bot_support import (
check_if_file_exists, console_print, extract_urls,
read_list_from_file
)
from .bot_unfollow import (
unfollow, unfollow_everyone, unfollow_non_followers,
unfollow_users
)
from .bot_unlike import (
unlike, unlike_comment, unlike_media_comments,
unlike_medias, unlike_user
)
from .bot_video import upload_video, download_video
from .bot_story import (
download_stories, upload_story_photo, watch_users_reels
)
class Bot(object):
    def __init__(
            self,
            whitelist_file='whitelist.txt',
            blacklist_file='blacklist.txt',
            comments_file='comments.txt',
            followed_file='followed.txt',
            unfollowed_file='unfollowed.txt',
            skipped_file='skipped.txt',
            friends_file='friends.txt',
            base_path='',
            proxy=None,
            max_likes_per_day=10000,
            max_unlikes_per_day=1000,
            max_follows_per_day=3500,
            max_unfollows_per_day=350,
            max_comments_per_day=100,
            max_blocks_per_day=100,
            max_unblocks_per_day=100,
            max_likes_to_like=100,
            min_likes_to_like=20,
            max_messages_per_day=300,
            filter_users=True,
            filter_private_users=True,
            filter_users_without_profile_photo=True,
            filter_previously_followed=False,
            filter_business_accounts=True,
            filter_verified_accounts=True,
            max_followers_to_follow=2000,
            min_followers_to_follow=10,
            max_following_to_follow=2000,
            min_following_to_follow=10,
            max_followers_to_following_ratio=10,
            max_following_to_followers_ratio=2,
            min_media_count_to_follow=3,
            max_following_to_block=2000,
            like_delay=10,
            unlike_delay=10,
            follow_delay=30,
            unfollow_delay=30,
            comment_delay=60,
            block_delay=30,
            unblock_delay=30,
            message_delay=60,
            stop_words=('shop', 'store', 'free'),
            # NOTE(review): mutable default argument (list); never mutated
            # here, only stored - confirm no caller mutates bot.blacklist_hashtags
            blacklist_hashtags=['#shop', '#store', '#free'],
            blocked_actions_protection=True,
            verbosity=True,
            device=None,
            do_logout=False
    ):
        """Configure a Bot bound to a fresh API client.

        Stores per-day action quotas, per-action delays, follow/like
        filtering thresholds, and opens the bookkeeping text files
        (followed/unfollowed/skipped/...) under `base_path`. No network
        activity happens here; call `login()` to authenticate.
        """
        self.api = API(device=device, base_path=base_path)
        self.base_path = base_path

        # running totals of actions performed since `start_time`
        self.total = {
            'likes': 0,
            'unlikes': 0,
            'follows': 0,
            'unfollows': 0,
            'comments': 0,
            'blocks': 0,
            'unblocks': 0,
            'messages': 0,
            'archived': 0,
            'unarchived': 0,
            'stories_viewed': 0
        }

        self.start_time = datetime.datetime.now()

        # minimum seconds between two actions of the same kind (see delay())
        self.delays = {
            'like': like_delay,
            'unlike': unlike_delay,
            'follow': follow_delay,
            'unfollow': unfollow_delay,
            'comment': comment_delay,
            'block': block_delay,
            'unblock': unblock_delay,
            'message': message_delay
        }

        # timestamp of the last action of each kind, 0 = never
        self.last = {key: 0 for key in self.delays.keys()}

        # limits - follow
        self.filter_users = filter_users
        self.filter_private_users = filter_private_users
        self.filter_users_without_profile_photo = filter_users_without_profile_photo
        self.filter_business_accounts = filter_business_accounts
        self.filter_verified_accounts = filter_verified_accounts
        self.filter_previously_followed = filter_previously_followed

        # daily quotas checked by reached_limit()
        self.max_per_day = {
            'likes': max_likes_per_day,
            'unlikes': max_unlikes_per_day,
            'follows': max_follows_per_day,
            'unfollows': max_unfollows_per_day,
            'comments': max_comments_per_day,
            'blocks': max_blocks_per_day,
            'unblocks': max_unblocks_per_day,
            'messages': max_messages_per_day
        }

        self.blocked_actions_protection = blocked_actions_protection

        # per-action flags set when Instagram blocks that action
        self.blocked_actions = {
            'likes': False,
            'unlikes': False,
            'follows': False,
            'unfollows': False,
            'comments': False,
            'blocks': False,
            'unblocks': False,
            'messages': False
        }

        self.max_likes_to_like = max_likes_to_like
        self.min_likes_to_like = min_likes_to_like
        self.max_followers_to_follow = max_followers_to_follow
        self.min_followers_to_follow = min_followers_to_follow
        self.max_following_to_follow = max_following_to_follow
        self.min_following_to_follow = min_following_to_follow
        self.max_followers_to_following_ratio = max_followers_to_following_ratio
        self.max_following_to_followers_ratio = max_following_to_followers_ratio
        self.min_media_count_to_follow = min_media_count_to_follow
        self.stop_words = stop_words
        self.blacklist_hashtags = blacklist_hashtags

        # limits - block
        self.max_following_to_block = max_following_to_block

        # current following and followers (lazy-loaded, see properties)
        self._following = None
        self._followers = None

        self._user_infos = {}  # User info cache
        self._usernames = {}  # `username` to `user_id` mapping

        # Adjust file paths
        followed_file = os.path.join(base_path, followed_file)
        unfollowed_file = os.path.join(base_path, unfollowed_file)
        skipped_file = os.path.join(base_path, skipped_file)
        friends_file = os.path.join(base_path, friends_file)
        comments_file = os.path.join(base_path, comments_file)
        blacklist_file = os.path.join(base_path, blacklist_file)
        whitelist_file = os.path.join(base_path, whitelist_file)

        # Database files
        self.followed_file = utils.file(followed_file)
        self.unfollowed_file = utils.file(unfollowed_file)
        self.skipped_file = utils.file(skipped_file)
        self.friends_file = utils.file(friends_file)
        self.comments_file = utils.file(comments_file)
        self.blacklist_file = utils.file(blacklist_file)
        self.whitelist_file = utils.file(whitelist_file)

        self.proxy = proxy
        self.verbosity = verbosity
        self.do_logout = do_logout

        self.logger = self.api.logger
        self.logger.info('Instabot Started')
    # Thin read-only views onto the underlying API client, kept so that
    # older code written against bot.<attr> keeps working.
    @property
    def user_id(self):
        # For compatibility
        return self.api.user_id

    @property
    def username(self):
        # For compatibility
        return self.api.username

    @property
    def password(self):
        # For compatibility
        return self.api.password

    @property
    def last_json(self):
        # For compatibility
        return self.api.last_json
    @property
    def blacklist(self):
        # Usernames from the blacklist file resolved to user ids.
        # This is a fast operation because `get_user_id_from_username` is cached.
        return [self.convert_to_user_id(i) for i in self.blacklist_file.list
                if i is not None]

    @property
    def whitelist(self):
        # Usernames from the whitelist file resolved to user ids.
        # This is a fast operation because `get_user_id_from_username` is cached.
        return [self.convert_to_user_id(i) for i in self.whitelist_file.list
                if i is not None]
    @property
    def following(self):
        """Ids the account follows; downloaded lazily and re-fetched when
        the cache is older than two hours (7200 s)."""
        now = time.time()
        last = self.last.get('updated_following', now)
        if self._following is None or now - last > 7200:
            self.console_print('`bot.following` is empty, will download.', 'green')
            self._following = self.get_user_following(self.user_id)
            self.last['updated_following'] = now
        return self._following

    @property
    def followers(self):
        """Ids following the account; same 2-hour lazy cache as `following`."""
        now = time.time()
        last = self.last.get('updated_followers', now)
        if self._followers is None or now - last > 7200:
            self.console_print('`bot.followers` is empty, will download.', 'green')
            self._followers = self.get_user_followers(self.user_id)
            self.last['updated_followers'] = now
        return self._followers
    def version(self):
        """Return the installed `instabot` distribution version, or the
        string "No match" when the package is not installed."""
        try:
            # pip vendors its own pkg_resources copy; prefer it if present
            from pip._vendor import pkg_resources
        except ImportError:
            import pkg_resources
        return next((p.version for p in pkg_resources.working_set if p.project_name.lower() == 'instabot'), "No match")
    def logout(self, *args, **kwargs):
        """End the API session and print the session counters.

        Extra args are accepted (and ignored) so this can double as a
        signal handler / atexit callback - see login().
        """
        self.api.logout()
        self.print_counters()
    def login(self, **args):
        """Authenticate the underlying API client.

        Reuses a stored cookie when valid (unless `do_logout` forces a
        fresh login). Registers checkpoint/counter printing on SIGTERM and
        interpreter exit. Returns True on success, False otherwise.
        NOTE(review): indentation reconstructed from a whitespace-mangled
        dump; control flow mirrors upstream instabot - verify on merge.
        """
        if self.proxy:
            args['proxy'] = self.proxy
        if self.do_logout or not self.api.check_cookie(**args):
            # no usable cookie (or forced): perform a real login
            if not self.api.login(**args):
                return False
            elif self.do_logout:
                # session should be logged out on exit, not just saved
                self.prepare()
                signal.signal(signal.SIGTERM, self.logout)
                atexit.register(self.logout)
                return True
        self.prepare()
        atexit.register(self.print_counters)
        if 'is_threaded' in args:
            if args['is_threaded']:
                # signal handlers can only be set from the main thread
                return True
        signal.signal(signal.SIGTERM, self.print_counters)
        return True
    def prepare(self):
        """Restore counters/blocked flags/request count/start time from the
        last saved checkpoint, if one exists."""
        storage = load_checkpoint(self)
        if storage is not None:
            self.total, self.blocked_actions, self.api.total_requests, self.start_time = storage
    def print_counters(self):
        """Save a checkpoint and log a summary of the session: uptime,
        non-zero action totals (with their daily caps), blocked actions,
        and the total number of API requests."""
        save_checkpoint(self)
        self.logger.info("Bot stopped. "
                         "Worked: %s", datetime.datetime.now() - self.start_time)
        for key, val in self.total.items():
            if val > 0:
                self.logger.info("Total {}: {}{}".format(key, val,
                    "/" + str(self.max_per_day[key]) if self.max_per_day.get(key) else ""))
        for key, val in self.blocked_actions.items():
            if val:
                self.logger.info("Blocked {}".format(key))
        self.logger.info("Total requests: {}".format(self.api.total_requests))
def delay(self, key):
"""Sleep only if elapsed time since `self.last[key]` < `self.delay[key]`."""
last_action, target_delay = self.last[key], self.delays[key]
elapsed_time = time.time() - last_action
if elapsed_time < target_delay:
t_remaining = target_delay - elapsed_time
time.sleep(t_remaining * random.uniform(0.25, 1.25))
self.last[key] = time.time()
    def error_delay(self):
        # fixed 10 s back-off after an error
        time.sleep(10)

    def small_delay(self):
        # random pause of 0.75-3.75 s between related actions
        time.sleep(random.uniform(0.75, 3.75))

    def very_small_delay(self):
        # random pause of 0.175-0.875 s for fine-grained pacing
        time.sleep(random.uniform(0.175, 0.875))
    def reached_limit(self, key):
        """Return True when today's quota for `key` is exhausted.

        Side effect: if at least one calendar day has passed since
        `start_time`, all counters are reset first (new quota day).
        """
        current_date = datetime.datetime.now()
        passed_days = (current_date.date() - self.start_time.date()).days
        if passed_days > 0:
            self.reset_counters()
        return self.max_per_day[key] - self.total[key] <= 0
def reset_counters(self):
for k in self.total:
self.total[k] = 0
for k in self.blocked_actions:
self.blocked_actions[k] = False
self.start_time = datetime.datetime.now()
    # getters - thin delegators to the bot_get module; each forwards its
    # arguments unchanged and returns whatever the helper returns.
    def get_user_stories(self, user_id):
        """
        Returns array of stories links
        """
        return get_user_stories(self, user_id)

    def get_user_reel(self, user_id):
        return get_user_reel(self, user_id)

    def get_self_story_viewers(self, story_id):
        return get_self_story_viewers(self, story_id)

    def get_pending_follow_requests(self):
        return get_pending_follow_requests(self)

    def get_your_medias(self, as_dict=False):
        """
        Returns your media ids. With parameter as_dict=True returns media as dict.
        :type as_dict: bool
        """
        return get_your_medias(self, as_dict)

    def get_archived_medias(self, as_dict=False):
        """
        Returns your archived media ids. With parameter as_dict=True returns media as dict.
        :type as_dict: bool
        """
        return get_archived_medias(self, as_dict)

    def get_timeline_medias(self):
        return get_timeline_medias(self)

    def get_popular_medias(self):
        return get_popular_medias(self)

    def get_user_medias(self, user_id, filtration=True, is_comment=False):
        return get_user_medias(self, user_id, filtration, is_comment)

    def get_total_user_medias(self, user_id):
        return get_total_user_medias(self, user_id)

    def get_last_user_medias(self, user_id, count):
        """
        Returns the last number of posts specified in count in media ids array.
        :type count: int
        :param count: Count of posts
        :return: array
        """
        return get_last_user_medias(self, user_id, count)

    def get_hashtag_medias(self, hashtag, filtration=True):
        return get_hashtag_medias(self, hashtag, filtration)

    def get_total_hashtag_medias(self, hashtag, amount=100, filtration=False):
        return get_total_hashtag_medias(self, hashtag, amount, filtration)

    def get_geotag_medias(self, geotag, filtration=True):
        return get_geotag_medias(self, geotag, filtration)

    def get_locations_from_coordinates(self, latitude, longitude):
        return get_locations_from_coordinates(self, latitude, longitude)

    def get_media_info(self, media_id):
        return get_media_info(self, media_id)

    def get_timeline_users(self):
        return get_timeline_users(self)

    def get_hashtag_users(self, hashtag):
        return get_hashtag_users(self, hashtag)

    def get_geotag_users(self, geotag):
        return get_geotag_users(self, geotag)

    def get_user_id_from_username(self, username):
        return get_user_id_from_username(self, username)

    def get_user_tags_medias(self, user_id):
        return get_user_tags_medias(self, user_id)

    def get_username_from_user_id(self, user_id):
        return get_username_from_user_id(self, user_id)

    def get_user_info(self, user_id, use_cache=True):
        return get_user_info(self, user_id, use_cache)

    def get_user_followers(self, user_id, nfollows=None):
        return get_user_followers(self, user_id, nfollows)

    def get_user_following(self, user_id, nfollows=None):
        return get_user_following(self, user_id, nfollows)

    def get_comment_likers(self, comment_id):
        return get_comment_likers(self, comment_id)

    def get_media_likers(self, media_id):
        return get_media_likers(self, media_id)

    def get_media_comments(self, media_id, only_text=False):
        return get_media_comments(self, media_id, only_text)

    def get_media_comments_all(self, media_id, only_text=False, count=False):
        return get_media_comments_all(self, media_id, only_text, count)

    def get_comment(self):
        return get_comment(self)

    def get_media_commenters(self, media_id):
        return get_media_commenters(self, media_id)

    def get_media_owner(self, media):
        return get_media_owner(self, media)

    def get_user_likers(self, user_id, media_count=10):
        return get_user_likers(self, user_id, media_count)

    def get_media_id_from_link(self, link):
        return get_media_id_from_link(self, link)

    # NOTE(review): parameter is named `link` but it is forwarded as a
    # media id to the helper - confirm the intended argument type
    def get_link_from_media_id(self, link):
        return get_link_from_media_id(self, link)

    def get_messages(self):
        return get_messages(self)

    def search_users(self, query):
        return search_users(self, query)

    def convert_to_user_id(self, usernames):
        return convert_to_user_id(self, usernames)

    def get_pending_thread_requests(self):
        return get_pending_thread_requests(self)
    # like - delegators to the bot_like module
    def like(self, media_id, check_media=True):
        return like(self, media_id, check_media)

    def like_comment(self, comment_id):
        return like_comment(self, comment_id)

    def like_medias(self, media_ids, check_media=True):
        return like_medias(self, media_ids, check_media)

    def like_timeline(self, amount=None):
        return like_timeline(self, amount)

    def like_media_comments(self, media_id):
        return like_media_comments(self, media_id)

    def like_user(self, user_id, amount=None, filtration=True):
        return like_user(self, user_id, amount, filtration)

    def like_hashtag(self, hashtag, amount=None):
        return like_hashtag(self, hashtag, amount)

    def like_geotag(self, geotag, amount=None):
        return like_geotag(self, geotag, amount)

    def like_users(self, user_ids, nlikes=None, filtration=True):
        return like_users(self, user_ids, nlikes, filtration)

    def like_location_feed(self, place, amount):
        return like_location_feed(self, place, amount)

    def like_followers(self, user_id, nlikes=None, nfollows=None):
        return like_followers(self, user_id, nlikes, nfollows)

    def like_following(self, user_id, nlikes=None, nfollows=None):
        return like_following(self, user_id, nlikes, nfollows)
    # unlike - delegators to the bot_unlike module
    def unlike(self, media_id):
        return unlike(self, media_id)

    def unlike_comment(self, comment_id):
        return unlike_comment(self, comment_id)

    def unlike_media_comments(self, media_id):
        return unlike_media_comments(self, media_id)

    def unlike_medias(self, media_ids):
        return unlike_medias(self, media_ids)

    def unlike_user(self, user):
        return unlike_user(self, user)
    # story -- Instagram stories/reels wrappers.
    def download_stories(self, username):
        return download_stories(self, username)
    def upload_story_photo(self, photo, upload_id=None):
        return upload_story_photo(self, photo, upload_id)
    def watch_users_reels(self, user_ids, max_users=100):
        return watch_users_reels(self, user_ids, max_users=max_users)
    # photo -- download wrappers; save_description optionally stores the
    # caption alongside the file (behavior defined in the helper).
    def download_photo(self, media_id, folder='photos', filename=None, save_description=False):
        return download_photo(self, media_id, folder, filename, save_description)
    def download_photos(self, medias, folder='photos', save_description=False):
        return download_photos(self, medias, folder, save_description)
def upload_photo(self, photo, caption=None, upload_id=None, from_video=False, options={}):
"""Upload photo to Instagram
@param photo Path to photo file (String)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, then generate automatically
@param from_video A flag that signals whether the photo is loaded from the video or by itself (Boolean, DEPRECATED: not used)
@param options Object with difference options, e.g. configure_timeout, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@return Object with state of uploading to Instagram (or False)
"""
return upload_photo(self, photo, caption, upload_id, from_video, options)
# video
def upload_video(self, video, caption='', thumbnail=None, options={}):
"""Upload video to Instagram
@param video Path to video file (String)
@param caption Media description (String)
@param thumbnail Path to thumbnail for video (String). When None, then thumbnail is generate automatically
@param options Object with difference options, e.g. configure_timeout, rename_thumbnail, rename (Dict)
Designed to reduce the number of function arguments!
@return Object with state of uploading to Instagram (or False)
"""
return upload_video(self, video, caption, thumbnail, options)
    # Download a single video; mirrors download_photo above.
    def download_video(self, media_id, folder='videos', filename=None, save_description=False):
        return download_video(self, media_id, folder, filename, save_description)
    # follow -- follow actions delegating to module-level helpers.
    def follow(self, user_id):
        return follow(self, user_id)
    def follow_users(self, user_ids):
        return follow_users(self, user_ids)
    def follow_followers(self, user_id, nfollows=None):
        return follow_followers(self, user_id, nfollows)
    def follow_following(self, user_id, nfollows=None):
        return follow_following(self, user_id, nfollows)
    # unfollow
    def unfollow(self, user_id):
        return unfollow(self, user_id)
    def unfollow_users(self, user_ids):
        return unfollow_users(self, user_ids)
    def unfollow_non_followers(self, n_to_unfollows=None):
        return unfollow_non_followers(self, n_to_unfollows)
    def unfollow_everyone(self):
        return unfollow_everyone(self)
    def approve_pending_follow_requests(self):
        return approve_pending_follow_requests(self)
    def reject_pending_follow_requests(self):
        return reject_pending_follow_requests(self)
    # direct -- direct-messaging wrappers.
    def send_message(self, text, user_ids, thread_id=None):
        return send_message(self, text, user_ids, thread_id)
    def send_messages(self, text, user_ids):
        return send_messages(self, text, user_ids)
    def send_media(self, media_id, user_ids, text=None, thread_id=None):
        return send_media(self, media_id, user_ids, text, thread_id)
    def send_medias(self, media_id, user_ids, text=None):
        return send_medias(self, media_id, user_ids, text)
    def send_hashtag(self, hashtag, user_ids, text='', thread_id=None):
        return send_hashtag(self, hashtag, user_ids, text, thread_id)
    def send_profile(self, profile_user_id, user_ids, text='', thread_id=None):
        return send_profile(self, profile_user_id, user_ids, text, thread_id)
    def send_like(self, user_ids, thread_id=None):
        return send_like(self, user_ids, thread_id)
    def send_photo(self, user_ids, filepath, thread_id=None):
        return send_photo(self, user_ids, filepath, thread_id)
    def approve_pending_thread_requests(self):
        return approve_pending_thread_requests(self)
    # delete
    def delete_media(self, media_id):
        return delete_media(self, media_id)
    def delete_medias(self, medias):
        return delete_medias(self, medias)
    def delete_comment(self, media_id, comment_id):
        return delete_comment(self, media_id, comment_id)
    # archive -- note unarchive() reuses archive() with undo=True.
    def archive(self, media_id, undo=False):
        return archive(self, media_id, undo)
    def unarchive(self, media_id):
        return archive(self, media_id, True)
    def archive_medias(self, medias):
        return archive_medias(self, medias)
    def unarchive_medias(self, medias):
        return unarchive_medias(self, medias)
    # comment -- commenting wrappers.
    def comment(self, media_id, comment_text):
        return comment(self, media_id, comment_text)
    def reply_to_comment(self, media_id, comment_text, parent_comment_id):
        return reply_to_comment(self, media_id, comment_text, parent_comment_id)
    def comment_hashtag(self, hashtag, amount=None):
        return comment_hashtag(self, hashtag, amount)
    def comment_medias(self, medias):
        return comment_medias(self, medias)
    def comment_user(self, user_id, amount=None):
        return comment_user(self, user_id, amount)
    def comment_users(self, user_ids, ncomments=None):
        return comment_users(self, user_ids, ncomments)
    def comment_geotag(self, geotag):
        return comment_geotag(self, geotag)
    def is_commented(self, media_id):
        return is_commented(self, media_id)
    # block
    def block(self, user_id):
        return block(self, user_id)
    def unblock(self, user_id):
        return unblock(self, user_id)
    def block_users(self, user_ids):
        return block_users(self, user_ids)
    def unblock_users(self, user_ids):
        return unblock_users(self, user_ids)
    def block_bots(self):
        return block_bots(self)
    # filter
    def filter_medias(self, media_items, filtration=True, quiet=False, is_comment=False):
        return filter_medias(self, media_items, filtration, quiet, is_comment)
    def check_media(self, media):
        return check_media(self, media)
    def check_user(self, user, unfollowing=False):
        return check_user(self, user, unfollowing)
    def check_not_bot(self, user):
        return check_not_bot(self, user)
    # support -- unlike the wrappers above, these helpers are stateless and do
    # NOT receive the bot instance (note: `self` is intentionally not passed).
    def check_if_file_exists(self, file_path, quiet=False):
        return check_if_file_exists(file_path, quiet)
    def extract_urls(self, text):
        return extract_urls(text)
    def read_list_from_file(self, file_path):
        return read_list_from_file(file_path)
    def console_print(self, text, color=None):
        return console_print(self, text, color)
    # stats
    def save_user_stats(self, username, path=""):
        return save_user_stats(self, username, path=path)
| {
"pile_set_name": "Github"
} |
<div role="tabpanel" class="tab-pane config-section" id="email">
<form method="POST" autocomplete="off" class="w-100">
<h5>Email Content</h5>
<small class="form-text text-muted">
Customize CTFd emails with <a href="https://docs.ctfd.io/docs/settings/emails/#email-content" target="_blank">predefined variables</a> and custom content
</small>
<ul class="nav nav-tabs mt-3" role="tablist">
<li class="nav-item active">
<a class="nav-link active" href="#confirmation-email-tab" role="tab" data-toggle="tab">
Confirmation
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#account-details-email-tab" role="tab" data-toggle="tab">Account Details</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#password-reset-email-tab" role="tab" data-toggle="tab">Password Reset</a>
</li>
</ul>
<div class="tab-content">
<div role="tabpanel" class="tab-pane active" id="confirmation-email-tab">
<div class="form-group">
<label class="pt-3">
Account Registration<br>
<small class="form-text text-muted">
Email sent to users after they've registered their account entirely
</small>
</label>
<div>
<label>
Subject
</label>
<input class="form-control" id='successful_registration_email_subject' name='successful_registration_email_subject' type='text'
value="{{ successful_registration_email_subject or ''}}">
<label>
Body
</label>
<textarea class="form-control" type="text" id="successful_registration_email_body" name="successful_registration_email_body"
rows="5">{{ successful_registration_email_body or '' }}</textarea>
</div>
</div>
<div class="form-group">
<label class="pt-3">
Account Confirmation<br>
<small class="form-text text-muted">
Email sent to users to confirm their account
</small>
</label>
<div>
<label>
Subject
</label>
<input class="form-control" id='verification_email_subject' name='verification_email_subject' type='text' value="{{ verification_email_subject or ''}}">
<label>
Body
</label>
<textarea class="form-control" type="text" id="verification_email_body" name="verification_email_body"
rows="5">{{ verification_email_body or '' }}</textarea>
</div>
</div>
</div>
<div role="tabpanel" class="tab-pane" id="account-details-email-tab">
<div class="form-group">
<label class="pt-3">
New Account Details<br>
<small class="form-text text-muted">
Email sent to new users (created by an admin) with their initial account details
</small>
</label>
<div>
<label>
Subject
</label>
<input class="form-control" id='user_creation_email_subject' name='user_creation_email_subject' type='text'
value="{{ user_creation_email_subject or ''}}">
<label>
Body
</label>
<textarea class="form-control" type="text" id="user_creation_email_body" name="user_creation_email_body"
rows="5">{{ user_creation_email_body or '' }}</textarea>
</div>
</div>
</div>
<div role="tabpanel" class="tab-pane" id="password-reset-email-tab">
<div class="form-group">
<label class="pt-3">
Password Reset Request<br>
<small class="form-text text-muted">
                Email sent when a user requests a password reset
</small>
</label>
<div>
<label>
Subject
</label>
<input class="form-control" id='password_reset_subject' name='password_reset_subject' type='text'
value="{{ password_reset_subject or ''}}">
<label>
Body
</label>
<textarea class="form-control" type="text" id="password_reset_body" name="password_reset_body"
rows="5">{{ password_reset_body or '' }}</textarea>
</div>
</div>
<div class="form-group">
<label class="pt-3">
Password Reset Confirmation<br>
<small class="form-text text-muted">
                Email sent when a user successfully resets their password
</small>
</label>
<div>
<label>
Subject
</label>
<input class="form-control" id='password_change_alert_subject' name='password_change_alert_subject' type='text'
value="{{ password_change_alert_subject or ''}}">
<label>
Body
</label>
<textarea class="form-control" type="text" id="password_change_alert_body" name="password_change_alert_body"
rows="5">{{ password_change_alert_body or '' }}</textarea>
</div>
</div>
</div>
</div>
<hr class="my-5">
<h5>Email Server</h5>
<small class="form-text text-muted">
Change the email server used by CTFd to send email
</small>
<ul class="nav nav-tabs my-3" role="tablist">
<li class="nav-item active">
<a class="nav-link active" href="#mailserver" role="tab" data-toggle="tab">Mail
Server</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#mailgun" role="tab" data-toggle="tab">Mailgun</a>
</li>
</ul>
<div class="form-group">
<label>
Mail From Address<br>
<small class="form-text text-muted">
Email address used to send email
</small>
</label>
<input class="form-control" id='mailfrom_addr' name='mailfrom_addr' type='text' {% if mailfrom_addr is defined and mailfrom_addr != None %}value="{{ mailfrom_addr }}"{% endif %}>
</div>
<div class="tab-content">
<div role="tabpanel" class="tab-pane active" id="mailserver">
<div class="form-group">
<label>
Mail Server Address<br>
<small class="form-text text-muted">
Mail Server Address
</small>
</label>
<input class="form-control" id='mail_server' name='mail_server' type='text'
{% if mail_server is defined and mail_server != None %}value="{{ mail_server }}"{% endif %}>
</div>
<div class="form-group">
<label>
Mail Server Port<br>
<small class="form-text text-muted">
Mail Server Port
</small>
</label>
<input class="form-control" id='mail_port' name='mail_port' type='text'
{% if mail_port is defined and mail_port != None %}value="{{ mail_port }}"{% endif %}>
</div>
<div class="form-check">
<label>
<input id="mail_useauth" name="mail_useauth" type="checkbox"
{% if mail_useauth %}checked{% endif %}>
Use Mail Server Username and Password
</label>
</div>
<div id="mail_username_password">
<div class="form-group">
<label>
Username<br>
<small class="form-text text-muted">
Mail Server Account Username
</small>
</label>
{% if mail_username is defined and mail_username != None %}
<label>
<sup class="form-text text-muted">A mail server username is currently set</sup>
</label>
{% endif %}
<input class="form-control" id='mail_username' name='mail_username' autocomplete='off' type='text'>
</div>
<div class="form-group">
<label for="mail_password">
Password<br>
<small class="form-text text-muted">
Mail Server Account Password
</small>
</label>
{% if mail_password is defined and mail_password != None %}
<label>
              <sup class="form-text text-muted">A mail server password is currently set</sup>
</label>
{% endif %}
<input class="form-control" id='mail_password' name='mail_password' autocomplete='off' type='password'>
</div>
<sup>Uncheck setting and update to remove username and password</sup>
<br>
<br>
</div>
<div class="form-check">
<label>
<input id="mail_ssl" name="mail_ssl" type="checkbox" {% if mail_ssl %}checked{% endif %}>
SSL
</label>
</div>
<div class="form-check">
<label>
<input id="mail_tls" name="mail_tls" type="checkbox" {% if mail_tls %}checked{% endif %}>
TLS
</label>
</div>
</div>
<div role="tabpanel" class="tab-pane" id="mailgun">
<div class="alert alert-warning" role="alert">
Mailgun integration is deprecated! Please see your Mailgun account for SMTP credentials.
</div>
<div class="form-group">
<label>
Mailgun API Base URL<br>
<small class="form-text text-muted">
Mailgun API Base URL
</small>
</label>
<input class="form-control" id='mailgun_base_url' name='mailgun_base_url' type='text'
{% if mailgun_base_url is defined and mailgun_base_url != None %}value="{{ mailgun_base_url }}"{% endif %}>
</div>
<div class="form-group">
<label>
Mailgun API Key<br>
<small class="form-text text-muted">
Mailgun API Key
</small>
</label>
<input class="form-control" id='mailgun_api_key' name='mailgun_api_key' type='text'
{% if mailgun_api_key is defined and mailgun_api_key != None %}value="{{ mailgun_api_key }}"{% endif %}>
</div>
</div>
</div>
<button type="submit" class="btn btn-md btn-primary float-right">Update</button>
</form>
</div> | {
"pile_set_name": "Github"
} |
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Linq;
using System.Linq.Expressions;
namespace NuGetGallery.OData.QueryInterceptors
{
public class CountInterceptor : ExpressionVisitor
{
private readonly long _count;
public CountInterceptor(long count)
{
_count = count;
}
protected override Expression VisitMethodCall(MethodCallExpression node)
{
var method = node.Method;
if ((method.DeclaringType == typeof(Queryable)) && method.Name.Equals("LongCount", StringComparison.Ordinal))
{
return Expression.Constant(_count);
}
return base.VisitMethodCall(node);
}
}
} | {
"pile_set_name": "Github"
} |
# ================== General ==================
Id: ApacheFriends.Xampp
Publisher: Apache Friends
Name: Xampp
AppMoniker: xampp
Description: XAMPP is an easy to install Apache distribution containing MariaDB, PHP and Perl.
Homepage: https://www.apachefriends.org
Tags: Apache, MySQL, MariaDB, PHP, Perl
License: GNU General Public License
LicenseUrl: https://www.apachefriends.org/about.html
# ================= Installer =================
Version: 7.4.6
InstallerType: exe
Installers:
- Arch: x64
Url: https://www.apachefriends.org/xampp-files/7.4.6/xampp-windows-x64-7.4.6-0-VC15-installer.exe
Sha256: 514EC454775A243162FD7126618000B4F65BEC13B55BF4781B19C7C4699C63D7
Switches:
SilentWithProgress: "--mode unattended --launchapps 0"
Silent: "--mode unattended --launchapps 0"
# Auto Generated by [email protected] on Wed, 10 Jun 2020 17:09:43 +0000
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>jQuery UI Effects Demos</title>
<link rel="stylesheet" href="../demos.css">
</head>
<body>
<div class="demos-nav">
<h4>Examples</h4>
<ul>
<li class="demo-config-on"><a href="default.html">Default functionality</a></li>
</ul>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/*
* libjingle
* Copyright 2004--2010, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
#define AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
namespace webrtc {
namespace adm_linux_alsa {
// The ALSA symbols we need, as an X-Macro list.
// This list must contain precisely every libasound function that is used in
// alsasoundsystem.cc.
#define ALSA_SYMBOLS_LIST \
X(snd_device_name_free_hint) \
X(snd_device_name_get_hint) \
X(snd_device_name_hint) \
X(snd_pcm_avail_update) \
X(snd_pcm_close) \
X(snd_pcm_delay) \
X(snd_pcm_drop) \
X(snd_pcm_open) \
X(snd_pcm_prepare) \
X(snd_pcm_readi) \
X(snd_pcm_recover) \
X(snd_pcm_resume) \
X(snd_pcm_reset) \
X(snd_pcm_state) \
X(snd_pcm_set_params) \
X(snd_pcm_get_params) \
X(snd_pcm_start) \
X(snd_pcm_stream) \
X(snd_pcm_frames_to_bytes) \
X(snd_pcm_bytes_to_frames) \
X(snd_pcm_wait) \
X(snd_pcm_writei) \
X(snd_pcm_info_get_class) \
X(snd_pcm_info_get_subdevices_avail) \
X(snd_pcm_info_get_subdevice_name) \
X(snd_pcm_info_set_subdevice) \
X(snd_pcm_info_get_id) \
X(snd_pcm_info_set_device) \
X(snd_pcm_info_set_stream) \
X(snd_pcm_info_get_name) \
X(snd_pcm_info_get_subdevices_count) \
X(snd_pcm_info_sizeof) \
X(snd_pcm_hw_params) \
X(snd_pcm_hw_params_malloc) \
X(snd_pcm_hw_params_free) \
X(snd_pcm_hw_params_any) \
X(snd_pcm_hw_params_set_access) \
X(snd_pcm_hw_params_set_format) \
X(snd_pcm_hw_params_set_channels) \
X(snd_pcm_hw_params_set_rate_near) \
X(snd_pcm_hw_params_set_buffer_size_near) \
X(snd_card_next) \
X(snd_card_get_name) \
X(snd_config_update) \
X(snd_config_copy) \
X(snd_config_get_id) \
X(snd_ctl_open) \
X(snd_ctl_close) \
X(snd_ctl_card_info) \
X(snd_ctl_card_info_sizeof) \
X(snd_ctl_card_info_get_id) \
X(snd_ctl_card_info_get_name) \
X(snd_ctl_pcm_next_device) \
X(snd_ctl_pcm_info) \
X(snd_mixer_load) \
X(snd_mixer_free) \
X(snd_mixer_detach) \
X(snd_mixer_close) \
X(snd_mixer_open) \
X(snd_mixer_attach) \
X(snd_mixer_first_elem) \
X(snd_mixer_elem_next) \
X(snd_mixer_selem_get_name) \
X(snd_mixer_selem_is_active) \
X(snd_mixer_selem_register) \
X(snd_mixer_selem_set_playback_volume_all) \
X(snd_mixer_selem_get_playback_volume) \
X(snd_mixer_selem_has_playback_volume) \
X(snd_mixer_selem_get_playback_volume_range) \
X(snd_mixer_selem_has_playback_switch) \
X(snd_mixer_selem_get_playback_switch) \
X(snd_mixer_selem_set_playback_switch_all) \
X(snd_mixer_selem_has_capture_switch) \
X(snd_mixer_selem_get_capture_switch) \
X(snd_mixer_selem_set_capture_switch_all) \
X(snd_mixer_selem_has_capture_volume) \
X(snd_mixer_selem_set_capture_volume_all) \
X(snd_mixer_selem_get_capture_volume) \
X(snd_mixer_selem_get_capture_volume_range) \
X(snd_dlopen) \
X(snd_dlclose) \
X(snd_config) \
X(snd_config_search) \
X(snd_config_get_string) \
X(snd_config_search_definition) \
X(snd_config_get_type) \
X(snd_config_delete) \
X(snd_config_iterator_entry) \
X(snd_config_iterator_first) \
X(snd_config_iterator_next) \
X(snd_config_iterator_end) \
X(snd_config_delete_compound_members) \
X(snd_config_get_integer) \
X(snd_config_get_bool) \
X(snd_dlsym) \
X(snd_strerror) \
X(snd_lib_error) \
X(snd_lib_error_set_handler)
// Declare the AlsaSymbolTable class: each name in ALSA_SYMBOLS_LIST expands
// (via the X macro) to one table entry. Per the included header, the entries
// are bound late -- presumably resolved from libasound at runtime rather than
// linked directly; confirm against latebindingsymboltable_linux.h.
LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable)
#define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
ALSA_SYMBOLS_LIST
#undef X
LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable)
} // namespace adm_linux_alsa
} // namespace webrtc
#endif // AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="search">Nadi</string>
<string name="advancedSearch">Nadi tasurift</string>
</resources>
| {
"pile_set_name": "Github"
} |
/*Domain class of table m_prj_kanban_board*/
package com.mycollab.module.project.domain;
import com.mycollab.core.arguments.ValuedBean;
import com.mycollab.db.metadata.Column;
import com.mycollab.db.metadata.Table;
import java.time.LocalDateTime;
import javax.validation.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.ibatis.type.Alias;
import org.hibernate.validator.constraints.Length;
@SuppressWarnings("ucd")
@Table("m_prj_kanban_board")
@Alias("KanbanBoard")
public class KanbanBoard extends ValuedBean {
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.id
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@NotNull
@Column("id")
private Integer id;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.name
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@NotEmpty
@Length(max=255, message="Field value is too long")
@Column("name")
private String name;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.projectId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@NotNull
@Column("projectId")
private Integer projectid;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.sAccountId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@NotNull
@Column("sAccountId")
private Integer saccountid;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.lead
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@Length(max=45, message="Field value is too long")
@Column("lead")
private String lead;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.createdTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@Column("createdTime")
private LocalDateTime createdtime;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column m_prj_kanban_board.lastUpdatedTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
@Column("lastUpdatedTime")
private LocalDateTime lastupdatedtime;
private static final long serialVersionUID = 1;
    // Equality is defined solely by the primary key `id`; all other columns
    // are ignored, consistent with hashCode() below.
    // NOTE(review): isAssignableFrom(getClass()) is asymmetric for subclasses;
    // this is the generated MyBatis pattern -- confirm strict type equality is
    // not required before changing it.
    public final boolean equals(Object obj) {
        if (obj == null) { return false;}
        if (obj == this) { return true;}
        if (!obj.getClass().isAssignableFrom(getClass())) { return false;}
        KanbanBoard item = (KanbanBoard)obj;
        return new EqualsBuilder().append(id, item.id).build();
    }
    // Hash derives from `id` only, matching equals(); 631/1221 are the
    // arbitrary odd seeds passed to HashCodeBuilder.
    public final int hashCode() {
        return new HashCodeBuilder(631, 1221).append(id).build();
    }
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.id
*
* @return the value of m_prj_kanban_board.id
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public Integer getId() {
return id;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withId(Integer id) {
this.setId(id);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.id
*
* @param id the value for m_prj_kanban_board.id
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setId(Integer id) {
this.id = id;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.name
*
* @return the value of m_prj_kanban_board.name
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public String getName() {
return name;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withName(String name) {
this.setName(name);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.name
*
* @param name the value for m_prj_kanban_board.name
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setName(String name) {
this.name = name;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.projectId
*
* @return the value of m_prj_kanban_board.projectId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public Integer getProjectid() {
return projectid;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withProjectid(Integer projectid) {
this.setProjectid(projectid);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.projectId
*
* @param projectid the value for m_prj_kanban_board.projectId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setProjectid(Integer projectid) {
this.projectid = projectid;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.sAccountId
*
* @return the value of m_prj_kanban_board.sAccountId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public Integer getSaccountid() {
return saccountid;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withSaccountid(Integer saccountid) {
this.setSaccountid(saccountid);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.sAccountId
*
* @param saccountid the value for m_prj_kanban_board.sAccountId
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setSaccountid(Integer saccountid) {
this.saccountid = saccountid;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.lead
*
* @return the value of m_prj_kanban_board.lead
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public String getLead() {
return lead;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withLead(String lead) {
this.setLead(lead);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.lead
*
* @param lead the value for m_prj_kanban_board.lead
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setLead(String lead) {
this.lead = lead;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.createdTime
*
* @return the value of m_prj_kanban_board.createdTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public LocalDateTime getCreatedtime() {
return createdtime;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withCreatedtime(LocalDateTime createdtime) {
this.setCreatedtime(createdtime);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.createdTime
*
* @param createdtime the value for m_prj_kanban_board.createdTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setCreatedtime(LocalDateTime createdtime) {
this.createdtime = createdtime;
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column m_prj_kanban_board.lastUpdatedTime
*
* @return the value of m_prj_kanban_board.lastUpdatedTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public LocalDateTime getLastupdatedtime() {
return lastupdatedtime;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table m_prj_kanban_board
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public KanbanBoard withLastupdatedtime(LocalDateTime lastupdatedtime) {
this.setLastupdatedtime(lastupdatedtime);
return this;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column m_prj_kanban_board.lastUpdatedTime
*
* @param lastupdatedtime the value for m_prj_kanban_board.lastUpdatedTime
*
* @mbg.generated Sat Apr 20 17:20:23 CDT 2019
*/
public void setLastupdatedtime(LocalDateTime lastupdatedtime) {
this.lastupdatedtime = lastupdatedtime;
}
public enum Field {
id,
name,
projectid,
saccountid,
lead,
createdtime,
lastupdatedtime;
public boolean equalTo(Object value) {
return name().equals(value);
}
}
} | {
"pile_set_name": "Github"
} |
import angular from 'angular'
import Vue from 'vue'
import '../../src'
import '../../src/plugins'
import Tags from './tags.vue'
// Demo Angular module bridging Vue components into Angular via ngVue; the
// `ngVue.plugins` dependency enables the filter forwarding configured below.
angular
  .module('vue.components', ['ngVue', 'ngVue.plugins'])
  .config(function ($ngVueProvider) {
    // Expose the Angular 'uppercase' filter (registered below) to Vue components.
    $ngVueProvider.filters.register(['uppercase'])
  })
  .filter('uppercase', function () {
    return string => string.toUpperCase()
  })
  .controller('MainController', function () {
    // Demo data rendered by HelloComponent.
    this.person = {
      firstName: 'The',
      lastName: 'World',
      description:
        'ngVue helps you use Vue components in your angular application ' +
        'so that you are able to create a faster and reactive web interfaces.'
    }
  })
  .value('TagsComponent', Tags)
  .value(
    'HelloComponent',
    // Inline Vue component rendered with JSX; retrieves the forwarded
    // 'uppercase' filter through Vue.filter at render time.
    Vue.component('hello-component', {
      props: {
        firstName: String,
        lastName: String,
        description: String
      },
      render (h) {
        const uppercase = Vue.filter('uppercase')
        return (
          <div class="card blue-grey darken-1">
            <div class="card-content white-text">
              <span class="card-title">
                Hi, {this.firstName} {this.lastName}
              </span>
              <p>{uppercase(this.description)}</p>
            </div>
            <div class="card-action">
              <a href="https://vuejs.org/guide/overview.html">Vue.js</a>
            </div>
          </div>
        )
      }
    })
  )
| {
"pile_set_name": "Github"
} |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class SessionInterface(object):
  """Base class for implementations of TensorFlow client sessions.

  Defines the abstract surface (`graph`, `sess_str`, `run`,
  `partial_run_setup`, `partial_run`) that concrete sessions such as
  `BaseSession` must provide; every member here only raises
  `NotImplementedError`.
  """
  @property
  def graph(self):
    """The underlying TensorFlow graph, to be used in building Operations."""
    raise NotImplementedError('graph')
  @property
  def sess_str(self):
    """The TensorFlow process to which this session will connect."""
    raise NotImplementedError('sess_str')
  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Runs operations in the session. See `BaseSession.run()` for details."""
    raise NotImplementedError('run')
  def partial_run_setup(self, fetches, feeds=None):
    """Sets up the feeds and fetches for partial runs in the session."""
    raise NotImplementedError('partial_run_setup')
  def partial_run(self, handle, fetches, feed_dict=None):
    """Continues the execution with additional feeds and fetches."""
    raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
  """Packs fetched components back into an `ops.IndexedSlicesValue`.

  Args:
    fetched_vals: `[values, indices]` or `[values, indices, dense_shape]`,
      as produced by the IndexedSlices entry in `_REGISTERED_EXPANSIONS`.

  Returns:
    An `ops.IndexedSlicesValue`; `dense_shape` is `None` when it was not
    part of the fetch.
  """
  dense_shape = fetched_vals[2] if len(fetched_vals) == 3 else None
  return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1], dense_shape)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
    # NOTE: Order matters. Lookups scan this list front to back and use the
    # first entry whose type matches (see _FetchMapper.for_fetch), so the
    # catch-all `object` entry must stay last; new registrations are inserted
    # at the front by register_session_run_conversion_functions().
    # SparseTensors are fetched as SparseTensorValues. They can be fed
    # SparseTensorValues or normal tuples.
    (sparse_tensor.SparseTensor,
     lambda fetch: (
         [fetch.indices, fetch.values, fetch.dense_shape],
         lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
     lambda feed, feed_val: list(zip(
         [feed.indices, feed.values, feed.dense_shape], feed_val)),
     lambda feed: [feed.indices, feed.values, feed.dense_shape]),
    # IndexedSlices are fetched as IndexedSlicesValues. They can be fed
    # IndexedSlicesValues or normal tuples.
    (ops.IndexedSlices,
     lambda fetch: (
         [fetch.values, fetch.indices] if fetch.dense_shape is None
         else [fetch.values, fetch.indices, fetch.dense_shape],
         _get_indexed_slices_value_from_fetches),
     _get_feeds_for_indexed_slices,
     lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
     else [feed.values, feed.indices, feed.dense_shape]),
    # The default catches all other types and performs no expansions.
    (object,
     lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
     lambda feed, feed_val: [(feed, feed_val)],
     lambda feed: [feed])]
# pylint: enable=g-long-lambda
def register_session_run_conversion_functions(tensor_type, fetch_function,
    feed_function=None, feed_function_for_partial_run=None):
  """Register fetch and feed conversion functions for `tf.Session.run()`.

  This function registers a triple of conversion functions for fetching and/or
  feeding values of user-defined types in a call to tf.Session.run().

  An example

  ```python
  class SquaredTensor(object):
    def __init__(self, tensor):
      self.sq = tf.square(tensor)
  #you can define conversion functions as follows:
  fetch_function = lambda squared_tensor:([squared_tensor.sq],
                                          lambda val: val[0])
  feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
  feed_function_for_partial_run = lambda feed: [feed.sq]
  #then after invoking this register function, you can use as follows:
  session.run(squared_tensor1,
              feed_dict = {squared_tensor2 : some_numpy_array})
  ```

  Args:
    tensor_type: The type for which you want to register a conversion function.
    fetch_function: A callable that takes an object of type `tensor_type` and
      returns a tuple, where the first element is a list of `tf.Tensor` objects,
      and the second element is a callable that takes a list of ndarrays and
      returns an object of some value type that corresponds to `tensor_type`.
      fetch_function describes how to expand fetch into its component Tensors
      and how to contract the fetched results back into a single return value.
    feed_function: A callable that takes feed_key and feed_value as input, and
      returns a list of tuples (feed_tensor, feed_val), feed_key must have type
      `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
      function describes how to unpack a single fed value and map it to feeds
      of one or more tensors and their corresponding values.
    feed_function_for_partial_run: A callable for specifying tensor values to
      feed when setting up a partial run, which takes a `tensor_type` type
      object as input, and returns a list of Tensors.

  Raises:
    ValueError: If `tensor_type` (or a supertype of it) has already been
      registered.
  """
  for conversion_function in _REGISTERED_EXPANSIONS:
    if issubclass(conversion_function[0], tensor_type):
      # Format the offending type into the message; previously the format
      # string and the type were passed as two separate exception arguments,
      # so the message was never interpolated. The unreachable `return`
      # that followed the raise has been dropped.
      raise ValueError('%s has already been registered so ignore it.' %
                       tensor_type)
  # New registrations go to the front so they take precedence over the
  # catch-all `object` entry at the end of the list.
  _REGISTERED_EXPANSIONS.insert(0,
      (tensor_type, fetch_function, feed_function, feed_function_for_partial_run))
class _FetchMapper(object):
  """Definition of the interface provided by fetch mappers.

  Fetch mappers are utility classes used by the _FetchHandler to handle
  arbitrary structures for the `fetch` argument to `Session.run()`.

  The `fetch` argument can be of various shapes: single tensor or op, list of
  fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
  structures can be arbitrarily nested.

  The low level run() API only wants a list of tensor or op names. The various
  `_FetchMapper` subclasses below take care of handling the different shapes:
  uniquifying the fetches, and constructing results with the original shape.
  """
  def unique_fetches(self):
    """Return the list of unique tensors or ops needed by this fetch mapper.

    Returns:
      A list of tensors or ops.
    """
    raise NotImplementedError('Must be implemented by subclasses')
  def build_results(self, values):
    """Build results that match the original shape of the fetch.

    Args:
      values: List of values returned by run(). The values correspond
        exactly to the list tensors or ops returned by unique_fetches().

    Returns:
      A struct of the same shape as the original fetch object handled by
      this fetch mapper. In the returned struct, the original fetches are
      replaced by their fetched values.
    """
    raise NotImplementedError('Must be implemented by subclasses')
  @staticmethod
  def for_fetch(fetch):
    """Creates fetch mapper that handles the structure of `fetch`.

    The default graph must be the one from which we want to fetch values when
    this function is called.

    Args:
      fetch: An arbitrary fetch structure: singleton, list, tuple,
        namedtuple, or dict.

    Returns:
      An instance of a subclass of `_FetchMapper` that handles the shape.
    """
    # Dispatch order is significant: None is rejected first, then container
    # shapes (list/tuple before dict), then registered leaf expansions.
    if fetch is None:
      raise TypeError('Fetch argument %r has invalid type %r' %
                      (fetch, type(fetch)))
    elif isinstance(fetch, (list, tuple)):
      # NOTE(touts): This is also the code path for namedtuples.
      return _ListFetchMapper(fetch)
    elif isinstance(fetch, dict):
      return _DictFetchMapper(fetch)
    else:
      # Look for a handler in the registered expansions.
      # The first matching entry wins; the catch-all `object` entry at the
      # end of _REGISTERED_EXPANSIONS matches everything, so the TypeError
      # below is effectively a safety net.
      for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
        if isinstance(fetch, tensor_type):
          fetches, contraction_fn = fetch_fn(fetch)
          return _ElementFetchMapper(fetches, contraction_fn)
      # Did not find anything.
      raise TypeError('Fetch argument %r has invalid type %r' %
                      (fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
  """Fetch mapper for a single leaf of the fetch structure.

  A leaf may still expand to several tensors (see _REGISTERED_EXPANSIONS).
  Every expanded fetch is resolved to a graph element up front; the
  contraction function later folds the fetched arrays back into one value.
  """
  def __init__(self, fetches, contraction_fn):
    """Creates an _ElementFetchMapper.

    Fetches here can be strings (tensor or op names) or any object the graph
    knows how to convert, such as a Variable, so each one is resolved through
    `as_graph_element()`.

    Args:
      fetches: List of objects, as returned by a fetch_fn defined
        in _REGISTERED_EXPANSIONS.
      contraction_fn: Callable as returned by a fetch_fn.
    """
    self._unique_fetches = []
    for fetch in fetches:
      try:
        element = ops.get_default_graph().as_graph_element(
            fetch, allow_tensor=True, allow_operation=True)
      except TypeError as e:
        raise TypeError('Fetch argument %r has invalid type %r, '
                        'must be a string or Tensor. (%s)'
                        % (fetch, type(fetch), str(e)))
      except (ValueError, KeyError) as e:
        # Both failure modes of as_graph_element get the same treatment.
        raise ValueError('Fetch argument %r cannot be interpreted as a '
                         'Tensor. (%s)' % (fetch, str(e)))
      self._unique_fetches.append(element)
    self._contraction_fn = contraction_fn
  def unique_fetches(self):
    """Returns the graph elements resolved from the expanded fetches."""
    return self._unique_fetches
  def build_results(self, values):
    """Contracts fetched values; an op-only fetch yields `None`."""
    if not values:
      # 'Operation' case
      return None
    return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
class _ListFetchMapper(_FetchMapper):
  """Fetch mapper for lists, tuples, and namedtuples."""
  def __init__(self, fetches):
    """Creates a _ListFetchMapper.

    Args:
      fetches: List, tuple, or namedtuple of fetches.
    """
    self._fetch_type = type(fetches)
    self._mappers = []
    for fetch in fetches:
      self._mappers.append(_FetchMapper.for_fetch(fetch))
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
  def unique_fetches(self):
    """Returns the deduplicated fetches across all element mappers."""
    return self._unique_fetches
  def build_results(self, values):
    """Rebuilds a container of the original type from fetched values."""
    results = [
        mapper.build_results([values[i] for i in indices])
        for mapper, indices in zip(self._mappers, self._value_indices)
    ]
    if self._fetch_type is list:
      return results
    if self._fetch_type is tuple:
      return tuple(results)
    # Namedtuples are reconstructed from positional arguments.
    return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
  """Fetch mapper for dicts."""
  def __init__(self, fetches):
    """Creates a _DictFetchMapper.

    Args:
      fetches: Dict of fetches.
    """
    self._fetch_type = type(fetches)
    self._keys = fetches.keys()
    self._mappers = [
        _FetchMapper.for_fetch(fetch) for fetch in fetches.values()
    ]
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
  def unique_fetches(self):
    """Returns the deduplicated fetches across all value mappers."""
    return self._unique_fetches
  def build_results(self, values):
    """Rebuilds a dict of the original dict type, keyed like the fetches."""
    results = self._fetch_type()
    for key, mapper, indices in zip(self._keys, self._mappers,
                                    self._value_indices):
      results[key] = mapper.build_results([values[i] for i in indices])
    return results
class _FetchHandler(object):
  """Handler for structured fetches.

  Given a graph, a user-provided structure for fetches, and a feed dict, this
  class takes care of generating a list of tensor names to fetch and op names
  to run for a low level `run()` call.

  Given the results of the low level run call, this class can also rebuild a
  result structure matching the user-provided structure for fetches, but
  containing the corresponding results.
  """
  # TODO(touts): Make this class also take care of destructuring the feed
  # dict instead of doing it in the callers.
  def __init__(self, graph, fetches, feeds, feed_handles=None):
    """Creates a fetch handler.

    Args:
      graph: Graph of the fetches. Used to check for fetchability
        and to convert all fetches to tensors or ops as needed.
      fetches: An arbitrary fetch structure: singleton, list, tuple,
        namedtuple, or dict.
      feeds: A feed dict where keys are Tensors.
      feed_handles: A dict from feed Tensors to TensorHandle objects used as
        direct feeds.
    """
    with graph.as_default():
      # Resolve the fetch structure while `graph` is the default graph so
      # string fetch names resolve against the right graph.
      self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    self._fetches = []
    self._targets = []
    self._feeds = feeds
    self._feed_handles = feed_handles or {}
    # _ops[k] is True when the k-th unique fetch is an Operation (run as a
    # target, yielding None) and False when it is a tensor-like fetch.
    # build_results() relies on this ordering.
    self._ops = []
    self._fetch_handles = {}
    for fetch in self._fetch_mapper.unique_fetches():
      if isinstance(fetch, ops.Operation):
        self._assert_fetchable(graph, fetch)
        self._targets.append(fetch)
        self._ops.append(True)
      else:
        self._assert_fetchable(graph, fetch.op)
        self._fetches.append(fetch)
        self._ops.append(False)
        # Remember the fetch if it is for a tensor handle.
        if (isinstance(fetch, ops.Tensor) and
            (fetch.op.type == 'GetSessionHandle' or
             fetch.op.type == 'GetSessionHandleV2')):
          self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
    # Fetches that are also fed do not need to be run; their fed value is
    # substituted in build_results() instead.
    self._final_fetches = [x for x in self._fetches if x not in feeds]
  def _assert_fetchable(self, graph, op):
    # Rejects ops the graph has explicitly marked as not fetchable.
    if not graph.is_fetchable(op):
      raise ValueError(
          'Operation %r has been marked as not fetchable.' % op.name)
  def fetches(self):
    """Return the unique names of tensors to fetch.

    Returns:
      A list of strings.
    """
    return self._final_fetches
  def targets(self):
    """Return the unique names of ops to run.

    Returns:
      A list of strings.
    """
    return self._targets
  def build_results(self, session, tensor_values):
    """Build results matching the original fetch shape.

    `tensor_values` must be a list of the same length as
    the one returned by `fetches()`, and holding the requested
    fetch values.

    This method builds a struct with the same shape as the original `fetches`
    passed to the constructor, in which the fetches are replaced by their
    fetched value.

    Args:
      session: The enclosing session. Used for tensor handles.
      tensor_values: List of values matching the list returned
        by fetches().

    Returns:
      A structure of the same shape as the original `fetches` argument but
      containing tensors or None (for fetched ops).
    """
    full_values = []
    assert len(self._final_fetches) == len(tensor_values)
    # `i` walks self._fetches (every tensor fetch); `j` walks tensor_values
    # (only the fetches that were actually run, i.e. not satisfied by a feed).
    i = 0
    j = 0
    for is_op in self._ops:
      if is_op:
        full_values.append(None)
      else:
        # If the fetch was in the feeds, use the fed value, otherwise
        # use the returned value.
        if self._fetches[i] in self._feed_handles:
          # A fetch had a corresponding direct TensorHandle feed. Call eval()
          # to obtain the Tensor value from the TensorHandle.
          value = self._feed_handles[self._fetches[i]].eval()
        else:
          value = self._feeds.get(self._fetches[i])
        if value is None:
          # Not fed: consume the next value the runtime returned.
          value = tensor_values[j]
          j += 1
        dtype = self._fetch_handles.get(self._fetches[i])
        if dtype:
          # Wrap raw handle data in a TensorHandle tied to this session.
          full_values.append(session_ops.TensorHandle(value, dtype, session))
        else:
          full_values.append(value)
        i += 1
    assert j == len(tensor_values)
    return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
  """Utility function for transitioning to the new session API.

  Args:
    tensor_list: a list of `Tensor`s.

  Returns:
    A list of each `Tensor`s name (as byte arrays).
  """
  names = []
  for tensor in tensor_list:
    names.append(compat.as_bytes(tensor.name))
  return names
class _DeviceAttributes(object):
  """Struct-like object describing a device's attributes.

  Each device has 3 key properties:
  - name: the fully-qualified TensorFlow path to the device. For
      example: /job:worker/replica:0/task:3/device:CPU:0
  - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
  - memory_limit_bytes: the maximum amount of memory available on the device
      (in bytes).
  """
  def __init__(self, name, device_type, memory_limit_bytes):
    # Canonicalize once at construction so `name` is stable and comparable.
    self._name = device.canonical_name(name)
    self._device_type = device_type
    self._memory_limit_bytes = memory_limit_bytes
  @property
  def name(self):
    """The canonicalized, fully-qualified device name."""
    return self._name
  @property
  def device_type(self):
    """The device type string (e.g. 'CPU', 'GPU')."""
    return self._device_type
  @property
  def memory_limit_bytes(self):
    """Maximum memory available on the device, in bytes."""
    return self._memory_limit_bytes
  def __repr__(self):
    return '_DeviceAttributes({}, {}, {:d})'.format(
        self.name, self.device_type, self.memory_limit_bytes)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
  def __init__(self, target='', graph=None, config=None):
    """Constructs a new TensorFlow session.

    Args:
      target: (Optional) The TensorFlow execution engine to connect to.
      graph: (Optional) The graph to be used. If this argument is None,
        the default graph will be used.
      config: (Optional) ConfigProto proto used to configure the session.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        creating the TensorFlow session.
      TypeError: If one of the arguments has the wrong type.
    """
    # Resolve the graph to run against; fall back to the process default.
    if graph is None:
      self._graph = ops.get_default_graph()
    else:
      if not isinstance(graph, ops.Graph):
        raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
      self._graph = graph
    self._opened = False
    self._closed = False
    self._current_version = 0
    self._extend_lock = threading.Lock()
    if target is not None:
      try:
        self._target = compat.as_bytes(target)
      except TypeError:
        raise TypeError('target must be a string, but got %s' % type(target))
    else:
      self._target = None
    self._delete_lock = threading.Lock()
    self._dead_handles = []
    if config is not None:
      if not isinstance(config, config_pb2.ConfigProto):
        raise TypeError('config must be a tf.ConfigProto, but got %s'
                        % type(config))
      self._config = config
      self._add_shapes = config.graph_options.infer_shapes
    else:
      self._config = None
      self._add_shapes = False
    # pylint: disable=protected-access
    # We cache _USE_C_API's value because some test cases will create a session
    # with _USE_C_API = False but set it back to True before calling close().
    self._created_with_new_api = ops._USE_C_API
    # pylint: enable=protected-access
    self._session = None
    # The options struct is owned by this call and must always be freed,
    # even if session creation fails -- hence the try/finally.
    opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        if self._created_with_new_api:
          # pylint: disable=protected-access
          self._session = tf_session.TF_NewSession(self._graph._c_graph, opts,
                                                   status)
          # pylint: enable=protected-access
        else:
          self._session = tf_session.TF_NewDeprecatedSession(opts, status)
    finally:
      tf_session.TF_DeleteSessionOptions(opts)
  def list_devices(self):
    """Lists available devices in this session.

    ```python
    devices = sess.list_devices()
    for d in devices:
      print(d.name)
    ```

    Each element in the list has the following properties:
    - `name`: A string with the full name of the device. ex:
        `/job:worker/replica:0/task:3/device:CPU:0`
    - `device_type`: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
    - `memory_limit`: The maximum amount of memory available on the device.
        Note: depending on the device, it is possible the usable memory could
        be substantially less.

    Raises:
      tf.errors.OpError: If it encounters an error (e.g. session is in an
        invalid state, or network errors occur).

    Returns:
      A list of devices in the session.
    """
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        raw_device_list = tf_session.TF_SessionListDevices(
            self._session, status)
      else:
        raw_device_list = tf_session.TF_DeprecatedSessionListDevices(
            self._session, status)
      # Copy the C-owned device list into Python objects, then free it.
      # NOTE(review): if one of the accessor calls below raises, the raw
      # list is not freed -- presumably acceptable here; confirm.
      device_list = []
      size = tf_session.TF_DeviceListCount(raw_device_list)
      for i in range(size):
        name = tf_session.TF_DeviceListName(raw_device_list, i, status)
        device_type = tf_session.TF_DeviceListType(raw_device_list, i, status)
        memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i, status)
        device_list.append(_DeviceAttributes(name, device_type, memory))
      tf_session.TF_DeleteDeviceList(raw_device_list)
      return device_list
  def close(self):
    """Closes this session.

    Calling this method frees all resources associated with the session.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        closing the TensorFlow session.
    """
    if self._created_with_new_api:
      # NOTE(review): this branch checks _session/_closed without taking
      # _extend_lock, unlike the deprecated-API branch below -- confirm the
      # C-API close path is safe without the lock.
      if self._session and not self._closed:
        self._closed = True
        with errors.raise_exception_on_not_ok_status() as status:
          tf_session.TF_CloseSession(self._session, status)
    else:
      with self._extend_lock:
        if self._opened and not self._closed:
          self._closed = True
          with errors.raise_exception_on_not_ok_status() as status:
            tf_session.TF_CloseDeprecatedSession(self._session, status)
  def __del__(self):
    """Best-effort cleanup when the session object is garbage collected."""
    # cleanly ignore all exceptions
    try:
      self.close()
    except Exception: # pylint: disable=broad-except
      pass
    if self._session is not None:
      try:
        status = c_api_util.ScopedTFStatus()
        if self._created_with_new_api:
          tf_session.TF_DeleteSession(self._session, status)
        else:
          tf_session.TF_DeleteDeprecatedSession(self._session, status)
      except AttributeError:
        # At shutdown, `c_api_util` or `tf_session` may have been garbage
        # collected, causing the above method calls to fail. In this case,
        # silently leak since the program is about to terminate anyway.
        pass
      self._session = None
  @property
  def graph(self):
    """The graph that was launched in this session."""
    # Set once in __init__; a session cannot be re-targeted to another graph.
    return self._graph
  @property
  def graph_def(self):
    """A serializable version of the underlying TensorFlow graph.

    Returns:
      A graph_pb2.GraphDef proto containing nodes for all of the Operations in
      the underlying TensorFlow graph.
    """
    # Shape data is attached only when the session's config enabled
    # graph_options.infer_shapes (see __init__).
    return self._graph.as_graph_def(add_shapes=self._add_shapes)
  @property
  def sess_str(self):
    """The target string (as bytes) this session connects to; may be None."""
    return self._target
  def as_default(self):
    """Returns a context manager that makes this object the default session.

    Use with the `with` keyword to specify that calls to
    @{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
    this session.

    ```python
    c = tf.constant(..)
    sess = tf.Session()
    with sess.as_default():
      assert tf.get_default_session() is sess
      print(c.eval())
    ```

    To get the current default session, use @{tf.get_default_session}.

    *N.B.* The `as_default` context manager *does not* close the
    session when you exit the context, and you must close the session
    explicitly.

    ```python
    c = tf.constant(...)
    sess = tf.Session()
    with sess.as_default():
      print(c.eval())
    # ...
    with sess.as_default():
      print(c.eval())
    sess.close()
    ```

    Alternatively, you can use `with tf.Session():` to create a
    session that is automatically closed on exiting the context,
    including when an uncaught exception is raised.

    *N.B.* The default session is a property of the current thread. If you
    create a new thread, and wish to use the default session in that
    thread, you must explicitly add a `with sess.as_default():` in that
    thread's function.

    *N.B.* Entering a `with sess.as_default():` block does not affect
    the current default graph. If you are using multiple graphs, and
    `sess.graph` is different from the value of @{tf.get_default_graph},
    you must explicitly enter a `with sess.graph.as_default():` block
    to make `sess.graph` the default graph.

    Returns:
      A context manager using this session as the default session.
    """
    # Delegates to the session-stack machinery in `ops`; entering/exiting the
    # returned context does not open or close this session.
    return ops.default_session(self)
  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Runs operations and evaluates tensors in `fetches`.

    This method runs one "step" of TensorFlow computation, by
    running the necessary graph fragment to execute every `Operation`
    and evaluate every `Tensor` in `fetches`, substituting the values in
    `feed_dict` for the corresponding input values.

    The `fetches` argument may be a single graph element, or an arbitrarily
    nested list, tuple, namedtuple, dict, or OrderedDict containing graph
    elements at its leaves. A graph element can be one of the following types:

    * An @{tf.Operation}.
      The corresponding fetched value will be `None`.
    * A @{tf.Tensor}.
      The corresponding fetched value will be a numpy ndarray containing the
      value of that tensor.
    * A @{tf.SparseTensor}.
      The corresponding fetched value will be a
      @{tf.SparseTensorValue}
      containing the value of that sparse tensor.
    * A `get_tensor_handle` op. The corresponding fetched value will be a
      numpy ndarray containing the handle of that tensor.
    * A `string` which is the name of a tensor or operation in the graph.

    The value returned by `run()` has the same shape as the `fetches` argument,
    where the leaves are replaced by the corresponding values returned by
    TensorFlow.

    Example:

    ```python
    a = tf.constant([10, 20])
    b = tf.constant([1.0, 2.0])
    # 'fetches' can be a singleton
    v = session.run(a)
    # v is the numpy array [10, 20]
    # 'fetches' can be a list.
    v = session.run([a, b])
    # v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
    # 1-D array [1.0, 2.0]
    # 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
    MyData = collections.namedtuple('MyData', ['a', 'b'])
    v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
    # v is a dict with
    # v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
    # 'b' (the numpy array [1.0, 2.0])
    # v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
    # [10, 20].
    ```

    The optional `feed_dict` argument allows the caller to override
    the value of tensors in the graph. Each key in `feed_dict` can be
    one of the following types:

    * If the key is a @{tf.Tensor}, the
      value may be a Python scalar, string, list, or numpy ndarray
      that can be converted to the same `dtype` as that
      tensor. Additionally, if the key is a
      @{tf.placeholder}, the shape of
      the value will be checked for compatibility with the placeholder.
    * If the key is a
      @{tf.SparseTensor},
      the value should be a
      @{tf.SparseTensorValue}.
    * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
      should be a nested tuple with the same structure that maps to their
      corresponding values as above.

    Each value in `feed_dict` must be convertible to a numpy array of the dtype
    of the corresponding key.

    The optional `options` argument expects a [`RunOptions`] proto. The options
    allow controlling the behavior of this particular step (e.g. turning tracing
    on).

    The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
    appropriate, the non-Tensor output of this step will be collected there. For
    example, when users turn on tracing in `options`, the profiled info will be
    collected into this argument and passed back.

    Args:
      fetches: A single graph element, a list of graph elements,
        or a dictionary whose values are graph elements or lists of graph
        elements (described above).
      feed_dict: A dictionary that maps graph elements to values
        (described above).
      options: A [`RunOptions`] protocol buffer
      run_metadata: A [`RunMetadata`] protocol buffer

    Returns:
      Either a single value if `fetches` is a single graph element, or
      a list of values if `fetches` is a list, or a dictionary with the
      same keys as `fetches` if that is a dictionary (described above).

    Raises:
      RuntimeError: If this `Session` is in an invalid state (e.g. has been
        closed).
      TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
      ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
        `Tensor` that doesn't exist.
    """
    # Serialize RunOptions into a C buffer for the low-level run. Both
    # buffers created here are owned by this call and released in `finally`.
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString())) if options else None
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
    try:
      result = self._run(None, fetches, feed_dict, options_ptr,
                         run_metadata_ptr)
      if run_metadata:
        # Copy the collected metadata out of the C buffer into the caller's
        # RunMetadata proto before the buffer is freed below.
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
      if options:
        tf_session.TF_DeleteBuffer(options_ptr)
    return result
  def partial_run(self, handle, fetches, feed_dict=None):
    """Continues the execution with more feeds and fetches.

    This is EXPERIMENTAL and subject to change.

    To use partial execution, a user first calls `partial_run_setup()` and
    then a sequence of `partial_run()`. `partial_run_setup` specifies the
    list of feeds and fetches that will be used in the subsequent
    `partial_run` calls.

    The optional `feed_dict` argument allows the caller to override
    the value of tensors in the graph. See run() for more information.

    Below is a simple example:

    ```python
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(r1, c)
    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    res = sess.partial_run(h, r2, feed_dict={c: res})
    ```

    Args:
      handle: A handle for a sequence of partial runs.
      fetches: A single graph element, a list of graph elements,
        or a dictionary whose values are graph elements or lists of graph
        elements (see documentation for `run`).
      feed_dict: A dictionary that maps graph elements to values
        (described above).

    Returns:
      Either a single value if `fetches` is a single graph element, or
      a list of values if `fetches` is a list, or a dictionary with the
      same keys as `fetches` if that is a dictionary
      (see documentation for `run`).

    Raises:
      tf.errors.OpError: Or one of its subclasses on error.
    """
    # TODO(touts): Support feeding and fetching the same tensor.
    # Reuses the generic _run path: `handle` identifies the previously
    # set-up partial-run sequence; options/metadata are not supported here.
    return self._run(handle, fetches, feed_dict, None, None)
  def partial_run_setup(self, fetches, feeds=None):
    """Sets up a graph with feeds and fetches for partial run.

    This is EXPERIMENTAL and subject to change.

    Note that contrary to `run`, `feeds` only specifies the graph elements.
    The tensors will be supplied by the subsequent `partial_run` calls.

    Args:
      fetches: A single graph element, or a list of graph elements.
      feeds: A single graph element, or a list of graph elements.

    Returns:
      A handle for partial run.

    Raises:
      RuntimeError: If this `Session` is in an invalid state (e.g. has been
        closed).
      TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
      tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
    """
    def _feed_fn(feed):
      # Expands a feed into its component tensors using the partial-run feed
      # function (4th tuple element) of the first matching registration.
      for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
        if isinstance(feed, tensor_type):
          return feed_fn(feed)
      raise TypeError('Feed argument %r has invalid type %r'
                      % (feed, type(feed)))
    # Check session.
    if self._closed:
      raise RuntimeError('Attempted to use a closed Session.')
    if self.graph.version == 0:
      raise RuntimeError('The Session graph is empty. Add operations to the '
                         'graph before calling run().')
    if feeds is None:
      feeds = []
    # Create request.
    feed_list = []
    # Validate and process feed_list.
    is_list_feed = isinstance(feeds, (list, tuple))
    if not is_list_feed:
      feeds = [feeds]
    for feed in feeds:
      for subfeed in _feed_fn(feed):
        try:
          subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
                                                  allow_operation=False)
          if self._created_with_new_api:
            # pylint: disable=protected-access
            feed_list.append(subfeed_t._as_tf_output())
            # pylint: enable=protected-access
          else:
            feed_list.append(compat.as_bytes(subfeed_t.name))
        except Exception as e:
          # NOTE(review): `e.message` exists only on Python 2 exceptions; on
          # Python 3 this rewrite would itself raise AttributeError -- verify.
          e.message = ('Cannot interpret feed_list key as Tensor: '
                       + e.message)
          e.args = (e.message,)
          raise e
    # Validate and process fetches.
    # TODO(touts): Support feeding and fetching the same tensor.
    fetch_handler = _FetchHandler(self._graph, fetches, {})
    # Set up a graph with feeds and fetches for partial run.
    def _setup_fn(session, feed_list, fetch_list, target_list):
      # Ensure any pending graph nodes are pushed to the runtime first.
      self._extend_graph()
      with errors.raise_exception_on_not_ok_status() as status:
        if self._created_with_new_api:
          return tf_session.TF_SessionPRunSetup_wrapper(
              session, feed_list, fetch_list, target_list, status)
        else:
          return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
                                         target_list, status)
    if self._created_with_new_api:
      # pylint: disable=protected-access
      final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
      final_targets = [op._c_op for op in fetch_handler.targets()]
      # pylint: enable=protected-access
    else:
      final_fetches = _name_list(fetch_handler.fetches())
      final_targets = _name_list(fetch_handler.targets())
    return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
                         final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
  """Perform either run or partial_run, depending the presence of `handle`.

  Args:
    handle: A partial-run handle from `partial_run_setup`, or None for a
      regular `run` call.
    fetches: A single graph element, a list of graph elements, or a nested
      structure thereof (handled by `_FetchHandler`).
    feed_dict: A dict mapping feedable graph elements to values, or None.
    options: A `RunOptions` proto, or None; forwarded to `_do_run`.
    run_metadata: A `RunMetadata` proto to fill in, or None.

  Returns:
    The fetched values, structured to match `fetches`.
  """
  def _feed_fn(feed, feed_val):
    # Expand a (feed, value) pair into component (tensor, value) pairs
    # using the registered expansion table.
    for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
      if isinstance(feed, tensor_type):
        return feed_fn(feed, feed_val)
    raise TypeError('Feed argument %r has invalid type %r'
                    % (feed, type(feed)))

  # Check session.
  if self._closed:
    raise RuntimeError('Attempted to use a closed Session.')
  if self.graph.version == 0:
    raise RuntimeError('The Session graph is empty. Add operations to the '
                       'graph before calling run().')

  # Create request.
  feed_dict_tensor = {}
  feed_map = {}

  # Validate and process feed_dict.
  feed_handles = {}
  if feed_dict:
    # Flatten dict-valued keys/values (e.g. dict feeds) into flat items.
    feed_dict = nest.flatten_dict_items(feed_dict)
    for feed, feed_val in feed_dict.items():
      for subfeed, subfeed_val in _feed_fn(feed, feed_val):
        try:
          subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
                                                  allow_operation=False)
        except Exception as e:
          raise TypeError('Cannot interpret feed_dict key as Tensor: '
                          + e.args[0])

        if isinstance(subfeed_val, ops.Tensor):
          raise TypeError('The value of a feed cannot be a tf.Tensor object. '
                          'Acceptable feed values include Python scalars, '
                          'strings, lists, numpy ndarrays, or TensorHandles.')

        subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
        # Reject Python ints that would silently wrap/overflow when cast to
        # the tensor's numpy dtype (round-trip check).
        if isinstance(subfeed_val,
                      int) and subfeed_dtype(subfeed_val) != subfeed_val:
          raise TypeError(
              'Type of feed value ' + str(subfeed_val) + ' is not'
              ' compatible with Tensor type ' + str(subfeed_dtype) + '.'
              ' Try explicitly setting the type of the feed tensor'
              ' to a larger type (e.g. int64).')

        is_tensor_handle_feed = isinstance(subfeed_val,
                                           session_ops.TensorHandle)
        if is_tensor_handle_feed:
          # TensorHandle feeds are recorded separately so the fetch handler
          # and the mover logic can treat them specially.
          np_val = subfeed_val.to_numpy_array()
          feed_handles[subfeed_t] = subfeed_val
        else:
          np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)

        # Shape compatibility is only enforced for plain value feeds.
        if (not is_tensor_handle_feed and
            not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
          raise ValueError(
              'Cannot feed value of shape %r for Tensor %r, '
              'which has shape %r'
              % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
        if not self.graph.is_feedable(subfeed_t):
          raise ValueError('Tensor %s may not be fed.' % subfeed_t)

        feed_dict_tensor[subfeed_t] = np_val
        feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)

  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(
      self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)

  # Run request and get response.
  # We need to keep the returned movers alive for the following _do_run().
  # These movers are no longer needed when _do_run() completes, and
  # are deleted when `movers` goes out of scope when this _run() ends.
  # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
  # of a handle from a different device as an error.
  _ = self._update_with_movers(feed_dict_tensor, feed_map)
  final_fetches = fetch_handler.fetches()
  final_targets = fetch_handler.targets()
  # We only want to really perform the run if fetches or targets are provided,
  # or if the call is a partial run that specifies feeds.
  if final_fetches or final_targets or (handle and feed_dict_tensor):
    results = self._do_run(handle, final_targets, final_fetches,
                           feed_dict_tensor, options, run_metadata)
  else:
    results = []
  return fetch_handler.build_results(self, results)
def make_callable(self,
                  fetches,
                  feed_list=None,
                  accept_options=False):
  """Returns a Python callable that runs a particular step.

  The returned callable will take `len(feed_list)` arguments whose types
  must be compatible feed values for the respective elements of `feed_list`.
  For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
  argument to the returned callable must be a numpy ndarray (or something
  convertible to an ndarray) with matching element type and shape. See
  @{tf.Session.run} for details of the allowable feed key and value types.

  The returned callable will have the same return type as
  `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
  the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
  it will return `None`.

  Args:
    fetches: A value or list of values to fetch. See @{tf.Session.run}
      for details of the allowable fetch types.
    feed_list: (Optional.) A list of `feed_dict` keys. See
      @{tf.Session.run} for details of the allowable feed key types.
    accept_options: (Optional.) Iff `True`, the returned `Callable` will be
      able to accept @{tf.RunOptions} and @{tf.RunMetadata} as optional
      keyword arguments `options` and `run_metadata`, respectively, with
      the same syntax and semantics as @{tf.Session.run}, which is useful
      for certain use cases (profiling and debugging) but will result in
      measurable slowdown of the `Callable`'s performance. Default: `False`.

  Returns:
    A function that when called will execute the step defined by
    `feed_list` and `fetches` in this session.

  Raises:
    TypeError: If `fetches` or `feed_list` cannot be interpreted
      as arguments to @{tf.Session.run}.
  """
  assert not self._created_with_new_api, ('session.make_callable() doesn\'t '
                                          'work with C API')
  if feed_list is not None:
    if not isinstance(feed_list, (list, tuple)):
      raise TypeError('`feed_list` must be a list or tuple.')
    # Delegate any non-empty feed lists to the existing `run()` logic.
    # TODO(mrry): Refactor the feed handling logic from
    # `Session._run()` so that we can convert the feeds to a list of
    # strings here.
    def _generic_run(*feed_args, **kwargs):
      feed_dict = {feed: feed_val
                   for feed, feed_val in zip(feed_list, feed_args)}
      return self.run(fetches, feed_dict=feed_dict, **kwargs)
    return _generic_run

  # Ensure any changes to the graph are reflected in the runtime.
  # Note that we don't need to do this on subsequent calls to the
  # returned object, because the arguments to `fetches` must already be
  # in the graph.
  self._extend_graph()

  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(self._graph, fetches, {})
  fetch_list_as_strings = _name_list(fetch_handler.fetches())
  target_list_as_strings = _name_list(fetch_handler.targets())

  def _callable_template_with_options_and_metadata(
      fetch_list_as_strings,
      target_list_as_strings,
      fetch_handler,
      options=None,
      run_metadata=None):
    """Template callable that accepts RunOptions and RunMetadata."""
    # Serialize options into a C buffer up front; run_metadata gets a fresh
    # output buffer that is parsed back into the proto afterwards.
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString())) if options else None
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(
            self._session, options_ptr, {}, fetch_list_as_strings,
            target_list_as_strings, status, run_metadata_ptr)
      if fetch_handler:
        results = fetch_handler.build_results(self, results)
      else:
        results = results[0] if results else None
      if run_metadata:
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      # Always release the C buffers, even if TF_Run raised.
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
      if options:
        tf_session.TF_DeleteBuffer(options_ptr)
    return results

  if accept_options:
    return functools.partial(
        _callable_template_with_options_and_metadata, fetch_list_as_strings,
        target_list_as_strings, fetch_handler)
  elif isinstance(fetches, ops.Operation):
    # Special case for fetching a single operation, because the
    # function will have no return value.
    assert not fetch_list_as_strings
    assert len(target_list_as_strings) == 1
    def _single_operation_run():
      with errors.raise_exception_on_not_ok_status() as status:
        tf_session.TF_Run(self._session, None, {}, [],
                          target_list_as_strings, status, None)
    return _single_operation_run
  elif isinstance(fetches, ops.Tensor):
    # Special case for fetching a single tensor, because the
    # function can return the result of `TF_Run()` directly.
    assert len(fetch_list_as_strings) == 1
    assert not target_list_as_strings
    def _single_tensor_run():
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(self._session, None, {},
                                    fetch_list_as_strings, [], status, None)
      return results[0]
    return _single_tensor_run
  else:
    # In all other cases, we must use `fetch_handler` to build the
    # results for us.
    def _fetch_handler_run():
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(self._session, None, {},
                                    fetch_list_as_strings,
                                    target_list_as_strings, status, None)
      return fetch_handler.build_results(self, results)
    return _fetch_handler_run
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')

def _do_run(self, handle, target_list, fetch_list, feed_dict,
            options, run_metadata):
  """Runs a step based on the given fetches and feeds.

  Args:
    handle: a handle for partial_run. None if this is just a call to run().
    target_list: A list of operations to be run, but not fetched.
    fetch_list: A list of tensors to be fetched.
    feed_dict: A dictionary that maps tensors to numpy ndarrays.
    options: A (pointer to a) [`RunOptions`] protocol buffer, or None
    run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None

  Returns:
    A list of numpy ndarrays, corresponding to the elements of
    `fetch_list`. If the ith element of `fetch_list` contains the
    name of an operation, the first Tensor output of that operation
    will be returned for that element.

  Raises:
    tf.errors.OpError: Or one of its subclasses on error.
  """
  # Convert feeds/fetches/targets into the representation the chosen C
  # layer expects: TF_Output/op handles for the new API, name byte strings
  # for the old one.
  if self._created_with_new_api:
    # pylint: disable=protected-access
    feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
    fetches = [t._as_tf_output() for t in fetch_list]
    targets = [op._c_op for op in target_list]
    # pylint: enable=protected-access
  else:
    feeds = dict((compat.as_bytes(t.name), v) for t, v in feed_dict.items())
    fetches = _name_list(fetch_list)
    targets = _name_list(target_list)

  def _run_fn(session, feed_dict, fetch_list, target_list, options,
              run_metadata):
    # Ensure any changes to the graph are reflected in the runtime.
    self._extend_graph()
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        return tf_session.TF_SessionRun_wrapper(
            session, options, feed_dict, fetch_list, target_list,
            run_metadata, status)
      else:
        return tf_session.TF_Run(session, options,
                                 feed_dict, fetch_list, target_list,
                                 status, run_metadata)

  def _prun_fn(session, handle, feed_dict, fetch_list):
    # Partial runs declare their targets at setup time, so a non-empty
    # target_list here is a programming error.
    if target_list:
      raise RuntimeError('partial_run() requires empty target_list.')
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        return tf_session.TF_SessionPRun_wrapper(session, handle, feed_dict,
                                                 fetch_list, status)
      else:
        return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,
                                  status)

  if handle is None:
    return self._do_call(_run_fn, self._session, feeds, fetches, targets,
                         options, run_metadata)
  else:
    return self._do_call(_prun_fn, self._session, handle, feeds, fetches)
def _do_call(self, fn, *args):
  """Invokes `fn(*args)`, annotating any raised `OpError` with graph info.

  When an `OpError` escapes, the failing node's name is extracted from the
  error message and, if that node still exists in the graph, the error is
  re-raised with the corresponding `NodeDef` and `Operation` attached.
  """
  try:
    return fn(*args)
  except errors.OpError as op_error:
    error_text = compat.as_text(op_error.message)
    failed_op = None
    failed_node_def = None
    match = BaseSession._NODEDEF_NAME_RE.search(error_text)
    if match:
      try:
        failed_op = self._graph.get_operation_by_name(match.group(1))
        failed_node_def = failed_op.node_def
      except KeyError:
        # The node named in the error is not (or no longer) in the graph.
        pass
    raise type(op_error)(failed_node_def, failed_op, error_text)
def _extend_graph(self):
  """Pushes any graph changes since the last extend to the C runtime."""
  # Nothing to do if we're using the new session interface.
  # TODO(skyewm): remove this function altogether eventually
  if self._created_with_new_api:
    return
  # Ensure any changes to the graph are reflected in the runtime.
  with self._extend_lock:
    if self._graph.version <= self._current_version:
      return
    # pylint: disable=protected-access
    graph_def, self._current_version = self._graph._as_graph_def(
        from_version=self._current_version, add_shapes=self._add_shapes)
    # pylint: enable=protected-access
    with errors.raise_exception_on_not_ok_status() as status:
      tf_session.TF_ExtendGraph(self._session,
                                graph_def.SerializeToString(), status)
    self._opened = True
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10

def _register_dead_handle(self, handle):
  """Registers a dead tensor handle for deferred, batched deletion.

  Handles accumulate under `_delete_lock`; once `_DEAD_HANDLES_THRESHOLD`
  are pending, the whole batch is deleted in a single `run()` call.
  """
  # Register a dead handle in the session. Delete the dead tensors when
  # the number of dead tensors exceeds certain threshold.
  tensors_to_delete = None
  with self._delete_lock:
    self._dead_handles.append(handle)
    if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
      # Take ownership of the batch; the run below happens outside the lock.
      tensors_to_delete = self._dead_handles
      self._dead_handles = []
  # Delete the dead tensors.
  if tensors_to_delete:
    feeds = {}
    fetches = []
    for deleter_key, tensor_handle in enumerate(tensors_to_delete):
      holder, deleter = session_ops._get_handle_deleter(self.graph,
                                                        deleter_key,
                                                        tensor_handle)
      feeds[holder] = tensor_handle
      fetches.append(deleter)
    self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
  """Moves tensor-handle feeds to compatible devices, updating `feed_dict`.

  If a tensor handle is fed to a device-incompatible placeholder, we move
  the tensor to the right device, generate a new tensor handle, and update
  `feed_dict` in place to use the new handle.

  Args:
    feed_dict: Dict mapping feed tensors to feed values; mutated in place
      for any moved handle.
    feed_map: Dict mapping feed-name byte strings to (tensor, value) pairs.

  Returns:
    The list of newly generated handles (empty if no moves were needed).
    Callers keep the returned list alive until the subsequent `_do_run()`
    completes.
  """
  handle_movers = []
  for feed_name, val in feed_map.items():
    mover = session_ops._get_handle_mover(self.graph, *val)
    if mover:
      handle_movers.append((feed_name, val[1], mover))
  # Transfer a tensor to the right device if needed.
  if not handle_movers:
    return []
  else:
    feeds = {}
    fetches = []
    for _, handle, mover in handle_movers:
      feeds[mover[0]] = handle
      fetches.append(mover[1])
    handles = self.run(fetches, feed_dict=feeds)
    for handle_mover, handle in zip(handle_movers, handles):
      # FIX: `np.object` was a deprecated alias for the builtin `object`
      # (removed in NumPy 1.24); use `object` directly.
      np_val = np.array(handle.handle, dtype=object)
      feed_name = handle_mover[0]
      feed_tensor = feed_map[feed_name][0]
      feed_dict[feed_tensor] = np_val
    return handles
class Session(BaseSession):
  """A class for running TensorFlow operations.

  A `Session` object encapsulates the environment in which `Operation`
  objects are executed, and `Tensor` objects are evaluated. For
  example:

  ```python
  # Build a graph.
  a = tf.constant(5.0)
  b = tf.constant(6.0)
  c = a * b

  # Launch the graph in a session.
  sess = tf.Session()

  # Evaluate the tensor `c`.
  print(sess.run(c))
  ```

  A session may own resources, such as
  @{tf.Variable}, @{tf.QueueBase},
  and @{tf.ReaderBase}. It is important to release
  these resources when they are no longer required. To do this, either
  invoke the @{tf.Session.close} method on the session, or use
  the session as a context manager. The following two examples are
  equivalent:

  ```python
  # Using the `close()` method.
  sess = tf.Session()
  sess.run(...)
  sess.close()

  # Using the context manager.
  with tf.Session() as sess:
    sess.run(...)
  ```

  The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
  protocol buffer exposes various configuration options for a
  session. For example, to create a session that uses soft constraints
  for device placement, and log the resulting placement decisions,
  create a session as follows:

  ```python
  # Launch the graph in a session that allows soft device placement and
  # logs the placement decisions.
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True))
  ```
  """

  def __init__(self, target='', graph=None, config=None):
    """Creates a new TensorFlow session.

    If no `graph` argument is specified when constructing the session,
    the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()` in the same
    process, you will have to use different sessions for each graph,
    but each graph can be used in multiple sessions. In this case, it
    is often clearer to pass the graph to be launched explicitly to
    the session constructor.

    Args:
      target: (Optional.) The execution engine to connect to.
        Defaults to using an in-process engine. See
        @{$distributed$Distributed TensorFlow}
        for more examples.
      graph: (Optional.) The `Graph` to be launched (described above).
      config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
        protocol buffer with configuration options for the session.
    """
    super(Session, self).__init__(target, graph, config=config)
    # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
    self._default_graph_context_manager = None
    self._default_session_context_manager = None

  def __enter__(self):
    if self._default_graph_context_manager is None:
      self._default_graph_context_manager = self.graph.as_default()
    else:
      raise RuntimeError('Session context managers are not re-entrant. '
                         'Use `Session.as_default()` if you want to enter '
                         'a session multiple times.')
    if self._default_session_context_manager is None:
      self._default_session_context_manager = self.as_default()
    self._default_graph_context_manager.__enter__()
    return self._default_session_context_manager.__enter__()

  def __exit__(self, exec_type, exec_value, exec_tb):
    # FIX: the original compared `exec_type is errors.OpError`, which only
    # matched when `OpError` itself — never one of its subclasses such as
    # `InvalidArgumentError`, the types actually raised — escaped the
    # `with` block, so this log line effectively never fired. `issubclass`
    # covers all OpError subclasses.
    if exec_type is not None and issubclass(exec_type, errors.OpError):
      logging.error('Session closing due to OpError: %s', (exec_value,))
    self._default_session_context_manager.__exit__(
        exec_type, exec_value, exec_tb)
    self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
    self._default_session_context_manager = None
    self._default_graph_context_manager = None
    self.close()

  @staticmethod
  def reset(target, containers=None, config=None):
    """Resets resource containers on `target`, and close all connected sessions.

    A resource container is distributed across all workers in the
    same cluster as `target`. When a resource container on `target`
    is reset, resources associated with that container will be cleared.
    In particular, all Variables in the container will become undefined:
    they lose their values and shapes.

    NOTE:
    (i) reset() is currently only implemented for distributed sessions.
    (ii) Any sessions on the master named by `target` will be closed.

    If no resource containers are provided, all containers are reset.

    Args:
      target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all of
        all the containers are to be reset.
      config: (Optional.) Protocol buffer with configuration options.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        resetting containers.
    """
    if target is not None:
      target = compat.as_bytes(target)
    if containers is not None:
      containers = [compat.as_bytes(c) for c in containers]
    else:
      containers = []
    tf_session.TF_Reset(target, containers, config)
class InteractiveSession(BaseSession):
  """A TensorFlow `Session` for use in interactive contexts, such as a shell.

  The only difference with a regular `Session` is that an `InteractiveSession`
  installs itself as the default session on construction.
  The methods @{tf.Tensor.eval}
  and @{tf.Operation.run}
  will use that session to run ops.

  This is convenient in interactive shells and [IPython
  notebooks](http://ipython.org), as it avoids having to pass an explicit
  `Session` object to run ops.

  For example:

  ```python
  sess = tf.InteractiveSession()
  a = tf.constant(5.0)
  b = tf.constant(6.0)
  c = a * b
  # We can just use 'c.eval()' without passing 'sess'
  print(c.eval())
  sess.close()
  ```

  Note that a regular session installs itself as the default session when it
  is created in a `with` statement. The common usage in non-interactive
  programs is to follow that pattern:

  ```python
  a = tf.constant(5.0)
  b = tf.constant(6.0)
  c = a * b
  with tf.Session():
    # We can also use 'c.eval()' here.
    print(c.eval())
  ```
  """

  def __init__(self, target='', graph=None, config=None):
    """Creates a new interactive TensorFlow session.

    If no `graph` argument is specified when constructing the session,
    the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()` in the same
    process, you will have to use different sessions for each graph,
    but each graph can be used in multiple sessions. In this case, it
    is often clearer to pass the graph to be launched explicitly to
    the session constructor.

    Args:
      target: (Optional.) The execution engine to connect to.
        Defaults to using an in-process engine.
      graph: (Optional.) The `Graph` to be launched (described above).
      config: (Optional) `ConfigProto` proto used to configure the session.
    """
    if not config:
      # If config is not provided, choose some reasonable defaults for
      # interactive use:
      #
      #   - Grow GPU memory as needed at the cost of fragmentation.
      gpu_options = config_pb2.GPUOptions(allow_growth=True)
      config = config_pb2.ConfigProto(gpu_options=gpu_options)
    else:
      # FIX: work on a copy so that the caller's ConfigProto is not mutated
      # by the `place_pruned_graph` override below.
      config_copy = config_pb2.ConfigProto()
      config_copy.CopyFrom(config)
      config = config_copy
    # Interactive sessions always place pruned graphs.
    config.graph_options.place_pruned_graph = True

    super(InteractiveSession, self).__init__(target, graph, config)
    # Enter this session (and, if explicitly given, its graph) as the
    # process-wide defaults until close() is called.
    self._default_session = self.as_default()
    self._default_session.enforce_nesting = False
    self._default_session.__enter__()
    self._explicit_graph = graph
    if self._explicit_graph is not None:
      self._default_graph = graph.as_default()
      self._default_graph.enforce_nesting = False
      self._default_graph.__enter__()

  def close(self):
    """Closes an `InteractiveSession` and exits the default contexts."""
    super(InteractiveSession, self).close()
    if self._explicit_graph is not None:
      self._default_graph.__exit__(None, None, None)
    self._default_session.__exit__(None, None, None)
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2006, 2007, 2008 Apple Computer, Inc. All rights reserved.
* Copyright (C) 2007 Alp Toker <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "Gradient.h"
#include "GraphicsContext.h"
#include "PlatformContextCairo.h"
#include <cairo.h>
namespace WebCore {
// Releases the cached cairo pattern (if any) and clears the handle so a
// subsequent platformGradient() call rebuilds it from scratch.
void Gradient::platformDestroy()
{
    if (!m_gradient)
        return;

    cairo_pattern_destroy(m_gradient);
    m_gradient = 0;
}
// Returns the lazily-built cairo pattern at full (1.0) global alpha.
cairo_pattern_t* Gradient::platformGradient()
{
    return platformGradient(1);
}
// Lazily builds (and caches) the cairo pattern for this gradient at the given
// global alpha. The cached pattern is reused until the requested alpha changes.
cairo_pattern_t* Gradient::platformGradient(float globalAlpha)
{
    if (m_gradient && m_platformGradientAlpha == globalAlpha)
        return m_gradient;

    // Alpha changed (or no pattern yet): drop any cached pattern and rebuild.
    platformDestroy();
    m_platformGradientAlpha = globalAlpha;

    if (m_radial)
        m_gradient = cairo_pattern_create_radial(m_p0.x(), m_p0.y(), m_r0, m_p1.x(), m_p1.y(), m_r1);
    else
        m_gradient = cairo_pattern_create_linear(m_p0.x(), m_p0.y(), m_p1.x(), m_p1.y());

    // Add every color stop, scaling each stop's alpha by the global alpha.
    Vector<ColorStop>::iterator stopIterator = m_stops.begin();
    while (stopIterator != m_stops.end()) {
#if !PLATFORM(JS) || USE(ACCELERATED_COMPOSITING)
        cairo_pattern_add_color_stop_rgba(m_gradient, stopIterator->stop,
                                          stopIterator->red, stopIterator->green, stopIterator->blue,
                                          stopIterator->alpha * globalAlpha);
#else
        // Swap the RGBA channels, canvas painting needs to be ARGB
        // but since the channels come out backwards as ABGR we just
        // swap the red and blue to get the same channel format without
        // having to go through all of the pixels and swap them post-blit.
        cairo_pattern_add_color_stop_rgba(m_gradient, stopIterator->stop,
                                          stopIterator->blue, stopIterator->green, stopIterator->red,
                                          stopIterator->alpha * globalAlpha);
#endif
        ++stopIterator;
    }

    switch (m_spreadMethod) {
    case SpreadMethodPad:
        cairo_pattern_set_extend(m_gradient, CAIRO_EXTEND_PAD);
        break;
    case SpreadMethodReflect:
        cairo_pattern_set_extend(m_gradient, CAIRO_EXTEND_REFLECT);
        break;
    case SpreadMethodRepeat:
        cairo_pattern_set_extend(m_gradient, CAIRO_EXTEND_REPEAT);
        break;
    }

    // cairo pattern matrices map user space to pattern space, hence the
    // inverse of the gradient-space transform.
    // NOTE(review): cairo_matrix_invert's status is ignored; a singular
    // transform would leave the matrix in its uninverted state — confirm
    // callers never supply a degenerate gradient-space transform.
    cairo_matrix_t matrix = m_gradientSpaceTransformation;
    cairo_matrix_invert(&matrix);
    cairo_pattern_set_matrix(m_gradient, &matrix);

    return m_gradient;
}
void Gradient::setPlatformGradientSpaceTransform(const AffineTransform& gradientSpaceTransformation)
{
if (m_gradient) {
cairo_matrix_t matrix = gradientSpaceTransformation;
cairo_matrix_invert(&matrix);
cairo_pattern_set_matrix(m_gradient, &matrix);
}
}
// Fills `rect` with this gradient, saving and restoring the context so the
// previous cairo source is not clobbered.
void Gradient::fill(GraphicsContext* context, const FloatRect& rect)
{
    PlatformContextCairo* platformContext = context->platformContext();
    cairo_t* cr = platformContext->cr();

    context->save();
    cairo_set_source(cr, platformGradient());
    cairo_rectangle(cr, rect.x(), rect.y(), rect.width(), rect.height());
    cairo_fill(cr);
    context->restore();
}
} //namespace
| {
"pile_set_name": "Github"
} |
namespace Microsoft.eShopWeb.Infrastructure
{
using eShopWeb.Models;
using Microsoft.AspNetCore.Builder;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
/// <summary>
/// Seeds the catalog database with default brands, types and items.
/// </summary>
public class CatalogContextSeed
{
    /// <summary>
    /// Applies pending migrations and inserts the preconfigured catalog data
    /// for any table that is empty. Retries up to 10 times on failure
    /// (e.g. while the database server is still becoming available).
    /// </summary>
    /// <param name="applicationBuilder">Used to resolve the <c>CatalogContext</c>.</param>
    /// <param name="loggerFactory">Used to log seeding failures.</param>
    /// <param name="retry">Current retry attempt; callers normally omit it.</param>
    public static async Task SeedAsync(IApplicationBuilder applicationBuilder, ILoggerFactory loggerFactory, int? retry = 0)
    {
        // FIX: the original read `retry.Value`, which throws
        // InvalidOperationException when a caller explicitly passes null;
        // GetValueOrDefault() treats null as attempt 0.
        int retryForAvailability = retry.GetValueOrDefault();
        try
        {
            var context = (CatalogContext)applicationBuilder
                .ApplicationServices.GetService(typeof(CatalogContext));

            context.Database.Migrate();

            if (!context.CatalogBrands.Any())
            {
                context.CatalogBrands.AddRange(
                    GetPreconfiguredCatalogBrands());

                await context.SaveChangesAsync();
            }

            if (!context.CatalogTypes.Any())
            {
                context.CatalogTypes.AddRange(
                    GetPreconfiguredCatalogTypes());

                await context.SaveChangesAsync();
            }

            if (!context.CatalogItems.Any())
            {
                context.CatalogItems.AddRange(
                    GetPreconfiguredItems());

                await context.SaveChangesAsync();
            }
        }
        catch (Exception ex)
        {
            var log = loggerFactory.CreateLogger("catalog seed");
            if (retryForAvailability < 10)
            {
                retryForAvailability++;
                log.LogError(ex.Message);
                await SeedAsync(applicationBuilder, loggerFactory, retryForAvailability);
            }
            else
            {
                // FIX: previously the exception was swallowed silently once
                // the retry budget was exhausted; at least record the failure.
                log.LogError(ex, "Catalog seeding failed after {RetryCount} attempts.", retryForAvailability);
            }
        }
    }

    // Default set of catalog brands.
    static IEnumerable<CatalogBrand> GetPreconfiguredCatalogBrands()
    {
        return new List<CatalogBrand>()
        {
            new CatalogBrand() { Brand = "Azure"},
            new CatalogBrand() { Brand = ".NET" },
            new CatalogBrand() { Brand = "Visual Studio" },
            new CatalogBrand() { Brand = "SQL Server" },
            new CatalogBrand() { Brand = "Other" }
        };
    }

    // Default set of catalog item types.
    static IEnumerable<CatalogType> GetPreconfiguredCatalogTypes()
    {
        return new List<CatalogType>()
        {
            new CatalogType() { Type = "Mug"},
            new CatalogType() { Type = "T-Shirt" },
            new CatalogType() { Type = "Sheet" },
            new CatalogType() { Type = "USB Memory Stick" }
        };
    }

    // Default set of catalog items; PictureUri hosts are rewritten at startup.
    static IEnumerable<CatalogItem> GetPreconfiguredItems()
    {
        return new List<CatalogItem>()
        {
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=2, Description = ".NET Bot Black Sweatshirt", Name = ".NET Bot Black Sweatshirt", Price = 19.5M, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/1" },
            new CatalogItem() { CatalogTypeId=1,CatalogBrandId=2, Description = ".NET Black & White Mug", Name = ".NET Black & White Mug", Price= 8.50M, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/2" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=5, Description = "Prism White T-Shirt", Name = "Prism White T-Shirt", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/3" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=2, Description = ".NET Foundation Sweatshirt", Name = ".NET Foundation Sweatshirt", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/4" },
            new CatalogItem() { CatalogTypeId=3,CatalogBrandId=5, Description = "Roslyn Red Sheet", Name = "Roslyn Red Sheet", Price = 8.5M, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/5" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=2, Description = ".NET Blue Sweatshirt", Name = ".NET Blue Sweatshirt", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/6" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=5, Description = "Roslyn Red T-Shirt", Name = "Roslyn Red T-Shirt", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/7" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=5, Description = "Kudu Purple Sweatshirt", Name = "Kudu Purple Sweatshirt", Price = 8.5M, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/8" },
            new CatalogItem() { CatalogTypeId=1,CatalogBrandId=5, Description = "Cup<T> White Mug", Name = "Cup<T> White Mug", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/9" },
            new CatalogItem() { CatalogTypeId=3,CatalogBrandId=2, Description = ".NET Foundation Sheet", Name = ".NET Foundation Sheet", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/10" },
            new CatalogItem() { CatalogTypeId=3,CatalogBrandId=2, Description = "Cup<T> Sheet", Name = "Cup<T> Sheet", Price = 8.5M, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/11" },
            new CatalogItem() { CatalogTypeId=2,CatalogBrandId=5, Description = "Prism White TShirt", Name = "Prism White TShirt", Price = 12, PictureUri = "http://catalogbaseurltobereplaced/catalog/pic/12" }
        };
    }
}
}
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2014-2015 Kohei Takahashi
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef FUSION_SET_11062014_1726
#define FUSION_SET_11062014_1726
#include <boost/fusion/support/config.hpp>
#include <boost/fusion/container/set/set_fwd.hpp>
///////////////////////////////////////////////////////////////////////////////
// Without variadics, we will use the PP version
///////////////////////////////////////////////////////////////////////////////
#if !defined(BOOST_FUSION_HAS_VARIADIC_SET)
# include <boost/fusion/container/set/detail/cpp03/set.hpp>
#else
///////////////////////////////////////////////////////////////////////////////
// C++11 interface
///////////////////////////////////////////////////////////////////////////////
#include <boost/fusion/support/detail/access.hpp>
#include <boost/fusion/support/void.hpp>
#include <boost/fusion/support/detail/enabler.hpp>
#include <boost/fusion/support/sequence_base.hpp>
#include <boost/fusion/support/category_of.hpp>
#include <boost/fusion/support/is_sequence.hpp>
#include <boost/fusion/support/detail/is_same_size.hpp>
#include <boost/fusion/container/vector/vector.hpp>
#include <boost/fusion/container/set/detail/begin_impl.hpp>
#include <boost/fusion/container/set/detail/end_impl.hpp>
#include <boost/fusion/container/set/detail/value_of_impl.hpp>
#include <boost/fusion/container/set/detail/deref_data_impl.hpp>
#include <boost/fusion/container/set/detail/deref_impl.hpp>
#include <boost/fusion/container/set/detail/key_of_impl.hpp>
#include <boost/fusion/container/set/detail/value_of_data_impl.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/core/enable_if.hpp>
namespace boost { namespace fusion
{
struct fusion_sequence_tag;
// Empty-set specialization: an associative Fusion sequence with no elements.
template <>
struct set<> : sequence_base<set<> >
{
    // Sets are forward-traversal, associative Fusion sequences.
    struct category : forward_traversal_tag, associative_tag {};

    typedef set_tag fusion_tag;
    typedef fusion_sequence_tag tag; // this gets picked up by MPL
    typedef mpl::false_ is_view;

    // Elements are stored in a Fusion vector (empty for this specialization).
    typedef vector<> storage_type;

    typedef storage_type::size size;

    BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
    set()
        : data() {}

    // Converting constructor: SFINAE-restricted to Fusion sequences of the
    // same size (i.e. zero elements here).
    template <typename Sequence>
    BOOST_FUSION_GPU_ENABLED
    set(Sequence const& rhs,
        typename enable_if<traits::is_sequence<Sequence>, detail::enabler_>::type = detail::enabler,
        typename enable_if<detail::is_same_size<Sequence, storage_type>, detail::enabler_>::type = detail::enabler)
        : data(rhs) {}

    template <typename T>
    BOOST_CXX14_CONSTEXPR BOOST_FUSION_GPU_ENABLED
    set&
    operator=(T const& rhs)
    {
        data = rhs;
        return *this;
    }

    // Accessors used by the set's intrinsic operations (begin/end/etc.).
    BOOST_CXX14_CONSTEXPR BOOST_FUSION_GPU_ENABLED
    storage_type& get_data() { return data; }
    BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
    storage_type const& get_data() const { return data; }

private:
    storage_type data;
};
template <typename ...T>
struct set : sequence_base<set<T...> >
{
struct category : forward_traversal_tag, associative_tag {};
typedef set_tag fusion_tag;
typedef fusion_sequence_tag tag; // this gets picked up by MPL
typedef mpl::false_ is_view;
typedef vector<T...> storage_type;
typedef typename storage_type::size size;
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
set()
: data() {}
template <typename Sequence>
BOOST_FUSION_GPU_ENABLED
set(Sequence&& rhs,
typename enable_if<traits::is_sequence<Sequence>, detail::enabler_>::type = detail::enabler,
typename enable_if<detail::is_same_size<Sequence, storage_type>, detail::enabler_>::type = detail::enabler)
: data(std::forward<Sequence>(rhs)) {}
template <typename ...U>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
explicit
set(U&& ...args)
: data(std::forward<U>(args)...) {}
template <typename U>
BOOST_CXX14_CONSTEXPR BOOST_FUSION_GPU_ENABLED
set&
operator=(U&& rhs)
{
data = std::forward<U>(rhs);
return *this;
}
BOOST_CXX14_CONSTEXPR BOOST_FUSION_GPU_ENABLED
storage_type& get_data() { return data; }
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
storage_type const& get_data() const { return data; }
private:
storage_type data;
};
}}
#endif
#endif
| {
"pile_set_name": "Github"
} |
/*
OOJSFrameCallbacks.m
Copyright (C) 2011-2013 Jens Ayton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#import "OOJSFrameCallbacks.h"
#import "OOJSEngineTimeManagement.h"
#import "OOCollectionExtractors.h"
/*
By default, tracking IDs are scrambled to discourage people from trying to
be clever or making assumptions about them. If DEBUG_FCB_SIMPLE_TRACKING_IDS
is non-zero, tracking IDs starting from 1 and rising monotonously are used
instead. Additionally, the next ID is reset to 1 when all frame callbacks
are removed.
*/
#ifndef DEBUG_FCB_SIMPLE_TRACKING_IDS
#define DEBUG_FCB_SIMPLE_TRACKING_IDS 0
#endif
#ifndef DEBUG_FCB_VERBOSE_LOGGING
#define DEBUG_FCB_VERBOSE_LOGGING 0
#endif
#if defined (NDEBUG) && DEBUG_FCB_SIMPLE_TRACKING_IDS
#error Deployment builds may not be built with DEBUG_FCB_SIMPLE_TRACKING_IDS.
#endif
#if DEBUG_FCB_VERBOSE_LOGGING
#define FCBLog OOLog
#define FCBLogIndentIf OOLogIndentIf
#define FCBLogOutdentIf OOLogOutdentIf
#else
#define FCBLog(...) do {} while (0)
#define FCBLogIndentIf(key) do {} while (0)
#define FCBLogOutdentIf(key) do {} while (0)
#endif
enum
{
	kMinCount = 16,                  // Initial capacity of the callback list.
#if DEBUG_FCB_SIMPLE_TRACKING_IDS
	kIDScrambleMask = 0,
	kIDIncrement = 1
#else
	kIDScrambleMask = 0x2315EB16,    // Just a random number.
	kIDIncrement = 992699            // A large prime number, to produce a non-obvious sequence which still uses all 2^32 values.
#endif
};

typedef struct
{
	jsval callback;     // The JS function to call each frame; GC-rooted while its slot is below the high water mark.
	uint32 trackingID;  // Public handle handed back to scripts.
	uint32 _padding;
} CallbackEntry;

static CallbackEntry *sCallbacks;
static NSUInteger sCount;           // Number of slots in use.
static NSUInteger sSpace;           // Number of slots allocated.
static NSUInteger sHighWaterMark;   // Number of slots which are GC roots.
static NSMutableArray *sDeferredOps; // Deferred adds/removes while running.
static uint32 sNextID;              // Next tracking ID to hand out (pre-scramble).
static BOOL sRunning;               // True while callbacks are being invoked; blocks direct list mutation.

// Methods
static JSBool GlobalAddFrameCallback(JSContext *context, uintN argc, jsval *vp);
static JSBool GlobalRemoveFrameCallback(JSContext *context, uintN argc, jsval *vp);
static JSBool GlobalIsValidFrameCallback(JSContext *context, uintN argc, jsval *vp);

// Internals
static BOOL AddCallback(JSContext *context, jsval callback, uint32 trackingID, NSString **errorString);
static BOOL GrowCallbackList(JSContext *context, NSString **errorString);
static BOOL GetIndexForTrackingID(uint32 trackingID, NSUInteger *outIndex);
static BOOL RemoveCallbackWithTrackingID(JSContext *context, uint32 trackingID);
static void RemoveCallbackAtIndex(JSContext *context, NSUInteger index);
static void QueueDeferredOperation(NSString *opType, uint32 trackingID, OOJSValue *value);
static void RunDeferredOperations(JSContext *context);
// MARK: Public
// Registers the frame-callback API (addFrameCallback, removeFrameCallback,
// isValidFrameCallback) on the JS global object and seeds the tracking-ID
// sequence.
void InitOOJSFrameCallbacks(JSContext *context, JSObject *global)
{
	JS_DefineFunction(context, global, "addFrameCallback", GlobalAddFrameCallback, 1, OOJS_METHOD_READONLY);
	JS_DefineFunction(context, global, "removeFrameCallback", GlobalRemoveFrameCallback, 1, OOJS_METHOD_READONLY);
	JS_DefineFunction(context, global, "isValidFrameCallback", GlobalIsValidFrameCallback, 1, OOJS_METHOD_READONLY);
#if DEBUG_FCB_SIMPLE_TRACKING_IDS
	sNextID = 1;
#else
	// Set randomish initial ID to catch bad habits.
	sNextID = [[NSDate date] timeIntervalSinceReferenceDate];
#endif
}
// Invokes every registered frame callback once, passing the time delta scaled
// by the universe's time-acceleration factor. While the loop runs, sRunning
// blocks direct list mutation; adds/removes requested by scripts during this
// time are queued in sDeferredOps and applied afterwards.
void OOJSFrameCallbacksInvoke(OOTimeDelta inDeltaT)
{
	NSCAssert1(!sRunning, @"%s cannot be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	if (sCount != 0)
	{
		const OOTimeDelta delta = inDeltaT * [UNIVERSE timeAccelerationFactor];
		JSContext *context = OOJSAcquireContext();
		jsval deltaVal, result;
		NSUInteger i;
		if (EXPECT(JS_NewNumberValue(context, delta, &deltaVal)))
		{
			// Block mutations.
			sRunning = YES;
			/*
				The watchdog timer only fires once per second in deployment builds,
				but in testrelease builds at least we can keep them on a short leash.
			*/
			OOJSStartTimeLimiterWithTimeLimit(0.1);
			for (i = 0; i < sCount; i++)
			{
				// TODO: remove out of scope callbacks - post MNSR!
				JS_CallFunctionValue(context, NULL, sCallbacks[i].callback, 1, &deltaVal, &result);
				// Report (and clear) any pending exception so one failing
				// callback cannot poison the rest of the loop.
				JS_ReportPendingException(context);
			}
			OOJSStopTimeLimiter();
			sRunning = NO;
			// Apply any adds/removes scripts requested while we iterated.
			if (EXPECT_NOT(sDeferredOps != NULL))
			{
				RunDeferredOperations(context);
				DESTROY(sDeferredOps);
			}
		}
		OOJSRelinquishContext(context);
	}
}
// Removes every registered frame callback. Must not be called while
// callbacks are being invoked.
void OOJSFrameCallbacksRemoveAll(void)
{
	NSCAssert1(!sRunning, @"%s cannot be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	if (sCount == 0)  return;
	
	JSContext *context = OOJSAcquireContext();
	do
	{
		// Removing from the tail avoids the swap-with-last shuffle.
		RemoveCallbackAtIndex(context, sCount - 1);
	} while (sCount != 0);
	OOJSRelinquishContext(context);
}
// MARK: Methods
// addFrameCallback(callback : Function) : Number
static JSBool GlobalAddFrameCallback(JSContext *context, uintN argc, jsval *vp)
{
	OOJS_NATIVE_ENTER(context)
	
	/*	Fix: validate argc before touching OOJS_ARGV. The previous code read
		OOJS_ARGV[0] unconditionally, indexing past the supplied argument
		slots when the function was called with no arguments. The sibling
		natives (removeFrameCallback, isValidFrameCallback) already check
		argc first.
	*/
	if (EXPECT_NOT(argc < 1))
	{
		OOJSReportBadArguments(context, nil, @"addFrameCallback", 0, OOJS_ARGV, nil, @"function");
		return NO;
	}
	
	// Get callback argument and verify that it's a function.
	jsval callback = OOJS_ARGV[0];
	if (EXPECT_NOT(!OOJSValueIsFunction(context, callback)))
	{
		OOJSReportBadArguments(context, nil, @"addFrameCallback", 1, OOJS_ARGV, nil, @"function");
		return NO;
	}
	
	// Assign a tracking ID.
	uint32 trackingID = sNextID ^ kIDScrambleMask;
	sNextID += kIDIncrement;
	
	if (EXPECT(!sRunning))
	{
		// Add to list immediately.
		NSString *errorString = nil;
		if (EXPECT_NOT(!AddCallback(context, callback, trackingID, &errorString)))
		{
			OOJSReportError(context, @"%@", errorString);
			return NO;
		}
	}
	else
	{
		// Defer mutations during callback invocation.
		FCBLog(@"script.frameCallback.debug.add.deferred", @"Deferring addition of frame callback with tracking ID %u.", trackingID);
		QueueDeferredOperation(@"add", trackingID, [OOJSValue valueWithJSValue:callback inContext:context]);
	}
	
	OOJS_RETURN_INT(trackingID);
	
	OOJS_NATIVE_EXIT
}
// removeFrameCallback(trackingID : Number)
// Unregisters the callback identified by trackingID; logs a JS warning if the
// ID is unknown. During callback invocation the removal is deferred.
static JSBool GlobalRemoveFrameCallback(JSContext *context, uintN argc, jsval *vp)
{
	OOJS_NATIVE_ENTER(context)
	
	// Get tracking ID argument. (OOJS_ARGV[0] is only read when argc >= 1,
	// thanks to short-circuit evaluation.)
	uint32 trackingID;
	if (EXPECT_NOT(argc < 1 || !JS_ValueToECMAUint32(context, OOJS_ARGV[0], &trackingID)))
	{
		OOJSReportBadArguments(context, nil, @"removeFrameCallback", MIN(argc, 1U), OOJS_ARGV, nil, @"frame callback tracking ID");
		return NO;
	}
	
	if (EXPECT(!sRunning))
	{
		// Remove it.
		if (EXPECT_NOT(!RemoveCallbackWithTrackingID(context, trackingID)))
		{
			OOJSReportWarning(context, @"removeFrameCallback(): invalid tracking ID.");
		}
	}
	else
	{
		// Defer mutations during callback invocation.
		FCBLog(@"script.frameCallback.debug.remove.deferred", @"Deferring removal of frame callback with tracking ID %u.", trackingID);
		QueueDeferredOperation(@"remove", trackingID, nil);
	}
	
	OOJS_RETURN_VOID;
	
	OOJS_NATIVE_EXIT
}
// isValidFrameCallback(trackingID : Number)
// Returns true if trackingID identifies a currently-registered callback.
// Note: only the live list is consulted; operations still queued in
// sDeferredOps are not reflected.
static JSBool GlobalIsValidFrameCallback(JSContext *context, uintN argc, jsval *vp)
{
	OOJS_NATIVE_ENTER(context)
	
	if (EXPECT_NOT(argc < 1))
	{
		OOJSReportBadArguments(context, nil, @"isValidFrameCallback", 0, OOJS_ARGV, nil, @"frame callback tracking ID");
		return NO;
	}
	
	// Get tracking ID argument. A non-numeric value simply yields false.
	uint32 trackingID;
	if (EXPECT_NOT(!JS_ValueToECMAUint32(context, OOJS_ARGV[0], &trackingID)))
	{
		OOJS_RETURN_BOOL(NO);
	}
	
	NSUInteger index;
	OOJS_RETURN_BOOL(GetIndexForTrackingID(trackingID, &index));
	
	OOJS_NATIVE_EXIT
}
// MARK: Internals
// Appends a callback to the live list, growing the list if it is full.
// Slots that have never been used before are registered as GC roots; slots
// below sHighWaterMark are already rooted and are simply reused. Returns NO
// and sets *errorString on failure. Must not run during callback invocation.
static BOOL AddCallback(JSContext *context, jsval callback, uint32 trackingID, NSString **errorString)
{
	NSCParameterAssert(context != NULL && JS_IsInRequest(context));
	NSCParameterAssert(errorString != NULL);
	NSCAssert1(!sRunning, @"%s cannot be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	if (EXPECT_NOT(sCount == sSpace))
	{
		if (!GrowCallbackList(context, errorString))  return NO;
	}
	
	FCBLog(@"script.frameCallback.debug.add", @"Adding frame callback with tracking ID %u.", trackingID);
	
	sCallbacks[sCount].callback = callback;
	if (sCount >= sHighWaterMark)
	{
		// If we haven't used this slot before, root it.
		if (EXPECT_NOT(!OOJSAddGCValueRoot(context, &sCallbacks[sCount].callback, "frame callback")))
		{
			*errorString = @"Failed to add GC root for frame callback.";
			return NO;
		}
		sHighWaterMark = sCount + 1;
	}
	
	sCallbacks[sCount].trackingID = trackingID;
	sCount++;
	
	return YES;
}
// Replaces the callback array with one of at least double the capacity
// (minimum kMinCount). The occupied slots of the new buffer are GC-rooted
// before the old slots are unrooted, so callbacks stay rooted throughout.
// On failure the old list is left untouched, *errorString is set, and NO is
// returned.
static BOOL GrowCallbackList(JSContext *context, NSString **errorString)
{
	NSCParameterAssert(context != NULL && JS_IsInRequest(context));
	NSCParameterAssert(errorString != NULL);
	
	NSUInteger newSpace = MAX(sSpace * 2, (NSUInteger)kMinCount);
	
	// calloc(count, size) — also zero-fills the unused slots.
	CallbackEntry *newCallbacks = calloc(newSpace, sizeof (CallbackEntry));
	if (newCallbacks == NULL)
	{
		/*	Fix: this path previously returned NO without assigning
			*errorString, so GlobalAddFrameCallback reported a nil message.
		*/
		*errorString = @"Failed to allocate memory for frame callbacks.";
		return NO;
	}
	
	CallbackEntry *oldCallbacks = sCallbacks;
	
	// Root and copy occupied slots.
	NSUInteger newHighWaterMark = sCount;
	NSUInteger i;
	for (i = 0; i < newHighWaterMark; i++)
	{
		if (EXPECT_NOT(!OOJSAddGCValueRoot(context, &newCallbacks[i].callback, "frame callback")))
		{
			// If we can't root them all, we fail; unroot all entries to date, free the buffer and return NO.
			NSUInteger j;
			for (j = 0; j < i; j++)
			{
				JS_RemoveValueRoot(context, &newCallbacks[j].callback);
			}
			free(newCallbacks);
			*errorString = @"Failed to add GC root for frame callback.";
			return NO;
		}
		newCallbacks[i] = oldCallbacks[i];
	}
	
	// Unroot old array's slots.
	for (i = 0; i < sHighWaterMark; i++)
	{
		JS_RemoveValueRoot(context, &oldCallbacks[i].callback);
	}
	
	// We only rooted the occupied slots, so reset high water mark.
	sHighWaterMark = newHighWaterMark;
	
	// Replace array.
	sCallbacks = newCallbacks;
	free(oldCallbacks);
	sSpace = newSpace;
	
	return YES;
}
// Looks up the list index for a tracking ID. Returns YES and writes the
// index to *outIndex on success, NO if the ID is not registered.
static BOOL GetIndexForTrackingID(uint32 trackingID, NSUInteger *outIndex)
{
	NSCParameterAssert(outIndex != NULL);
	
	/*	Few frame callbacks are expected to be active at once, so a linear
		scan is fine. Should that assumption break, this can be swapped for
		a sorted list or lookup table without touching the API.
	*/
	NSUInteger idx;
	for (idx = 0; idx < sCount; idx++)
	{
		if (sCallbacks[idx].trackingID != trackingID)  continue;
		
		*outIndex = idx;
		return YES;
	}
	
	return NO;
}
// Removes the callback identified by trackingID. Returns NO if no such
// callback is registered. Must not run during callback invocation.
static BOOL RemoveCallbackWithTrackingID(JSContext *context, uint32 trackingID)
{
	NSCParameterAssert(context != NULL && JS_IsInRequest(context));
	NSCAssert1(!sRunning, @"%s cannot be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	NSUInteger index = 0;
	if (!GetIndexForTrackingID(trackingID, &index))  return NO;
	
	RemoveCallbackAtIndex(context, index);
	return YES;
}
// Removes the callback at the given index using swap-with-last: the final
// entry overwrites the removed slot and the freed slot's jsval is cleared to
// JSVAL_NULL. The slot stays GC-rooted (high-water-mark scheme) so it can be
// reused without re-rooting; see AddCallback/GrowCallbackList.
static void RemoveCallbackAtIndex(JSContext *context, NSUInteger index)
{
	NSCParameterAssert(context != NULL && JS_IsInRequest(context));
	NSCParameterAssert(index < sCount && sCallbacks != NULL);
	NSCAssert1(!sRunning, @"%s cannot be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	FCBLog(@"script.frameCallback.debug.remove", @"Removing frame callback with tracking ID %u.", sCallbacks[index].trackingID);
	
	// Overwrite entry to be removed with last entry, and decrement count.
	sCount--;
	sCallbacks[index] = sCallbacks[sCount];
	sCallbacks[sCount].callback = JSVAL_NULL;
	
#if DEBUG_FCB_SIMPLE_TRACKING_IDS
	if (sCount == 0)
	{
		OOLog(@"script.frameCallback.debug.reset", @"All frame callbacks removed, resetting next ID to 1.");
		sNextID = 1;
	}
#endif
}
// Queues an add/remove operation to be applied after the current callback
// invocation pass finishes. `value` is the callback for "add" operations and
// nil for "remove" operations (in which case no "value" key is stored).
static void QueueDeferredOperation(NSString *opType, uint32 trackingID, OOJSValue *value)
{
	NSCAssert1(sRunning, @"%s can only be called while frame callbacks are running.", __PRETTY_FUNCTION__);
	
	if (sDeferredOps == nil)  sDeferredOps = [[NSMutableArray alloc] init];
	
	NSMutableDictionary *op = [NSMutableDictionary dictionaryWithCapacity:3];
	[op setObject:opType forKey:@"operation"];
	[op setObject:[NSNumber numberWithInt:trackingID] forKey:@"trackingID"];
	if (value != nil)  [op setObject:value forKey:@"value"];
	
	[sDeferredOps addObject:op];
}
// Applies the add/remove operations queued by QueueDeferredOperation while
// callbacks were running. Called from OOJSFrameCallbacksInvoke after the
// invocation loop clears sRunning.
static void RunDeferredOperations(JSContext *context)
{
	NSDictionary *operation = nil;
	NSEnumerator *operationEnum = nil;
	
	FCBLog(@"script.frameCallback.debug.run-deferred", @"Running %lu deferred frame callback operations.", (long)[sDeferredOps count]);
	FCBLogIndentIf(@"script.frameCallback.debug.run-deferred");
	
	for (operationEnum = [sDeferredOps objectEnumerator]; (operation = [operationEnum nextObject]); )
	{
		NSString *opType = [operation objectForKey:@"operation"];
		uint32 trackingID = [operation oo_intForKey:@"trackingID"];
		
		if ([opType isEqualToString:@"add"])
		{
			OOJSValue *callbackObj = [operation objectForKey:@"value"];
			NSString *errorString = nil;
			
			// A failed deferred add is logged rather than reported as a JS
			// error, since the originating script call has already returned.
			if (!AddCallback(context, OOJSValueFromNativeObject(context, callbackObj), trackingID, &errorString))
			{
				OOLogWARN(@"script.frameCallback.deferredAdd.failed", @"Deferred frame callback insertion failed: %@", errorString);
			}
		}
		else if ([opType isEqualToString:@"remove"])
		{
			RemoveCallbackWithTrackingID(context, trackingID);
		}
	}
	
	FCBLogOutdentIf(@"script.frameCallback.debug.run-deferred");
}
| {
"pile_set_name": "Github"
} |
pp5 5695b8193095a63b9f397fff8343d1a6
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 1998, 1999, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.tools.jdi;
import com.sun.jdi.*;
/**
 * Mirror of the primitive {@code byte} type on the target VM.
 */
public class ByteTypeImpl extends PrimitiveTypeImpl implements ByteType {

    ByteTypeImpl(VirtualMachine vm) {
        super(vm);
    }

    /** JNI-style type signature for {@code byte}. */
    public String signature() {
        char tag = (char) JDWP.Tag.BYTE;
        return Character.toString(tag);
    }

    /** Converts a primitive value to a byte mirror, range-checking the value. */
    PrimitiveValue convert(PrimitiveValue value) throws InvalidTypeException {
        PrimitiveValueImpl impl = (PrimitiveValueImpl) value;
        return vm.mirrorOf(impl.checkedByteValue());
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Safe path resolving with a module root dir
*
* Copyright (C) 2014, 2016 Per Lundqvist
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.github.perlundq.yajsync.server.module;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.util.Objects;
import java.util.regex.Pattern;
import com.github.perlundq.yajsync.RsyncSecurityException;
import com.github.perlundq.yajsync.internal.text.Text;
import com.github.perlundq.yajsync.internal.util.PathOps;
/**
* A RestrictedPath is a representation of a module and its root
* directory path that provides robust semantics for safely resolving
* any untrusted path coming from a possible external source. It
* allows resolving of any path that is below the module root
* directory and will throw a RsyncSecurityException for any other
* path.
*/
public final class RestrictedPath
{
    // Module names are restricted to word characters only; enforced in the
    // constructor.
    private static final Pattern MODULE_REGEX = Pattern.compile("^\\w+$");

    private final String _moduleName;
    private final Path _rootPath;   // absolute, normalized module root
    private final Path _dotDir;     // "." in the root path's file system
    private final Path _dotDotDir;  // ".." in the root path's file system

    /**
     * @param moduleName
     * @param rootPath the absolute path to the module top directory.
     */
    public RestrictedPath(String moduleName, Path rootPath)
    {
        if (!MODULE_REGEX.matcher(moduleName).matches()) {
            throw new IllegalArgumentException(String.format(
                "rsync module must consist of alphanumeric characters " +
                "and underscore only: %s", moduleName));
        }
        assert rootPath.isAbsolute() : rootPath;
        _moduleName = moduleName;
        _rootPath = rootPath.normalize();
        _dotDir = _rootPath.getFileSystem().getPath(Text.DOT);
        _dotDotDir = _rootPath.getFileSystem().getPath(Text.DOT_DOT);
    }

    @Override
    public String toString()
    {
        return String.format("%s(name=%s, root=%s)", getClass().getSimpleName(),
                             _moduleName, _rootPath);
    }

    @Override
    public boolean equals(Object other)
    {
        // Two RestrictedPaths are equal iff both module name and root match.
        if (other != null && other.getClass() == getClass()) {
            RestrictedPath otherPath = (RestrictedPath) other;
            return _moduleName.equals(otherPath._moduleName) &&
                   _rootPath.equals(otherPath._rootPath);
        }
        return false;
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(_moduleName, _rootPath);
    }

    /**
     * Safely resolves an untrusted path string against the module root.
     *
     * @throws RsyncSecurityException if the path is syntactically invalid,
     *         does not start with the module name, or still contains ".."
     *         after resolution (i.e. would escape the module root).
     */
    public Path resolve(String pathName) throws RsyncSecurityException
    {
        try {
            Path otherPath = PathOps.get(_rootPath.getFileSystem(), pathName);
            Path resolved = resolve(otherPath);
            // Belt-and-braces: even after normalization, reject anything that
            // still carries a ".." component.
            if (PathOps.contains(resolved, _dotDotDir)) {
                throw new RsyncSecurityException(String.format(
                    "resolved path of %s contains ..: %s",
                    pathName, resolved));
            }
            return resolved;
        } catch (InvalidPathException e) {
            throw new RsyncSecurityException(e);
        }
    }

    /**
     * resolve other in a secure manner without any call to stat.
     *
     * The path must begin with the module name; that leading component is
     * stripped and the remainder is resolved under the module root. A
     * trailing "." on the input is preserved on the result.
     *
     * @throws RsyncSecurityException
     */
    private Path resolve(Path path) throws RsyncSecurityException
    {
        Path result;
        Path normalized = path.normalize();
        if (normalized.startsWith(_moduleName)) {
            if (normalized.getNameCount() == 1) {
                // Just the module name: refers to the root itself.
                result = _rootPath;
            } else {
                Path strippedOfModulePrefix =
                    normalized.subpath(1, normalized.getNameCount());
                result = _rootPath.resolve(strippedOfModulePrefix).normalize();
            }
        } else {
            throw new RsyncSecurityException(String.format(
                "\"%s\" is outside virtual dir for module %s",
                path, _moduleName));
        }
        if (path.endsWith(_dotDir)) {
            return result.resolve(_dotDir);
        } else {
            return result;
        }
    }
}
| {
"pile_set_name": "Github"
} |
#import "TGVideoCameraPipeline.h"
#import "LegacyComponentsInternal.h"
#import <libkern/OSAtomic.h>
#import <CoreMedia/CoreMedia.h>
#import <ImageIO/ImageIO.h>
#import <Accelerate/Accelerate.h>
#import <LegacyComponents/TGVideoCameraGLRenderer.h>
#import <LegacyComponents/TGVideoCameraMovieRecorder.h>
#import <LegacyComponents/TGMediaVideoConverter.h>
// Recorder lifecycle states.
typedef enum {
	TGVideoCameraRecordingStatusIdle = 0,
	TGVideoCameraRecordingStatusStartingRecording,
	TGVideoCameraRecordingStatusRecording,
	TGVideoCameraRecordingStatusStoppingRecording,
} TGVideoCameraRecordingStatus;

// Buffer-count hint passed to the GL renderer.
const NSInteger TGVideoCameraRetainedBufferCount = 16;

@interface TGVideoCameraPipeline () <AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate, TGVideoCameraMovieRecorderDelegate>
{
	// Capture graph: session plus video/audio device, input, output and connection.
	AVCaptureSession *_captureSession;
	AVCaptureDevice *_videoDevice;
	AVCaptureConnection *_videoConnection;
	AVCaptureDeviceInput *_videoInput;
	AVCaptureVideoDataOutput *_videoOutput;
	AVCaptureDevice *_audioDevice;
	AVCaptureConnection *_audioConnection;
	AVCaptureDeviceInput *_audioInput;
	AVCaptureAudioDataOutput *_audioOutput;
	AVCaptureVideoOrientation _videoBufferOrientation;
	AVCaptureDevicePosition _preferredPosition;
	
	// Session lifecycle flags.
	bool _running;
	bool _startCaptureSessionOnEnteringForeground;
	id _applicationWillEnterForegroundObserver;
	
	// Serial queues for the audio and video sample-buffer delegates.
	dispatch_queue_t _audioDataOutputQueue;
	dispatch_queue_t _videoDataOutputQueue;
	
	// GL renderer for processing video frames.
	TGVideoCameraGLRenderer *_renderer;
	bool _renderingEnabled;
	
	// Movie recording state.
	TGVideoCameraMovieRecorder *_recorder;
	NSURL *_recordingURL;
	TGVideoCameraRecordingStatus _recordingStatus;
	UIImage *_recordingThumbnail;
	
	// Delegate and the queue its callbacks are delivered on.
	__weak id<TGVideoCameraPipelineDelegate> _delegate;
	dispatch_queue_t _delegateCallbackQueue;
	
	NSTimeInterval _resultDuration;
	CVPixelBufferRef _previousPixelBuffer;
	int32_t _repeatingCount;
	
	// Mic-level metering accumulator (see processWaveformPreview:count:).
	int16_t _micLevelPeak;
	int _micLevelPeakCount;
	
	TGMediaVideoConversionPreset _preset;
	bool _liveUpload;
	id<TGLiveUploadInterface> _watcher;
	id _liveUploadData;
	
	// Guards the deferred-start handshake between audio and main threads.
	OSSpinLock _recordLock;
	bool _startRecordAfterAudioBuffer;
	
	CVPixelBufferRef _currentPreviewPixelBuffer;
	
	// Thumbnail frames captured during recording, keyed by time.
	NSMutableDictionary *_thumbnails;
	NSTimeInterval _firstThumbnailTime;
	NSTimeInterval _previousThumbnailTime;
	
	id<TGLiveUploadInterface> _liveUploadInterface;
}

@property (nonatomic, strong) __attribute__((NSObject)) CMFormatDescriptionRef outputVideoFormatDescription;
@property (nonatomic, strong) __attribute__((NSObject)) CMFormatDescriptionRef outputAudioFormatDescription;
@end
@implementation TGVideoCameraPipeline
// Designated initializer. `position` selects the preferred camera,
// `queue` is the queue delegate callbacks are dispatched on.
- (instancetype)initWithDelegate:(id<TGVideoCameraPipelineDelegate>)delegate position:(AVCaptureDevicePosition)position callbackQueue:(dispatch_queue_t)queue liveUploadInterface:(id<TGLiveUploadInterface>)liveUploadInterface
{
	self = [super init];
	if (self != nil)
	{
		_liveUploadInterface = liveUploadInterface;
		_preferredPosition = position;
		
		// Video frames are processed on a dedicated high-priority serial queue.
		_videoDataOutputQueue = dispatch_queue_create("org.telegram.VideoCameraPipeline.video", DISPATCH_QUEUE_SERIAL);
		dispatch_set_target_queue(_videoDataOutputQueue, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
		
		_renderer = [[TGVideoCameraGLRenderer alloc] init];
		
		_delegate = delegate;
		_delegateCallbackQueue = queue;
		
		_thumbnails = [[NSMutableDictionary alloc] init];
	}
	return self;
}
- (void)dealloc
{
	// NOTE(review): printf looks like leftover debug output — consider a
	// proper logging macro.
	printf("Camera pipeline dealloc\n");
	[self destroyCaptureSession];
}
// Builds (if needed) and starts the capture session on the shared camera queue.
- (void)startRunning
{
	[[TGVideoCameraPipeline cameraQueue] dispatch:^
	{
		[self setupCaptureSession];
		
		if (_captureSession != nil)
		{
			[_captureSession startRunning];
			_running = true;
		}
	}];
}
// Stops any active recording, stops the session and tears everything down,
// all on the shared camera queue.
- (void)stopRunning
{
	[[TGVideoCameraPipeline cameraQueue] dispatch:^
	{
		_running = false;
		
		[self stopRecording:^{}];
		
		[_captureSession stopRunning];
		[self captureSessionDidStopRunning];
		[self destroyCaptureSession];
	}];
}
// Builds the capture graph: session, audio input/output, camera matching the
// preferred position, and a BGRA video data output. Idempotent — returns
// immediately if a session already exists.
- (void)setupCaptureSession
{
	if (_captureSession != nil)
		return;
	
	_captureSession = [[AVCaptureSession alloc] init];
	// The app manages its own AVAudioSession configuration.
	_captureSession.automaticallyConfiguresApplicationAudioSession = false;
	_captureSession.usesApplicationAudioSession = true;
	
	// name:nil — observe every notification posted by this session.
	[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(captureSessionNotification:) name:nil object:_captureSession];
	_applicationWillEnterForegroundObserver = [[NSNotificationCenter defaultCenter] addObserverForName:UIApplicationWillEnterForegroundNotification object:[[LegacyComponentsGlobals provider] applicationInstance] queue:nil usingBlock:^(__unused NSNotification *note)
	{
		[self applicationWillEnterForeground];
	}];
	
	// Audio capture path.
	_audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
	_audioInput = [[AVCaptureDeviceInput alloc] initWithDevice:_audioDevice error:nil];
	if ([_captureSession canAddInput:_audioInput])
		[_captureSession addInput:_audioInput];
	
	_audioOutput = [[AVCaptureAudioDataOutput alloc] init];
	_audioDataOutputQueue = dispatch_queue_create("org.telegram.VideoCameraPipeline.audio", DISPATCH_QUEUE_SERIAL);
	[_audioOutput setSampleBufferDelegate:self queue:_audioDataOutputQueue];
	
	if ([_captureSession canAddOutput:_audioOutput])
		[_captureSession addOutput:_audioOutput];
	_audioConnection = [_audioOutput connectionWithMediaType:AVMediaTypeAudio];
	
	// Pick the camera matching the preferred position, falling back to the default.
	NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
	AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
	for (AVCaptureDevice *device in devices)
	{
		if (device.position == _preferredPosition)
		{
			videoDevice = device;
			break;
		}
	}
	
	// Front camera output is mirrored by the renderer.
	_renderer.mirror = (videoDevice.position == AVCaptureDevicePositionFront);
	_renderer.orientation = _orientation;
	
	NSError *videoDeviceError = nil;
	_videoInput = [[AVCaptureDeviceInput alloc] initWithDevice:videoDevice error:&videoDeviceError];
	if ([_captureSession canAddInput:_videoInput])
	{
		[_captureSession addInput:_videoInput];
		_videoDevice = videoDevice;
	}
	else
	{
		[self handleNonRecoverableCaptureSessionRuntimeError:videoDeviceError];
		return;
	}
	
	// BGRA output for the GL renderer; late frames are kept, not dropped.
	_videoOutput = [[AVCaptureVideoDataOutput alloc] init];
	_videoOutput.alwaysDiscardsLateVideoFrames = false;
	_videoOutput.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
	[_videoOutput setSampleBufferDelegate:self queue:_videoDataOutputQueue];
	
	if ([_captureSession canAddOutput:_videoOutput])
		[_captureSession addOutput:_videoOutput];
	_videoConnection = [_videoOutput connectionWithMediaType:AVMediaTypeVideo];
	
	// Prefer 640x480; otherwise fall back to the medium preset.
	if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset640x480])
		_captureSession.sessionPreset = AVCaptureSessionPreset640x480;
	else
		_captureSession.sessionPreset = AVCaptureSessionPresetMedium;
	
	[self _configureFPS];
	
	[self _enableLowLightBoost];
	[self _enableVideoStabilization];
	
	_videoBufferOrientation = _videoConnection.videoOrientation;
}
// Unregisters observers, detaches all inputs/outputs inside a single
// begin/commitConfiguration transaction and releases the session objects.
- (void)destroyCaptureSession
{
	if (_captureSession)
	{
		[[NSNotificationCenter defaultCenter] removeObserver:self name:nil object:_captureSession];
		[[NSNotificationCenter defaultCenter] removeObserver:_applicationWillEnterForegroundObserver];
		_applicationWillEnterForegroundObserver = nil;
		
		[_captureSession beginConfiguration];
		[_captureSession removeOutput:_videoOutput];
		[_captureSession removeInput:_videoInput];
		[_captureSession removeOutput:_audioOutput];
		[_captureSession removeInput:_audioInput];
		[_captureSession commitConfiguration];
		
		_audioInput = nil;
		_audioDevice = nil;
		_audioOutput = nil;
		_audioConnection = nil;
		
		_videoInput = nil;
		_videoDevice = nil;
		_videoOutput = nil;
		_videoConnection = nil;
		
		_captureSession = nil;
	}
}
// Handles all notifications from the capture session (registered with
// name:nil). Interruptions stop the pipeline; runtime errors are triaged
// into background-unavailability (restart on foreground), recoverable
// (media services reset) and non-recoverable cases.
- (void)captureSessionNotification:(NSNotification *)notification
{
	[[TGVideoCameraPipeline cameraQueue] dispatch:^
	{
		if ([notification.name isEqualToString:AVCaptureSessionWasInterruptedNotification])
		{
			[self captureSessionDidStopRunning];
		}
		else if ([notification.name isEqualToString:AVCaptureSessionRuntimeErrorNotification])
		{
			[self captureSessionDidStopRunning];
			
			NSError *error = notification.userInfo[AVCaptureSessionErrorKey];
			if (error.code == AVErrorDeviceIsNotAvailableInBackground)
			{
				// Camera unavailable while backgrounded; resume on foreground.
				if (_running)
					_startCaptureSessionOnEnteringForeground = true;
			}
			else if (error.code == AVErrorMediaServicesWereReset)
			{
				[self handleRecoverableCaptureSessionRuntimeError:error];
			}
			else
			{
				[self handleNonRecoverableCaptureSessionRuntimeError:error];
			}
		}
	}];
}
// Media services were reset — simply restart the session if we are supposed
// to be live.
- (void)handleRecoverableCaptureSessionRuntimeError:(NSError *)__unused error
{
	if (!_running)
		return;
	
	[_captureSession startRunning];
}
// Permanently tears down the session and notifies the delegate of the error.
- (void)handleNonRecoverableCaptureSessionRuntimeError:(NSError *)error
{
	_running = false;
	[self destroyCaptureSession];
	
	[self invokeDelegateCallbackAsync:^
	{
		[_delegate capturePipeline:self didStopRunningWithError:error];
	}];
}
// Common cleanup after the session stops for any reason: finish any active
// recording and tear down the render pipeline.
- (void)captureSessionDidStopRunning
{
	[self stopRecording:^{}];
	[self destroyVideoPipeline];
}
// Restarts the capture session on returning to the foreground if it was
// stopped due to AVErrorDeviceIsNotAvailableInBackground.
- (void)applicationWillEnterForeground
{
	[[TGVideoCameraPipeline cameraQueue] dispatch:^
	{
		if (_startCaptureSessionOnEnteringForeground)
		{
			_startCaptureSessionOnEnteringForeground = false;
			
			if (_running)
				[_captureSession startRunning];
		}
	}];
}
// Lazily prepares the GL renderer for the first incoming video format and
// records the renderer's output format for downstream consumers.
- (void)setupVideoPipelineWithInputFormatDescription:(CMFormatDescriptionRef)inputFormatDescription
{
	[_renderer prepareForInputWithFormatDescription:inputFormatDescription outputRetainedBufferCountHint:TGVideoCameraRetainedBufferCount];
	self.outputVideoFormatDescription = _renderer.outputFormatDescription;
}
// Resets the renderer and drops the cached preview buffer. Runs
// synchronously on the video data queue so it cannot race the sample-buffer
// delegate.
- (void)destroyVideoPipeline
{
	dispatch_sync(_videoDataOutputQueue, ^
	{
		if (self.outputVideoFormatDescription == NULL)
			return;
		
		self.outputVideoFormatDescription = NULL;
		[_renderer reset];
		
		if (_currentPreviewPixelBuffer != NULL)
		{
			CFRelease(_currentPreviewPixelBuffer);
			_currentPreviewPixelBuffer = NULL;
		}
	});
}
// Forwards the renderer's buffer-starvation condition to the delegate.
- (void)videoPipelineDidRunOutOfBuffers
{
	[self invokeDelegateCallbackAsync:^
	{
		[_delegate capturePipelineDidRunOutOfPreviewBuffers:self];
	}];
}
// Thread-safe setter; @synchronized(_renderer) guards the flag because it is
// read on the video data queue.
- (void)setRenderingEnabled:(bool)renderingEnabled
{
	@synchronized (_renderer)
	{
		_renderingEnabled = renderingEnabled;
	}
}
// Thread-safe getter; pairs with setRenderingEnabled:.
- (bool)renderingEnabled
{
	@synchronized (_renderer)
	{
		return _renderingEnabled;
	}
}
// Sample-buffer delegate for both the video and audio outputs; dispatched on
// their respective serial queues. Video frames lazily initialize the render
// pipeline and are rendered; audio buffers are appended to the recorder (when
// recording), metered for the mic-level preview, and may trigger a deferred
// recording start.
- (void)captureOutput:(AVCaptureOutput *)__unused captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
	CMFormatDescriptionRef formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer);
	
	if (connection == _videoConnection)
	{
		if (self.outputVideoFormatDescription == NULL)
			[self setupVideoPipelineWithInputFormatDescription:formatDescription];
		else {
//			[_recorder appendVideoSampleBuffer:sampleBuffer];
			[self renderVideoSampleBuffer:sampleBuffer];
		}
	}
	else if (connection == _audioConnection)
	{
		self.outputAudioFormatDescription = formatDescription;
		
		@synchronized (self)
		{
			if (_recordingStatus == TGVideoCameraRecordingStatusRecording)
				[_recorder appendAudioSampleBuffer:sampleBuffer];
		}
		
		// The initial blockBuffer value is immediately replaced below by a
		// retained block buffer (last argument), which we own and must release.
		CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
		uint32_t numSamplesInBuffer = (uint32_t)CMSampleBufferGetNumSamples(sampleBuffer);
		AudioBufferList audioBufferList;
		
		CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, &blockBuffer );
		
		// Meter every channel's samples for the mic-level preview.
		for (uint32_t bufferCount = 0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++)
		{
			int16_t *samples = (int16_t *)audioBufferList.mBuffers[bufferCount].mData;
			[self processWaveformPreview:samples count:numSamplesInBuffer];
		}
		
		CFRelease(blockBuffer);
		
		// Recording is started from the audio path so the movie begins on an
		// audio buffer; see startRecording callers.
		OSSpinLockLock(&_recordLock);
		if (_startRecordAfterAudioBuffer)
		{
			_startRecordAfterAudioBuffer = false;
			TGDispatchOnMainThread(^
			{
				[self startRecording:_recordingURL preset:_preset liveUpload:_liveUpload];
			});
		}
		OSSpinLockUnlock(&_recordLock);
	}
}
// Folds raw 16-bit audio samples into a peak-level meter. Every 1200 samples
// the peak is normalized (empirical divisor 4000.0) and delivered via the
// _micLevel callback, then the accumulator resets.
- (void)processWaveformPreview:(int16_t const *)samples count:(int)count {
    for (int i = 0; i < count; i++) {
        int16_t sample = samples[i];
        // Absolute value; INT16_MIN must be clamped explicitly because
        // -INT16_MIN does not fit in int16_t (the old code left it negative,
        // so a full-scale negative sample never updated the peak).
        if (sample == INT16_MIN) {
            sample = INT16_MAX;
        } else if (sample < 0) {
            sample = -sample;
        }
        if (_micLevelPeak < sample) {
            _micLevelPeak = sample;
        }
        _micLevelPeakCount++;
        if (_micLevelPeakCount >= 1200) {
            if (_micLevel) {
                CGFloat level = (CGFloat)_micLevelPeak / 4000.0;
                _micLevel(level);
            }
            _micLevelPeak = 0;
            _micLevelPeakCount = 0;
        }
    }
}
// Converts a BGRA pixel buffer into a UIImage (used for the recording
// thumbnail). Returns nil if the bitmap context or image cannot be created
// (the old code dereferenced them unconditionally).
// NOTE(review): assumes the buffer is 32-bit BGRA — confirm against the
// video output's pixel format settings.
- (UIImage *)imageFromImageBuffer:(CVPixelBufferRef)imageBuffer
{
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    // Create the image while the base address is still locked; the context
    // references the buffer's memory directly.
    CGImageRef cgImage = context != NULL ? CGBitmapContextCreateImage(context) : NULL;
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    if (context != NULL)
        CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    if (cgImage == NULL)
        return nil;
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    return image;
}
// Core per-frame video path (runs on the video data output queue): applies
// the renderer, manages the pause/resume crossfade, captures thumbnails
// while recording, and hands the rendered buffer to the preview and the
// movie recorder.
- (void)renderVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVPixelBufferRef renderedPixelBuffer = NULL;
    CMTime timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    @synchronized (_renderer)
    {
        if (_renderingEnabled)
        {
            bool repeatingFrames = false;
            @synchronized (self)
            {
                // Resuming after a pause: crossfade from the last frame of
                // the previous segment over the next frames.
                if (_recorder.paused && _previousPixelBuffer != NULL)
                {
                    _recorder.paused = false;
                    _repeatingCount = 11;
                    [_renderer setPreviousPixelBuffer:_previousPixelBuffer];
                    CFRelease(_previousPixelBuffer);
                    _previousPixelBuffer = NULL;
                }
                if (_repeatingCount > 0)
                {
                    repeatingFrames = true;
                    _repeatingCount--;
                }
                // Linearly fade the overlaid previous frame out (1.0 -> 0.0
                // over the last ten repeat frames).
                CGFloat opacity = 1.0f;
                if (_repeatingCount < 10)
                    opacity = _repeatingCount / 9.0f;
                [_renderer setOpacity:opacity];
                if (_repeatingCount == 0)
                    [_renderer setPreviousPixelBuffer:NULL];
            }
            CVPixelBufferRef sourcePixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
            renderedPixelBuffer = [_renderer copyRenderedPixelBuffer:sourcePixelBuffer];
            @synchronized (self)
            {
                // The first recorded frame doubles as the message thumbnail.
                if (_recordingStatus == TGVideoCameraRecordingStatusRecording && _recordingThumbnail == nil)
                {
                    UIImage *image = [self imageFromImageBuffer:sourcePixelBuffer];
                    _recordingThumbnail = image;
                }
                // Collect scrubber thumbnails at a growing interval so the
                // total count stays roughly constant regardless of duration.
                if (_recordingStatus == TGVideoCameraRecordingStatusRecording && !repeatingFrames)
                {
                    NSTimeInterval currentTime = CMTimeGetSeconds(timestamp);
                    if (_previousThumbnailTime < DBL_EPSILON)
                    {
                        _firstThumbnailTime = currentTime;
                        _previousThumbnailTime = currentTime;
                        [self storeThumbnailWithSampleBuffer:sampleBuffer time:0.0 mirror:_renderer.mirror];
                    }
                    else
                    {
                        NSTimeInterval relativeThumbnailTime = _previousThumbnailTime - _firstThumbnailTime;
                        NSTimeInterval interval = MAX(0.1, relativeThumbnailTime / 10.0);
                        if (currentTime - _previousThumbnailTime >= interval)
                        {
                            [self storeThumbnailWithSampleBuffer:sampleBuffer time:relativeThumbnailTime mirror:_renderer.mirror];
                            _previousThumbnailTime = currentTime;
                        }
                    }
                }
                // Remember the latest source frame for the next pause/resume
                // crossfade.
                if (!repeatingFrames)
                {
                    if (_previousPixelBuffer != NULL)
                    {
                        CFRelease(_previousPixelBuffer);
                        _previousPixelBuffer = NULL;
                    }
                    _previousPixelBuffer = sourcePixelBuffer;
                    CFRetain(sourcePixelBuffer);
                }
            }
        }
        else
        {
            return;
        }
    }
    if (renderedPixelBuffer)
    {
        @synchronized (self)
        {
            [self outputPreviewPixelBuffer:renderedPixelBuffer];
            if (_recordingStatus == TGVideoCameraRecordingStatusRecording)
                [_recorder appendVideoPixelBuffer:renderedPixelBuffer withPresentationTime:timestamp];
        }
        CFRelease(renderedPixelBuffer);
    }
    else
    {
        // Renderer pool exhausted: frame dropped, notify the delegate.
        [self videoPipelineDidRunOutOfBuffers];
    }
}
// Publishes a rendered buffer to the delegate for on-screen preview.
// _currentPreviewPixelBuffer is a single-slot mailbox: only the most recent
// frame survives until the asynchronous delegate callback drains it.
- (void)outputPreviewPixelBuffer:(CVPixelBufferRef)previewPixelBuffer
{
    if (_currentPreviewPixelBuffer != NULL)
    {
        CFRelease(_currentPreviewPixelBuffer);
        _currentPreviewPixelBuffer = NULL;
    }
    // NOTE(review): the frame is only kept while _previousPixelBuffer exists
    // (i.e. a non-repeat frame has been seen) — looks intentional; confirm.
    if (_previousPixelBuffer != NULL)
    {
        _currentPreviewPixelBuffer = previewPixelBuffer;
        CFRetain(_currentPreviewPixelBuffer);
    }
    [self invokeDelegateCallbackAsync:^
    {
        CVPixelBufferRef currentPreviewPixelBuffer = NULL;
        @synchronized (self)
        {
            // Take ownership of the mailbox contents under the lock.
            currentPreviewPixelBuffer = _currentPreviewPixelBuffer;
            if (currentPreviewPixelBuffer != NULL)
            {
                CFRetain(currentPreviewPixelBuffer);
                if (_currentPreviewPixelBuffer != NULL)
                {
                    CFRelease(_currentPreviewPixelBuffer);
                    _currentPreviewPixelBuffer = NULL;
                }
            }
        }
        if (currentPreviewPixelBuffer != NULL)
        {
            [_delegate capturePipeline:self previewPixelBufferReadyForDisplay:currentPreviewPixelBuffer];
            CFRelease(currentPreviewPixelBuffer);
        }
    }];
}
// Crops the frame to a centered square, scales it to 66x66 with vImage and
// stores it in _thumbnails keyed by the relative timestamp.
// The crop side is min(width, height): the old code computed
// (width - height) / 2 with size_t arithmetic, which underflowed to a huge
// offset for portrait (width < height) buffers.
- (void)storeThumbnailWithSampleBuffer:(CMSampleBufferRef)sampleBuffer time:(NSTimeInterval)time mirror:(bool)mirror
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t side = width < height ? width : height;
    size_t cropX = (width - side) / 2;
    size_t cropY = (height - side) / 2;
    size_t cropWidth = side;
    size_t cropHeight = side;
    size_t outWidth = 66;
    size_t outHeight = 66;
    CVPixelBufferLockBaseAddress(imageBuffer,0);
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    // Describe the cropped region in place (4 bytes per pixel, BGRA).
    vImage_Buffer inBuff;
    inBuff.height = cropHeight;
    inBuff.width = cropWidth;
    inBuff.rowBytes = bytesPerRow;
    unsigned long startpos = cropY * bytesPerRow + 4 * cropX;
    inBuff.data = baseAddress + startpos;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreateWithData(NULL, outWidth, outHeight, 8, outWidth * 4, colorSpace, kCGImageByteOrder32Little | kCGImageAlphaPremultipliedFirst, NULL, nil);
    if (context == NULL)
    {
        // Could not allocate the destination bitmap; skip this thumbnail.
        CVPixelBufferUnlockBaseAddress(imageBuffer,0);
        CGColorSpaceRelease(colorSpace);
        return;
    }
    unsigned char *outImg = CGBitmapContextGetData(context);
    vImage_Buffer outBuff = {outImg, outHeight, outWidth, 4 * outWidth};
    vImage_Error err = vImageScale_ARGB8888(&inBuff, &outBuff, NULL, 0);
    if (err != kvImageNoError)
        TGLegacyLog(@"Video Message thumbnail generation error %ld", err);
    CVPixelBufferUnlockBaseAddress(imageBuffer,0);
    CGImageRef cgImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // Orientation compensates for the sensor rotation; mirrored for the
    // front camera.
    UIImage *image = [UIImage imageWithCGImage:cgImage scale:1.0f orientation:mirror ? UIImageOrientationLeftMirrored : UIImageOrientationRight];
    CGImageRelease(cgImage);
    _thumbnails[@(time)] = image;
}
// Begins a new recording into `url`. If no audio buffer has arrived yet the
// audio format is unknown, so the request is parked behind _recordLock and
// replayed from the audio capture callback once the format is available.
- (void)startRecording:(NSURL *)url preset:(TGMediaVideoConversionPreset)preset liveUpload:(bool)liveUpload
{
    _recordingURL = url;
    _preset = preset;
    _liveUpload = liveUpload;
    OSSpinLockLock(&_recordLock);
    if (self.outputAudioFormatDescription == NULL)
    {
        // Defer: -captureOutput:... will re-invoke us on the main thread.
        _startRecordAfterAudioBuffer = true;
        OSSpinLockUnlock(&_recordLock);
        return;
    }
    OSSpinLockUnlock(&_recordLock);
    @synchronized (self)
    {
        if (_recordingStatus != TGVideoCameraRecordingStatusIdle)
            return;
        [self transitionToRecordingStatus:TGVideoCameraRecordingStatusStartingRecording error:nil];
    }
    // Recorder callbacks arrive on a private serial queue.
    dispatch_queue_t callbackQueue = dispatch_queue_create("org.telegram.VideoCameraPipeline.recorder", DISPATCH_QUEUE_SERIAL);
    TGVideoCameraMovieRecorder *recorder = [[TGVideoCameraMovieRecorder alloc] initWithURL:_recordingURL delegate:self callbackQueue:callbackQueue];
    NSDictionary *audioSettings = [TGMediaVideoConversionPresetSettings audioSettingsForPreset:preset];
    [recorder addAudioTrackWithSourceFormatDescription:self.outputAudioFormatDescription settings:audioSettings];
    _videoTransform = [self transformForOrientation:self.orientation];
    CGSize size = [TGMediaVideoConversionPresetSettings maximumSizeForPreset:preset];
    NSDictionary *videoSettings = [TGMediaVideoConversionPresetSettings videoSettingsForPreset:preset dimensions:size];
    // NOTE(review): the video track uses the identity transform even though
    // _videoTransform was just computed above — confirm this is intended.
    [recorder addVideoTrackWithSourceFormatDescription:self.outputVideoFormatDescription transform:CGAffineTransformIdentity settings:videoSettings];
    _recorder = recorder;
    [recorder prepareToRecord];
}
// Stops an active recording on the camera queue, then finalizes the movie
// file. `completed` is always invoked, even when nothing was recording.
- (void)stopRecording:(void (^)())completed
{
    [[TGVideoCameraPipeline cameraQueue] dispatch:^
    {
        @synchronized (self)
        {
            if (_recordingStatus != TGVideoCameraRecordingStatusRecording) {
                if (completed) {
                    completed();
                }
                return;
            }
            [self transitionToRecordingStatus:TGVideoCameraRecordingStatusStoppingRecording error:nil];
        }
        // Capture the duration before the recorder is torn down.
        _resultDuration = _recorder.videoDuration;
        [_recorder finishRecording:^{
            // NOTE(review): this no-op keeps `self` retained until the
            // recorder finishes — presumably intentional; confirm.
            __unused __auto_type description = [self description];
            if (completed) {
                completed();
            }
        }];
    }];
}
// A recording is considered active when a recorder exists and is not paused.
- (bool)isRecording
{
    if (_recorder == nil)
        return false;
    return !_recorder.paused;
}
// Recorder is ready to accept buffers: flip to the Recording state and, when
// live upload was requested, start watching the output file for changes.
- (void)movieRecorderDidFinishPreparing:(TGVideoCameraMovieRecorder *)__unused recorder
{
    @synchronized (self)
    {
        if (_recordingStatus != TGVideoCameraRecordingStatusStartingRecording)
            return;
        [self transitionToRecordingStatus:TGVideoCameraRecordingStatusRecording error:nil];
        if (_liveUpload)
        {
            _watcher = _liveUploadInterface;
            [_watcher setupWithFileURL:_recordingURL];
        }
    }
}
// Recorder failed: drop it and go back to Idle, forwarding the error to the
// delegate via the status transition.
- (void)movieRecorder:(TGVideoCameraMovieRecorder *)__unused recorder didFailWithError:(NSError *)error
{
    @synchronized (self)
    {
        _recorder = nil;
        [self transitionToRecordingStatus:TGVideoCameraRecordingStatusIdle error:error];
    }
}
// Recorder finished writing the movie file: finalize the live-upload watcher
// (if any) and return the pipeline to the Idle state, which triggers the
// delegate's recordingDidStop callback. (Stray debug printf removed.)
- (void)movieRecorderDidFinishRecording:(TGVideoCameraMovieRecorder *)__unused recorder
{
    @synchronized (self)
    {
        if (_recordingStatus != TGVideoCameraRecordingStatusStoppingRecording)
            return;
    }
    _recorder = nil;
    if (_watcher != nil)
        _liveUploadData = [_watcher fileUpdated:true];
    [self transitionToRecordingStatus:TGVideoCameraRecordingStatusIdle error:nil];
}
// Central state machine for the recording lifecycle. Updates _recordingStatus
// and dispatches the matching delegate notification for each legal
// transition. (Stray debug printf calls removed.)
- (void)transitionToRecordingStatus:(TGVideoCameraRecordingStatus)newStatus error:(NSError *)error
{
    TGVideoCameraRecordingStatus oldStatus = _recordingStatus;
    _recordingStatus = newStatus;
    if (newStatus != oldStatus)
    {
        dispatch_block_t delegateCallbackBlock = nil;
        // Any transition to Idle carrying an error is reported as a failure.
        if (error && newStatus == TGVideoCameraRecordingStatusIdle)
        {
            delegateCallbackBlock = ^{ [_delegate capturePipeline:self recordingDidFailWithError:error]; };
        }
        else
        {
            // Capture the delegate strongly so the async block does not race
            // with it being released.
            __strong id<TGVideoCameraPipelineDelegate> delegate = _delegate;
            if ((oldStatus == TGVideoCameraRecordingStatusStartingRecording) && (newStatus == TGVideoCameraRecordingStatusRecording))
                delegateCallbackBlock = ^{ [delegate capturePipelineRecordingDidStart:self]; };
            else if ((oldStatus == TGVideoCameraRecordingStatusRecording) && (newStatus == TGVideoCameraRecordingStatusStoppingRecording))
                delegateCallbackBlock = ^{ [delegate capturePipelineRecordingWillStop:self]; };
            else if ((oldStatus == TGVideoCameraRecordingStatusStoppingRecording) && (newStatus == TGVideoCameraRecordingStatusIdle))
                delegateCallbackBlock = ^{
                    [delegate capturePipelineRecordingDidStop:self duration:_resultDuration liveUploadData:_liveUploadData thumbnailImage:_recordingThumbnail thumbnails:_thumbnails];
                };
        }
        if (delegateCallbackBlock != nil)
            [self invokeDelegateCallbackAsync:delegateCallbackBlock];
    }
}
// Runs a delegate notification asynchronously on the dedicated delegate
// queue; the autorelease pool bounds temporary object lifetime per callback.
- (void)invokeDelegateCallbackAsync:(dispatch_block_t)callbackBlock
{
    dispatch_async(_delegateCallbackQueue, ^
    {
        @autoreleasepool
        {
            callbackBlock();
        }
    });
}
// Rotation that maps the camera's native buffer orientation onto the
// requested display orientation.
- (CGAffineTransform)transformForOrientation:(AVCaptureVideoOrientation)orientation
{
    CGAffineTransform transform = CGAffineTransformIdentity;
    CGFloat orientationAngleOffset = angleOffsetFromPortraitOrientationToOrientation(orientation);
    CGFloat videoOrientationAngleOffset = angleOffsetFromPortraitOrientationToOrientation(_videoBufferOrientation);
    // Difference of the two portrait-relative angles gives the net rotation.
    CGFloat angleOffset = orientationAngleOffset - videoOrientationAngleOffset;
    transform = CGAffineTransformMakeRotation(angleOffset);
    return transform;
}
// Maps a capture orientation to its rotation in radians relative to portrait.
static CGFloat angleOffsetFromPortraitOrientationToOrientation(AVCaptureVideoOrientation orientation)
{
    switch (orientation)
    {
        case AVCaptureVideoOrientationPortraitUpsideDown:
            return (CGFloat)M_PI;
        case AVCaptureVideoOrientationLandscapeRight:
            return (CGFloat)-M_PI_2;
        case AVCaptureVideoOrientationLandscapeLeft:
            return (CGFloat)M_PI_2;
        case AVCaptureVideoOrientationPortrait:
        default:
            return 0.0;
    }
}
// Current length of the recording as reported by the recorder (0 if none).
- (NSTimeInterval)videoDuration
{
    return _recorder.videoDuration;
}
// Switches between front and back camera. Recording is paused first so the
// frame pipeline can crossfade over the switch; the session inputs are then
// swapped on the camera queue.
- (void)setCameraPosition:(AVCaptureDevicePosition)position
{
    @synchronized (self)
    {
        _recorder.paused = true;
    }
    [[TGVideoCameraPipeline cameraQueue] dispatch:^
    {
        NSError *error;
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
        AVCaptureDevice *deviceForTargetPosition = nil;
        for (AVCaptureDevice *device in devices)
        {
            if (device.position == position)
            {
                deviceForTargetPosition = device;
                break;
            }
        }
        // Front camera output is mirrored to match the on-screen preview.
        _renderer.mirror = deviceForTargetPosition.position == AVCaptureDevicePositionFront;
        _renderer.orientation = _orientation;
        AVCaptureDeviceInput *newVideoInput = [[AVCaptureDeviceInput alloc] initWithDevice:deviceForTargetPosition error:&error];
        if (newVideoInput != nil)
        {
            [_captureSession beginConfiguration];
            [_captureSession removeInput:_videoInput];
            if ([_captureSession canAddInput:newVideoInput])
            {
                [_captureSession addInput:newVideoInput];
                _videoInput = newVideoInput;
            }
            else
            {
                // New input rejected: restore the previous one.
                [_captureSession addInput:_videoInput];
            }
            [_captureSession commitConfiguration];
        }
        _videoDevice = deviceForTargetPosition;
        // The connection object changes with the input; re-resolve it and
        // reapply per-device settings.
        _videoConnection = [_videoOutput connectionWithMediaType:AVMediaTypeVideo];
        [self _configureFPS];
        [self _enableLowLightBoost];
        [self _enableVideoStabilization];
        _videoBufferOrientation = _videoConnection.videoOrientation;
    }];
}
// Opts the current device into automatic low-light boost when the hardware
// supports it.
- (void)_enableLowLightBoost
{
    [self _reconfigureDevice:_videoDevice withBlock:^(AVCaptureDevice *device)
    {
        if (device.isLowLightBoostSupported)
            device.automaticallyEnablesLowLightBoostWhenAvailable = true;
    }];
}
// Enables video stabilization on the video connection, preferring the modern
// mode-based API and falling back to the legacy boolean on older systems.
- (void)_enableVideoStabilization
{
    AVCaptureConnection *videoConnection = [_videoOutput connectionWithMediaType:AVMediaTypeVideo];
    if (videoConnection.supportsVideoStabilization)
    {
        if ([videoConnection respondsToSelector:@selector(setPreferredVideoStabilizationMode:)])
            videoConnection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationModeStandard;
        else
            videoConnection.enablesVideoStabilizationWhenAvailable = true;
    }
}
// Runs `block` on `device` with the configuration lock held. If the lock
// cannot be acquired the reconfiguration is skipped and the error is logged
// (the old code applied the block and called unlock even when locking
// failed, which AVFoundation documents as invalid).
- (void)_reconfigureDevice:(AVCaptureDevice *)device withBlock:(void (^)(AVCaptureDevice *device))block
{
    if (block == nil)
        return;
    NSError *error = nil;
    if ([device lockForConfiguration:&error])
    {
        block(device);
        [device unlockForConfiguration];
    }
    else
    {
        TGLegacyLog(@"ERROR: failed to reconfigure camera: %@", error);
    }
}
// Attaches the default microphone and an audio data output to the session.
// No-op when audio is already attached or no audio queue exists.
- (void)_addAudioInput
{
    if (_audioDevice != nil || _audioDataOutputQueue == NULL)
        return;
    AVCaptureDevice *audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    NSError *error = nil;
    if (audioDevice != nil)
    {
        _audioDevice = audioDevice;
        // NOTE(review): `error` is never inspected; a nil audioInput simply
        // fails the canAddInput: check below — consider logging the error.
        AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:_audioDevice error:&error];
        if ([_captureSession canAddInput:audioInput])
        {
            [_captureSession addInput:audioInput];
            _audioInput = audioInput;
        }
    }
    AVCaptureAudioDataOutput *audioOutput = [[AVCaptureAudioDataOutput alloc] init];
    if ([_captureSession canAddOutput:audioOutput])
    {
        [audioOutput setSampleBufferDelegate:self queue:_audioDataOutputQueue];
        [_captureSession addOutput:audioOutput];
        _audioOutput = audioOutput;
    }
}
// Detaches the microphone input and the audio data output; inverse of
// -_addAudioInput. No-op when no audio device is attached.
- (void)_removeAudioInput
{
    if (_audioDevice == nil)
        return;
    [_captureSession removeInput:_audioInput];
    _audioInput = nil;
    [_audioOutput setSampleBufferDelegate:nil queue:NULL];
    [_captureSession removeOutput:_audioOutput];
    _audioOutput = nil;
    _audioDevice = nil;
}
// Pins the capture frame rate to a fixed 30 fps by setting min and max frame
// durations to the same value.
- (void)_configureFPS
{
    CMTime frameDuration = CMTimeMake(1, 30);
    [self _reconfigureDevice:_videoDevice withBlock:^(AVCaptureDevice *device)
    {
        device.activeVideoMaxFrameDuration = frameDuration;
        device.activeVideoMinFrameDuration = frameDuration;
    }];
}
// True when the device exposes more than one camera (front and back).
+ (bool)cameraPositionChangeAvailable
{
    NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    return videoDevices.count > 1;
}
// Process-wide serial queue on which all capture-session mutations run;
// created once via dispatch_once.
+ (SQueue *)cameraQueue
{
    static dispatch_once_t onceToken;
    static SQueue *queue = nil;
    dispatch_once(&onceToken, ^
    {
        queue = [[SQueue alloc] init];
    });
    return queue;
}
@end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>Module: Jabber::Command</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<meta http-equiv="Content-Script-Type" content="text/javascript" />
<link rel="stylesheet" href="../.././rdoc-style.css" type="text/css" media="screen" />
<script type="text/javascript">
// <![CDATA[
// Opens a small secondary window displaying a method's source listing.
function popupCode( url ) {
  window.open(url, "Code", "resizable=yes,scrollbars=yes,toolbar=no,status=no,height=150,width=400")
}
// Toggles visibility of the element with the given id. Falls back to the
// legacy document.all lookup for very old browsers; returns false when the
// element cannot be resolved.
function toggleCode( id ) {
  if ( document.getElementById )
    elem = document.getElementById( id );
  else if ( document.all )
    elem = eval( "document.all." + id );
  else
    return false;
  elemStyle = elem.style;
  if ( elemStyle.display != "block" ) {
    elemStyle.display = "block"
  } else {
    elemStyle.display = "none"
  }
  return true;
}
// Make codeblocks hidden by default
document.writeln( "<style type=\"text/css\">div.method-source-code { display: none }</style>" )
// ]]>
</script>
</head>
<body>
<div id="classHeader">
<table class="header-table">
<tr class="top-aligned-row">
<td><strong>Module</strong></td>
<td class="class-name-in-header">Jabber::Command</td>
</tr>
<tr class="top-aligned-row">
<td><strong>In:</strong></td>
<td>
<a href="../../files/lib/xmpp4r/command/helper/responder_rb.html">
lib/xmpp4r/command/helper/responder.rb
</a>
<br />
<a href="../../files/lib/xmpp4r/command/iq/command_rb.html">
lib/xmpp4r/command/iq/command.rb
</a>
<br />
</td>
</tr>
</table>
</div>
<!-- banner header -->
<div id="bodyContent">
<div id="contextContent">
</div>
</div>
<!-- if includes -->
<div id="section">
<div id="class-list">
<h3 class="section-bar">Classes and Modules</h3>
Class <a href="Command/IqCommand.html" class="link">Jabber::Command::IqCommand</a><br />
Class <a href="Command/Responder.html" class="link">Jabber::Command::Responder</a><br />
</div>
<!-- if method_list -->
</div>
<div id="validator-badges">
<p><small><a href="http://validator.w3.org/check/referer">[Validate]</a></small></p>
</div>
</body>
</html> | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleDocumentTypes</key>
<array>
<dict>
<key>CFBundleTypeRole</key>
<string>QLGenerator</string>
<key>LSItemContentTypes</key>
<array>
<string>com.apple.itunes.ipa</string>
<string>com.apple.iphone.mobileprovision</string>
<string>com.apple.mobileprovision</string>
<string>com.apple.provisionprofile</string>
<string>com.apple.application-and-system-extension</string>
<string>com.apple.xcode.archive</string>
</array>
</dict>
</array>
<key>CFBundleExecutable</key>
<string>${EXECUTABLE_NAME}</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>${PRODUCT_NAME}</string>
<key>CFBundleShortVersionString</key>
<string>$(MARKETING_VERSION)</string>
<key>CFBundleVersion</key>
<string>$(CURRENT_PROJECT_VERSION)</string>
<key>CFPlugInDynamicRegisterFunction</key>
<string></string>
<key>CFPlugInDynamicRegistration</key>
<string>NO</string>
<key>CFPlugInFactories</key>
<dict>
<key>6BE4A2DB-04D7-404F-9CEA-7AD00B98F179</key>
<string>QuickLookGeneratorPluginFactory</string>
</dict>
<key>CFPlugInTypes</key>
<dict>
<key>5E2D9680-5022-40FA-B806-43349622E5B9</key>
<array>
<string>6BE4A2DB-04D7-404F-9CEA-7AD00B98F179</string>
</array>
</dict>
<key>CFPlugInUnloadFunction</key>
<string></string>
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2013-2018 Evgeny Aleksandrov. All rights reserved.</string>
<key>QLNeedsToBeRunInMainThread</key>
<true/>
<key>QLPreviewHeight</key>
<real>600</real>
<key>QLPreviewWidth</key>
<integer>640</integer>
<key>QLSupportsConcurrentRequests</key>
<false/>
<key>QLThumbnailMinimumSize</key>
<integer>16</integer>
</dict>
</plist>
| {
"pile_set_name": "Github"
} |
# encoding: utf-8
require 'spec_helper'
# Specs for Algebra::Product#each: the cartesian product of two one-column
# relations must yield Tuple instances carrying the product's own header and
# the full cross of both relations' rows.
describe Algebra::Product, '#each' do
  subject { object.each { |tuple| yields << tuple } }
  let(:object) { described_class.new(left, right) }
  # Shared two-attribute header, split into one-attribute projections below.
  let(:header) { Relation::Header.coerce([[:id, Integer], [:name, String]]) }
  let(:left)   { Relation.new(header.project([:id]), [[1], [2]]) }
  let(:right)  { Relation.new(header.project([:name]), [['Dan Kubb'], ['Alex Kubb']]) }
  let(:yields) { [] }
  it_should_behave_like 'an #each method'
  it 'yields only tuples' do
    subject
    yields.each { |tuple| expect(tuple).to be_instance_of(Tuple) }
  end
  it 'yields only tuples with the expected header' do
    subject
    yields.each { |tuple| expect(tuple.header).to be(object.header) }
  end
  # 2 left rows x 2 right rows => 4 tuples, left-major order.
  it 'yields only tuples with the expected data' do
    expect { subject }.to change { yields.dup }
      .from([])
      .to([[1, 'Dan Kubb'], [1, 'Alex Kubb'], [2, 'Dan Kubb'], [2, 'Alex Kubb']])
  end
end
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: bf944057124b80b4e84496139e3b072c
timeCreated: 18446744011573954816
ShaderImporter:
externalObjects: {}
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
#include <iostream>
#include <cstring>
using namespace std;
// Counts inversions in each input permutation with a Fenwick tree (binary
// indexed tree). Input: repeated datasets of "n a1 .. an"; n == 0 terminates.
// Values are assumed to lie in [1, n].
int fenwick[101000], values[101000], element_count;
// Sum of tree entries over positions [1, idx].
int prefix_sum(int idx)
{
    int total = 0;
    while (idx > 0)
    {
        total += fenwick[idx];
        idx -= idx & (-idx);
    }
    return total;
}
// Records one occurrence of value idx.
void add_one(int idx)
{
    while (idx <= element_count)
    {
        fenwick[idx]++;
        idx += idx & (-idx);
    }
}
int main()
{
    while (std::cin >> element_count && element_count)
    {
        // Only the first n+1 slots are ever touched for this dataset.
        std::memset(fenwick, 0, (element_count + 1) * sizeof(int));
        for (int i = 0; i < element_count; i++)
            std::cin >> values[i];
        // Scan right-to-left: each element pairs with already-seen (i.e.
        // later) elements that are strictly smaller — exactly an inversion.
        int inversions = 0;
        for (int i = element_count - 1; i >= 0; i--)
        {
            inversions += prefix_sum(values[i] - 1);
            add_one(values[i]);
        }
        std::cout << inversions << std::endl;
    }
    return 0;
}
| {
"pile_set_name": "Github"
} |
[
{
"id": 112094225,
"name": "CVE-2017-15394",
"full_name": "sudosammy\/CVE-2017-15394",
"owner": {
"login": "sudosammy",
"id": 18524051,
"avatar_url": "https:\/\/avatars1.githubusercontent.com\/u\/18524051?v=4",
"html_url": "https:\/\/github.com\/sudosammy"
},
"html_url": "https:\/\/github.com\/sudosammy\/CVE-2017-15394",
"description": null,
"fork": false,
"created_at": "2017-11-26T15:32:04Z",
"updated_at": "2017-11-26T15:32:04Z",
"pushed_at": "2017-11-26T15:32:56Z",
"stargazers_count": 0,
"watchers_count": 0,
"forks_count": 0,
"forks": 0,
"watchers": 0,
"score": 0
}
] | {
"pile_set_name": "Github"
} |
var http = require('http');
if (http.METHODS) {
module.exports = http.METHODS.map(function(method){
return method.toLowerCase();
});
return;
}
module.exports = [
'get',
'post',
'put',
'head',
'delete',
'options',
'trace',
'copy',
'lock',
'mkcol',
'move',
'propfind',
'proppatch',
'unlock',
'report',
'mkactivity',
'checkout',
'merge',
'm-search',
'notify',
'subscribe',
'unsubscribe',
'patch',
'search'
];
| {
"pile_set_name": "Github"
} |
module Test.Set exposing (tests)
import Basics exposing (..)
import Set
import Set exposing (Set)
import List
import Test exposing (..)
import Expect
-- Fixture: the integers 1..100 as a set.
set : Set Int
set =
    Set.fromList <| List.range 1 100
-- Lower half of the fixture (1..50); expected result of filtering by `pred`.
setPart1 : Set Int
setPart1 =
    Set.fromList <| List.range 1 50
-- Upper half of the fixture (51..100); the complement under `pred`.
setPart2 : Set Int
setPart2 =
    Set.fromList <| List.range 51 100
-- Partition/filter predicate: keeps values in the lower half.
pred : Int -> Bool
pred x =
    x <= 50
-- Exercises Set.size, Set.filter and Set.partition against the fixtures.
tests : Test
tests =
    let
        queryTests =
            describe "query Tests"
                [ test "size of set of 100 elements" <|
                    \() -> Expect.equal 100 (Set.size set)
                ]
        filterTests =
            describe "filter Tests"
                [ test "Simple filter" <|
                    \() -> Expect.equal setPart1 <| Set.filter pred set
                ]
        partitionTests =
            describe "partition Tests"
                [ test "Simple partition" <|
                    \() -> Expect.equal ( setPart1, setPart2 ) <| Set.partition pred set
                ]
    in
        describe "Set Tests" [ queryTests, partitionTests, filterTests ]
| {
"pile_set_name": "Github"
} |
# Copyright(C) 2011 Kouhei Sutou <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Preconditions: InnoDB available, non-embedded server, mroonga installed.
--source ../../../include/mroonga/have_innodb.inc
--source ../../../include/mroonga/not_embedded.inc
--source ../../../include/mroonga/have_mroonga.inc
--disable_warnings
DROP TABLE IF EXISTS diaries;
--enable_warnings
# Start from a plain MyISAM table with two fulltext indexes.
CREATE TABLE diaries (
  id INT PRIMARY KEY AUTO_INCREMENT,
  title TEXT,
  body TEXT,
  FULLTEXT INDEX title_index (title),
  FULLTEXT INDEX body_index (body)
) ENGINE MyISAM DEFAULT CHARSET UTF8MB4;
SELECT table_name, engine, table_comment
       FROM information_schema.tables
       WHERE table_name = 'diaries';
INSERT INTO diaries (title, body) VALUES ("survey", "will start groonga!");
INSERT INTO diaries (title, body) VALUES ("groonga (1)", "starting groonga...");
SELECT * FROM diaries
        WHERE MATCH(title) AGAINST("survey" IN BOOLEAN MODE) AND
              MATCH(body) AGAINST("groonga" IN BOOLEAN MODE);
# Convert to mroonga wrapping InnoDB; existing data and fulltext search
# must keep working after the ALTER.
ALTER TABLE diaries ENGINE = mroonga COMMENT = 'ENGINE "InnoDB"';
SELECT table_name, engine, table_comment
       FROM information_schema.tables
       WHERE table_name = 'diaries';
SELECT * FROM diaries
        WHERE MATCH(title) AGAINST("survey" IN BOOLEAN MODE) AND
              MATCH(body) AGAINST("groonga" IN BOOLEAN MODE);
# New rows inserted after the conversion must also be indexed.
INSERT INTO diaries (title, body) VALUES ("groonga (2)", "started groonga.");
SELECT * FROM diaries
        WHERE MATCH(title) AGAINST("groonga" IN BOOLEAN MODE) AND
              MATCH(body) AGAINST("groonga" IN BOOLEAN MODE);
DROP TABLE diaries;
--source ../../../include/mroonga/have_mroonga_deinit.inc
"pile_set_name": "Github"
} |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
// Runs a cumulative sum along dimension 1 of an m x k x n float tensor on
// the GPU and compares against the CPU result element-wise: absolute
// difference < 1e-4, falling back to a relative isApprox check.
template<int DataLayout>
void test_cuda_cumsum(int m_size, int k_size, int n_size)
{
  std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
  Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
  Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
  Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
  t_input.setRandom();
  std::size_t t_input_bytes = t_input.size()  * sizeof(float);
  std::size_t t_result_bytes = t_result.size() * sizeof(float);
  // Device buffers for input and result; input is copied up front.
  float* d_t_input;
  float* d_t_result;
  cudaMalloc((void**)(&d_t_input), t_input_bytes);
  cudaMalloc((void**)(&d_t_result), t_result_bytes);
  cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice);
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
      gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
  Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
      gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
  // GPU evaluation of the scan, then the CPU reference.
  gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
  t_result = t_input.cumsum(1);
  cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
  for (DenseIndex i = 0; i < t_result.size(); i++) {
    if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
      continue;
    }
    if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
      continue;
    }
    std::cout << "mismatch detected at index " << i << ": " << t_result(i)
              << " vs " <<  t_result_gpu(i) << std::endl;
    assert(false);
  }
  cudaFree((void*)d_t_input);
  cudaFree((void*)d_t_result);
}
// Test entry point: exercises the CUDA scan for both memory layouts.
void test_cxx11_tensor_scan_cuda()
{
  CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
  CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
}
| {
"pile_set_name": "Github"
} |
'use strict';
// AngularJS controller for creating a many-to-many relationship between two
// entities. Serializes the jQuery-validated form, posts it, and offers
// save / save-and-view / save-and-back navigation flows.
define(['core/app/detourService'], function (detour) {
    detour.registerController([
        'CreateManyToManyCtrl',
        ['$scope', 'logger', '$state', '$stateParams', '$http', '$parse',
            function ($scope, logger, $state, $stateParams, $http, $parse) {
                $scope.showPrimaryList = true;
                $scope.showRelatedList = true;
                // jQuery-validate instance bound to the relationship form.
                var validator = $("#manytomany-form").validate({
                    errorClass: "inputError"
                });
                // Posts the form; returns the $http promise, or null when
                // client-side validation fails.
                $scope.save = function () {
                    // Temporarily enable the disabled field so it serializes.
                    $("input.primary-entity").prop('disabled', false);
                    var form = $('#manytomany-form');
                    if (!validator.form()) {
                        return null;
                    }
                    var promise = $http({
                        url: form.attr('action'),
                        method: form.attr('method'),
                        data: form.serialize(),
                        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
                        tracker: 'saverelation'
                    }).then(function (response) {
                        logger.success('success');
                        $("input.primary-entity").prop('disabled', true);
                        return response;
                    }, function (result) {
                        logger.error('Failed:\n' + result);
                        $("input.primary-entity").prop('disabled', true);
                    });
                    return promise;
                };
                // Returns to the entity's relationship list.
                $scope.exit = function () {
                    $state.transitionTo('EntityDetail.Relationships', { Id: $stateParams.EntityName });
                };
                // Saves, then navigates to the edit view of the new relation
                // using the relationId returned by the server.
                $scope.saveAndView = function () {
                    var promise = $scope.save();
                    promise && promise.then(function (response) {
                        var getter = $parse('relationId');
                        var relationId = getter(response.data);
                        if (relationId)
                            $state.transitionTo('EditManyToMany', { EntityName: $stateParams.EntityName, RelationId: relationId });
                    });
                };
                // Saves, then returns to the relationship list.
                $scope.saveAndBack = function () {
                    var promise = $scope.save();
                    promise && promise.then(function () {
                        $scope.exit();
                    });
                };
            }]
    ]);
});
"pile_set_name": "Github"
} |
.PHONY: all
.PHONY: all_html
all: artifacts/requirements.pdf artifacts/req-graph1.png \
artifacts/req-graph2.png all_html
# Adding new files (especially requirements) can not automatically
# handled. The 'force' target tries to handle this.
.PHONY: force
force:
rm .rmtoo_dependencies
${MAKE} all
#
# This is the way the rmtoo must be called.
# (The RMTOO_CALL variable is set in the setenv.sh script)
# You can override the default Config.py file by setting
# the RMTOO_CONFIG variable from the command line.
# ie:
# make RMTOO_CONFIG=YourConfig.json
#
RMTOO_CALL ?= rmtoo
RMTOO_CONFIG=Config.json
RMTOO_CONTRIB_DIR ?= /usr/local/pkg/rmtoo/rmtoo
CALL_RMTOO=${RMTOO_CALL} -j file://${RMTOO_CONFIG}
#
# Dependency handling
# The file .rmtoo_dependencies is created by rmtoo itself.
#
include .rmtoo_dependencies
all_html: ${OUTPUT_HTML}
# And how to make the dependencies
.rmtoo_dependencies:
${CALL_RMTOO} \
--create-makefile-dependencies=.rmtoo_dependencies
artifacts/req-graph1.png: artifacts/req-graph1.dot
unflatten -l 23 artifacts/req-graph1.dot | \
dot -Tpng -o artifacts/req-graph1.png
artifacts/req-graph2.png: artifacts/req-graph2.dot
dot -Tpng -o artifacts/req-graph2.png artifacts/req-graph2.dot
# Two calls are needed: one for the requirments converting and one for
# backlog creation.
artifacts/requirements.pdf: ${REQS_LATEX2} latex/requirements.tex
(cd artifacts && \
gnuplot ${RMTOO_CONTRIB_DIR}/contrib/gnuplot_stats_reqs_cnt.inc && \
epstopdf stats_reqs_cnt.eps)
(cd artifacts && \
gnuplot ${RMTOO_CONTRIB_DIR}/contrib/gnuplot_stats_burndown.inc && \
epstopdf stats_burndown.eps)
(cd artifacts && \
gnuplot ${RMTOO_CONTRIB_DIR}/contrib/gnuplot_stats_sprint_burndown.inc && \
epstopdf stats_sprint_burndown.eps)
(cd artifacts && pdflatex ../latex/requirements.tex; \
pdflatex ../latex/requirements.tex; \
pdflatex ../latex/requirements.tex)
.PHONY: clean
clean:
rm -fr artifacts/html
rm -f artifacts/* \
add_data.py*
rm -fr debian/rmtoo build
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
extern const int kRTCAudioSessionPreferredNumberOfChannels;
extern const double kRTCAudioSessionHighPerformanceSampleRate;
extern const double kRTCAudioSessionLowComplexitySampleRate;
extern const double kRTCAudioSessionHighPerformanceIOBufferDuration;
extern const double kRTCAudioSessionLowComplexityIOBufferDuration;
// Value object that bundles the AVAudioSession settings (category, mode,
// sample rate, buffer duration, channel counts) that are applied as a unit.
@interface RTCAudioSessionConfiguration : NSObject

// AVAudioSession category string (e.g. AVAudioSessionCategoryPlayAndRecord).
@property(nonatomic, strong) NSString *category;
@property(nonatomic, assign) AVAudioSessionCategoryOptions categoryOptions;
// AVAudioSession mode string (e.g. AVAudioSessionModeVoiceChat).
@property(nonatomic, strong) NSString *mode;
// Preferred hardware sample rate, in Hz.
@property(nonatomic, assign) double sampleRate;
// Preferred I/O buffer duration, in seconds.
@property(nonatomic, assign) NSTimeInterval ioBufferDuration;
@property(nonatomic, assign) NSInteger inputNumberOfChannels;
@property(nonatomic, assign) NSInteger outputNumberOfChannels;

/** Initializes configuration to defaults. */
- (instancetype)init NS_DESIGNATED_INITIALIZER;

/** Returns the current configuration of the audio session. */
+ (instancetype)currentConfiguration;

/** Returns the configuration that WebRTC needs. */
+ (instancetype)webRTCConfiguration;

/** Provide a way to override the default configuration. */
+ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration;

@end
NS_ASSUME_NONNULL_END
| {
"pile_set_name": "Github"
} |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/background_fetch/storage/mark_registration_for_deletion_task.h"
#include <utility>
#include "content/browser/background_fetch/background_fetch.pb.h"
#include "content/browser/background_fetch/background_fetch_data_manager.h"
#include "content/browser/background_fetch/storage/database_helpers.h"
#include "content/browser/service_worker/service_worker_context_wrapper.h"
namespace content {
namespace background_fetch {
// Task that deactivates a Background Fetch registration (so it can no longer
// be looked up by developer id) while deferring actual data deletion until no
// JavaScript references to it remain.
MarkRegistrationForDeletionTask::MarkRegistrationForDeletionTask(
    BackgroundFetchDataManager* data_manager,
    const BackgroundFetchRegistrationId& registration_id,
    HandleBackgroundFetchErrorCallback callback)
    : DatabaseTask(data_manager),
      registration_id_(registration_id),
      callback_(std::move(callback)),
      weak_factory_(this) {}

MarkRegistrationForDeletionTask::~MarkRegistrationForDeletionTask() = default;
void MarkRegistrationForDeletionTask::Start() {
  // Look up if there is already an active |unique_id| entry for this
  // |developer_id|. Two keys are fetched in one call; their values arrive in
  // the same order in DidGetActiveUniqueId().
  service_worker_context()->GetRegistrationUserData(
      registration_id_.service_worker_registration_id(),
      {ActiveRegistrationUniqueIdKey(registration_id_.developer_id()),
       RegistrationKey(registration_id_.unique_id())},
      base::BindOnce(&MarkRegistrationForDeletionTask::DidGetActiveUniqueId,
                     weak_factory_.GetWeakPtr()));
}
void MarkRegistrationForDeletionTask::DidGetActiveUniqueId(
    const std::vector<std::string>& data,
    ServiceWorkerStatusCode status) {
  switch (ToDatabaseStatus(status)) {
    case DatabaseStatus::kOk:
      break;
    case DatabaseStatus::kNotFound:
      std::move(callback_).Run(blink::mojom::BackgroundFetchError::INVALID_ID);
      Finished();  // Destroys |this|.
      return;
    case DatabaseStatus::kFailed:
      std::move(callback_).Run(
          blink::mojom::BackgroundFetchError::STORAGE_ERROR);
      Finished();  // Destroys |this|.
      return;
  }

  // |data| holds the values of the two keys requested in Start(), in order:
  // [0] the active unique id for the developer id, [1] the serialized
  // registration metadata.
  DCHECK_EQ(2u, data.size());

  // If the |unique_id| does not match, then the registration identified by
  // |registration_id_.unique_id()| was already deactivated.
  if (data[0] != registration_id_.unique_id()) {
    std::move(callback_).Run(blink::mojom::BackgroundFetchError::INVALID_ID);
    Finished();  // Destroys |this|.
    return;
  }

  proto::BackgroundFetchMetadata metadata_proto;
  if (metadata_proto.ParseFromString(data[1])) {
    // Mark registration as no longer active. Also deletes pending request
    // keys, since those are globally sorted and requests within deactivated
    // registrations are no longer eligible to be started. Pending request
    // keys are not required by GetRegistration.
    service_worker_context()->ClearRegistrationUserDataByKeyPrefixes(
        registration_id_.service_worker_registration_id(),
        {ActiveRegistrationUniqueIdKey(registration_id_.developer_id()),
         PendingRequestKeyPrefix(registration_id_.unique_id())},
        base::BindOnce(&MarkRegistrationForDeletionTask::DidDeactivate,
                       weak_factory_.GetWeakPtr()));
  } else {
    NOTREACHED() << "Database is corrupt";  // TODO(crbug.com/780027): Nuke it.
  }
}
void MarkRegistrationForDeletionTask::DidDeactivate(
    ServiceWorkerStatusCode status) {
  switch (ToDatabaseStatus(status)) {
    // kNotFound is acceptable here: the keys may already have been removed.
    case DatabaseStatus::kOk:
    case DatabaseStatus::kNotFound:
      break;
    case DatabaseStatus::kFailed:
      std::move(callback_).Run(
          blink::mojom::BackgroundFetchError::STORAGE_ERROR);
      Finished();  // Destroys |this|.
      return;
  }

  // If CleanupTask runs after this, it shouldn't clean up the
  // |unique_id| as there may still be JavaScript references to it.
  ref_counted_unique_ids().emplace(registration_id_.unique_id());

  std::move(callback_).Run(blink::mojom::BackgroundFetchError::NONE);
  Finished();  // Destroys |this|.
}
} // namespace background_fetch
} // namespace content
| {
"pile_set_name": "Github"
} |
# Installation instructions
## Requirements
- Buildship 3.x
- Minimum Java version: 1.8
- Eclipse version: 4.3, or newer
- Buildship 2.x
- Minimum Java version: 1.7
- Eclipse version: 4.2, or newer
- Buildship 1.x
- Minimum Java version: 1.6
- Eclipse version: 4.2 - 4.6
Different Eclipse versions might be compatible but they aren't explicitly tested.
Depending on the Gradle version that Buildship uses for a project import, certain features may not be available.
## Installing from eclipse.org update site
We propose you install Buildship from the [Eclipse Marketplace](http://marketplace.eclipse.org/content/buildship-gradle-integration).
Buildship is also available through one of the provided composite update sites listed on [eclipse.org](https://projects.eclipse.org/projects/tools.buildship/downloads).
For manual installation use one of the update sites below.
### Update site with the latest release
There's a separate update site that contains the latest Buildship release.
https://download.eclipse.org/buildship/updates/latest/
This update site is built against the Eclipse version that was current at the time of the release.
### Update sites for Buildship 3.x
Eclipse Version | Type | Update Site
--------------- | ----------| ------------
2020-03 | snapshot | `https://download.eclipse.org/buildship/updates/e415/snapshots/3.x`
2020-03 | milestone | `https://download.eclipse.org/buildship/updates/e415/milestones/3.x`
2020-03 | release | `https://download.eclipse.org/buildship/updates/e415/releases/3.x`
2019-12 | snapshot | `https://download.eclipse.org/buildship/updates/e414/snapshots/3.x`
2019-12 | milestone | `https://download.eclipse.org/buildship/updates/e414/milestones/3.x`
2019-12 | release | `https://download.eclipse.org/buildship/updates/e414/releases/3.x`
2019-09 | snapshot | `https://download.eclipse.org/buildship/updates/e413/snapshots/3.x`
2019-09 | milestone | `https://download.eclipse.org/buildship/updates/e413/milestones/3.x`
2019-09 | release | `https://download.eclipse.org/buildship/updates/e413/releases/3.x`
2019-06 | snapshot | `https://download.eclipse.org/buildship/updates/e412/snapshots/3.x`
2019-06 | milestone | `https://download.eclipse.org/buildship/updates/e412/milestones/3.x`
2019-06 | release | `https://download.eclipse.org/buildship/updates/e412/releases/3.x`
2019-03 | snapshot | `https://download.eclipse.org/buildship/updates/e411/snapshots/3.x`
2019-03 | milestone | `https://download.eclipse.org/buildship/updates/e411/milestones/3.x`
2019-03 | release | `https://download.eclipse.org/buildship/updates/e411/releases/3.x`
2018-12 | snapshot | `https://download.eclipse.org/buildship/updates/e410/snapshots/3.x`
2018-12 | milestone | `https://download.eclipse.org/buildship/updates/e410/milestones/3.x`
2018-12 | release | `https://download.eclipse.org/buildship/updates/e410/releases/3.x`
2018-09 | snapshot | `https://download.eclipse.org/buildship/updates/e49/snapshots/3.x`
2018-09 | milestone | `https://download.eclipse.org/buildship/updates/e49/milestones/3.x`
2018-09 | release | `https://download.eclipse.org/buildship/updates/e49/releases/3.x`
Photon (4.8) | snapshot | `https://download.eclipse.org/buildship/updates/e48/snapshots/3.x`
Photon (4.8) | milestone | `https://download.eclipse.org/buildship/updates/e48/milestones/3.x`
Photon (4.8) | release | `https://download.eclipse.org/buildship/updates/e48/releases/3.x`
Oxygen (4.7) | snapshot | `https://download.eclipse.org/buildship/updates/e47/snapshots/3.x`
Oxygen (4.7) | milestone | `https://download.eclipse.org/buildship/updates/e47/milestones/3.x`
Oxygen (4.7) | release | `https://download.eclipse.org/buildship/updates/e47/releases/3.x`
Neon (4.6) | snapshot | `https://download.eclipse.org/buildship/updates/e46/snapshots/3.x`
Neon (4.6) | milestone | `https://download.eclipse.org/buildship/updates/e46/milestones/3.x`
Neon (4.6) | release | `https://download.eclipse.org/buildship/updates/e46/releases/3.x`
Mars (4.5) | snapshot | `https://download.eclipse.org/buildship/updates/e45/snapshots/3.x`
Mars (4.5) | milestone | `https://download.eclipse.org/buildship/updates/e45/milestones/3.x`
Mars (4.5) | release | `https://download.eclipse.org/buildship/updates/e45/releases/3.x`
Luna (4.4) | snapshot | `https://download.eclipse.org/buildship/updates/e44/snapshots/3.x`
Luna (4.4) | milestone | `https://download.eclipse.org/buildship/updates/e44/milestones/3.x`
Luna (4.4) | release | `https://download.eclipse.org/buildship/updates/e44/releases/3.x`
Kepler (4.3) | snapshot | `https://download.eclipse.org/buildship/updates/e43/snapshots/3.x`
Kepler (4.3) | milestone | `https://download.eclipse.org/buildship/updates/e43/milestones/3.x`
Kepler (4.3) | release | `https://download.eclipse.org/buildship/updates/e43/releases/3.x`
### Update sites for Buildship 2.x
Eclipse Version | Type | Update Site
--------------- | ----------| ------------
Photon (4.8) | release | `https://download.eclipse.org/buildship/updates/e48/releases/2.x`
Photon (4.8) | milestone | `https://download.eclipse.org/buildship/updates/e48/milestones/2.x`
Photon (4.8) | snapshot | `https://download.eclipse.org/buildship/updates/e48/snapshots/2.x`
Oxygen (4.7) | release | `https://download.eclipse.org/buildship/updates/e47/releases/2.x`
Oxygen (4.7) | milestone | `https://download.eclipse.org/buildship/updates/e47/milestones/2.x`
Oxygen (4.7) | snapshot | `https://download.eclipse.org/buildship/updates/e47/snapshots/2.x`
Neon (4.6) | release | `https://download.eclipse.org/buildship/updates/e46/releases/2.x`
Neon (4.6) | milestone | `https://download.eclipse.org/buildship/updates/e46/milestones/2.x`
Neon (4.6) | snapshot | `https://download.eclipse.org/buildship/updates/e46/snapshots/2.x`
Mars (4.5) | release | `https://download.eclipse.org/buildship/updates/e45/releases/2.x`
Mars (4.5) | milestone | `https://download.eclipse.org/buildship/updates/e45/milestones/2.x`
Mars (4.5) | snapshot | `https://download.eclipse.org/buildship/updates/e45/snapshots/2.x`
Luna (4.4) | release | `https://download.eclipse.org/buildship/updates/e44/releases/2.x`
Luna (4.4) | milestone | `https://download.eclipse.org/buildship/updates/e44/milestones/2.x`
Luna (4.4) | snapshot | `https://download.eclipse.org/buildship/updates/e44/snapshots/2.x`
Kepler (4.3) | release | `https://download.eclipse.org/buildship/updates/e43/releases/2.x`
Kepler (4.3) | milestone | `https://download.eclipse.org/buildship/updates/e43/milestones/2.x`
Kepler (4.3) | snapshot | `https://download.eclipse.org/buildship/updates/e43/snapshots/2.x`
Juno (4.2) | release | `https://download.eclipse.org/buildship/updates/e42/releases/2.x`
Juno (4.2) | milestone | `https://download.eclipse.org/buildship/updates/e42/milestones/2.x`
Juno (4.2) | snapshot | `https://download.eclipse.org/buildship/updates/e42/snapshots/2.x`
#### Update sites for Buildship 1.x
Eclipse Version | Update Site
-------------- |------------
Neon (4.6) | `https://download.eclipse.org/buildship/updates/e46/releases/1.0`
Mars (4.5) | `https://download.eclipse.org/buildship/updates/e45/releases/1.0`
Luna (4.4) | `https://download.eclipse.org/buildship/updates/e44/releases/1.0`
Kepler (4.3) | `https://download.eclipse.org/buildship/updates/e43/releases/1.0`
Juno (4.2) | `https://download.eclipse.org/buildship/updates/e42/releases/1.0`
Indigo (3.7) | `https://download.eclipse.org/buildship/updates/e37/releases/1.0`
Helios (3.6) | `https://download.eclipse.org/buildship/updates/e36/releases/1.0`
The continuous integration server generates nightly snapshot releases each day 23:00 CET which instantly become
available at the snapshot update sites above. In regular intervals, the Buildship team also creates new
milestone releases and makes them available at the milestone update sites.
Apply the following instructions to install the latest snapshot or milestone of Buildship into Eclipse.
1. In Eclipse, open the menu item _Help >> Install New Software_.
1. Paste the appropriate update site link into the _Work with_ text box.
1. Click the _Add_ button at the top of the screen, give the update site a name, and press _OK_.
1. Ensure that the option _Group Items by Category_ is enabled.
1. Select the top-level node _Buildship: Eclipse Plug-ins for Gradle_ once it appears.
1. Click _Next_. This may take a while.
1. Review the list of software that will be installed. Click _Next_ again.
1. Review and accept the licence agreement and click _Finish_.
## Updating from update site
If you have already installed Buildship, you can update to the most recent version by opening the menu item _Help >> Check for Updates_. Note that the update works only if Buildship was installed from the update sites from download.eclipse.org or from builds.gradle.org, as listed above. If Buildship comes preinstalled in your Eclipse (for instance if you use the standard [Eclipse for Java developers](https://www.eclipse.org/downloads/packages/eclipse-ide-java-developers/neon) package), then you have to do the update manually. To do that, just follow the steps from the previous section.
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"filename" : "RuntimeDoc48.pdf"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
},
"properties" : {
"template-rendering-intent" : "original"
}
} | {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Generated model class describing an authentication rule for a selected
// API method (mirrors the service.proto AuthenticationRule message).
class Google_Service_ServiceUser_AuthenticationRule extends Google_Collection
{
  // Name of the repeated field, used by the Google_Collection machinery.
  protected $collection_key = 'requirements';
  public $allowWithoutCredential;
  // Type metadata consumed by the client library's (de)serializer; the
  // backing $oauth / $requirements properties are assigned dynamically.
  protected $oauthType = 'Google_Service_ServiceUser_OAuthRequirements';
  protected $oauthDataType = '';
  protected $requirementsType = 'Google_Service_ServiceUser_AuthRequirement';
  protected $requirementsDataType = 'array';
  public $selector;

  // Whether the selected method may be called without credentials.
  public function setAllowWithoutCredential($allowWithoutCredential)
  {
    $this->allowWithoutCredential = $allowWithoutCredential;
  }
  public function getAllowWithoutCredential()
  {
    return $this->allowWithoutCredential;
  }
  /**
   * @param Google_Service_ServiceUser_OAuthRequirements
   */
  public function setOauth(Google_Service_ServiceUser_OAuthRequirements $oauth)
  {
    $this->oauth = $oauth;
  }
  /**
   * @return Google_Service_ServiceUser_OAuthRequirements
   */
  public function getOauth()
  {
    return $this->oauth;
  }
  /**
   * @param Google_Service_ServiceUser_AuthRequirement
   */
  public function setRequirements($requirements)
  {
    $this->requirements = $requirements;
  }
  /**
   * @return Google_Service_ServiceUser_AuthRequirement
   */
  public function getRequirements()
  {
    return $this->requirements;
  }
  // Selector pattern naming the methods this rule applies to.
  public function setSelector($selector)
  {
    $this->selector = $selector;
  }
  public function getSelector()
  {
    return $this->selector;
  }
}
| {
"pile_set_name": "Github"
} |
/* ========================================
* Slew - Slew.h
* Created 8/12/11 by SPIAdmin
* Copyright (c) 2011 __MyCompanyName__, All rights reserved
* ======================================== */
#ifndef __Slew_H
#include "Slew.h"
#endif
// VST2 entry point: the host calls this to instantiate the plugin.
AudioEffect* createEffectInstance(audioMasterCallback audioMaster)
{
    return new Slew(audioMaster);
}
Slew::Slew(audioMasterCallback audioMaster) :
    AudioEffectX(audioMaster, kNumPrograms, kNumParameters)
{
    // Per-channel filter state: the previously emitted sample on each side.
    lastSampleL = 0.0;
    lastSampleR = 0.0;
    // TODO: uncomment canDo entries according to your plugin's capabilities
    // _canDo.insert("sendVstEvents"); // plug-in will send Vst events to Host.
    // _canDo.insert("sendVstMidiEvent"); // plug-in will send MIDI events to Host.
    // _canDo.insert("sendVstTimeInfo"); // unknown
    // _canDo.insert("receiveVstEvents"); // plug-in can receive Vst events from Host.
    // _canDo.insert("receiveVstMidiEvent"); // plug-in can receive MIDI events from Host.
    // _canDo.insert("receiveVstTimeInfo"); // plug-in can receive Time info from Host.
    // _canDo.insert("offline"); // plug-in supports offline functions (#offlineNotify, #offlinePrepare, #offlineRun).
    _canDo.insert("plugAsChannelInsert"); // plug-in can be used as a channel insert effect.
    _canDo.insert("plugAsSend"); // plug-in can be used as a send effect.
    // _canDo.insert("mixDryWet"); // dry/wet mix control
    // _canDo.insert("noRealTime"); // no real-time processing
    // _canDo.insert("multipass"); // unknown
    // _canDo.insert("metapass"); // unknown
    // _canDo.insert("x1in1out");
    // _canDo.insert("x1in2out");
    // _canDo.insert("x2in1out");
    _canDo.insert("x2in2out"); // stereo in, stereo out -- the only layout advertised
    // _canDo.insert("x2in4out");
    // _canDo.insert("x4in2out");
    // _canDo.insert("x4in4out");
    // _canDo.insert("x4in8out"); // 4:2 matrix to surround bus
    // _canDo.insert("x8in4out"); // surround bus to 4:2 matrix
    // _canDo.insert("x8in8out");
    // _canDo.insert("midiProgramNames"); // plug-in supports function #getMidiProgramName().
    // _canDo.insert("conformsToWindowRules"); // mac: doesn't mess with grafport.
    // _canDo.insert("bypass"); // plug-in supports function #setBypass().
    // these configuration values are established in the header
    setNumInputs(kNumInputs);
    setNumOutputs(kNumOutputs);
    setUniqueID(kUniqueId);
    canProcessReplacing(); // supports output replacing
    canDoubleReplacing(); // supports double precision processing
    programsAreChunks(true); // state is saved/restored via getChunk/setChunk
    vst_strncpy (_programName, "Default", kVstMaxProgNameLen); // default program name
}
// No dynamically owned resources; nothing to release.
Slew::~Slew()
{
}
// Plugin version reported to the host, encoded as major*1000.
VstInt32 Slew::getVendorVersion ()
{
    // TODO: return version number
    return 1000;
}
// Store the host-supplied program name (truncated to the VST limit).
void Slew::setProgramName(char *name) {
    vst_strncpy (_programName, name, kVstMaxProgNameLen);
}
// Copy the current program name into the host-supplied buffer.
void Slew::getProgramName(char *name) {
    vst_strncpy (name, _programName, kVstMaxProgNameLen);
}
/* Clamp a parameter value into the legal normalized VST range [0.0, 1.0]. */
static float pinParameter(float value)
{
    return (value < 0.0f) ? 0.0f
         : (value > 1.0f) ? 1.0f
         : value;
}
// Serialize the plugin state into a freshly allocated float array handed to
// the host via |*data|; returns the byte count (0 on allocation failure).
VstInt32 Slew::getChunk (void** data, bool isPreset)
{
    float *chunkData = (float *)calloc(kNumParameters, sizeof(float));
    if (chunkData == NULL) return 0; // out of memory: report an empty chunk
    chunkData[0] = gain;
    /* Note: The way this is set up, it will break if you manage to save settings on an Intel
       machine and load them on a PPC Mac. However, it's fine if you stick to the machine you
       started with. */

    *data = chunkData;
    return kNumParameters * sizeof(float);
}
// Restore plugin state from a host-supplied chunk (layout defined by getChunk).
// NOTE(review): assumes |data| holds at least one float -- byteSize is not
// validated; confirm hosts never pass a shorter chunk.
VstInt32 Slew::setChunk (void* data, VstInt32 byteSize, bool isPreset)
{
    float *chunkData = (float *)data;
    gain = pinParameter(chunkData[0]);
    /* We're ignoring byteSize as we found it to be a filthy liar */

    /* calculate any other fields you need here - you could copy in
       code from setParameter() here. */
    return 0;
}
// Set a normalized [0..1] parameter value from the host.
void Slew::setParameter(VstInt32 index, float value) {
    switch (index) {
        case kSlewParam:
            gain = value;
            break;
        default: // unknown parameter, shouldn't happen!
            // NOTE(review): a bare 'throw;' with no active exception calls
            // std::terminate -- confirm this hard-abort is intended.
            throw;
    }
}
// Return the normalized [0..1] value of the requested parameter, or a
// neutral 0.0 for indices this plugin does not define.
float Slew::getParameter(VstInt32 index) {
    switch (index) {
        case kSlewParam:
            return gain;
        default: // unknown parameter, shouldn't happen!
            break; // fall through to the neutral value below
    }
    return 0.0;
}
// Copy the human-readable parameter name into |text|.
void Slew::getParameterName(VstInt32 index, char *text) {
    switch (index) {
        case kSlewParam:
            vst_strncpy (text, "Clamping", kVstMaxParamStrLen);
            break;
        default: // unknown parameter, shouldn't happen!
            break;
    }
}
// Format the parameter's current value for display in the host UI.
void Slew::getParameterDisplay(VstInt32 index, char *text) {
    switch (index) {
        case kSlewParam:
            float2string (gain, text, kVstMaxParamStrLen);
            break;
        default: // unknown parameter, shouldn't happen!
            break;
    }
}
// Copy the parameter's unit label into |text| (empty: value is unitless).
void Slew::getParameterLabel(VstInt32 index, char *text) {
    switch (index) {
        case kSlewParam:
            vst_strncpy (text, "", kVstMaxParamStrLen);
            break;
        default: // unknown parameter, shouldn't happen!
            break;
    }
}
// Answer a host capability query against the _canDo set.
VstInt32 Slew::canDo(char *text)
{
    // 1 = yes, -1 = no, 0 = don't know
    // This implementation never answers -1; unknown capabilities return 0.
    return (_canDo.find(text) == _canDo.end()) ? 0 : 1;
}
// Report the effect name shown by the host.
bool Slew::getEffectName(char* name) {
    vst_strncpy(name, "Slew", kVstMaxProductStrLen);
    return true;
}
// Categorize this plugin as a generic audio effect for host browsers.
VstPlugCategory Slew::getPlugCategory() {
    return kPlugCategEffect;
}
// Report the product string shown by the host.
bool Slew::getProductString(char* text) {
    vst_strncpy (text, "Slew", kVstMaxProductStrLen);
    return true;
}
// Report the vendor string shown by the host.
bool Slew::getVendorString(char* text) {
    vst_strncpy (text, "airwindows", kVstMaxVendorStrLen);
    return true;
}
| {
"pile_set_name": "Github"
} |
################################################################################
#
# gstreamer1
#
################################################################################
# Upstream version and tarball location.
GSTREAMER1_VERSION = 1.14.4
GSTREAMER1_SOURCE = gstreamer-$(GSTREAMER1_VERSION).tar.xz
GSTREAMER1_SITE = https://gstreamer.freedesktop.org/src/gstreamer
# Headers/libs are needed by dependent gst-plugins packages, so stage them.
GSTREAMER1_INSTALL_STAGING = YES
GSTREAMER1_LICENSE_FILES = COPYING
GSTREAMER1_LICENSE = LGPL-2.0+, LGPL-2.1+

# Test/benchmark/introspection machinery is always off; the remaining
# optional features follow the corresponding Buildroot config symbols.
GSTREAMER1_CONF_OPTS = \
	--disable-examples \
	--disable-tests \
	--disable-failing-tests \
	--disable-valgrind \
	--disable-benchmarks \
	--disable-introspection \
	$(if $(BR2_PACKAGE_GSTREAMER1_CHECK),,--disable-check) \
	$(if $(BR2_PACKAGE_GSTREAMER1_TRACE),,--disable-trace) \
	$(if $(BR2_PACKAGE_GSTREAMER1_PARSE),,--disable-parse) \
	$(if $(BR2_PACKAGE_GSTREAMER1_GST_DEBUG),,--disable-gst-debug) \
	$(if $(BR2_PACKAGE_GSTREAMER1_PLUGIN_REGISTRY),,--disable-registry) \
	$(if $(BR2_PACKAGE_GSTREAMER1_INSTALL_TOOLS),,--disable-tools)

# bison/flex are needed to build the parser; libunwind is optional.
GSTREAMER1_DEPENDENCIES = \
	host-bison \
	host-flex \
	host-pkgconf \
	libglib2 \
	$(if $(BR2_PACKAGE_LIBUNWIND),libunwind)

$(eval $(autotools-package))
| {
"pile_set_name": "Github"
} |
-# Inline confirmation panel asking whether to delete the given user record.
.confirm[@user, :confirm]
  #{t :user_confirm_delete}
  #{t :delete} <b>#{@user.full_name}</b>?
  = link_to_confirm_delete(@user) + " : "
  = link_to_function(t(:no_button), "crm.flick('#{dom_id(@user, :confirm)}', 'remove')")
"pile_set_name": "Github"
} |
import { createHash } from 'crypto';
import { debug } from './utils/logger';
// Hash of the source text each cache entry was built from.
const fileHashes = new Map<string, string>();
// The cached evaluation results themselves.
const evalCache = new Map<string, any>();
// Which cache keys belong to each file, so they can be invalidated together.
const fileKeys = new Map<string, string[]>();

// SHA-1 is used purely as a fast content fingerprint, not for security.
const hash = (text: string) => createHash('sha1').update(text).digest('base64');

// Memoize the most recent hash: consecutive calls often reuse the same text.
let lastText: string = '';
let lastHash: string = hash(lastText);

const memoizedHash: typeof hash = (text) => {
  if (lastText !== text) {
    lastHash = hash(text);
    lastText = text;
  }
  return lastHash;
};
// Build the cache key for a file and its requested exports. The filename must
// be part of the key: `$(...)` is NOT template-literal interpolation, so the
// previous `$(unknown)` text made every file with exports share one key.
const toKey = (filename: string, exports: string[]) =>
  exports.length > 0 ? `${filename}:${exports.join(',')}` : filename;
// Wipe every cache structure at once.
export const clear = () => {
  for (const cache of [fileHashes, evalCache, fileKeys]) {
    cache.clear();
  }
};
// Drop every cache entry that originated from |filename|.
export const clearForFile = (filename: string) => {
  const keys = fileKeys.get(filename) ?? [];
  if (keys.length === 0) {
    return;
  }

  debug('eval-cache:clear-for-file', filename);

  for (const key of keys) {
    fileHashes.delete(key);
    evalCache.delete(key);
  }

  fileKeys.set(filename, []);
};
// True when the cached entry for this (filename, exports) pair was built
// from exactly this source text.
export const has = (
  [filename, ...exports]: string[],
  text: string
): boolean => {
  const key = toKey(filename, exports);
  const textHash = memoizedHash(text);

  debug('eval-cache:has', `${key} ${textHash}`);

  return fileHashes.get(key) === textHash;
};
// Return the cached value, or undefined when the entry is missing or stale.
export const get = ([filename, ...exports]: string[], text: string): any => {
  const key = toKey(filename, exports);
  const textHash = memoizedHash(text);

  debug('eval-cache:get', `${key} ${textHash}`);

  if (fileHashes.get(key) !== textHash) {
    return undefined;
  }
  return evalCache.get(key);
};
// Store a value together with the hash of the text it was derived from, and
// remember the key under its filename for later invalidation.
export const set = (
  [filename, ...exports]: string[],
  text: string,
  value: any
): void => {
  const key = toKey(filename, exports);
  const textHash = memoizedHash(text);

  debug('eval-cache:set', `${key} ${textHash}`);

  fileHashes.set(key, textHash);
  evalCache.set(key, value);

  if (!fileKeys.has(filename)) {
    fileKeys.set(filename, []);
  }
  fileKeys.get(filename)!.push(key);
};
| {
"pile_set_name": "Github"
} |
// Collection of small, DOM-independent helpers published as window.util.
window.util = function () {

    // Apply func to every element of an array-like object; returns a new array.
    function map(elems, func) {
        var ret = [];
        for (var i = 0, len = elems.length; i < len; i++) {
            ret[i] = func(elems[i]);
        }
        return ret;
    }

    // Return the elements for which predicate is truthy.
    function filter(elems, predicate) {
        var ret = [];
        for (var i = 0, len = elems.length; i < len; i++) {
            if (predicate(elems[i]))
                ret.push(elems[i]);
        }
        return ret;
    }

    // True if every element is truthy.
    function all(elems) {
        for (var i = 0, len = elems.length; i < len; i++) {
            if (!elems[i])
                return false;
        }
        return true;
    }

    // Return the first truthy element, or false if there is none.
    function any(elems) {
        for (var i = 0, len = elems.length; i < len; i++) {
            if (elems[i])
                return elems[i];
        }
        return false;
    }

    // Loose-equality membership test.
    function contains(elems, e) {
        for (var i = 0, len = elems.length; i < len; i++) {
            if (elems[i] == e)
                return true;
        }
        return false;
    }

    function last(items) {
        return items[items.length-1];
    }

    // Minimal HTML entity decoding; only &lt;, &gt; and &amp; are handled.
    // '&amp;' must be decoded LAST so '&amp;lt;' does not become '<'.
    function unescape(string) {
        return string.replace(/&lt;/g, '<').replace(/&gt;/g, '>').replace(/&amp;/g, '&');
    }

    // Minimal HTML entity encoding; '&' must be encoded FIRST.
    function escape(string) {
        return string.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
    }

    // Case-, space- and underscore-insensitive canonical form used for matching.
    function normalize(string) {
        return string.toLowerCase().replace(/ /g, '').replace(/_/g, '');
    }

    function regexpEscape(string) {
        return string.replace(/[-[\]{}()+?*.,\\^$|#]/g, "\\$&");
    }

    // Glob-style matcher: '?' matches one character, '*' any run (incl. newlines).
    function Matcher(pattern) {
        pattern = regexpEscape(normalize(pattern));
        var rePattern = '^' + pattern.replace(/\\\?/g, '.').replace(/\\\*/g, '[\\s\\S]*') + '$';
        var regexp = new RegExp(rePattern);
        function matches(string) {
            return regexp.test(normalize(string));
        }
        return {
            matches: matches,
            matchesAny: function (strings) {
                for (var i = 0, len = strings.length; i < len; i++)
                    if (matches(strings[i]))
                        return true;
                return false;
            }
        };
    }

    // Strip the leaf name from item.fullName and space out the separating dots.
    function formatParentName(item) {
        var parentName = item.fullName.slice(0, item.fullName.length - item.name.length);
        return parentName.replace(/\./g, ' . ');
    }

    function timeFromDate(date) {
        if (!date)
            return 'N/A';
        return formatTime(date.getHours(), date.getMinutes(),
                          date.getSeconds(), date.getMilliseconds());
    }

    function dateFromDate(date) {
        if (!date)
            return 'N/A';
        return padTo(date.getFullYear(), 4) +
               padTo(date.getMonth() + 1, 2) +
               padTo(date.getDate(), 2);
    }

    function dateTimeFromDate(date) {
        if (!date)
            return 'N/A';
        return dateFromDate(date) + ' ' + timeFromDate(date);
    }

    function formatTime(hours, minutes, seconds, milliseconds) {
        return padTo(hours, 2) + ':' +
               padTo(minutes, 2) + ':' +
               padTo(seconds, 2) + '.' +
               padTo(milliseconds, 3);
    }

    // Milliseconds -> "hh:mm:ss.mmm".
    function formatElapsed(elapsed) {
        var millis = elapsed;
        var hours = Math.floor(millis / (60 * 60 * 1000));
        millis -= hours * 60 * 60 * 1000;
        var minutes = Math.floor(millis / (60 * 1000));
        millis -= minutes * 60 * 1000;
        var seconds = Math.floor(millis / 1000);
        millis -= seconds * 1000;
        return formatTime(hours, minutes, seconds, millis);
    }

    // Left-pad a number with zeros to the requested width.
    function padTo(number, len) {
        var numString = number + "";
        while (numString.length < len) numString = "0" + numString;
        return numString;
    }

    function timestamp(millis) {
        // used also by tools that do not set window.output.baseMillis
        var base = window.output ? window.output.baseMillis : 0;
        return new Date(base + millis);
    }

    // "yyyymmdd hh:mm:ss UTC+hh:mm" for the given epoch milliseconds.
    function createGeneratedString(timestamp) {
        var date = new Date(timestamp);
        var dt = dateTimeFromDate(date).slice(0, 17); // drop millis
        var offset = date.getTimezoneOffset();
        var sign = offset > 0 ? '-' : '+';
        var hh = Math.floor(Math.abs(offset) / 60);
        var mm = Math.abs(offset) % 60;
        return dt + ' UTC' + sign + padTo(hh, 2) + ':' + padTo(mm, 2);
    }

    // Human-readable "N units M units" age string relative to now.
    function createGeneratedAgoString(timestamp) {
        function timeString(time, shortUnit) {
            var unit = {y: 'year', d: 'day', h: 'hour', m: 'minute',
                        s: 'second'}[shortUnit];
            var end = time == 1 ? ' ' : 's ';
            return time + ' ' + unit + end;
        }
        function compensateLeapYears(days, years) {
            // Not a perfect algorithm but ought to be enough
            return days - Math.floor(years / 4);
        }
        var generated = Math.round(timestamp / 1000);
        var current = Math.round(new Date().getTime() / 1000);
        var elapsed = current - generated;
        var prefix = '';
        if (elapsed < 0) {
            prefix = '- ';
            elapsed = Math.abs(elapsed);
        }
        var secs = elapsed % 60;
        var mins = Math.floor(elapsed / 60) % 60;
        var hours = Math.floor(elapsed / (60*60)) % 24;
        var days = Math.floor(elapsed / (60*60*24)) % 365;
        var years = Math.floor(elapsed / (60*60*24*365));
        if (years) {
            days = compensateLeapYears(days, years);
            return prefix + timeString(years, 'y') + timeString(days, 'd');
        } else if (days) {
            return prefix + timeString(days, 'd') + timeString(hours, 'h');
        } else if (hours) {
            return prefix + timeString(hours, 'h') + timeString(mins, 'm');
        } else if (mins) {
            return prefix + timeString(mins, 'm') + timeString(secs, 's');
        } else {
            return prefix + timeString(secs, 's');
        }
    }

    // Parse "a=1&b=2" into an object; '+' decodes to space per form encoding.
    function parseQueryString(query) {
        var result = {};
        if (!query)
            return result;
        var params = query.split('&');
        var parts;
        function decode(item) {
            // A global regex is required: String.replace with a string
            // pattern would only replace the FIRST '+'.
            return decodeURIComponent(item.replace(/\+/g, ' '));
        }
        for (var i = 0, len = params.length; i < len; i++) {
            parts = params[i].split('=');
            result[decode(parts.shift())] = decode(parts.join('='));
        }
        return result;
    }

    return {
        map: map,
        filter: filter,
        all: all,
        any: any,
        contains: contains,
        last: last,
        escape: escape,
        unescape: unescape,
        normalize: normalize,
        regexpEscape: regexpEscape,
        Matcher: Matcher,
        formatParentName: formatParentName,
        timeFromDate: timeFromDate,
        dateFromDate: dateFromDate,
        dateTimeFromDate: dateTimeFromDate,
        formatElapsed: formatElapsed,
        timestamp: timestamp,
        createGeneratedString: createGeneratedString,
        createGeneratedAgoString: createGeneratedAgoString,
        parseQueryString: parseQueryString
    };
}();
| {
"pile_set_name": "Github"
} |
<!-- YAML
added: v8.3.0
-->
取消此解析程序所做的所有未完成的DNS查询。
使用错误码 `ECANCELLED` 调用相应的回调。
| {
"pile_set_name": "Github"
} |
/*
* The MIT License
*
* Copyright (c) 2011, Oracle Corporation, Anton Kozak
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.tasks.mail.impl;
import hudson.Util;
import hudson.model.AbstractBuild;
import hudson.model.AbstractProject;
import hudson.model.BuildListener;
import hudson.model.Result;
import hudson.tasks.Messages;
import java.util.List;
import javax.mail.MessagingException;
import javax.mail.internet.MimeMessage;
/**
 * Class used for the mail preparation if build is unstable.
 */
public class UnstableBuildMail extends BaseBuildResultMail {

    /**
     * Creates a mail builder for unstable build results.
     *
     * @param recipients recipients of the notification.
     * @param sendToIndividuals true to also notify individual committers.
     * @param upstreamProjects upstream projects passed to the base mail.
     * @param charset charset used for the mail subject and body.
     */
    public UnstableBuildMail(String recipients, boolean sendToIndividuals,
                             List<AbstractProject> upstreamProjects, String charset) {
        super(recipients, sendToIndividuals, upstreamProjects, charset);
    }

    /**
     * {@inheritDoc}
     */
    public MimeMessage getMail(AbstractBuild<?, ?> build, BuildListener listener)
        throws MessagingException, InterruptedException {
        MimeMessage msg = createEmptyMail(build, listener);

        // Default subject; refined below based on the previous build's result.
        String subject = Messages.MailSender_UnstableMail_Subject();
        AbstractBuild<?, ?> prev = build.getPreviousBuild();
        boolean still = false;
        if (prev != null) {
            if (prev.getResult() == Result.SUCCESS) {
                // Transition from success to unstable.
                subject = Messages.MailSender_UnstableMail_ToUnStable_Subject();
            } else if (prev.getResult() == Result.UNSTABLE) {
                subject = Messages.MailSender_UnstableMail_StillUnstable_Subject();
                still = true;
            }
        }
        msg.setSubject(getSubject(build, subject), getCharset());
        StringBuilder buf = new StringBuilder();
        // Link to project changes summary for "still unstable" if this or last build has changes
        if (still && !(build.getChangeSet().isEmptySet() && prev.getChangeSet().isEmptySet())) {
            appendUrl(Util.encode(build.getProject().getUrl()) + "changes", buf);
        } else {
            appendBuildUrl(build, buf);
        }
        appendFooter(buf);
        msg.setText(buf.toString(), getCharset());

        return msg;
    }
}
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
require 'digest/md5'
module RedisAnalytics
  # Rack middleware that records analytics for successful HTML responses.
  # Each request is handled on a shallow copy of the middleware instance so
  # per-request state (@env/@request/@response) never leaks between threads.
  class Tracker
    def initialize(app)
      @app = app
    end

    def call(env)
      dup.call!(env)
    end

    def call!(env)
      @env = env
      @request = Rack::Request.new(env)
      status, headers, body = @app.call(env)
      @response = Rack::Response.new(body, status, headers)
      record if should_record?
      @response.finish
    end

    # A request is tracked only when it is not for the analytics dashboard
    # itself, returned 200, rendered HTML, and passed every configured filter.
    def should_record?
      dashboard_path = Rails.application.routes.named_routes[:redis_analytics].path rescue nil
      return false if dashboard_path =~ @request.path
      return false unless @response.ok?
      return false unless @response.content_type =~ /^text\/html/
      return false if RedisAnalytics.path_filters.any? { |f| f.matches?(@request.path) }
      return false if RedisAnalytics.filters.any? { |f| f.matches?(@request, @response) }
      true
    end

    # Persist the visit and refresh both visit cookies on the outgoing response.
    def record
      visit = Visit.new(@request, @response)
      @response = visit.record
      @response.set_cookie(RedisAnalytics.current_visit_cookie_name, visit.updated_current_visit_info)
      @response.set_cookie(RedisAnalytics.first_visit_cookie_name, visit.updated_first_visit_info)
    end
  end
end
| {
"pile_set_name": "Github"
} |
<vector android:height="24dp" android:tint="#FFFFFF"
android:viewportHeight="24.0" android:viewportWidth="24.0"
android:width="24dp" xmlns:android="http://schemas.android.com/apk/res/android">
<path android:fillColor="#FF000000" android:pathData="M12,2C6.48,2 2,6.48 2,12s4.48,10 10,10 10,-4.48 10,-10S17.52,2 12,2zM12,5c1.66,0 3,1.34 3,3s-1.34,3 -3,3 -3,-1.34 -3,-3 1.34,-3 3,-3zM12,19.2c-2.5,0 -4.71,-1.28 -6,-3.22 0.03,-1.99 4,-3.08 6,-3.08 1.99,0 5.97,1.09 6,3.08 -1.29,1.94 -3.5,3.22 -6,3.22z"/>
</vector>
| {
"pile_set_name": "Github"
} |
{
"word": "Arterial",
"definitions": [
"Relating to an artery or arteries.",
"Denoting an important route in a system of roads, railway lines, or rivers."
],
"parts-of-speech": "Adjective"
} | {
"pile_set_name": "Github"
} |
# Batch
# EventBridge target: every time the rule below fires, run one Fargate task
# of the batch task definition on the ECS cluster, inside a private subnet.
resource "aws_cloudwatch_event_target" "example_batch" {
  target_id = "example-batch"
  rule      = aws_cloudwatch_event_rule.example_batch.name
  arn       = aws_ecs_cluster.example.arn
  role_arn  = module.ecs_events_role.iam_role_arn
  ecs_target {
    launch_type         = "FARGATE"
    task_count          = 1
    platform_version    = "1.3.0"
    task_definition_arn = aws_ecs_task_definition.example_batch.arn
    network_configuration {
      # Private subnet; outbound traffic goes through the NAT gateway.
      assign_public_ip = "false"
      subnets          = [aws_subnet.private_0.id]
    }
  }
}
# Schedule: fires every 2 minutes (EventBridge cron syntax).
resource "aws_cloudwatch_event_rule" "example_batch" {
  name                = "example-batch"
  description         = "とても重要なバッチ処理です"
  schedule_expression = "cron(*/2 * * * ? *)"
}
# IAM role assumed by EventBridge to launch the ECS task.
module "ecs_events_role" {
  source     = "./iam_role"
  name       = "ecs-events"
  identifier = "events.amazonaws.com"
  policy     = data.aws_iam_policy.ecs_events_role_policy.policy
}
data "aws_iam_policy" "ecs_events_role_policy" {
  arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole"
}
# ECS
resource "aws_ecs_cluster" "example" {
  name = "example-12"
}
# Fargate task definition for the scheduled batch job (0.25 vCPU / 512 MiB).
resource "aws_ecs_task_definition" "example_batch" {
  family                   = "example-batch"
  cpu                      = "256"
  memory                   = "512"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  container_definitions    = file("./parameter_container_definitions.json")
  execution_role_arn       = module.ecs_task_execution_role.iam_role_arn
}
# Task execution role: the managed ECS execution policy plus permissions to
# read SSM parameters and decrypt them with KMS (for secrets injection).
module "ecs_task_execution_role" {
  source     = "./iam_role"
  name       = "ecs-task-execution"
  identifier = "ecs-tasks.amazonaws.com"
  policy     = data.aws_iam_policy_document.ecs_task_execution.json
}
data "aws_iam_policy_document" "ecs_task_execution" {
  source_json = data.aws_iam_policy.ecs_task_execution_role_policy.policy
  statement {
    effect    = "Allow"
    actions   = ["ssm:GetParameters", "kms:Decrypt"]
    resources = ["*"]
  }
}
data "aws_iam_policy" "ecs_task_execution_role_policy" {
  arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}
# Log group for the scheduled task's container logs, kept for ~6 months.
resource "aws_cloudwatch_log_group" "for_ecs_scheduled_tasks" {
  name              = "/ecs-scheduled-tasks/example"
  retention_in_days = 180
}
# Network: one VPC with a public and a private subnet in each of two AZs
# (ap-northeast-1a / ap-northeast-1c). Private subnets reach the internet
# through per-AZ NAT gateways placed in the public subnets.
resource "aws_vpc" "example" {
  cidr_block           = "10.0.0.0/16"
  instance_tenancy     = "default"
  enable_dns_support   = true
  enable_dns_hostnames = true
  tags = {
    Name = "example"
  }
}
resource "aws_subnet" "public_0" {
  vpc_id                  = aws_vpc.example.id
  cidr_block              = "10.0.0.0/24"
  availability_zone       = "ap-northeast-1a"
  map_public_ip_on_launch = true
}
resource "aws_subnet" "public_1" {
  vpc_id                  = aws_vpc.example.id
  cidr_block              = "10.0.1.0/24"
  availability_zone       = "ap-northeast-1c"
  map_public_ip_on_launch = true
}
resource "aws_internet_gateway" "example" {
  vpc_id = aws_vpc.example.id
}
# Public route table: default route to the internet gateway, shared by both
# public subnets.
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.example.id
}
resource "aws_route" "public" {
  route_table_id         = aws_route_table.public.id
  gateway_id             = aws_internet_gateway.example.id
  destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route_table_association" "public_0" {
  subnet_id      = aws_subnet.public_0.id
  route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public_1" {
  subnet_id      = aws_subnet.public_1.id
  route_table_id = aws_route_table.public.id
}
resource "aws_subnet" "private_0" {
  vpc_id                  = aws_vpc.example.id
  cidr_block              = "10.0.64.0/24"
  availability_zone       = "ap-northeast-1a"
  map_public_ip_on_launch = false
}
resource "aws_subnet" "private_1" {
  vpc_id                  = aws_vpc.example.id
  cidr_block              = "10.0.65.0/24"
  availability_zone       = "ap-northeast-1c"
  map_public_ip_on_launch = false
}
# Elastic IPs for the NAT gateways; created only after the IGW exists so the
# addresses can be attached cleanly.
resource "aws_eip" "nat_gateway_0" {
  vpc        = true
  depends_on = [aws_internet_gateway.example]
}
resource "aws_eip" "nat_gateway_1" {
  vpc        = true
  depends_on = [aws_internet_gateway.example]
}
# One NAT gateway per AZ, each living in that AZ's public subnet.
resource "aws_nat_gateway" "nat_gateway_0" {
  allocation_id = aws_eip.nat_gateway_0.id
  subnet_id     = aws_subnet.public_0.id
  depends_on    = [aws_internet_gateway.example]
}
resource "aws_nat_gateway" "nat_gateway_1" {
  allocation_id = aws_eip.nat_gateway_1.id
  subnet_id     = aws_subnet.public_1.id
  depends_on    = [aws_internet_gateway.example]
}
# Per-AZ private route tables: each routes its default traffic through the
# NAT gateway in the same AZ (avoids cross-AZ data charges and a shared SPOF).
resource "aws_route_table" "private_0" {
  vpc_id = aws_vpc.example.id
}
resource "aws_route_table" "private_1" {
  vpc_id = aws_vpc.example.id
}
resource "aws_route" "private_0" {
  route_table_id         = aws_route_table.private_0.id
  nat_gateway_id         = aws_nat_gateway.nat_gateway_0.id
  destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route" "private_1" {
  route_table_id         = aws_route_table.private_1.id
  nat_gateway_id         = aws_nat_gateway.nat_gateway_1.id
  destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route_table_association" "private_0" {
  subnet_id      = aws_subnet.private_0.id
  route_table_id = aws_route_table.private_0.id
}
resource "aws_route_table_association" "private_1" {
  subnet_id      = aws_subnet.private_1.id
  route_table_id = aws_route_table.private_1.id
}
# AWS provider configuration
provider "aws" {
  region = "ap-northeast-1"
}
| {
"pile_set_name": "Github"
} |
Package: kaltura-php7-pspell
Source: kaltura-php7
Version: 7.0.0-6
Architecture: amd64
Maintainer: Jess Portnoy <[email protected]>
Installed-Size: 89
Pre-Depends: dpkg (>= 1.16.1~)
Depends: libaspell15 (>= 0.60.7~20110707), libc6 (>= 2.4), phpapi-20151012, kaltura-php7-common (= 7.0.0-6), ucf
Section: php
Priority: optional
Homepage: http://www.php.net/
Description: pspell module for kaltura-php7
This package provides a module for pspell functions in PHP scripts.
.
PHP (recursive acronym for PHP: Hypertext Preprocessor) is a widely-used
open source general-purpose scripting language that is especially suited
for web development and can be embedded into HTML.
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace Zend\Db\TableGateway\Feature\EventFeature;
use Zend\Db\TableGateway\AbstractTableGateway;
use Zend\EventManager\EventInterface;
/**
 * Lightweight event object passed to table-gateway feature listeners.
 *
 * Unlike generic events, table-gateway events are never stoppable:
 * stopPropagation() is a no-op and propagationIsStopped() always reports
 * false, so every registered feature sees every event.
 */
class TableGatewayEvent implements EventInterface
{
    /**
     * Gateway instance the event originates from.
     *
     * @var AbstractTableGateway
     */
    protected $target = null;

    /**
     * Name identifying the event (e.g. a lifecycle hook name).
     *
     * @var null
     */
    protected $name = null;

    /**
     * Arbitrary parameters attached to the event.
     *
     * @var array|\ArrayAccess
     */
    protected $params = array();

    /**
     * Return the event name.
     *
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * Return the target/context the event was triggered from.
     *
     * @return null|string|object
     */
    public function getTarget()
    {
        return $this->target;
    }

    /**
     * Return all parameters attached to the event.
     *
     * @return array|\ArrayAccess
     */
    public function getParams()
    {
        return $this->params;
    }

    /**
     * Fetch one parameter by name, falling back to $default when the
     * parameter is absent (or set to null).
     *
     * @param string $name
     * @param mixed $default Value returned when the parameter is not set
     * @return mixed
     */
    public function getParam($name, $default = null)
    {
        if (!isset($this->params[$name])) {
            return $default;
        }
        return $this->params[$name];
    }

    /**
     * Assign the event name.
     *
     * @param string $name
     * @return void
     */
    public function setName($name)
    {
        $this->name = $name;
    }

    /**
     * Assign the target/context.
     *
     * @param null|string|object $target
     * @return void
     */
    public function setTarget($target)
    {
        $this->target = $target;
    }

    /**
     * Replace the whole parameter set.
     *
     * @param string $params
     * @return void
     */
    public function setParams($params)
    {
        $this->params = $params;
    }

    /**
     * Assign a single parameter by key.
     *
     * @param string $name
     * @param mixed $value
     * @return void
     */
    public function setParam($name, $value)
    {
        $this->params[$name] = $value;
    }

    /**
     * No-op: table-gateway events cannot be stopped.
     *
     * @param bool $flag
     * @return void
     */
    public function stopPropagation($flag = true)
    {
        return;
    }

    /**
     * Always false — propagation is never stopped for these events.
     *
     * @return bool
     */
    public function propagationIsStopped()
    {
        return false;
    }
}
| {
"pile_set_name": "Github"
} |
/* src/backend/utils/mb/Unicode/utf8_to_win1253.map */

/*
 * UTF-8 -> Windows-1253 (Greek) conversion table.
 *
 * Each entry pairs the raw UTF-8 byte sequence of a code point (packed into
 * an integer, e.g. 0xc2a0 = the two bytes C2 A0 for U+00A0) with the single
 * WIN1253 byte it maps to. Entries are sorted by the UTF-8 key, which the
 * conversion routines rely on for binary search. Generated data — do not
 * edit by hand.
 */
static const pg_utf_to_local ULmapWIN1253[ 111 ] = {
	{0xc2a0, 0x00a0},
	{0xc2a3, 0x00a3},
	{0xc2a4, 0x00a4},
	{0xc2a5, 0x00a5},
	{0xc2a6, 0x00a6},
	{0xc2a7, 0x00a7},
	{0xc2a8, 0x00a8},
	{0xc2a9, 0x00a9},
	{0xc2ab, 0x00ab},
	{0xc2ac, 0x00ac},
	{0xc2ad, 0x00ad},
	{0xc2ae, 0x00ae},
	{0xc2b0, 0x00b0},
	{0xc2b1, 0x00b1},
	{0xc2b2, 0x00b2},
	{0xc2b3, 0x00b3},
	{0xc2b5, 0x00b5},
	{0xc2b6, 0x00b6},
	{0xc2b7, 0x00b7},
	{0xc2bb, 0x00bb},
	{0xc2bd, 0x00bd},
	{0xc692, 0x0083},
	{0xce84, 0x00b4},
	{0xce85, 0x00a1},
	{0xce86, 0x00a2},
	{0xce88, 0x00b8},
	{0xce89, 0x00b9},
	{0xce8a, 0x00ba},
	{0xce8c, 0x00bc},
	{0xce8e, 0x00be},
	{0xce8f, 0x00bf},
	{0xce90, 0x00c0},
	{0xce91, 0x00c1},
	{0xce92, 0x00c2},
	{0xce93, 0x00c3},
	{0xce94, 0x00c4},
	{0xce95, 0x00c5},
	{0xce96, 0x00c6},
	{0xce97, 0x00c7},
	{0xce98, 0x00c8},
	{0xce99, 0x00c9},
	{0xce9a, 0x00ca},
	{0xce9b, 0x00cb},
	{0xce9c, 0x00cc},
	{0xce9d, 0x00cd},
	{0xce9e, 0x00ce},
	{0xce9f, 0x00cf},
	{0xcea0, 0x00d0},
	{0xcea1, 0x00d1},
	{0xcea3, 0x00d3},
	{0xcea4, 0x00d4},
	{0xcea5, 0x00d5},
	{0xcea6, 0x00d6},
	{0xcea7, 0x00d7},
	{0xcea8, 0x00d8},
	{0xcea9, 0x00d9},
	{0xceaa, 0x00da},
	{0xceab, 0x00db},
	{0xceac, 0x00dc},
	{0xcead, 0x00dd},
	{0xceae, 0x00de},
	{0xceaf, 0x00df},
	{0xceb0, 0x00e0},
	{0xceb1, 0x00e1},
	{0xceb2, 0x00e2},
	{0xceb3, 0x00e3},
	{0xceb4, 0x00e4},
	{0xceb5, 0x00e5},
	{0xceb6, 0x00e6},
	{0xceb7, 0x00e7},
	{0xceb8, 0x00e8},
	{0xceb9, 0x00e9},
	{0xceba, 0x00ea},
	{0xcebb, 0x00eb},
	{0xcebc, 0x00ec},
	{0xcebd, 0x00ed},
	{0xcebe, 0x00ee},
	{0xcebf, 0x00ef},
	{0xcf80, 0x00f0},
	{0xcf81, 0x00f1},
	{0xcf82, 0x00f2},
	{0xcf83, 0x00f3},
	{0xcf84, 0x00f4},
	{0xcf85, 0x00f5},
	{0xcf86, 0x00f6},
	{0xcf87, 0x00f7},
	{0xcf88, 0x00f8},
	{0xcf89, 0x00f9},
	{0xcf8a, 0x00fa},
	{0xcf8b, 0x00fb},
	{0xcf8c, 0x00fc},
	{0xcf8d, 0x00fd},
	{0xcf8e, 0x00fe},
	{0xe28093, 0x0096},
	{0xe28094, 0x0097},
	{0xe28095, 0x00af},
	{0xe28098, 0x0091},
	{0xe28099, 0x0092},
	{0xe2809a, 0x0082},
	{0xe2809c, 0x0093},
	{0xe2809d, 0x0094},
	{0xe2809e, 0x0084},
	{0xe280a0, 0x0086},
	{0xe280a1, 0x0087},
	{0xe280a2, 0x0095},
	{0xe280a6, 0x0085},
	{0xe280b0, 0x0089},
	{0xe280b9, 0x008b},
	{0xe280ba, 0x009b},
	{0xe282ac, 0x0080},
	{0xe284a2, 0x0099}
};
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<in.srain.cube.views.ptr.PtrFrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/ptr_frame_list_view"
xmlns:cube_ptr="http://schemas.android.com/apk/res-auto"
android:layout_width="match_parent"
android:layout_height="match_parent"
cube_ptr:ptr_duration_to_close="200"
cube_ptr:ptr_duration_to_close_header="1000"
cube_ptr:ptr_keep_header_when_refresh="true"
cube_ptr:ptr_pull_to_fresh="false"
cube_ptr:ptr_ratio_of_header_height_to_refresh="0.9"
cube_ptr:ptr_resistance="2.0">
<cn.cfanr.ultraptrdemo.view.loadmore.LoadMoreListViewContainer
android:id="@+id/ptr_list_view_load_more_container"
android:layout_width="match_parent"
android:layout_height="match_parent">
<ListView
android:id="@+id/ptr_list_view_base"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:divider="@null"
android:fadingEdge="none"
android:listSelector="@android:color/transparent"
android:paddingLeft="12dp"
android:paddingRight="12dp"
android:scrollbarStyle="outsideOverlay"
android:choiceMode="singleChoice"
/>
</cn.cfanr.ultraptrdemo.view.loadmore.LoadMoreListViewContainer>
</in.srain.cube.views.ptr.PtrFrameLayout> | {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2013, Redsolution LTD. All rights reserved.
*
* This file is part of Xabber project; you can redistribute it and/or
* modify it under the terms of the GNU General Public License, Version 3.
*
* Xabber is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License,
* along with this program. If not, see http://www.gnu.org/licenses/.
*/
package com.xabber.android.data.notification;
import com.xabber.android.data.entity.AccountJid;
import java.util.Iterator;
/**
 * Notification provider whose queued items are keyed by the XMPP account
 * they belong to. Adds account-based lookup, removal, and bulk clearing on
 * top of the generic provider.
 */
public class BaseAccountNotificationProvider<T extends AccountNotificationItem>
        extends BaseNotificationProvider<T> implements
        AccountNotificationProvider<T> {

    public BaseAccountNotificationProvider(int icon) {
        super(icon);
    }

    public BaseAccountNotificationProvider(int icon, String channelID) {
        super(icon, channelID);
    }

    /**
     * Returns the first queued item for the given account, or {@code null}
     * when that account has no pending notification.
     */
    public T get(AccountJid account) {
        for (T candidate : items) {
            if (candidate != null && candidate.getAccount().equals(account)) {
                return candidate;
            }
        }
        return null;
    }

    /**
     * Removes the account's first queued item; returns whether anything was
     * actually removed.
     */
    public boolean remove(AccountJid account) {
        return remove(get(account));
    }

    /** Drops every queued item that belongs to the given account. */
    @Override
    public void clearAccountNotifications(AccountJid account) {
        Iterator<T> iterator = items.iterator();
        while (iterator.hasNext()) {
            if (account.equals(iterator.next().getAccount())) {
                iterator.remove();
            }
        }
    }
}
| {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
require "spec_helper"
module Decidim
  # Spec for FileZipper: zipping a single in-memory file must produce an
  # archive containing exactly one entry with the original name and content.
  describe FileZipper do
    subject { FileZipper.new("foo.txt", "bar") }

    describe "#zip" do
      let(:zip) { subject.zip }

      it "zips a file" do
        entries = []

        # Walk the generated archive and collect every entry's name/content.
        Zip::InputStream.open(StringIO.new(zip)) do |io|
          while (entry = io.get_next_entry)
            entries << { name: entry.name, content: entry.get_input_stream.read }
          end
        end

        expect(entries.length).to eq(1)

        entry = entries.first

        expect(entry[:name]).to eq("foo.txt")
        expect(entry[:content]).to eq("bar")
      end
    end
  end
end
| {
"pile_set_name": "Github"
} |
/*
 * Linux system call numbers. The 6000 base offset suggests this is the
 * MIPS n32 ABI table — NOTE(review): confirm against the kernel's
 * arch/mips unistd headers before relying on the ABI designation.
 */
#define __NR_read 6000
#define __NR_write 6001
#define __NR_open 6002
#define __NR_close 6003
#define __NR_stat 6004
#define __NR_fstat 6005
#define __NR_lstat 6006
#define __NR_poll 6007
#define __NR_lseek 6008
#define __NR_mmap 6009
#define __NR_mprotect 6010
#define __NR_munmap 6011
#define __NR_brk 6012
#define __NR_rt_sigaction 6013
#define __NR_rt_sigprocmask 6014
#define __NR_ioctl 6015
#define __NR_pread64 6016
#define __NR_pwrite64 6017
#define __NR_readv 6018
#define __NR_writev 6019
#define __NR_access 6020
#define __NR_pipe 6021
#define __NR__newselect 6022
#define __NR_sched_yield 6023
#define __NR_mremap 6024
#define __NR_msync 6025
#define __NR_mincore 6026
#define __NR_madvise 6027
#define __NR_shmget 6028
#define __NR_shmat 6029
#define __NR_shmctl 6030
#define __NR_dup 6031
#define __NR_dup2 6032
#define __NR_pause 6033
#define __NR_nanosleep 6034
#define __NR_getitimer 6035
#define __NR_setitimer 6036
#define __NR_alarm 6037
#define __NR_getpid 6038
#define __NR_sendfile 6039
#define __NR_socket 6040
#define __NR_connect 6041
#define __NR_accept 6042
#define __NR_sendto 6043
#define __NR_recvfrom 6044
#define __NR_sendmsg 6045
#define __NR_recvmsg 6046
#define __NR_shutdown 6047
#define __NR_bind 6048
#define __NR_listen 6049
#define __NR_getsockname 6050
#define __NR_getpeername 6051
#define __NR_socketpair 6052
#define __NR_setsockopt 6053
#define __NR_getsockopt 6054
#define __NR_clone 6055
#define __NR_fork 6056
#define __NR_execve 6057
#define __NR_exit 6058
#define __NR_wait4 6059
#define __NR_kill 6060
#define __NR_uname 6061
#define __NR_semget 6062
#define __NR_semop 6063
#define __NR_semctl 6064
#define __NR_shmdt 6065
#define __NR_msgget 6066
#define __NR_msgsnd 6067
#define __NR_msgrcv 6068
#define __NR_msgctl 6069
#define __NR_fcntl 6070
#define __NR_flock 6071
#define __NR_fsync 6072
#define __NR_fdatasync 6073
#define __NR_truncate 6074
#define __NR_ftruncate 6075
#define __NR_getdents 6076
#define __NR_getcwd 6077
#define __NR_chdir 6078
#define __NR_fchdir 6079
#define __NR_rename 6080
#define __NR_mkdir 6081
#define __NR_rmdir 6082
#define __NR_creat 6083
#define __NR_link 6084
#define __NR_unlink 6085
#define __NR_symlink 6086
#define __NR_readlink 6087
#define __NR_chmod 6088
#define __NR_fchmod 6089
#define __NR_chown 6090
#define __NR_fchown 6091
#define __NR_lchown 6092
#define __NR_umask 6093
#define __NR_gettimeofday 6094
#define __NR_getrlimit 6095
#define __NR_getrusage 6096
#define __NR_sysinfo 6097
#define __NR_times 6098
#define __NR_ptrace 6099
#define __NR_getuid 6100
#define __NR_syslog 6101
#define __NR_getgid 6102
#define __NR_setuid 6103
#define __NR_setgid 6104
#define __NR_geteuid 6105
#define __NR_getegid 6106
#define __NR_setpgid 6107
#define __NR_getppid 6108
#define __NR_getpgrp 6109
#define __NR_setsid 6110
#define __NR_setreuid 6111
#define __NR_setregid 6112
#define __NR_getgroups 6113
#define __NR_setgroups 6114
#define __NR_setresuid 6115
#define __NR_getresuid 6116
#define __NR_setresgid 6117
#define __NR_getresgid 6118
#define __NR_getpgid 6119
#define __NR_setfsuid 6120
#define __NR_setfsgid 6121
#define __NR_getsid 6122
#define __NR_capget 6123
#define __NR_capset 6124
#define __NR_rt_sigpending 6125
#define __NR_rt_sigtimedwait 6126
#define __NR_rt_sigqueueinfo 6127
#define __NR_rt_sigsuspend 6128
#define __NR_sigaltstack 6129
#define __NR_utime 6130
#define __NR_mknod 6131
#define __NR_personality 6132
#define __NR_ustat 6133
#define __NR_statfs 6134
#define __NR_fstatfs 6135
#define __NR_sysfs 6136
#define __NR_getpriority 6137
#define __NR_setpriority 6138
#define __NR_sched_setparam 6139
#define __NR_sched_getparam 6140
#define __NR_sched_setscheduler 6141
#define __NR_sched_getscheduler 6142
#define __NR_sched_get_priority_max 6143
#define __NR_sched_get_priority_min 6144
#define __NR_sched_rr_get_interval 6145
#define __NR_mlock 6146
#define __NR_munlock 6147
#define __NR_mlockall 6148
#define __NR_munlockall 6149
#define __NR_vhangup 6150
#define __NR_pivot_root 6151
#define __NR__sysctl 6152
#define __NR_prctl 6153
#define __NR_adjtimex 6154
#define __NR_setrlimit 6155
#define __NR_chroot 6156
#define __NR_sync 6157
#define __NR_acct 6158
#define __NR_settimeofday 6159
#define __NR_mount 6160
#define __NR_umount2 6161
#define __NR_swapon 6162
#define __NR_swapoff 6163
#define __NR_reboot 6164
#define __NR_sethostname 6165
#define __NR_setdomainname 6166
#define __NR_create_module 6167
#define __NR_init_module 6168
#define __NR_delete_module 6169
#define __NR_get_kernel_syms 6170
#define __NR_query_module 6171
#define __NR_quotactl 6172
#define __NR_nfsservctl 6173
#define __NR_getpmsg 6174
#define __NR_putpmsg 6175
#define __NR_afs_syscall 6176
#define __NR_reserved177 6177
#define __NR_gettid 6178
#define __NR_readahead 6179
#define __NR_setxattr 6180
#define __NR_lsetxattr 6181
#define __NR_fsetxattr 6182
#define __NR_getxattr 6183
#define __NR_lgetxattr 6184
#define __NR_fgetxattr 6185
#define __NR_listxattr 6186
#define __NR_llistxattr 6187
#define __NR_flistxattr 6188
#define __NR_removexattr 6189
#define __NR_lremovexattr 6190
#define __NR_fremovexattr 6191
#define __NR_tkill 6192
#define __NR_reserved193 6193
#define __NR_futex 6194
#define __NR_sched_setaffinity 6195
#define __NR_sched_getaffinity 6196
#define __NR_cacheflush 6197
#define __NR_cachectl 6198
#define __NR_sysmips 6199
#define __NR_io_setup 6200
#define __NR_io_destroy 6201
#define __NR_io_getevents 6202
#define __NR_io_submit 6203
#define __NR_io_cancel 6204
#define __NR_exit_group 6205
#define __NR_lookup_dcookie 6206
#define __NR_epoll_create 6207
#define __NR_epoll_ctl 6208
#define __NR_epoll_wait 6209
#define __NR_remap_file_pages 6210
#define __NR_rt_sigreturn 6211
#define __NR_fcntl64 6212
#define __NR_set_tid_address 6213
#define __NR_restart_syscall 6214
#define __NR_semtimedop 6215
#define __NR_fadvise64 6216
#define __NR_statfs64 6217
#define __NR_fstatfs64 6218
#define __NR_sendfile64 6219
#define __NR_timer_create 6220
#define __NR_timer_settime 6221
#define __NR_timer_gettime 6222
#define __NR_timer_getoverrun 6223
#define __NR_timer_delete 6224
#define __NR_clock_settime 6225
#define __NR_clock_gettime 6226
#define __NR_clock_getres 6227
#define __NR_clock_nanosleep 6228
#define __NR_tgkill 6229
#define __NR_utimes 6230
#define __NR_mbind 6231
#define __NR_get_mempolicy 6232
#define __NR_set_mempolicy 6233
#define __NR_mq_open 6234
#define __NR_mq_unlink 6235
#define __NR_mq_timedsend 6236
#define __NR_mq_timedreceive 6237
#define __NR_mq_notify 6238
#define __NR_mq_getsetattr 6239
#define __NR_vserver 6240
#define __NR_waitid 6241
#define __NR_add_key 6243
#define __NR_request_key 6244
#define __NR_keyctl 6245
#define __NR_set_thread_area 6246
#define __NR_inotify_init 6247
#define __NR_inotify_add_watch 6248
#define __NR_inotify_rm_watch 6249
#define __NR_migrate_pages 6250
#define __NR_openat 6251
#define __NR_mkdirat 6252
#define __NR_mknodat 6253
#define __NR_fchownat 6254
#define __NR_futimesat 6255
#define __NR_newfstatat 6256
#define __NR_unlinkat 6257
#define __NR_renameat 6258
#define __NR_linkat 6259
#define __NR_symlinkat 6260
#define __NR_readlinkat 6261
#define __NR_fchmodat 6262
#define __NR_faccessat 6263
#define __NR_pselect6 6264
#define __NR_ppoll 6265
#define __NR_unshare 6266
#define __NR_splice 6267
#define __NR_sync_file_range 6268
#define __NR_tee 6269
#define __NR_vmsplice 6270
#define __NR_move_pages 6271
#define __NR_set_robust_list 6272
#define __NR_get_robust_list 6273
#define __NR_kexec_load 6274
#define __NR_getcpu 6275
#define __NR_epoll_pwait 6276
#define __NR_ioprio_set 6277
#define __NR_ioprio_get 6278
#define __NR_utimensat 6279
#define __NR_signalfd 6280
#define __NR_timerfd 6281
#define __NR_eventfd 6282
#define __NR_fallocate 6283
#define __NR_timerfd_create 6284
#define __NR_timerfd_gettime 6285
#define __NR_timerfd_settime 6286
#define __NR_signalfd4 6287
#define __NR_eventfd2 6288
#define __NR_epoll_create1 6289
#define __NR_dup3 6290
#define __NR_pipe2 6291
#define __NR_inotify_init1 6292
#define __NR_preadv 6293
#define __NR_pwritev 6294
#define __NR_rt_tgsigqueueinfo 6295
#define __NR_perf_event_open 6296
#define __NR_accept4 6297
#define __NR_recvmmsg 6298
#define __NR_getdents64 6299
#define __NR_fanotify_init 6300
#define __NR_fanotify_mark 6301
#define __NR_prlimit64 6302
#define __NR_name_to_handle_at 6303
#define __NR_open_by_handle_at 6304
#define __NR_clock_adjtime 6305
#define __NR_syncfs 6306
#define __NR_sendmmsg 6307
#define __NR_setns 6308
#define __NR_process_vm_readv 6309
#define __NR_process_vm_writev 6310
#define __NR_kcmp 6311
#define __NR_finit_module 6312
#define __NR_sched_setattr 6313
#define __NR_sched_getattr 6314
#define __NR_renameat2 6315
#define __NR_seccomp 6316
#define __NR_getrandom 6317
#define __NR_memfd_create 6318
#define __NR_bpf 6319
#define __NR_execveat 6320
#define __NR_userfaultfd 6321
#define __NR_membarrier 6322
#define __NR_mlock2 6323
#define __NR_copy_file_range 6324
#define __NR_preadv2 6325
#define __NR_pwritev2 6326
#define __NR_pkey_mprotect 6327
#define __NR_pkey_alloc 6328
#define __NR_pkey_free 6329
#define __NR_statx 6330
#define __NR_rseq 6331
#define __NR_io_pgetevents 6332
| {
"pile_set_name": "Github"
} |
//+build !go1.7
package reflect2
import "unsafe"
// resolveTypeOff is a stub for Go versions before 1.7 (see the build tag at
// the top of this file): the runtime hook it would link against does not
// exist there, so callers always receive nil.
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return nil
}
| {
"pile_set_name": "Github"
} |
---
title: Log off or Disconnect User Sessions
description: Learn how to manually log off a user
ms.topic: article
ms.assetid: 3e9bbcdc-e33b-481e-8b46-787a4f6d58bc
author: lizap
manager: dongill
ms.author: elizapo
ms.date: 08/04/2016
---
# Log off or Disconnect User Sessions
MultiPoint Services users can log on and log off of their desktop sessions as they would with any Windows session. Users can also disconnect or suspend their session so that the MultiPoint Services station is not being used, but their session remains active in the MultiPoint Services system's computer memory.
In addition, administrative users can end a user's session if the user has stepped away from their MultiPoint Services session or has forgotten to log off of the system.
## Logging off or disconnecting a session
The following table describes the different options that you or any user can use to log off, suspend, or end a session.
|||
|-|-|
|**Action**|**Effect**|
|Click **Start**, click **Settings**, click the user name (top-right corner), and then click **Sign out**.|The session ends and the station is available for log on by any user.|
|Click **Start**, click **Settings**, click **Power**, and then click **Disconnect**.|Your session is disconnected and your session is preserved in computer memory. The station becomes available for log on by the same user or a different user.|
|Click **Start**, click **Settings**, click the user name (top-right corner), and then click **Lock**.|The station is locked and your session is preserved in computer memory.|
## Suspending or ending a user's session
The following table describes the different options that you, as an administrative user, can use to disconnect or end a user's session.
|||
|-|-|
|**Action**|**Effect**|
|**Suspend:** In MultiPoint Manager, use the **Stations** tab to suspend the user's session. For more information, see the [Suspend and Leave User Session Active](Suspend-and-Leave-User-Session-Active.md) topic.|The user's session ends and is preserved in computer memory. The station becomes available for log on by the same user or a different user. The user can log on to the same station or another station and continue with their work.|
|**End:** In MultiPoint Manager, use the **Stations** tab to end the user's session. You can also end all user sessions on the **Stations** tab. For more information, see the [End a User Session](End-a-User-Session.md) topic.|The user's session ends and the station becomes available for log on by any user. The user's session no longer displays on the **Stations** tab, and it is not in computer memory.|
## See Also
[Suspend and Leave User Session Active](Suspend-and-Leave-User-Session-Active.md)
[End a User Session](End-a-User-Session.md)
[Manage User Desktops](manage-user-desktops-using-multipoint-dashboard.md)
[Log Off User Sessions](Log-Off-User-Sessions.md) | {
"pile_set_name": "Github"
} |
#version 330
#define MAX_BATCH_SIZE 512
layout(location = 0) in vec3 in_pos;
layout(location = 1) in vec2 in_texc;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 mats[MAX_BATCH_SIZE];
uniform int flip_x[MAX_BATCH_SIZE];
uniform int flip_y[MAX_BATCH_SIZE];
uniform vec4 coords[MAX_BATCH_SIZE];
uniform vec4 colors[MAX_BATCH_SIZE];
out vec2 pass_texcoord;
out vec4 pass_color;
// Per-instance sprite vertex shader: mirrors the unit texcoord if requested,
// remaps it into the instance's atlas sub-rectangle, and transforms the
// vertex by the instance's model matrix.
void main() {
    // Optionally mirror the texcoord on either axis for this instance.
    vec2 mod_coord = in_texc;
    vec4 raw_coord = coords[gl_InstanceID];
    if (flip_x[gl_InstanceID] == 1) {
        mod_coord.x = 1.0 - mod_coord.x;
    }
    if (flip_y[gl_InstanceID] == 1) {
        mod_coord.y = 1.0 - mod_coord.y;
    }
    // Map the unit texcoord into the atlas sub-rectangle.
    // NOTE(review): the size is taken as (w - y, z - x), i.e. `coords` is
    // read as (y0, x0, y1, x1) — confirm against how the CPU side packs it.
    vec2 tex_size = vec2(raw_coord.w - raw_coord.y, raw_coord.z - raw_coord.x);
    vec2 offset = raw_coord.xy;
    pass_texcoord = offset + (tex_size * mod_coord);
    pass_color = colors[gl_InstanceID];
    gl_Position = projection * view * mats[gl_InstanceID] * vec4(in_pos, 1.0f);
}
| {
"pile_set_name": "Github"
} |
#region COPYRIGHT
//
// THIS IS GENERATED BY TEMPLATE
//
// AUTHOR : ROYE
// DATE : 2010
//
// COPYRIGHT (C) 2010, TIANXIAHOTEL TECHNOLOGIES CO., LTD. ALL RIGHTS RESERVED.
//
#endregion
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Web;
using System.Text;
using System.Text.RegularExpressions;
using System.Drawing;
using System.Drawing.Imaging;
using System.Drawing.Drawing2D;
using System.Windows.Forms;
using TX.Framework.WindowUI.Forms;
namespace TX.Framework.WindowUI.Controls
{
public class ThumbnailView : ListView
{
private delegate void SetThumbnailDelegate(string imageKey, Image image);
private BackgroundWorker _LoadWorker;
private PictureBox _PictureBox;
private IContainer components = null;
private ContextMenuStrip menuAction;
private ToolStripMenuItem menuItemDelete;
private int _Id;
private Color _BorderColor = SkinManager.CurrentSkin.BorderColor;
public event EventHandler LoadCompleted;
public event EventHandler<ItemDeletedEventArgs> ItemDeleted;
public event EventHandler<ItemAddedEventArgs> ItemAdded;
/// <summary>
/// Builds the thumbnail list view: 95px thumbnails, wheat borders, and a
/// 32-bit large-image list sized to match.
/// </summary>
public ThumbnailView()
    : base()
{
    InitializeComponent();
    // ThumbnailSize / ThumbBorderColor are properties declared elsewhere in
    // this class (outside this view) — presumably simple auto-properties;
    // confirm they have no side effects that depend on ordering here.
    ThumbnailSize = 95;
    ThumbBorderColor = Color.Wheat;
    _Id = 1;
    // Reused as an off-screen loader: PictureBox.Load() does the image I/O.
    _PictureBox = new PictureBox();
    base.BorderStyle = BorderStyle.FixedSingle;
    ImageList imageList = new ImageList();
    imageList.ImageSize = new Size(ThumbnailSize, ThumbnailSize);
    imageList.ColorDepth = ColorDepth.Depth32Bit;
    imageList.TransparentColor = Color.White;
    this.LargeImageList = imageList;
}
/// <summary>
/// Adds a blank placeholder tile (white fill, wheat 1px border) to the
/// image list; ImageList copies the bitmap, so all GDI objects are disposed
/// here via using blocks.
/// </summary>
private void AddDefaultThumbnail()
{
    using (Bitmap bmp = new Bitmap(LargeImageList.ImageSize.Width, LargeImageList.ImageSize.Height, PixelFormat.Format32bppRgb))
    using (Graphics g = Graphics.FromImage(bmp))
    using (Brush brush = new SolidBrush(Color.White))
    using (Pen pen = new Pen(Color.Wheat, 1))
    {
        Rectangle rectangle = new Rectangle(0, 0, bmp.Width - 1, bmp.Height - 1);
        g.FillRectangle(brush, rectangle);
        g.DrawRectangle(pen, 0, 0, bmp.Width - 1, bmp.Height - 1);
        LargeImageList.Images.Add(bmp);
    }
}
public void Add(string[] fileList)
{
if ((_LoadWorker != null) && (_LoadWorker.IsBusy))
{
_LoadWorker.CancelAsync();
}
BeginUpdate();
Dictionary<string, string> imageKeys = new Dictionary<string, string>();
foreach (string fileName in fileList)
{
if (fileName.IsValid())
{
ListViewItem item = this.Items.Add((_Id++).ToString());
item.Text = string.Empty;
item.Tag = fileName;
item.ImageKey = Guid.NewGuid().ToString();
imageKeys.Add(item.ImageKey, fileName);
if (ItemAdded != null)
{
ItemAdded(this, new ItemAddedEventArgs(item));
}
}
}
EndUpdate();
if (_LoadWorker != null)
{
if (!_LoadWorker.CancellationPending)
{
_LoadWorker.RunWorkerAsync(imageKeys);
}
}
}
private void loadWorker_DoWork(object sender, DoWorkEventArgs e)
{
if (_LoadWorker.CancellationPending) return;
Dictionary<string, string> fileList = (Dictionary<string, string>)e.Argument;
foreach (KeyValuePair<string, string> file in fileList)
{
try
{
_PictureBox.Load(file.Value);
SetThumbnail(file.Key, ImageHelper.CreateThumbnail(_PictureBox.Image, ThumbnailSize, ThumbnailSize, ThumbBorderColor));
}
catch { }
}
}
private void SetThumbnail(string imageKey, Image image)
{
if (Disposing) return;
if (this.InvokeRequired)
{
this.Invoke(new SetThumbnailDelegate(SetThumbnail), imageKey, image);
}
else
{
this.LargeImageList.Images.Add(imageKey, image);
}
}
private void loadWorker_RunWorkerCompleted(object sender, RunWorkerCompletedEventArgs e)
{
if (LoadCompleted != null)
{
LoadCompleted(sender, e);
}
}
protected override void OnMouseDown(MouseEventArgs e)
{
//if (e.Button == MouseButtons.Right)
//{
// this.menuAction.Show(this, e.X, e.Y);
//}
base.OnMouseDown(e);
}
protected override void OnKeyDown(KeyEventArgs e)
{
if ((Control.ModifierKeys & Keys.Control) != 0)
{
if (e.KeyCode == Keys.A)
{
foreach (ListViewItem item in this.Items)
{
item.Selected = true;
}
}
else if (e.KeyCode == Keys.D)
{
DeleteSelectedItems();
}
}
if (e.KeyCode == Keys.Delete)
{
DeleteSelectedItems();
}
base.OnKeyDown(e);
}
protected override void WndProc(ref Message m)
{
base.WndProc(ref m);
switch (m.Msg)
{
case (int)WindowMessages.WM_NCPAINT:
this.NcPaint(ref m);
break;
default:
break;
}
}
private void menuAction_ItemClicked(object sender, ToolStripItemClickedEventArgs e)
{
if (e.ClickedItem == this.menuItemDelete)
{
DeleteSelectedItems();
}
}
private void DeleteSelectedItems()
{
if (TXMessageBoxExtensions.Question("确认删除", "确定删除所选择的图片吗?") == DialogResult.OK)
{
foreach (ListViewItem item in this.SelectedItems)
{
this.LargeImageList.Images.RemoveByKey(item.ImageKey);
this.Items.Remove(item);
if (ItemDeleted != null)
{
ItemDeleted(this, new ItemDeletedEventArgs(item));
}
}
}
}
#region InitializeComponent
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
private void InitializeComponent()
{
this.components = new System.ComponentModel.Container();
this._LoadWorker = new System.ComponentModel.BackgroundWorker();
this.menuAction = new System.Windows.Forms.ContextMenuStrip(this.components);
this.menuItemDelete = new System.Windows.Forms.ToolStripMenuItem();
this.menuAction.SuspendLayout();
this.SuspendLayout();
this.View = View.LargeIcon;
//
// _LoadWorker
//
this._LoadWorker.WorkerSupportsCancellation = true;
this._LoadWorker.DoWork += new System.ComponentModel.DoWorkEventHandler(this.loadWorker_DoWork);
this._LoadWorker.RunWorkerCompleted += new System.ComponentModel.RunWorkerCompletedEventHandler(this.loadWorker_RunWorkerCompleted);
//
// menuAction
//
this.menuAction.Items.AddRange(new System.Windows.Forms.ToolStripItem[] {
this.menuItemDelete});
this.menuAction.Name = "menuAction";
this.menuAction.Size = new System.Drawing.Size(99, 26);
this.menuAction.ItemClicked += new System.Windows.Forms.ToolStripItemClickedEventHandler(this.menuAction_ItemClicked);
//
// menuItemDelete
//
this.menuItemDelete.Name = "menuItemDelete";
this.menuItemDelete.Size = new System.Drawing.Size(98, 22);
this.menuItemDelete.Text = " 删除(&D) ";
this.menuAction.ResumeLayout(false);
this.ResumeLayout(false);
}
#endregion
[Category("TXProperties")]
public int ThumbnailSize { get; set; }
[Category("TXProperties")]
public Color ThumbBorderColor { get; set; }
[Category("TXProperties")]
[Description("边框颜色")]
public Color BorderColor
{
get { return _BorderColor; }
set
{
this._BorderColor = value;
base.Invalidate(true);
}
}
#region NcPaint
private void NcPaint(ref Message msg)
{
if (base.BorderStyle == BorderStyle.None)
{
return;
}
IntPtr hDC = Win32.GetWindowDC(msg.HWnd);
if (hDC == IntPtr.Zero)
{
throw new Win32Exception();
}
Rectangle bounds = new Rectangle(0, 0, Width - 1, Height - 1);
using (Graphics g = Graphics.FromHdc(hDC))
{
GDIHelper.DrawPathBorder(g, new RoundRectangle(bounds, 0), this._BorderColor);
}
msg.Result = IntPtr.Zero;
Win32.ReleaseDC(msg.HWnd, hDC);
}
#endregion
}
    /// <summary>
    /// Event arguments carrying the list view item that was removed.
    /// </summary>
    public class ItemDeletedEventArgs : EventArgs
    {
        public ItemDeletedEventArgs(ListViewItem item)
        {
            Item = item;
        }

        /// <summary>The item that was removed from the view.</summary>
        public ListViewItem Item { get; private set; }
    }
    /// <summary>
    /// Event arguments carrying the list view item that was added.
    /// </summary>
    public class ItemAddedEventArgs : EventArgs
    {
        public ItemAddedEventArgs(ListViewItem item)
        {
            Item = item;
        }

        /// <summary>The item that was added to the view.</summary>
        public ListViewItem Item { get; private set; }
    }
}
| {
"pile_set_name": "Github"
} |
package lime.system;
import lime._internal.backend.native.NativeCFFI;
import lime.app.Application;
import lime.app.Event;
#if flash
import flash.desktop.Clipboard as FlashClipboard;
#elseif (js && html5)
import lime._internal.backend.html5.HTML5Window;
#end
#if !lime_debug
@:fileXml('tags="haxe,release"')
@:noDebug
#end
@:access(lime._internal.backend.native.NativeCFFI)
@:access(lime.ui.Window)
class Clipboard
{
	/** Dispatched whenever the cached clipboard text changes. */
	public static var onUpdate = new Event<Void->Void>();

	/** Current clipboard text (read/write property). */
	public static var text(get, set):String;

	// Cached copy of the clipboard contents, used to detect changes.
	private static var _text:String;

	// Re-reads the platform clipboard into _text and dispatches onUpdate
	// if the contents changed since the last read.
	private static function __update():Void
	{
		var cacheText = _text;
		_text = null;
		#if (lime_cffi && !macro)
		#if hl
		// HashLink returns raw UTF-8 bytes that must be converted manually.
		var utf = NativeCFFI.lime_clipboard_get_text();
		if (utf != null)
		{
			_text = @:privateAccess String.fromUTF8(utf);
		}
		#else
		_text = NativeCFFI.lime_clipboard_get_text();
		#end
		#elseif flash
		if (FlashClipboard.generalClipboard.hasFormat(TEXT_FORMAT))
		{
			_text = FlashClipboard.generalClipboard.getData(TEXT_FORMAT);
		}
		#end
		if (_text != cacheText)
		{
			onUpdate.dispatch();
		}
	}

	// Get & Set Methods
	private static function get_text():String
	{
		// Native clipboard calls __update when clipboard changes
		#if (flash || js || html5)
		__update();
		#end
		return _text;
	}

	private static function set_text(value:String):String
	{
		var cacheText = _text;
		_text = value;
		#if (lime_cffi && !macro)
		NativeCFFI.lime_clipboard_set_text(value);
		#elseif flash
		FlashClipboard.generalClipboard.setData(TEXT_FORMAT, value);
		#elseif (js && html5)
		// HTML5 has no native clipboard hook here; delegate to the window backend.
		var window = Application.current.window;
		if (window != null)
		{
			window.__backend.setClipboard(value);
		}
		#end
		// Only notify listeners when the value actually changed.
		if (_text != cacheText)
		{
			onUpdate.dispatch();
		}
		return value;
	}
}
| {
"pile_set_name": "Github"
} |
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid
import (
"crypto/rand"
"crypto/sha1"
"encoding"
"encoding/binary"
"fmt"
"strconv"
"golang.org/x/sys/windows"
)
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
// The variants specified by RFC 4122.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
VariantRFC4122
VariantMicrosoft
VariantFuture
)
// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
	// 16 random bytes form the raw GUID; version/variant bits are then
	// stamped on top per RFC 4122.
	var raw [16]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return GUID{}, err
	}

	guid := FromArray(raw)
	guid.setVersion(4) // Version 4 means randomly generated.
	guid.setVariant(VariantRFC4122)
	return guid, nil
}
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
	// SHA-1 over namespace bytes followed by the name bytes, per RFC 4122.
	ns := namespace.ToArray()
	input := make([]byte, 0, len(ns)+len(name))
	input = append(input, ns[:]...)
	input = append(input, name...)
	sum := sha1.Sum(input)

	var raw [16]byte
	copy(raw[:], sum[:])

	guid := FromArray(raw)
	guid.setVersion(5) // Version 5 means generated from a string.
	guid.setVariant(VariantRFC4122)
	return guid, nil
}
// fromArray decodes 16 bytes into a GUID, using the supplied byte order for
// the three multi-byte fields; Data4 is a plain byte array and therefore
// order-independent.
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
	var g GUID
	g.Data1 = order.Uint32(b[0:4])
	g.Data2 = order.Uint16(b[4:6])
	g.Data3 = order.Uint16(b[6:8])
	copy(g.Data4[:], b[8:16])
	return g
}

// toArray is the inverse of fromArray: it serializes the GUID into 16 bytes
// using the supplied byte order for the three multi-byte fields.
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
	b := [16]byte{}
	order.PutUint32(b[0:4], g.Data1)
	order.PutUint16(b[4:6], g.Data2)
	order.PutUint16(b[6:8], g.Data3)
	copy(b[8:16], g.Data4[:])
	return b
}
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
	return fromArray(b, binary.BigEndian)
}

// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
	return g.toArray(binary.BigEndian)
}

// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
// (Windows stores the first three fields little-endian — "mixed-endian".)
func FromWindowsArray(b [16]byte) GUID {
	return fromArray(b, binary.LittleEndian)
}

// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
	return g.toArray(binary.LittleEndian)
}
// String renders the GUID in the canonical
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx lowercase hex form.
func (g GUID) String() string {
	return fmt.Sprintf(
		"%08x-%04x-%04x-%04x-%012x",
		g.Data1,
		g.Data2,
		g.Data3,
		g.Data4[:2],
		g.Data4[2:])
}
// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
	badInput := func() (GUID, error) {
		return GUID{}, fmt.Errorf("invalid GUID %q", s)
	}

	// Shape check: canonical length with dashes at the fixed offsets.
	// Short-circuiting keeps the index accesses in bounds.
	if len(s) != 36 || s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return badInput()
	}

	var g GUID

	d1, err := strconv.ParseUint(s[0:8], 16, 32)
	if err != nil {
		return badInput()
	}
	g.Data1 = uint32(d1)

	d2, err := strconv.ParseUint(s[9:13], 16, 16)
	if err != nil {
		return badInput()
	}
	g.Data2 = uint16(d2)

	d3, err := strconv.ParseUint(s[14:18], 16, 16)
	if err != nil {
		return badInput()
	}
	g.Data3 = uint16(d3)

	// The last two groups are parsed byte-by-byte at these string offsets
	// (the dash between them is skipped by the offset table).
	for i, off := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
		byteVal, err := strconv.ParseUint(s[off:off+2], 16, 8)
		if err != nil {
			return badInput()
		}
		g.Data4[i] = uint8(byteVal)
	}
	return g, nil
}
// setVariant encodes the given variant into the high bits of Data4[0],
// following the RFC 4122 layout. Panics for VariantUnknown or any value
// outside the enum, since those cannot be encoded.
func (g *GUID) setVariant(v Variant) {
	d := g.Data4[0]
	switch v {
	case VariantNCS:
		d = (d & 0x7f) // 0xxxxxxx
	case VariantRFC4122:
		d = (d & 0x3f) | 0x80 // 10xxxxxx
	case VariantMicrosoft:
		d = (d & 0x1f) | 0xc0 // 110xxxxx
	case VariantFuture:
		d = (d & 0x0f) | 0xe0 // 1110xxxx
	case VariantUnknown:
		fallthrough
	default:
		panic(fmt.Sprintf("invalid variant: %d", v))
	}
	g.Data4[0] = d
}
// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
	// The variant is encoded in the most significant bits of Data4[0];
	// the cases below mirror setVariant's bit patterns.
	switch b := g.Data4[0]; {
	case b&0x80 == 0:
		return VariantNCS
	case b&0xc0 == 0x80:
		return VariantRFC4122
	case b&0xe0 == 0xc0:
		return VariantMicrosoft
	case b&0xe0 == 0xe0:
		return VariantFuture
	default:
		return VariantUnknown
	}
}
// setVersion stores the version in the top four bits of Data3,
// per the RFC 4122 layout.
func (g *GUID) setVersion(v Version) {
	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
}

// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
	return Version((g.Data3 & 0xF000) >> 12)
}

// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
	return []byte(g.String()), nil
}

// UnmarshalText takes the textual representation of a GUID, and unmarshals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
	g2, err := FromString(string(text))
	if err != nil {
		return err
	}
	*g = g2
	return nil
}
| {
"pile_set_name": "Github"
} |
package api
import (
"time"
"github.com/amherag/skycoin/src/cipher"
"github.com/amherag/skycoin/src/coin"
"github.com/amherag/skycoin/src/daemon"
"github.com/amherag/skycoin/src/kvstorage"
"github.com/amherag/skycoin/src/transaction"
"github.com/amherag/skycoin/src/visor"
"github.com/amherag/skycoin/src/visor/historydb"
"github.com/amherag/skycoin/src/wallet"
)
//go:generate mockery -name Gatewayer -case underscore -inpkg -testonly
// Gateway bundles daemon.Daemon, Visor, wallet.Service and kvstorage.Manager into a single object
type Gateway struct {
	*daemon.Daemon
	*visor.Visor
	*wallet.Service
	*kvstorage.Manager
}

// NewGateway creates a Gateway
// All four collaborators are embedded, so the resulting Gateway satisfies
// Gatewayer through method promotion.
func NewGateway(d *daemon.Daemon, v *visor.Visor, w *wallet.Service, m *kvstorage.Manager) *Gateway {
	return &Gateway{
		Daemon:  d,
		Visor:   v,
		Service: w,
		Manager: m,
	}
}
// Gatewayer interface for Gateway methods
// It is the union of the four role interfaces below; API handlers should
// depend on this (or on one of the narrower roles) rather than on *Gateway.
type Gatewayer interface {
	Daemoner
	Visorer
	Walleter
	Storer
}

// Daemoner interface for daemon.Daemon methods used by the API
type Daemoner interface {
	DaemonConfig() daemon.DaemonConfig
	GetConnection(addr string) (*daemon.Connection, error)
	GetConnections(f func(c daemon.Connection) bool) ([]daemon.Connection, error)
	DisconnectByGnetID(gnetID uint64) error
	GetDefaultConnections() []string
	GetTrustConnections() []string
	GetExchgConnection() []string
	GetBlockchainProgress(headSeq uint64) *daemon.BlockchainProgress
	InjectBroadcastTransaction(txn coin.Transaction) error
}
// Visorer interface for visor.Visor methods used by the API
// The *Verbose variants additionally return the resolved transaction inputs.
type Visorer interface {
	VisorConfig() visor.Config
	StartedAt() time.Time
	HeadBkSeq() (uint64, bool, error)
	GetBlockchainMetadata() (*visor.BlockchainMetadata, error)
	ResendUnconfirmedTxns() ([]cipher.SHA256, error)
	// Block queries.
	GetSignedBlockByHash(hash cipher.SHA256) (*coin.SignedBlock, error)
	GetSignedBlockByHashVerbose(hash cipher.SHA256) (*coin.SignedBlock, [][]visor.TransactionInput, error)
	GetSignedBlockBySeq(seq uint64) (*coin.SignedBlock, error)
	GetSignedBlockBySeqVerbose(seq uint64) (*coin.SignedBlock, [][]visor.TransactionInput, error)
	GetBlocks(seqs []uint64) ([]coin.SignedBlock, error)
	GetBlocksVerbose(seqs []uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error)
	GetBlocksInRange(start, end uint64) ([]coin.SignedBlock, error)
	GetBlocksInRangeVerbose(start, end uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error)
	GetLastBlocks(num uint64) ([]coin.SignedBlock, error)
	GetLastBlocksVerbose(num uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error)
	// Output and balance queries.
	GetUnspentOutputsSummary(filters []visor.OutputsFilter) (*visor.UnspentOutputsSummary, error)
	GetBalanceOfAddrs(addrs []cipher.Address) ([]wallet.BalancePair, error)
	VerifyTxnVerbose(txn *coin.Transaction, signed visor.TxnSignedFlag) ([]visor.TransactionInput, bool, error)
	AddressCount() (uint64, error)
	GetUxOutByID(id cipher.SHA256) (*historydb.UxOut, error)
	GetSpentOutputsForAddresses(addr []cipher.Address) ([][]historydb.UxOut, error)
	GetVerboseTransactionsForAddress(a cipher.Address) ([]visor.Transaction, [][]visor.TransactionInput, error)
	GetRichlist(includeDistribution bool) (visor.Richlist, error)
	// Transaction queries and creation.
	GetAllUnconfirmedTransactions() ([]visor.UnconfirmedTransaction, error)
	GetAllUnconfirmedTransactionsVerbose() ([]visor.UnconfirmedTransaction, [][]visor.TransactionInput, error)
	GetTransaction(txid cipher.SHA256) (*visor.Transaction, error)
	GetTransactionWithInputs(txid cipher.SHA256) (*visor.Transaction, []visor.TransactionInput, error)
	GetProgramState(flts []visor.TxFilter) ([]byte, error)
	GetTransactions(flts []visor.TxFilter) ([]visor.Transaction, error)
	GetTransactionsWithInputs(flts []visor.TxFilter) ([]visor.Transaction, [][]visor.TransactionInput, error)
	GetWalletUnconfirmedTransactions(wltID string) ([]visor.UnconfirmedTransaction, error)
	GetWalletUnconfirmedTransactionsVerbose(wltID string) ([]visor.UnconfirmedTransaction, [][]visor.TransactionInput, error)
	GetWalletBalance(wltID string) (wallet.BalancePair, wallet.AddressBalances, error)
	CreateTransaction(p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error)
	WalletCreateTransaction(wltID string, p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error)
	WalletCreateTransactionSigned(wltID string, password []byte, p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error)
	WalletSignTransaction(wltID string, password []byte, txn *coin.Transaction, signIndexes []int) (*coin.Transaction, []visor.TransactionInput, error)
}
// Walleter interface for wallet.Service methods used by the API
type Walleter interface {
	UnloadWallet(wltID string) error
	EncryptWallet(wltID string, password []byte) (*wallet.Wallet, error)
	DecryptWallet(wltID string, password []byte) (*wallet.Wallet, error)
	GetWalletSeed(wltID string, password []byte) (string, error)
	CreateWallet(wltName string, options wallet.Options, bg wallet.BalanceGetter) (*wallet.Wallet, error)
	RecoverWallet(wltID, seed string, password []byte) (*wallet.Wallet, error)
	NewAddresses(wltID string, password []byte, n uint64) ([]cipher.Address, error)
	GetWallet(wltID string) (*wallet.Wallet, error)
	GetWallets() (wallet.Wallets, error)
	UpdateWalletLabel(wltID, label string) error
	WalletDir() (string, error)
}

// Storer interface for kvstorage.Manager methods used by the API
type Storer interface {
	GetStorageValue(storageType kvstorage.Type, key string) (string, error)
	GetAllStorageValues(storageType kvstorage.Type) (map[string]string, error)
	AddStorageValue(storageType kvstorage.Type, key, val string) error
	RemoveStorageValue(storageType kvstorage.Type, key string) error
}
| {
"pile_set_name": "Github"
} |
/*
* Phosphorus Five, copyright 2014 - 2017, Thomas Hansen, [email protected]
*
* This file is part of Phosphorus Five.
*
* Phosphorus Five is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3, as published by
* the Free Software Foundation.
*
*
* Phosphorus Five is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Phosphorus Five. If not, see <http://www.gnu.org/licenses/>.
*
* If you cannot for some reasons use the GPL license, Phosphorus
* Five is also commercially available under Quid Pro Quo terms. Check
* out our website at http://gaiasoul.com for more details.
*/
using System.Web;
using p5.exp;
using p5.core;
namespace p5.web.ui.misc
{
/// <summary>
/// Helper to minify JavaScript and CSS.
/// </summary>
public static class Minify
{
/// <summary>
/// Minifies JavaScript.
/// </summary>
/// <param name="context">Application Context</param>
/// <param name="e">Parameters passed into Active Event</param>
[ActiveEvent (Name = "p5.web.javascript.minify")]
public static void p5_web_js_minify (ApplicationContext context, ActiveEventArgs e)
{
// House cleaning.
using (new ArgsRemover (e.Args, true)) {
// Iterating through each CSS content supplied, and minifying it, returning the results to caller.
foreach (var idxCss in XUtil.Iterate<string> (context, e.Args)) {
// Minifying CSS content.
var minifier = new Microsoft.Ajax.Utilities.Minifier ();
// Returning minified content to caller.
e.Args.Add ("result", minifier.MinifyJavaScript (idxCss));
}
}
}
/// <summary>
/// Minifies CSS.
/// </summary>
/// <param name="context">Application Context</param>
/// <param name="e">Parameters passed into Active Event</param>
[ActiveEvent (Name = "p5.web.css.minify")]
public static void p5_web_css_minify (ApplicationContext context, ActiveEventArgs e)
{
// House cleaning.
using (new ArgsRemover (e.Args, true)) {
// Iterating through each CSS content supplied, and minifying it, returning the results to caller.
foreach (var idxCss in XUtil.Iterate<string> (context, e.Args)) {
// Minifying CSS content.
var minifier = new Microsoft.Ajax.Utilities.Minifier ();
var options = new Microsoft.Ajax.Utilities.CssSettings ();
options.CommentMode = Microsoft.Ajax.Utilities.CssComment.None;
// Returning minified content to caller.
e.Args.Add ("result", minifier.MinifyStyleSheet (idxCss, options));
}
}
}
}
}
| {
"pile_set_name": "Github"
} |
'use strict';
const supportedVideoExtensions = ['mp4', 'mov', 'm4v'];
const formatExtensions = new Map([
['av1', 'mp4']
]);
const getFormatExtension = format => formatExtensions.get(format) || format;
module.exports = {
supportedVideoExtensions,
getFormatExtension,
defaultInputDeviceId: 'SYSTEM_DEFAULT'
};
| {
"pile_set_name": "Github"
} |
/**
 * Appends the elements of `values` to `array`, mutating `array` in place.
 *
 * @private
 * @param {Array} array The array to modify.
 * @param {Array} values The values to append.
 * @returns {Array} Returns `array`.
 */
function arrayPush(array, values) {
  var offset = array.length;
  for (var index = 0; index < values.length; index++) {
    array[offset + index] = values[index];
  }
  return array;
}

module.exports = arrayPush;
| {
"pile_set_name": "Github"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"errors"
"runtime"
"syscall"
"time"
"unsafe"
)
// wait blocks until the process exits, then gathers its exit code and CPU
// times into a ProcessState. The process handle is released before returning.
func (p *Process) wait() (ps *ProcessState, err error) {
	s, e := syscall.WaitForSingleObject(syscall.Handle(p.handle), syscall.INFINITE)
	switch s {
	case syscall.WAIT_OBJECT_0:
		break
	case syscall.WAIT_FAILED:
		return nil, NewSyscallError("WaitForSingleObject", e)
	default:
		return nil, errors.New("os: unexpected result from WaitForSingleObject")
	}
	var ec uint32
	e = syscall.GetExitCodeProcess(syscall.Handle(p.handle), &ec)
	if e != nil {
		return nil, NewSyscallError("GetExitCodeProcess", e)
	}
	var u syscall.Rusage
	e = syscall.GetProcessTimes(syscall.Handle(p.handle), &u.CreationTime, &u.ExitTime, &u.KernelTime, &u.UserTime)
	if e != nil {
		return nil, NewSyscallError("GetProcessTimes", e)
	}
	p.setDone()
	// NOTE(brainman): It seems that sometimes process is not dead
	// when WaitForSingleObject returns. But we do not know any
	// other way to wait for it. Sleeping for a while seems to do
	// the trick sometimes. So we will sleep and smell the roses.
	defer time.Sleep(5 * time.Millisecond)
	defer p.Release()
	return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil
}
// terminateProcess forcibly ends the process identified by pid with the given
// exit code, via a short-lived PROCESS_TERMINATE handle.
func terminateProcess(pid, exitcode int) error {
	h, e := syscall.OpenProcess(syscall.PROCESS_TERMINATE, false, uint32(pid))
	if e != nil {
		return NewSyscallError("OpenProcess", e)
	}
	defer syscall.CloseHandle(h)
	e = syscall.TerminateProcess(h, uint32(exitcode))
	return NewSyscallError("TerminateProcess", e)
}

// signal implements Process.Signal. Only Kill is supported on Windows; other
// signals report EWINDOWS.
// NOTE(review): the done() check is advisory — the process may still exit
// between the check and the terminate call.
func (p *Process) signal(sig Signal) error {
	if p.handle == uintptr(syscall.InvalidHandle) {
		return syscall.EINVAL
	}
	if p.done() {
		return errors.New("os: process already finished")
	}
	if sig == Kill {
		return terminateProcess(p.Pid, 1)
	}
	// TODO(rsc): Handle Interrupt too?
	return syscall.Errno(syscall.EWINDOWS)
}
// release closes the process handle and clears the finalizer; further
// operations on the Process will fail with EINVAL.
func (p *Process) release() error {
	if p.handle == uintptr(syscall.InvalidHandle) {
		return syscall.EINVAL
	}
	e := syscall.CloseHandle(syscall.Handle(p.handle))
	if e != nil {
		return NewSyscallError("CloseHandle", e)
	}
	p.handle = uintptr(syscall.InvalidHandle)
	// no need for a finalizer anymore
	runtime.SetFinalizer(p, nil)
	return nil
}

// findProcess opens an existing process by pid with read/query/wait rights
// and wraps it in a Process.
func findProcess(pid int) (p *Process, err error) {
	const da = syscall.STANDARD_RIGHTS_READ |
		syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE
	h, e := syscall.OpenProcess(da, false, uint32(pid))
	if e != nil {
		return nil, NewSyscallError("OpenProcess", e)
	}
	return newProcess(pid, uintptr(h)), nil
}
// init populates os.Args by re-splitting the native command line, since
// Windows hands the process a single string rather than an argv vector.
// On failure Args is simply left empty.
func init() {
	var argc int32
	cmd := syscall.GetCommandLine()
	argv, e := syscall.CommandLineToArgv(cmd, &argc)
	if e != nil {
		return
	}
	defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
	Args = make([]string, argc)
	for i, v := range (*argv)[:argc] {
		Args[i] = string(syscall.UTF16ToString((*v)[:]))
	}
}
// ftToDuration converts a Windows FILETIME tick count into a time.Duration.
func ftToDuration(ft *syscall.Filetime) time.Duration {
	n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals
	return time.Duration(n*100) * time.Nanosecond
}

// userTime reports the process's user-mode CPU time.
func (p *ProcessState) userTime() time.Duration {
	return ftToDuration(&p.rusage.UserTime)
}

// systemTime reports the process's kernel-mode CPU time.
func (p *ProcessState) systemTime() time.Duration {
	return ftToDuration(&p.rusage.KernelTime)
}
| {
"pile_set_name": "Github"
} |
import System.Environment
import Control.Concurrent
import Control.Monad
-- | Concurrent "sleep sort": fork one thread per value; each thread sleeps
-- for a time proportional to its value and then reports the value on a
-- shared channel, so reads come back in (roughly) ascending order.
sleepSort :: [Int] -> IO ()
sleepSort values = do
    chan <- newChan
    mapM_ (forkIO . announce chan) values
    mapM_ (const (readChan chan >>= print)) values
  where
    announce chan time = threadDelay (50000 * time) >> writeChan chan time

-- | Read the numbers to sort from the command-line arguments.
main :: IO ()
main = getArgs >>= sleepSort . map read
| {
"pile_set_name": "Github"
} |
Investors, fed up with a lack of transparency and the poor performance of the Czech equity market, are turning to the country's less opaque debt market, buoyed by favourable interest rates and economic data.
The Prague bourse has been caught in a downward spiral -- the official PX50 index has lost almost 18 percent in the past two months -- that analysts say won't end before the New Year.
But while bears maul the equities market, they say bulls are quietly finding greener pastures in Czech debt.
"The bond market is pretty bullish and the outlook is good. Rates have been tending down and people are positive especially in the longer maturities which are looking more attractive," said Michael Gartner, a debt analyst at ING Capital Markets.
Estimated daily bond turnover has grown to 900 million to one billion crowns, equal to turnover on the Prague stock exchange, from 500 million a year ago.
"The bond market is becoming even more important for foreign investors because they can get reasonable prices," said Alojz Lacko, a bond trader at Patria Finance.
Reasonable prices are something investors, foreign and domestic, have long complained they cannot find on the equities market where insufficient legislation has pushed an estimated 70 percent of trading into murky off-market dealing, and trampled minority shareholder rights.
Many strategic and speculative investors have taken advantage of the opaque system to buy up large stakes in firms without prices or counter-parties being made public until long after the deal has been made.
Bourse and Finance Ministry officials have stepped up their campaign in recent months to assure investors tighter, more stringent rules for trading will be introduced. But analysts say the moves appear to be too little, too late for investors who are turning away from Czech equities.
"Investors are really put off by the (trading) practices here and frankly, it will be hard to lure some back. But we don't have these problems in the debt market, and a lot are turning their attentions there," said one local trader.
ING's Gartner said investor motivation for entering the debt market is in many ways different from equity investments. Investor fears of having the carpet pulled out from under them by an off-market takeover deal are non-existent.
"The bond market is completely transparent because prices are transparent. It's a good market: it works, it's liquid, foreign investors, domestic investors, pension funds, investment funds all participate on an even field," he said.
A recent spate of positive economic indicators has further buoyed the market.
Inflation, one of the lowest in the region, eased further in October with the year-on-year rate standing at 8.7 percent.
The central bank's war on a burgeoning money supply has also borne fruit recently, with the expansion of the key M2 year-on-year rate falling to 13.2 percent at the end of August, just below its 14-17 percent target.
Although investor sentiment has traditionally been bearish on East European bonds because of currency risks, high taxation and political and economic instability, analysts say these factors are less significant in the Czech Republic.
While rates may be higher in neighbouring Poland and Hungary, the markets are very illiquid, especially in local currency corporate issues.
But with the strength and stability of the crown the Czech market is attractive because an investor can add a play on the local currency, and have little fear of currency risk since it is tied closely to the mark.
Traders say the government has also helped by easing currency exchange laws to help foreign investors repatriate their holdings. Foreigners may repatriate interest income on bonds as well as income from selling or redeeming them.
-- Prague newsroom (42 2) 24 23 0003
| {
"pile_set_name": "Github"
} |
#coding=utf-8
'''
Created on 2016-7-6
@author: Administrator
'''
from teamvision.project.models import Tag
class VM_CIServer(object):
    """View-model wrapper around a CI server entity for template rendering.

    Exposes small helpers that return the HTML attribute fragments
    ("selected" / "checked") and a human-readable scope label.
    """

    def __init__(self, dm_server, is_create, selected_server_id):
        # dm_server: the CI server entity (must expose ``id`` and ``Scope``).
        # selected_server_id: id of the server currently chosen in the UI.
        self.ci_server = dm_server
        self.selected_server_id = selected_server_id
        self.is_create = is_create

    def is_selected(self):
        """Return "selected" when this server is the one currently chosen."""
        return "selected" if self.ci_server.id == int(self.selected_server_id) else ""

    def every_one(self):
        """Return "checked" when the server is shared (no scope, or scope 1)."""
        scope = self.ci_server.Scope
        return "checked" if scope is None or scope == 1 else ""

    def only_me(self):
        """Return "checked" when the server is private (scope 2)."""
        return "checked" if self.ci_server.Scope == 2 else ""

    def scope_name(self):
        """Human-readable label for the server's visibility scope."""
        labels = {1: "所有人可以使用", 2: "仅自己可以使用"}
        return labels.get(self.ci_server.Scope, "")
| {
"pile_set_name": "Github"
} |
"support_item_gold_ranked"
{
"challengetype" "148"
"desc" "#DOTA_ChallengeDesc_Support_Item_Gold"
"status_text" "#DOTA_ChallengeStatusText_Support_Item_Gold"
"events"
{
"matching_type" "game_state"
"query"
{
"state_values"
{
"1"
{
"key" "support_gold_spent"
"aggregator" "count"
}
}
}
"progress_stored_in" "1"
"post_tests"
{
"test_value"
{
"storage" "1"
"compare" ">="
"amount" "<support_gold_spent>"
}
}
}
"variables"
{
"<support_gold_spent>"
{
"format" "int"
"index" "0"
}
}
}
| {
"pile_set_name": "Github"
} |
<?php
namespace Oro\Bundle\DataAuditBundle\Model;
use Doctrine\Common\Collections\Collection;
use Oro\Bundle\DataAuditBundle\Entity\AbstractAuditField;
/**
 * Transform AuditFields to a scalar data
 */
class FieldsTransformer
{
    /**
     * Flattens a list of audit-field entities into a scalar diff array keyed
     * by field name, each entry holding 'old'/'new' values plus optional
     * collection diffs and a translation domain.
     *
     * @param AbstractAuditField[] $fields
     * @return array
     * @throws \InvalidArgumentException when an element is not an AbstractAuditField
     * @SuppressWarnings(PHPMD.CyclomaticComplexity)
     */
    public function getData(array $fields): array
    {
        // Types whose values must be wrapped with their type tag so consumers
        // can deserialize them. Hoisted out of the loop: previously this
        // table was rebuilt on every iteration although it never changes.
        $simpleTypes = [
            'date' => true,
            'date_immutable' => true,
            'datetime' => true,
            'datetimetz' => true,
            'time' => true,
            'array' => true,
            'jsonarray' => true,
        ];

        $data = [];
        foreach ($fields as $field) {
            if (!$field instanceof AbstractAuditField) {
                throw new \InvalidArgumentException(
                    sprintf(
                        'Expected argument of type "%s", "%s" given',
                        AbstractAuditField::class,
                        is_object($field) ? get_class($field) : gettype($field)
                    )
                );
            }

            $newValue = $field->getNewValue();
            $oldValue = $field->getOldValue();

            if (array_key_exists($field->getDataType(), $simpleTypes)) {
                $newValue = [
                    'value' => $newValue,
                    'type' => $field->getDataType(),
                ];
                $oldValue = [
                    'value' => $oldValue,
                    'type' => $field->getDataType(),
                ];
            }

            $data[$field->getField()] = [
                'old' => $oldValue,
                'new' => $newValue,
            ];

            // Collection-valued fields expose a structured added/changed/removed
            // diff; include it only when it is non-empty.
            if (method_exists($field, 'getCollectionDiffs')) {
                $collectionDiffs = $field->getCollectionDiffs();
                if ($collectionDiffs['added'] || $collectionDiffs['changed'] || $collectionDiffs['removed']) {
                    $data[$field->getField()]['collectionDiffs'] = $field->getCollectionDiffs();
                }
            }

            if ($field->getTranslationDomain()) {
                $data[$field->getField()]['translationDomain'] = $field->getTranslationDomain();
            }
        }

        return $data;
    }

    /**
     * Same as getData() but accepts a Doctrine collection of audit fields.
     *
     * @param Collection $collection
     * @return array
     */
    public function getCollectionData(Collection $collection): array
    {
        return $this->getData($collection->toArray());
    }
}
| {
"pile_set_name": "Github"
} |
<vector android:height="24dp" android:tint="#FFFFFF"
android:viewportHeight="24.0" android:viewportWidth="24.0"
android:width="24dp" xmlns:android="http://schemas.android.com/apk/res/android">
<path android:fillColor="#FF000000" android:pathData="M19,6.41L17.59,5 12,10.59 6.41,5 5,6.41 10.59,12 5,17.59 6.41,19 12,13.41 17.59,19 19,17.59 13.41,12z"/>
</vector>
| {
"pile_set_name": "Github"
} |
"""Convert the row resulting from a query to the Entities object."""
from .status import (DeliveryOrder, DispatchedOrder, OrderDelivered,
OrderInTransit)
def build_dispatched(row):
    # Build a DispatchedOrder status from the row's dispatch timestamp.
    return DispatchedOrder(row.dispatched_at)
def build_in_transit(row):
    # Build an OrderInTransit status from the row's current location.
    return OrderInTransit(row.location)
def build_delivered(row):
    # Build an OrderDelivered status from the row's delivery timestamp.
    return OrderDelivered(row.delivered_at)
# Maps the raw status code found on a query row (its ``status`` attribute,
# read in build_from_row) to the builder producing the matching status object.
# Codes: "d" -> dispatched, "t" -> in transit, "f" -> delivered
# (presumably short codes from the database column -- confirm against schema).
_BUILD_MAPPING = {
    "d": build_dispatched,
    "t": build_in_transit,
    "f": build_delivered,
}
class WrappedRow:
    """Adapter exposing a mapping-style row's keys as plain attributes."""

    def __init__(self, row):
        self._wrapped = row

    def __getattr__(self, name):
        # Invoked only for attributes not found normally; delegate the
        # lookup to the underlying row by key.
        return self._wrapped[name]
class OrderNotFoundError(Exception):
    """Raised when the requested delivery order does not appear listed."""
def build_from_row(delivery_id, row):
    """Build a DeliveryOrder entity for ``delivery_id`` from a raw query row.

    Raises OrderNotFoundError when the query returned no row.
    """
    if row is None:
        raise OrderNotFoundError(f"{delivery_id} was not found")
    wrapped = WrappedRow(row)
    status = _BUILD_MAPPING[wrapped.status](wrapped)
    return DeliveryOrder(delivery_id, status)
| {
"pile_set_name": "Github"
} |
/* $NetBSD: strlen.S,v 1.2 2014/03/22 19:38:46 jakllsch Exp $ */
/*-
* Copyright (c) 2011 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by David Laight.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: strlen.S,v 1.2 2014/03/22 19:38:46 jakllsch Exp $")
/*
 * size_t strlen(const char *s) -- i386.
 * Scans bytes one at a time until the NUL terminator; returns the
 * length (index of the NUL) in %eax.
 */
ENTRY(strlen)
	movl	8(%esp), %ecx		/* %ecx = s (the string argument) */
	xorl	%eax, %eax		/* %eax = 0 */
	dec	%eax			/* start at -1; loop pre-increments */
1:
	incl	%eax
	cmpb	$0, 0(%ecx,%eax,1)	/* is s[%eax] the NUL terminator? */
	jnz	1b			/* no: keep scanning */
	ret				/* yes: length is in %eax */
END(strlen)
| {
"pile_set_name": "Github"
} |
/*
*
* * Copyright 2019-2020 the original author or authors.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * https://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package test.org.springdoc.api.app86.test;
import java.util.Locale;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * Minimal test controller (springdoc integration test app86).
 * The handler body is intentionally empty: the test presumably verifies how
 * the generated OpenAPI description treats servlet/infrastructure parameter
 * types (HttpSession, HttpServletRequest, HttpServletResponse, Locale)
 * versus the plain {@code hello} parameter -- confirm against the matching
 * test class.
 */
@RestController
public class HelloController2 {

    // NOTE(review): the HttpSession parameter is named "header" -- looks like
    // a copy/paste leftover; harmless here but worth confirming.
    @GetMapping("/test2")
    public void test(HttpSession header, HttpServletRequest request, HttpServletResponse response, Locale locale,
            String hello) {

    }
}
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the pluggy documentation build.
# NOTE(review): pkg_resources is deprecated in favour of importlib.metadata;
# migration would change runtime requirements, so it is only flagged here.
import pkg_resources

# Sphinx extension modules enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# Project name and version come from the installed distribution metadata,
# so the docs always match the installed pluggy.
dist = pkg_resources.get_distribution('pluggy')
project = dist.project_name
copyright = u'2016, Holger Krekel'
author = 'Holger Krekel'
release = dist.version
# The short X.Y version.
version = u'.'.join(dist.version.split('.')[:2])

language = None

pygments_style = 'sphinx'

# HTML output: alabaster theme with pytest-dev GitHub integration.
html_logo = '_static/img/plug.png'
html_theme = 'alabaster'
html_theme_options = {
    # 'logo': 'img/plug.png',
    # 'logo_name': 'true',
    'description': 'The `pytest` plugin system',
    'github_user': 'pytest-dev',
    'github_repo': 'pluggy',
    'github_button': 'true',
    'github_banner': 'true',
    'page_width': '1080px',
    'fixed_sidebar': 'false',
}
html_static_path = ['_static']

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pluggy', u'pluggy Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pluggy', u'pluggy Documentation',
     author, 'pluggy', 'One line description of project.',
     'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
"pile_set_name": "Github"
} |
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Code generated by go generate; DO NOT EDIT.
// This file was generated at 2020-09-15T18:15:46-07:00
package awsutils
// InstanceNetworkingLimits contains a mapping from instance type to networking limits for the type. Documentation found at
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI
var InstanceNetworkingLimits = map[string]InstanceTypeLimits{
"a1.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"a1.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"a1.large": {ENILimit: 3, IPv4Limit: 10},
"a1.medium": {ENILimit: 2, IPv4Limit: 4},
"a1.metal": {ENILimit: 8, IPv4Limit: 30},
"a1.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c1.medium": {ENILimit: 2, IPv4Limit: 6},
"c1.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c3.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c3.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c3.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c3.large": {ENILimit: 3, IPv4Limit: 10},
"c3.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c4.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c4.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c4.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c4.large": {ENILimit: 3, IPv4Limit: 10},
"c4.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5.18xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5.9xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5.large": {ENILimit: 3, IPv4Limit: 10},
"c5.metal": {ENILimit: 15, IPv4Limit: 50},
"c5.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5a.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5a.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5a.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5a.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5a.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5a.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5a.large": {ENILimit: 3, IPv4Limit: 10},
"c5a.metal": {ENILimit: 15, IPv4Limit: 50},
"c5a.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5ad.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5ad.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5ad.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5ad.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5ad.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5ad.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5ad.large": {ENILimit: 3, IPv4Limit: 10},
"c5ad.metal": {ENILimit: 15, IPv4Limit: 50},
"c5ad.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5d.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5d.18xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5d.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5d.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5d.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5d.9xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5d.large": {ENILimit: 3, IPv4Limit: 10},
"c5d.metal": {ENILimit: 15, IPv4Limit: 50},
"c5d.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5n.18xlarge": {ENILimit: 15, IPv4Limit: 50},
"c5n.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c5n.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5n.9xlarge": {ENILimit: 8, IPv4Limit: 30},
"c5n.large": {ENILimit: 3, IPv4Limit: 10},
"c5n.metal": {ENILimit: 15, IPv4Limit: 50},
"c5n.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c6g.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6g.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"c6g.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c6g.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6g.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6g.large": {ENILimit: 3, IPv4Limit: 10},
"c6g.medium": {ENILimit: 2, IPv4Limit: 4},
"c6g.metal": {ENILimit: 15, IPv4Limit: 50},
"c6g.xlarge": {ENILimit: 4, IPv4Limit: 15},
"c6gd.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6gd.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"c6gd.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"c6gd.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6gd.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"c6gd.large": {ENILimit: 3, IPv4Limit: 10},
"c6gd.medium": {ENILimit: 2, IPv4Limit: 4},
"c6gd.metal": {ENILimit: 15, IPv4Limit: 50},
"c6gd.xlarge": {ENILimit: 4, IPv4Limit: 15},
"cc2.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"cr1.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"d2.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"d2.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"d2.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"d2.xlarge": {ENILimit: 4, IPv4Limit: 15},
"f1.16xlarge": {ENILimit: 8, IPv4Limit: 50},
"f1.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"f1.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"g2.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"g2.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"g3.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"g3.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"g3.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"g3s.xlarge": {ENILimit: 4, IPv4Limit: 15},
"g4dn.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"g4dn.16xlarge": {ENILimit: 4, IPv4Limit: 15},
"g4dn.2xlarge": {ENILimit: 3, IPv4Limit: 10},
"g4dn.4xlarge": {ENILimit: 3, IPv4Limit: 10},
"g4dn.8xlarge": {ENILimit: 4, IPv4Limit: 15},
"g4dn.metal": {ENILimit: 15, IPv4Limit: 50},
"g4dn.xlarge": {ENILimit: 3, IPv4Limit: 10},
"h1.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"h1.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"h1.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"h1.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"hs1.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"i2.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"i2.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"i2.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"i2.xlarge": {ENILimit: 4, IPv4Limit: 15},
"i3.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"i3.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"i3.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"i3.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"i3.large": {ENILimit: 3, IPv4Limit: 10},
"i3.metal": {ENILimit: 15, IPv4Limit: 50},
"i3.xlarge": {ENILimit: 4, IPv4Limit: 15},
"i3en.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"i3en.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"i3en.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"i3en.3xlarge": {ENILimit: 4, IPv4Limit: 15},
"i3en.6xlarge": {ENILimit: 8, IPv4Limit: 30},
"i3en.large": {ENILimit: 3, IPv4Limit: 10},
"i3en.metal": {ENILimit: 15, IPv4Limit: 50},
"i3en.xlarge": {ENILimit: 4, IPv4Limit: 15},
"inf1.24xlarge": {ENILimit: 11, IPv4Limit: 30},
"inf1.2xlarge": {ENILimit: 4, IPv4Limit: 10},
"inf1.6xlarge": {ENILimit: 8, IPv4Limit: 30},
"inf1.xlarge": {ENILimit: 4, IPv4Limit: 10},
"m1.large": {ENILimit: 3, IPv4Limit: 10},
"m1.medium": {ENILimit: 2, IPv4Limit: 6},
"m1.small": {ENILimit: 2, IPv4Limit: 4},
"m1.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m2.2xlarge": {ENILimit: 4, IPv4Limit: 30},
"m2.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m2.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m3.2xlarge": {ENILimit: 4, IPv4Limit: 30},
"m3.large": {ENILimit: 3, IPv4Limit: 10},
"m3.medium": {ENILimit: 2, IPv4Limit: 6},
"m3.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m4.10xlarge": {ENILimit: 8, IPv4Limit: 30},
"m4.16xlarge": {ENILimit: 8, IPv4Limit: 30},
"m4.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m4.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m4.large": {ENILimit: 2, IPv4Limit: 10},
"m4.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5.large": {ENILimit: 3, IPv4Limit: 10},
"m5.metal": {ENILimit: 15, IPv4Limit: 50},
"m5.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5a.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5a.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5a.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5a.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5a.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5a.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5a.large": {ENILimit: 3, IPv4Limit: 10},
"m5a.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5ad.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5ad.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5ad.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5ad.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5ad.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5ad.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5ad.large": {ENILimit: 3, IPv4Limit: 10},
"m5ad.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5d.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5d.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5d.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5d.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5d.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5d.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5d.large": {ENILimit: 3, IPv4Limit: 10},
"m5d.metal": {ENILimit: 15, IPv4Limit: 50},
"m5d.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5dn.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5dn.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5dn.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5dn.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5dn.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5dn.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5dn.large": {ENILimit: 3, IPv4Limit: 10},
"m5dn.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5n.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5n.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5n.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"m5n.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m5n.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5n.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m5n.large": {ENILimit: 3, IPv4Limit: 10},
"m5n.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m6g.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6g.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m6g.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m6g.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6g.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6g.large": {ENILimit: 3, IPv4Limit: 10},
"m6g.medium": {ENILimit: 2, IPv4Limit: 4},
"m6g.metal": {ENILimit: 15, IPv4Limit: 50},
"m6g.xlarge": {ENILimit: 4, IPv4Limit: 15},
"m6gd.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6gd.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"m6gd.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"m6gd.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6gd.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"m6gd.large": {ENILimit: 3, IPv4Limit: 10},
"m6gd.medium": {ENILimit: 2, IPv4Limit: 4},
"m6gd.metal": {ENILimit: 15, IPv4Limit: 50},
"m6gd.xlarge": {ENILimit: 4, IPv4Limit: 15},
"p2.16xlarge": {ENILimit: 8, IPv4Limit: 30},
"p2.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"p2.xlarge": {ENILimit: 4, IPv4Limit: 15},
"p3.16xlarge": {ENILimit: 8, IPv4Limit: 30},
"p3.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"p3.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"p3dn.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r3.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r3.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r3.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r3.large": {ENILimit: 3, IPv4Limit: 10},
"r3.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r4.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r4.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r4.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r4.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r4.large": {ENILimit: 3, IPv4Limit: 10},
"r4.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5.large": {ENILimit: 3, IPv4Limit: 10},
"r5.metal": {ENILimit: 15, IPv4Limit: 50},
"r5.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5a.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5a.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5a.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5a.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5a.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5a.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5a.large": {ENILimit: 3, IPv4Limit: 10},
"r5a.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5ad.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5ad.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5ad.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5ad.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5ad.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5ad.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5ad.large": {ENILimit: 3, IPv4Limit: 10},
"r5ad.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5d.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5d.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5d.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5d.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5d.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5d.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5d.large": {ENILimit: 3, IPv4Limit: 10},
"r5d.metal": {ENILimit: 15, IPv4Limit: 50},
"r5d.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5dn.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5dn.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5dn.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5dn.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5dn.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5dn.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5dn.large": {ENILimit: 3, IPv4Limit: 10},
"r5dn.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5n.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5n.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5n.24xlarge": {ENILimit: 15, IPv4Limit: 50},
"r5n.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r5n.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5n.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r5n.large": {ENILimit: 3, IPv4Limit: 10},
"r5n.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r6g.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6g.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r6g.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r6g.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6g.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6g.large": {ENILimit: 3, IPv4Limit: 10},
"r6g.medium": {ENILimit: 2, IPv4Limit: 4},
"r6g.metal": {ENILimit: 15, IPv4Limit: 50},
"r6g.xlarge": {ENILimit: 4, IPv4Limit: 15},
"r6gd.12xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6gd.16xlarge": {ENILimit: 15, IPv4Limit: 50},
"r6gd.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"r6gd.4xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6gd.8xlarge": {ENILimit: 8, IPv4Limit: 30},
"r6gd.large": {ENILimit: 3, IPv4Limit: 10},
"r6gd.medium": {ENILimit: 2, IPv4Limit: 4},
"r6gd.metal": {ENILimit: 15, IPv4Limit: 50},
"r6gd.xlarge": {ENILimit: 4, IPv4Limit: 15},
"t1.micro": {ENILimit: 2, IPv4Limit: 2},
"t2.2xlarge": {ENILimit: 3, IPv4Limit: 15},
"t2.large": {ENILimit: 3, IPv4Limit: 12},
"t2.medium": {ENILimit: 3, IPv4Limit: 6},
"t2.micro": {ENILimit: 2, IPv4Limit: 2},
"t2.nano": {ENILimit: 2, IPv4Limit: 2},
"t2.small": {ENILimit: 3, IPv4Limit: 4},
"t2.xlarge": {ENILimit: 3, IPv4Limit: 15},
"t3.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"t3.large": {ENILimit: 3, IPv4Limit: 12},
"t3.medium": {ENILimit: 3, IPv4Limit: 6},
"t3.micro": {ENILimit: 2, IPv4Limit: 2},
"t3.nano": {ENILimit: 2, IPv4Limit: 2},
"t3.small": {ENILimit: 3, IPv4Limit: 4},
"t3.xlarge": {ENILimit: 4, IPv4Limit: 15},
"t3a.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"t3a.large": {ENILimit: 3, IPv4Limit: 12},
"t3a.medium": {ENILimit: 3, IPv4Limit: 6},
"t3a.micro": {ENILimit: 2, IPv4Limit: 2},
"t3a.nano": {ENILimit: 2, IPv4Limit: 2},
"t3a.small": {ENILimit: 2, IPv4Limit: 4},
"t3a.xlarge": {ENILimit: 4, IPv4Limit: 15},
"t4g.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"t4g.large": {ENILimit: 3, IPv4Limit: 12},
"t4g.medium": {ENILimit: 3, IPv4Limit: 6},
"t4g.micro": {ENILimit: 2, IPv4Limit: 2},
"t4g.nano": {ENILimit: 2, IPv4Limit: 2},
"t4g.small": {ENILimit: 3, IPv4Limit: 4},
"t4g.xlarge": {ENILimit: 4, IPv4Limit: 15},
"u-12tb1.metal": {ENILimit: 5, IPv4Limit: 30},
"u-18tb1.metal": {ENILimit: 15, IPv4Limit: 50},
"u-24tb1.metal": {ENILimit: 15, IPv4Limit: 50},
"u-6tb1.metal": {ENILimit: 5, IPv4Limit: 30},
"u-9tb1.metal": {ENILimit: 5, IPv4Limit: 30},
"x1.16xlarge": {ENILimit: 8, IPv4Limit: 30},
"x1.32xlarge": {ENILimit: 8, IPv4Limit: 30},
"x1e.16xlarge": {ENILimit: 8, IPv4Limit: 30},
"x1e.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"x1e.32xlarge": {ENILimit: 8, IPv4Limit: 30},
"x1e.4xlarge": {ENILimit: 4, IPv4Limit: 15},
"x1e.8xlarge": {ENILimit: 4, IPv4Limit: 15},
"x1e.xlarge": {ENILimit: 3, IPv4Limit: 10},
"z1d.12xlarge": {ENILimit: 15, IPv4Limit: 50},
"z1d.2xlarge": {ENILimit: 4, IPv4Limit: 15},
"z1d.3xlarge": {ENILimit: 8, IPv4Limit: 30},
"z1d.6xlarge": {ENILimit: 8, IPv4Limit: 30},
"z1d.large": {ENILimit: 3, IPv4Limit: 10},
"z1d.metal": {ENILimit: 15, IPv4Limit: 50},
"z1d.xlarge": {ENILimit: 4, IPv4Limit: 15},
}
| {
"pile_set_name": "Github"
} |
Example Programs
================
Most of these examples use the ``argparse`` module to handle command-line
arguments.
To show a help text explaining all available arguments,
use the ``--help`` argument.
For example::
python3 play_file.py --help
| {
"pile_set_name": "Github"
} |
{
"type": "Module",
"span": {
"start": 0,
"end": 37,
"ctxt": 0
},
"body": [
{
"type": "VariableDeclaration",
"span": {
"start": 0,
"end": 37,
"ctxt": 0
},
"kind": "const",
"declare": false,
"declarations": [
{
"type": "VariableDeclarator",
"span": {
"start": 6,
"end": 37,
"ctxt": 0
},
"id": {
"type": "Identifier",
"span": {
"start": 6,
"end": 9,
"ctxt": 0
},
"value": "bar",
"typeAnnotation": null,
"optional": false
},
"init": {
"type": "TsAsExpression",
"span": {
"start": 12,
"end": 37,
"ctxt": 0
},
"expression": {
"type": "Identifier",
"span": {
"start": 12,
"end": 15,
"ctxt": 0
},
"value": "bas",
"typeAnnotation": null,
"optional": false
},
"typeAnnotation": {
"type": "TsTypeQuery",
"span": {
"start": 19,
"end": 37,
"ctxt": 0
},
"exprName": {
"type": "TsQualifiedName",
"left": {
"type": "Identifier",
"span": {
"start": 26,
"end": 30,
"ctxt": 0
},
"value": "beep",
"typeAnnotation": null,
"optional": false
},
"right": {
"type": "Identifier",
"span": {
"start": 31,
"end": 37,
"ctxt": 0
},
"value": "delete",
"typeAnnotation": null,
"optional": false
}
}
}
},
"definite": false
}
]
}
],
"interpreter": null
}
| {
"pile_set_name": "Github"
} |
/*
* Warewolf - Once bitten, there's no going back
* Copyright 2019 by Warewolf Ltd <[email protected]>
* Licensed under GNU Affero General Public License 3.0 or later.
* Some rights reserved.
* Visit our website for more information <http://warewolf.io/>
* AUTHORS <http://warewolf.io/authors.php> , CONTRIBUTORS <http://warewolf.io/contributors.php>
* @license GNU Affero General Public License <http://www.gnu.org/licenses/agpl-3.0.html>
*/
using System.Collections.Generic;
using Dev2.Common.Interfaces.Infrastructure.Providers.Errors;
using Dev2.Providers.Errors;
using Dev2.Studio.Core;
using Dev2.Studio.Core.Activities.Utils;
using Dev2.Studio.Interfaces;
using Dev2.Studio.Interfaces.DataList;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Moq;
using Unlimited.Applications.BusinessDesignStudio.Activities;
namespace Dev2.Activities.Designers.Tests.Designers2.Core.Credentials
{
[TestClass]
public class CredentialsActivityDesignerViewModelTests
{
    // Unit tests for the credentials designer view model: constructor
    // defaults, username/password validation, and error-collection updates.

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_Constructor")]
    public void CredentialsActivityDesignerViewModel_Constructor_Properties_Initialized()
    {
        //------------Setup for test-------------------------

        //------------Execute Test---------------------------
        var viewModel = CreateViewModel();

        //------------Assert Results-------------------------
        // A freshly constructed view model has no errors and no toggles.
        Assert.IsNull(viewModel.Errors);
        Assert.AreEqual(0, viewModel.TitleBarToggles.Count);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword")]
    public void CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword_UserNameAndPasswordBlank_NoErrors()
    {
        // Both blank: credentials are optional, so no validation error.
        Verify_ValidateUserNameAndPassword("", "", true, null);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword")]
    public void CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword_UserNameIsNotBlankAndPasswordIsBlank_HasErrors()
    {
        // Username without password -> "password required" error, focus on password.
        Verify_ValidateUserNameAndPassword("aaaa", "", true, Warewolf.Resource.Errors.ErrorResource.CredentialsPasswordNotNullErrorTest);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword")]
    public void CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword_UserNameIsBlankAndPasswordIsNotBlank_HasErrors()
    {
        // Password without username -> "username required" error, focus on username.
        Verify_ValidateUserNameAndPassword("", "xxx", false, Warewolf.Resource.Errors.ErrorResource.CredentialsUsernameNotNullErrorTest);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword")]
    public void CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword_UserNameAndPasswordAreNotBlank_NoErrors()
    {
        // Both supplied: valid combination, no errors.
        Verify_ValidateUserNameAndPassword("aaa", "xxx", false, null);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword")]
    public void CredentialsActivityDesignerViewModel_ValidateUserNameAndPassword_UserNameIsInvalidExpression_HasErrors()
    {
        // "a]]" is a malformed recordset expression -> invalid-expression error.
        Verify_ValidateUserNameAndPassword("a]]", "", false, Warewolf.Resource.Errors.ErrorResource.CredentialsUsernameInvalidExpressionErrorTest);
    }

    // Runs validation for the given credentials and asserts either no errors
    // (expectedMessageFormat == null/empty) or exactly one error whose message
    // matches; isPasswordError selects which input must gain focus after the
    // error's action runs.
    static void Verify_ValidateUserNameAndPassword(string userName, string password, bool isPasswordError, string expectedMessageFormat)
    {
        //------------Setup for test-------------------------
        // Stub the data list singleton the validator reads variables from.
        var mockDataListViewModel = new Mock<IDataListViewModel>();
        var mockResourceModel = new Mock<IResourceModel>();
        mockResourceModel.Setup(model => model.DataList).Returns("<DataList><a></a></DataList>");
        mockDataListViewModel.Setup(model => model.Resource).Returns(mockResourceModel.Object);
        DataListSingleton.SetDataList(mockDataListViewModel.Object);
        // NOTE(review): LabelText is "Password" even for username errors --
        // presumably those message formats contain no {0} placeholder; confirm
        // against the Warewolf resource strings.
        const string LabelText = "Password";

        var viewModel = CreateViewModel(userName, password);

        //------------Execute Test---------------------------
        viewModel.TestValidateUserNameAndPassword();

        if(string.IsNullOrEmpty(expectedMessageFormat))
        {
            Assert.IsNull(viewModel.Errors);
        }
        else
        {
            Assert.IsNotNull(viewModel.Errors);
            Assert.AreEqual(1, viewModel.Errors.Count);

            var error = viewModel.Errors[0];
            Assert.AreEqual(string.Format(expectedMessageFormat, LabelText), error.Message);

            // Invoking the error's action must focus the offending input.
            error.Do();
            Assert.IsTrue(isPasswordError ? viewModel.IsPasswordFocused : viewModel.IsUserNameFocused);
        }
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_UpdateErrors")]
    public void CredentialsActivityDesignerViewModel_UpdateErrors_ErrorsPropertyIsNotNull_ErrorsAreAdded()
    {
        //------------Setup for test--------------------------
        var errors = new List<IActionableErrorInfo>
        {
            new ActionableErrorInfo(() => { }) { ErrorType = ErrorType.Critical, Message = "Error 2" },
            new ActionableErrorInfo(() => { }) { ErrorType = ErrorType.Critical, Message = "Error 3" }
        };

        var viewModel = CreateViewModel();
        viewModel.Errors = new List<IActionableErrorInfo>
        {
            new ActionableErrorInfo(() => { }) { ErrorType = ErrorType.Critical, Message = "Error 1" },
        };
        Assert.AreEqual(1, viewModel.Errors.Count);

        //------------Execute Test---------------------------
        viewModel.TestUpdateErrors(errors);

        //------------Assert Results-------------------------
        // New errors are appended to the existing list.
        Assert.AreEqual(3, viewModel.Errors.Count);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_UpdateErrors")]
    public void CredentialsActivityDesignerViewModel_UpdateErrors_ErrorsParameterIsEmptyList_ErrorsAreNotAdded()
    {
        //------------Setup for test--------------------------
        var errors = new List<IActionableErrorInfo>();

        var viewModel = CreateViewModel();
        viewModel.Errors = new List<IActionableErrorInfo>
        {
            new ActionableErrorInfo(() => { }) { ErrorType = ErrorType.Critical, Message = "Error 1" },
        };
        Assert.AreEqual(1, viewModel.Errors.Count);

        //------------Execute Test---------------------------
        viewModel.TestUpdateErrors(errors);

        //------------Assert Results-------------------------
        // An empty update leaves the existing errors untouched.
        Assert.AreEqual(1, viewModel.Errors.Count);
    }

    [TestMethod]
    [Owner("Trevor Williams-Ros")]
    [TestCategory("CredentialsActivityDesignerViewModel_UpdateErrors")]
    public void CredentialsActivityDesignerViewModel_UpdateErrors_ErrorsParameterIsNull_ErrorsAreNotAdded()
    {
        //------------Setup for test--------------------------
        var viewModel = CreateViewModel();
        viewModel.Errors = new List<IActionableErrorInfo>
        {
            new ActionableErrorInfo(() => { }) { ErrorType = ErrorType.Critical, Message = "Error 1" },
        };
        Assert.AreEqual(1, viewModel.Errors.Count);

        //------------Execute Test---------------------------
        viewModel.TestUpdateErrors(null);

        //------------Assert Results-------------------------
        // A null update is ignored; existing errors are kept.
        Assert.AreEqual(1, viewModel.Errors.Count);
    }

    // Builds the designer view model around a DsfFileRead model item carrying
    // the supplied credentials.
    static TestCredentialsActivityDesignerViewModel CreateViewModel(string userName = "", string password = "")
    {
        var viewModel = new TestCredentialsActivityDesignerViewModel(ModelItemUtils.CreateModelItem(new DsfFileRead { Username = userName, Password = password }));
        return viewModel;
    }
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<title>SUMIF Function</title>
<meta charset="utf-8" />
<meta name="description" content="" />
<link type="text/css" rel="stylesheet" href="../editor.css" />
</head>
<body>
<div class="mainpart">
<h1>SUMIF Function</h1>
<p>The <b>SUMIF</b> function is one of the math and trigonometry functions. It is used to add all the numbers in the selected range of cells based on the specified criterion and return the result.</p>
<p>The <b>SUMIF</b> function syntax is:</p>
<p style="text-indent: 150px;"><b><em>SUMIF(cell-range, selection-criteria [, sum-range])</em></b></p>
<p><em>where</em></p>
<p style="text-indent: 50px;"><b><em>cell-range</em></b> is the selected range of cells to apply the criterion to.</p>
<p style="text-indent: 50px;"><b><em>selection-criteria</em></b> is the criterion used to determine the cells to sum, a value entered manually or included into the cell you make reference to.</p>
<p style="text-indent: 50px;"><b><em>sum-range</em></b> is the range of cells to sum. It is an optional argument, if omitted, the function will sum the numbers of <b><em>cell-range</em></b>.</p>
<p class="note"><b>Note:</b> you can use wildcard characters when specifying criteria. The question mark "?" can replace any single character and the asterisk "*" can be used instead of any number of characters.</p>
<p>To apply the <b>SUMIF</b> function,</p>
<ol>
<li>select the cell where you wish to display the result,</li>
<li>click the <b>Insert Function</b> <img alt="Insert Function icon" src="../images/insertfunction.png" /> icon situated at the top toolbar,
<br />or right-click within a selected cell and select the <b>Insert Function</b> option from the menu,
<br />or click the <img alt="Function icon" src="../images/function.png" /> icon situated at the formula bar,
</li>
<li>select the <b>Math and trigonometry</b> function group from the list,</li>
<li>click the <b>SUMIF</b> function,</li>
<li>enter the required arguments separating them by commas,</li>
<li>press the <b>Enter</b> button.</li>
</ol>
<p>The result will be displayed in the selected cell.</p>
<p style="text-indent: 150px;"><img alt="SUMIF Function" src="../images/sumif.png" /></p>
</div>
</body>
</html> | {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for web category"""
from . import ContainerTests
import os
from ..large import test_web
from ..tools import get_data_dir, UMAKE
class FirefoxDevContainer(ContainerTests, test_web.FirefoxDevTests):
    """This will test the Firefox dev integration inside a container"""

    # Container start/stop timeouts, in seconds.
    TIMEOUT_START = 20
    TIMEOUT_STOP = 10

    def setUp(self):
        # Hosts (keyed by port) the container is allowed to reach for this test;
        # must be set before super().setUp() spins up the container.
        self.hosts = {443: ["www.mozilla.org", "download.mozilla.org"]}
        super().setUp()
        # override with container path
        self.installed_path = os.path.join(self.install_base_path, "web", "firefox-dev")

    def test_install_with_changed_download_page(self):
        """Installing firefox developer should fail if download page has significantly changed"""
        # Local fixture standing in for the remote download page served to the container.
        download_page_file_path = os.path.join(get_data_dir(), "server-content", "www.mozilla.org", "en-US",
                                               "firefox", "developer", "all")
        umake_command = self.command('{} web firefox-dev'.format(UMAKE))
        self.bad_download_page_test(umake_command, download_page_file_path)
        # Nothing should have been installed: no pinned launcher, no binary on PATH.
        self.assertFalse(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assertFalse(self.is_in_path(os.path.join(self.binary_dir, self.desktop_filename.split('.')[0])))
class PhantomJSInContainer(ContainerTests, test_web.PhantomJSTests):
    """This will test the PhantomJS integration inside a container"""

    # Container start/stop timeouts, in seconds.
    TIMEOUT_START = 20
    TIMEOUT_STOP = 10

    def setUp(self):
        # Hosts (keyed by port) the container is allowed to reach for this test.
        self.hosts = {80: ["phantomjs.org"], 443: ['bitbucket.org']}
        super().setUp()
        # override with container path
        self.installed_path = os.path.join(self.install_base_path, "web", "phantomjs")

    def test_install_with_changed_download_page(self):
        """Installing PhantomJS should fail if download page has significantly changed"""
        # Local fixture standing in for the remote download page served to the container.
        download_page_file_path = os.path.join(get_data_dir(), "server-content", "phantomjs.org", "download.html")
        umake_command = self.command('{} web phantomjs'.format(UMAKE))
        self.bad_download_page_test(umake_command, download_page_file_path)
        # The install must not have produced the executable.
        self.assertFalse(self.path_exists(self.exec_path))
class GeckodriverInContainer(ContainerTests, test_web.GeckodriverTests):
    """This will test the Geckodriver integration inside a container"""

    # Container start/stop timeouts, in seconds.
    TIMEOUT_START = 20
    TIMEOUT_STOP = 10

    def setUp(self):
        # Hosts (keyed by port) the container is allowed to reach for this test.
        self.hosts = {443: ["api.github.com", "github.com"]}
        super().setUp()
        # override with container path
        self.installed_path = os.path.join(self.install_base_path, "web", "geckodriver")

    def test_install_with_changed_download_page(self):
        """Installing Geckodriver should fail if download page has significantly changed"""
        # Local fixture standing in for the GitHub "latest release" API response.
        download_page_file_path = os.path.join(get_data_dir(), "server-content", "api.github.com",
                                               "repos", "mozilla", "geckodriver", "releases", "latest")
        umake_command = self.command('{} web geckodriver'.format(UMAKE))
        # Pass the freshly built command. Previously this passed
        # self.command(self.command_args) (relying on an attribute set elsewhere)
        # and left umake_command unused — inconsistent with the sibling
        # Firefox/PhantomJS tests above.
        self.bad_download_page_test(umake_command, download_page_file_path)
        # Nothing should have been installed: no pinned launcher, no exec symlink.
        self.assertFalse(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assertFalse(self.is_in_path(self.exec_link))
class ChromedriverInContainer(ContainerTests, test_web.ChromedriverTests):
    """This will test the Chromedriver integration inside a container"""

    # Container start/stop timeouts, in seconds.
    TIMEOUT_START = 20
    TIMEOUT_STOP = 10

    def setUp(self):
        # Hosts (keyed by port) the container is allowed to reach for this test.
        self.hosts = {443: ["chromedriver.storage.googleapis.com"]}
        super().setUp()
        # override with container path
        self.installed_path = os.path.join(self.install_base_path, "web", "chromedriver")

    def test_install_with_changed_download_page(self):
        """Installing Chromedriver should fail if download page has significantly changed"""
        # Local fixture standing in for the Chromedriver LATEST_RELEASE endpoint.
        download_page_file_path = os.path.join(get_data_dir(), "server-content", "chromedriver.storage.googleapis.com",
                                               "LATEST_RELEASE")
        umake_command = self.command('{} web chromedriver'.format(UMAKE))
        # Pass the freshly built command. Previously this passed
        # self.command(self.command_args) (relying on an attribute set elsewhere)
        # and left umake_command unused — inconsistent with the sibling
        # Firefox/PhantomJS tests above.
        self.bad_download_page_test(umake_command, download_page_file_path)
        # Nothing should have been installed: no pinned launcher, no exec symlink.
        self.assertFalse(self.launcher_exists_and_is_pinned(self.desktop_filename))
        self.assertFalse(self.is_in_path(self.exec_link))
| {
"pile_set_name": "Github"
} |
<div class="apiDetail">
<div>
<h2><span>Boolean / Function(treeId, treeNode)</span><span class="path">setting.view.</span>showTitle</h2>
<h3>Overview<span class="h3_info">[ depends on <span class="highlight_green">jquery.ztree.core</span> js ]</span></h3>
<div class="desc">
<p></p>
<div class="longdesc">
<p>Set to show or hide the 'title' attribute of node DOM.</p>
<p class="highlight_red">Please see the <span class="highlight_red">setting.data.key.title</span> attribute</p>
<p>Default: true</p>
</div>
</div>
<h3>Boolean Format</h3>
<div class="desc">
<p> true means: show the 'title' attribute of node DOM.</p>
<p> false means: hide the 'title' attribute of node DOM.</p>
<p class="highlight_red">When setting.view.showTitle = true and setting.data.key.title = '', zTree uses the value of the 'setting.data.key.name' attribute as the 'setting.data.key.title'.</p>
</div>
<h3>Function Parameter Descriptions</h3>
<div class="desc">
<h4><b>treeId</b><span>String</span></h4>
<p>zTree unique identifier: <b class="highlight_red">treeId</b>, easy for users to control.</p>
<h4 class="topLine"><b>treeNode</b><span>JSON</span></h4>
<p>JSON data object of the node whose title is to be shown.</p>
<h4 class="topLine"><b>Return </b><span>Boolean</span></h4>
<p>Return value is same as 'Boolean Format'</p>
</div>
<h3>Examples of setting & function</h3>
<h4>1. Hide the 'title' attribute of node DOM.</h4>
<pre xmlns=""><code>var setting = {
view: {
showTitle: false
}
};
......</code></pre>
<h4>2. Hide the 'title' attribute of node DOM which level=2.</h4>
<pre xmlns=""><code>function showTitleForTree(treeId, treeNode) {
return treeNode.level != 2;
};
var setting = {
view: {
showTitle: showTitleForTree
}
};
......</code></pre>
</div>
</div> | {
"pile_set_name": "Github"
} |
# Package expression for bamf, the window/application matching framework used
# by docks and panels to associate open windows with their .desktop entries.
{ stdenv
, pantheon
, autoconf
, automake
, libtool
, gnome3
, which
, fetchgit
, libgtop
, libwnck3
, glib
, vala
, pkgconfig
, libstartup_notification
, gobject-introspection
, gtk-doc
, docbook_xsl
, xorgserver
, dbus
, python3
, wrapGAppsHook
}:

stdenv.mkDerivation rec {
  pname = "bamf";
  version = "0.5.4";

  # Split outputs: runtime libraries, development headers/GIR files, and
  # the generated gtk-doc manual.
  outputs = [ "out" "dev" "devdoc" ];

  src = fetchgit {
    url = "https://git.launchpad.net/~unity-team/bamf";
    rev = version;
    sha256 = "1klvij1wyhdj5d8sr3b16pfixc1yk8ihglpjykg7zrr1f50jfgsz";
  };

  nativeBuildInputs = [
    (python3.withPackages (ps: with ps; [ lxml ])) # Tests
    autoconf
    automake
    dbus
    docbook_xsl
    gnome3.gnome-common
    gobject-introspection
    gtk-doc
    libtool
    pkgconfig
    vala
    which
    wrapGAppsHook
    xorgserver
  ];

  buildInputs = [
    glib
    libgtop
    libstartup_notification
    libwnck3
  ];

  patches = [
    # Port tests and checks to python3 lxml.
    ./gtester2xunit-python3.patch
  ];

  # Fix hard-coded path
  # https://bugs.launchpad.net/bamf/+bug/1780557
  postPatch = ''
    substituteInPlace data/Makefile.am \
      --replace '/usr/lib/systemd/user' '@prefix@/lib/systemd/user'
  '';

  configureFlags = [
    "--enable-gtk-doc"
    "--enable-headless-tests"
  ];

  # fix paths
  makeFlags = [
    "INTROSPECTION_GIRDIR=${placeholder "dev"}/share/gir-1.0/"
    "INTROSPECTION_TYPELIBDIR=${placeholder "out"}/lib/girepository-1.0"
  ];

  # The repository ships no pre-generated configure script, so bootstrap it.
  preConfigure = ''
    ./autogen.sh
  '';

  # TODO: Requires /etc/machine-id
  doCheck = false;

  # glib-2.62 deprecations
  NIX_CFLAGS_COMPILE = "-DGLIB_DISABLE_DEPRECATION_WARNINGS";

  meta = with stdenv.lib; {
    description = "Application matching framework";
    longDescription = ''
      Removes the headache of applications matching
      into a simple DBus daemon and c wrapper library.
    '';
    homepage = "https://launchpad.net/bamf";
    license = licenses.lgpl3;
    platforms = platforms.linux;
    maintainers = with maintainers; [ davidak ] ++ pantheon.maintainers;
  };
}
| {
"pile_set_name": "Github"
} |
{
"action": {
"error": {
"variety": [
"Misdelivery"
],
"vector": [
"Unknown"
]
}
},
"actor": {
"internal": {
"motive": [
"NA"
],
"variety": [
"Unknown"
]
}
},
"asset": {
"assets": [
{
"variety": "U - Desktop"
},
{
"variety": "U - Desktop or laptop"
}
],
"cloud": [
"Unknown"
]
},
"attribute": {
"confidentiality": {
"data": [
{
"variety": "Personal"
}
],
"data_disclosure": "Yes",
"data_victim": [
"Patient"
],
"state": [
"Unknown"
]
}
},
"discovery_method": {
"unknown": true
},
"impact": {
"overall_rating": "Unknown"
},
"incident_id": "D66F8A88-A7CE-44D3-8FBB-948D79939099",
"plus": {
"analysis_status": "Finalized",
"analyst": "Spitler",
"attribute": {
"confidentiality": {
"credit_monitoring": "Unknown"
}
},
"created": "2016-10-06T19:30:00Z",
"dbir_year": 2017,
"github": "8067",
"master_id": "8783AED7-538A-4651-8252-044803D977BC",
"modified": "2016-10-06T19:36:00Z",
"sub_source": "phidbr",
"timeline": {
"notification": {
"day": 7,
"month": 7,
"year": 2016
}
}
},
"reference": "http://www.hipaajournal.com/california-dept-of-corrections-privacy-breach-3495/",
"schema_version": "1.3.4",
"security_incident": "Confirmed",
"source_id": "vcdb",
"summary": "Email misdelivery",
"timeline": {
"compromise": {
"unit": "NA"
},
"exfiltration": {
"unit": "NA"
},
"incident": {
"day": 2,
"month": 5,
"year": 2016
}
},
"victim": {
"country": [
"US"
],
"employee_count": "Unknown",
"industry": "922140",
"region": [
"019021"
],
"state": "CA",
"victim_id": "Division of Adult Institutions' California Health Care Facility"
}
} | {
"pile_set_name": "Github"
} |
<?php
namespace GuzzleHttp\Tests;
use GuzzleHttp\Client;
use GuzzleHttp\Exception\ClientException;
use GuzzleHttp\Handler\MockHandler;
use GuzzleHttp\HandlerStack;
use GuzzleHttp\Pool;
use GuzzleHttp\Promise\Promise;
use GuzzleHttp\Psr7\Request;
use GuzzleHttp\Psr7\Response;
use PHPUnit\Framework\TestCase;
use Psr\Http\Message\RequestInterface;
/**
 * Tests for {@see Pool}: argument validation, concurrent execution with a
 * bounded pool size, per-request options, batching, and result callbacks.
 */
class PoolTest extends TestCase
{
    /** A non-iterable $requests argument is rejected when the promise is waited on. */
    public function testValidatesIterable()
    {
        $p = new Pool(new Client(), 'foo');
        $this->expectException(\InvalidArgumentException::class);
        $p->promise()->wait();
    }

    /** Each element of the iterable must be a request or a callable. */
    public function testValidatesEachElement()
    {
        $c = new Client();
        $requests = ['foo'];
        $p = new Pool($c, new \ArrayIterator($requests));
        $this->expectException(\InvalidArgumentException::class);
        $p->promise()->wait();
    }

    /**
     * A single request is sent and its promise settles without error.
     *
     * @doesNotPerformAssertions
     */
    public function testSendsAndRealizesFuture()
    {
        $c = $this->getClient();
        $p = new Pool($c, [new Request('GET', 'http://example.com')]);
        $p->promise()->wait();
    }

    /**
     * With pool_size=2 and three requests, the pending third request is still
     * executed while waiting on the pool's promise.
     *
     * @doesNotPerformAssertions
     */
    public function testExecutesPendingWhenWaiting()
    {
        // Each promise resolves itself when its wait function runs.
        $r1 = new Promise(static function () use (&$r1) {
            $r1->resolve(new Response());
        });
        $r2 = new Promise(static function () use (&$r2) {
            $r2->resolve(new Response());
        });
        $r3 = new Promise(static function () use (&$r3) {
            $r3->resolve(new Response());
        });
        $handler = new MockHandler([$r1, $r2, $r3]);
        $c = new Client(['handler' => $handler]);
        $p = new Pool($c, [
            new Request('GET', 'http://example.com'),
            new Request('GET', 'http://example.com'),
            new Request('GET', 'http://example.com'),
        ], ['pool_size' => 2]);
        $p->promise()->wait();
    }

    /** Request options passed via the 'options' key are applied to every request. */
    public function testUsesRequestOptions()
    {
        $h = [];
        // Handler that records each request it receives.
        $handler = new MockHandler([
            static function (RequestInterface $request) use (&$h) {
                $h[] = $request;
                return new Response();
            }
        ]);
        $c = new Client(['handler' => $handler]);
        $opts = ['options' => ['headers' => ['x-foo' => 'bar']]];
        $p = new Pool($c, [new Request('GET', 'http://example.com')], $opts);
        $p->promise()->wait();
        self::assertCount(1, $h);
        self::assertTrue($h[0]->hasHeader('x-foo'));
    }

    /** Callables in the request list receive the pool options and may return promises. */
    public function testCanProvideCallablesThatReturnResponses()
    {
        $h = [];
        $handler = new MockHandler([
            static function (RequestInterface $request) use (&$h) {
                $h[] = $request;
                return new Response();
            }
        ]);
        $c = new Client(['handler' => $handler]);
        $optHistory = [];
        $fn = static function (array $opts) use (&$optHistory, $c) {
            $optHistory = $opts;
            return $c->request('GET', 'http://example.com', $opts);
        };
        $opts = ['options' => ['headers' => ['x-foo' => 'bar']]];
        $p = new Pool($c, [$fn], $opts);
        $p->promise()->wait();
        self::assertCount(1, $h);
        self::assertTrue($h[0]->hasHeader('x-foo'));
    }

    /**
     * Pool::batch() returns results keyed by request index; failures (here a
     * 404 turned into a ClientException) are returned in place, not thrown.
     */
    public function testBatchesResults()
    {
        $requests = [
            new Request('GET', 'http://foo.com/200'),
            new Request('GET', 'http://foo.com/201'),
            new Request('GET', 'http://foo.com/202'),
            new Request('GET', 'http://foo.com/404'),
        ];
        // Echo the requested path back as the response status code.
        $fn = static function (RequestInterface $request) {
            return new Response(\substr($request->getUri()->getPath(), 1));
        };
        $mock = new MockHandler([$fn, $fn, $fn, $fn]);
        $handler = HandlerStack::create($mock);
        $client = new Client(['handler' => $handler]);
        $results = Pool::batch($client, $requests);
        self::assertCount(4, $results);
        self::assertSame([0, 1, 2, 3], \array_keys($results));
        self::assertSame(200, $results[0]->getStatusCode());
        self::assertSame(201, $results[1]->getStatusCode());
        self::assertSame(202, $results[2]->getStatusCode());
        self::assertInstanceOf(ClientException::class, $results[3]);
    }

    /** The 'fulfilled' callback supplied to batch() fires for successful responses. */
    public function testBatchesResultsWithCallbacks()
    {
        $requests = [
            new Request('GET', 'http://foo.com/200'),
            new Request('GET', 'http://foo.com/201')
        ];
        $mock = new MockHandler([
            static function (RequestInterface $request) {
                return new Response(\substr($request->getUri()->getPath(), 1));
            }
        ]);
        $client = new Client(['handler' => $mock]);
        // NOTE(review): $called is captured by reference before assignment; it is
        // null until the callback runs — presumably intentional, verify.
        $results = Pool::batch($client, $requests, [
            'fulfilled' => static function ($value) use (&$called) {
                $called = true;
            }
        ]);
        self::assertCount(2, $results);
        self::assertTrue($called);
    }

    /** String keys of the request array are forwarded as the $index callback argument. */
    public function testUsesYieldedKeyInFulfilledCallback()
    {
        $r1 = new Promise(static function () use (&$r1) {
            $r1->resolve(new Response());
        });
        $r2 = new Promise(static function () use (&$r2) {
            $r2->resolve(new Response());
        });
        $r3 = new Promise(static function () use (&$r3) {
            $r3->resolve(new Response());
        });
        $handler = new MockHandler([$r1, $r2, $r3]);
        $c = new Client(['handler' => $handler]);
        $keys = [];
        $requests = [
            'request_1' => new Request('GET', 'http://example.com'),
            'request_2' => new Request('GET', 'http://example.com'),
            'request_3' => new Request('GET', 'http://example.com'),
        ];
        $p = new Pool($c, $requests, [
            'pool_size' => 2,
            'fulfilled' => static function ($res, $index) use (&$keys) {
                $keys[] = $index;
            }
        ]);
        $p->promise()->wait();
        self::assertCount(3, $keys);
        self::assertSame($keys, \array_keys($requests));
    }

    /** Builds a client whose mock handler queues $total empty 200 responses. */
    private function getClient($total = 1)
    {
        $queue = [];
        for ($i = 0; $i < $total; $i++) {
            $queue[] = new Response();
        }
        $handler = new MockHandler($queue);
        return new Client(['handler' => $handler]);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* MegaMek -
* Copyright (C) 2000,2001,2002,2003,2004,2005 Ben Mazur ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
package megamek.common.options;
import java.io.File;
import java.io.IOException;
import java.util.Enumeration;
import java.util.Vector;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import static org.junit.Assert.*;
/**
*
* @author nderwin
*/
/**
 * Verifies that {@link GameOptions} values survive a save/load round-trip
 * through the XML options file.
 *
 * @author nderwin
 */
public class GameOptionsTest {

    private GameOptions testMe;

    // Isolated scratch directory; JUnit deletes it after each test.
    @Rule
    public TemporaryFolder tmpFolder = new TemporaryFolder();

    @Before
    public void setUp() {
        testMe = new GameOptions();
    }

    @Test
    public void testSaveAndLoadOptions() throws IOException {
        File f = tmpFolder.newFile("test-game-options.xml");

        // Assign every option a distinct, position-derived value so each one
        // can be recognized again after reloading from disk.
        Vector<IBasicOption> options = new Vector<>();
        Enumeration<IOption> opts = testMe.getOptions();
        int count = 0;
        while (opts.hasMoreElements()) {
            IOption io = opts.nextElement();
            switch (io.getType()) {
                case IOption.STRING:
                case IOption.CHOICE:
                    io.setValue(""+count);
                    break;
                case IOption.BOOLEAN:
                    // Alternate true/false by position.
                    if (count%2==0) {
                        io.setValue(Boolean.TRUE);
                    } else {
                        io.setValue(Boolean.FALSE);
                    }
                    break;
                case IOption.INTEGER:
                    io.setValue(count);
                    break;
                case IOption.FLOAT:
                    io.setValue(Float.valueOf(""+count));
                    break;
            }
            options.add(io);
            count++;
        }

        GameOptions.saveOptions(options, f.getAbsolutePath());

        // The file must exist and contain data before we attempt to reload it.
        assertTrue(f.exists());
        assertTrue(f.length() > 0);

        // Reload and verify every option kept its assigned value, walking the
        // options in the same order as above.
        testMe.loadOptions(f, true);
        opts = testMe.getOptions();
        count = 0;
        while (opts.hasMoreElements()) {
            IOption io = opts.nextElement();
            switch (io.getType()) {
                case IOption.STRING:
                case IOption.CHOICE:
                case IOption.INTEGER:
                    assertTrue(io.getValue().toString().equals(""+count));
                    break;
                case IOption.BOOLEAN:
                    if (count%2==0) {
                        assertTrue(io.booleanValue());
                    } else {
                        assertFalse(io.booleanValue());
                    }
                    break;
                case IOption.FLOAT:
                    assertEquals(Float.valueOf(""+count), io.floatValue(), 0.0f);
                    break;
            }
            count++;
        }
    }
}
| {
"pile_set_name": "Github"
} |
<html>
<head>
<META HTTP-EQUIV="Content-Type" content="text/html; charset=Windows-1252">
</head>
<body>
<pre>
<table width=100% bgcolor=#CFCFE5><tr> <td> <font face=arial size=+3>
Build Log
</font></table><table width=* cellspacing=0 cellpadding=0><tr><td width=0 bgcolor=#EDEDF5> </td><td width=0 bgcolor=#FFFFFF> </td><td width=*><pre>
<h3>------- Build started: Project: Project, Configuration: Debug|Win32 -------
</h3>
</pre></table><table width=100% bgcolor=#DFDFE5><tr><td><font face=arial size=+2>
Command Lines
</font></table><table width=* cellspacing=0 cellpadding=0><tr><td width=0 bgcolor=#EDEDF5> </td><td width=0 bgcolor=#FFFFFF> </td><td width=*><pre>Creating temporary file "j:\Programacion\Mis programas\I.Worm.Wrath-Rage\Ultima versión\Debug\RSP000001.rsp" with contents
[
/Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /Gm /EHsc /RTC1 /MLd /GS /Fo"Debug/" /Fd"Debug/vc70.pdb" /W3 /c /Wp64 /Zi /TP
".\project.cpp"
]
Creating command line "cl.exe @"j:\Programacion\Mis programas\I.Worm.Wrath-Rage\Ultima versión\Debug\RSP000001.rsp" /nologo"
Creating temporary file "j:\Programacion\Mis programas\I.Worm.Wrath-Rage\Ultima versión\Debug\RSP000002.rsp" with contents
[
/OUT:"Debug/Project.exe" /INCREMENTAL /NOLOGO /DEBUG /PDB:"Debug/Project.pdb" /SUBSYSTEM:WINDOWS /MACHINE:X86 wsock32.lib msgrguid.lib comsupp.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib
".\Debug\b64.obj"
".\Debug\b64decode.obj"
".\Debug\project.obj"
]
Creating command line "link.exe @"j:\Programacion\Mis programas\I.Worm.Wrath-Rage\Ultima versión\Debug\RSP000002.rsp""
</pre></table><table width=100% bgcolor=#DFDFE5><tr><td><font face=arial size=+2>
Output Window
</font></table><table width=* cellspacing=0 cellpadding=0><tr><td width=0 bgcolor=#EDEDF5> </td><td width=0 bgcolor=#FFFFFF> </td><td width=*><pre>Compiling...
project.cpp
project.cpp(991) : warning C4018: '<' : signed/unsigned mismatch
Linking...
LINK : LNK6004: Debug/Project.exe not found or not built by the last incremental link; performing full link
</pre></table><table width=100% bgcolor=#DFDFE5><tr><td><font face=arial size=+2>
Results
</font></table><table width=* cellspacing=0 cellpadding=0><tr><td width=0 bgcolor=#EDEDF5> </td><td width=0 bgcolor=#FFFFFF> </td><td width=*><pre>
Build log was saved at "file://j:\Programacion\Mis programas\I.Worm.Wrath-Rage\Ultima versión\Debug\BuildLog.htm"
Project - 0 error(s), 1 warning(s)</pre></table><table width=100% height=20 bgcolor=#CFCFE5><tr><td><font face=arial size=+2>
</font></table></body></html> | {
"pile_set_name": "Github"
} |
#!/bin/bash -eu
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# remove dependencies on boost's program_options, we don't need it
# and it won't link because oss-fuzz adds -stdlib=libc++ to the flags,
# which would require rebuilding boost
sed -i 's/BOOST_PROGRAM_OPTIONS(\[mt\])//' configure.ac
sed -i 's/AC_MSG_ERROR(\[Boost Program Options library not found\])/AC_MSG_NOTICE(\[Boost Program Options library not found\])/' configure.ac

# we also need to disable building as PIE because libFuzzingEngine.a
# does not appear to be compiled as PIC
sed -i 's/AC_CC_PIE//' configure.ac

# build fuzzing targets.
# Note: a failing ./configure now fails the script via `set -e`; the previous
# `|| /bin/bash` fallback dropped into an interactive shell, which hangs
# non-interactive CI builds and masks the failure when the shell exits 0.
autoreconf -vi
./configure \
  --without-dynmodules \
  --with-modules='' \
  --disable-lua-records \
  --disable-ixfrdist \
  --enable-fuzz-targets \
  --disable-dependency-tracking \
  --disable-silent-rules

cd pdns
make -j$(nproc) fuzz_targets

# copy the fuzzing target binaries
cp fuzz_target_* "${OUT}/"

# copy the zones used in the regression tests to the "zones" corpus
cp ../regression-tests/zones/* ../fuzzing/corpus/zones/

# generate the corpus files
zip -j "${OUT}/fuzz_target_dnsdistcache_seed_corpus.zip" ../fuzzing/corpus/raw-dns-packets/*
zip -j "${OUT}/fuzz_target_moadnsparser_seed_corpus.zip" ../fuzzing/corpus/raw-dns-packets/*
zip -j "${OUT}/fuzz_target_packetcache_seed_corpus.zip" ../fuzzing/corpus/raw-dns-packets/*
zip -j "${OUT}/fuzz_target_zoneparsertng_seed_corpus.zip" ../fuzzing/corpus/zones/*
"pile_set_name": "Github"
} |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2018 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.modules;
import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.security.PrivilegedExceptionAction;
/**
 * Privileged action that opens and connects a {@link URLConnection} for a
 * fixed URL, for use with {@code AccessController.doPrivileged}.
 */
final class GetURLConnectionAction implements PrivilegedExceptionAction<URLConnection> {
    private final URL url;

    GetURLConnectionAction(final URL url) {
        this.url = url;
    }

    public URLConnection run() throws IOException {
        // Open and eagerly connect before handing the connection back.
        final URLConnection connection = url.openConnection();
        connection.connect();
        return connection;
    }
}
| {
"pile_set_name": "Github"
} |
//
// MIT License
//
// Copyright (c) 2017 Touchwonders B.V.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
import UIKit
import Transition
/**
* The ShapeSourceViewController presents four ShapeViews (plus, minus, divide, multiply).
* Each ShapeView can be dragged up and then dropped in the appearing DropTargetView(Controller).
*/
class ShapeSourceViewController: UIViewController {

    /// The four draggable shape views (plus, minus, divide, multiply), distinguished by tag.
    @IBOutlet var shapeViews: [ShapeView]!

    private(set) var transitionController: TransitionController!
    private(set) var interactionController: ShapeInteractionController!

    /// We keep a reference to the instantiated dropTargetViewController so that we can ask
    /// for its translationFactor (the fraction of its height it should be translated in order
    /// to position its dropTarget at the dropPoint).
    fileprivate weak var dropTargetViewController: DropTargetViewController?

    /// The dropPoint is the point in the view at which the dropTarget should center.
    fileprivate var dropPoint: CGPoint {
        return CGPoint(x: view.bounds.midX, y: view.bounds.height * 0.55)
    }

    override func viewDidLoad() {
        super.viewDidLoad()

        view.backgroundColor = UIColor(white: 0.9, alpha: 1)

        // Centered instructional label built in code (not from the storyboard).
        let label = UILabel()
        label.translatesAutoresizingMaskIntoConstraints = false
        label.text = "Drag one of the items below to trigger the transition."
        label.textColor = UIColor(white: 0.2, alpha: 1)
        label.textAlignment = .center
        label.numberOfLines = 0
        view.addSubview(label)
        NSLayoutConstraint.activate([
            label.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 30),
            label.rightAnchor.constraint(equalTo: view.rightAnchor, constant: -30),
            label.centerYAnchor.constraint(equalTo: view.centerYAnchor)
        ])

        // Touches are handled by the interaction controller's gesture recognizer,
        // not by the shape views themselves.
        shapeViews.forEach {
            $0.isUserInteractionEnabled = false
            $0.image = Shape(rawValue: $0.tag)!.selectedImage
        }
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        /// Only when our view has a .window, it can be used as operationController in a TransitionController.
        /// The window will be used to install the gestureRecognizer, such that after modal presentation it
        /// can still be used to detect gestures that should lead to dismissing.
        if interactionController == nil {
            self.interactionController = ShapeInteractionController(forViewController: self, shapeViews: shapeViews, dropPoint: dropPoint)
            self.transitionController = TransitionController(forInteractiveModalPresentationsFrom: self, transitionsSource: self, interactionController: interactionController)
        }
        hintShapeViews()
    }

    /// Briefly pulses each shape view (scale up and back), staggered by tag,
    /// to hint that the shapes can be dragged.
    fileprivate func hintShapeViews() {
        shapeViews.forEach {
            let hintAnimation = CABasicAnimation(keyPath: "transform.scale")
            hintAnimation.fromValue = 1.0
            hintAnimation.toValue = 1.1
            hintAnimation.autoreverses = true
            hintAnimation.duration = 0.15
            hintAnimation.beginTime = CACurrentMediaTime() + (CFTimeInterval($0.tag) * 0.05)
            $0.layer.add(hintAnimation, forKey: nil)
        }
    }
}
extension ShapeSourceViewController : TransitionsSource {
    /// Provide the Transition for presentation / dismissal
    func transitionFor(operationContext: TransitionOperationContext, interactionController: TransitionInteractionController?) -> Transition {
        /// The dropTargetViewAnimation should move the dropTargetView exactly to the point at which the dropTarget
        /// centers at the requested dropPoint. It uses a closure to request the translation factor when needed,
        /// so that the dropTargetViewController is loaded and is given its correct dimensions to derive the translation factor.
        let dropTargetViewAnimation = DropTargetViewAnimation(for: operationContext.operation, translationFactor: { [weak self] in
            guard let strongSelf = self else { return 0.0 }
            return strongSelf.dropTargetViewController?.translationFactorFor(dropPoint: strongSelf.dropPoint) ?? 0.0
        })

        guard let interactionController = interactionController else {
            /// The sharedElement part of the transition describes how the shared element should move and animate.
            /// Not providing it is optional, but not possible for this particular example because the transition can
            /// only be initiated by interacting with a ShapeView.
            fatalError("This transition cannot be performed without a shared element")
        }
        // Drive the shared element (the dragged shape) from the interaction controller's state.
        let shapeInteractionAnimation = ShapeInteractionAnimation(interactionController: interactionController)

        return Transition(duration: 0.3, animation: dropTargetViewAnimation, sharedElement: shapeInteractionAnimation)
    }
}
extension ShapeSourceViewController : InteractiveModalTransitionOperationDelegate {
    /// The `OperationDelegate` for modal transitions is always the `operationController` (in modal transition speak: the `sourceViewController`, the viewController that initiates the transition).
    /// It should provide the viewController that should be presented (the `presentedViewController`). The `TransitionController` will eventually call `present(_:animated:completion:)`
    /// on the `operationController`, but not before correctly configuring the `presentedViewController` (setting the transitioningDelegate and modalPresentationStyle).
    func viewControllerForInteractiveModalPresentation(by sourceViewController: UIViewController, gestureRecognizer: UIGestureRecognizer) -> UIViewController {
        // Keep a weak reference so transitionFor(...) can query the translation factor later.
        let dropTargetViewController = DropTargetViewController.fromStoryboard(self.storyboard)
        self.dropTargetViewController = dropTargetViewController
        return dropTargetViewController
    }
}
extension ShapeSourceViewController : TransitionPhaseDelegate {
    /// Called when a transition completes; when we become visible again after
    /// dismissal, replay the hint animation on the shape views.
    func didTransition(from fromViewController: UIViewController, to toViewController: UIViewController, with sharedElement: SharedElement?) {
        if toViewController == self {
            hintShapeViews()
        }
    }
}
| {
"pile_set_name": "Github"
} |
#include <vector>
#include <seqan3/alphabet/nucleotide/dna4.hpp>
#include <seqan3/alphabet/quality/aliases.hpp> // includes seqan3::dna4q
#include <seqan3/alphabet/quality/phred42.hpp>
#include <seqan3/core/debug_stream.hpp>
#include <seqan3/range/views/to_char.hpp>
int main()
{
using seqan3::operator""_dna4;
seqan3::dna4_vector vec = "ACTTTGATA"_dna4;
auto v = vec | seqan3::views::to_char;
seqan3::debug_stream << v << '\n'; // [A,C,T,T,T,G,A,T,A]
std::vector<seqan3::phred42> qvec{{0}, {7}, {5}, {3}, {7}, {4}, {30}, {16}, {23}};
auto v3 = qvec | seqan3::views::to_char;
seqan3::debug_stream << v3 << '\n'; // [!,(,&,$,(,%,?,1,8]
std::vector<seqan3::dna4q> qcvec{{'C'_dna4, seqan3::phred42{0}}, {'A'_dna4, seqan3::phred42{7}},
{'G'_dna4, seqan3::phred42{5}}, {'T'_dna4, seqan3::phred42{3}},
{'G'_dna4, seqan3::phred42{7}}, {'A'_dna4, seqan3::phred42{4}},
{'C'_dna4, seqan3::phred42{30}}, {'T'_dna4, seqan3::phred42{16}},
{'A'_dna4, seqan3::phred42{23}}};
auto v4 = qcvec | seqan3::views::to_char;
seqan3::debug_stream << v4 << '\n'; // [C,A,G,T,G,A,C,T,A]
}
| {
"pile_set_name": "Github"
} |
/*
* linux/fs/sysv/ialloc.c
*
* minix/bitmap.c
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext/freelists.c
* Copyright (C) 1992 Remy Card ([email protected])
*
* xenix/alloc.c
* Copyright (C) 1992 Doug Evans
*
* coh/alloc.c
* Copyright (C) 1993 Pascal Haible, Bruno Haible
*
* sysv/ialloc.c
* Copyright (C) 1993 Bruno Haible
*
* This file contains code for allocating/freeing inodes.
*/
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include "sysv.h"
/* We don't trust the value of
sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes
but we nevertheless keep it up to date. */
/* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */
/* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */
/*
 * Locate slot @i of the superblock's free-inode cache (sbd->s_inode[i]).
 * When the whole superblock lives in a single buffer (s_bh1 == s_bh2) the
 * precomputed pointer array is used directly; otherwise (the split 512-byte
 * Xenix layout) the slot's byte offset decides which of the two superblock
 * buffers actually contains it.
 */
static inline sysv_ino_t *
sv_sb_fic_inode(struct super_block * sb, unsigned int i)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	if (sbi->s_bh1 == sbi->s_bh2)
		return &sbi->s_sb_fic_inodes[i];
	else {
		/* 512 byte Xenix FS: slots below offset 512 are in the first
		   buffer, the rest in the second. */
		unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]);
		if (offset < 512)
			return (sysv_ino_t*)(sbi->s_sbd1 + offset);
		else
			return (sysv_ino_t*)(sbi->s_sbd2 + offset);
	}
}
/*
 * Map inode number @ino to its on-disk struct sysv_inode.  Reads the block
 * containing the inode into *bh (the caller must brelse() it) and returns a
 * pointer into that buffer, or NULL if the read failed.  Inode numbers
 * start at 1, hence the (ino-1) arithmetic below.
 */
struct sysv_inode *
sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	struct sysv_inode *res;
	int block = sbi->s_firstinodezone + sbi->s_block_base;
	/* s_inodes_per_block_bits / s_inodes_per_block_1 are the log2 and
	   mask forms of inodes-per-block. */
	block += (ino-1) >> sbi->s_inodes_per_block_bits;
	*bh = sb_bread(sb, block);
	if (!*bh)
		return NULL;
	res = (struct sysv_inode *)(*bh)->b_data;
	return res + ((ino-1) & sbi->s_inodes_per_block_1);
}
/*
 * Refill the superblock's free-inode cache by scanning the on-disk inode
 * table, starting just past the root inode, for entries that are free
 * (i_mode == 0 && i_nlink == 0).  Stops after s_fic_size entries or at the
 * end of the table.  Returns the number of cache slots filled; 0 means a
 * read error or no free inodes.
 */
static int refill_free_cache(struct super_block *sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	int i = 0, ino;
	ino = SYSV_ROOT_INO+1;
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode)
		goto out;
	while (ino <= sbi->s_ninodes) {
		if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) {
			*sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino);
			if (i == sbi->s_fic_size)
				break;
		}
		/* Crossed a block boundary: release this buffer and read the
		   next inode block; otherwise just step to the next inode. */
		if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
			brelse(bh);
			raw_inode = sysv_raw_inode(sb, ino, &bh);
			if (!raw_inode)
				goto out;
		} else
			raw_inode++;
	}
	brelse(bh);
out:
	return i;
}
/*
 * Release @inode back to the filesystem: push its number onto the
 * superblock's free-inode cache (if there is room), bump the free-inode
 * total, and zero the on-disk inode so future scans see it as free
 * (i_mode == 0 && i_nlink == 0).
 *
 * Fix: dropped the redundant `sb = inode->i_sb;` reassignment — sb is
 * already initialized from inode->i_sb at its declaration (and sbi is
 * derived from it before this point).
 */
void sysv_free_inode(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned int ino;
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	unsigned count;
	ino = inode->i_ino;
	/* Inodes up to SYSV_ROOT_INO are reserved; anything past s_ninodes
	   does not exist on this filesystem. */
	if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) {
		printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n");
		return;
	}
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	clear_inode(inode);
	if (!raw_inode) {
		printk("sysv_free_inode: unable to read inode block on device "
		       "%s\n", inode->i_sb->s_id);
		return;
	}
	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
	/* Cache the freed inode number if the free-inode cache has room. */
	if (count < sbi->s_fic_size) {
		*sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino);
		*sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
	}
	fs16_add(sbi, sbi->s_sb_total_free_inodes, 1);
	dirty_sb(sb);
	/* Mark the on-disk inode free and schedule its block for writeback. */
	memset(raw_inode, 0, sizeof(struct sysv_inode));
	mark_buffer_dirty(bh);
	unlock_super(sb);
	brelse(bh);
}
/*
 * Allocate a fresh inode for an object created in directory @dir with the
 * given @mode.  Free inode numbers come from the superblock's cache,
 * refilled by scanning the inode table when it runs dry.  Returns the new
 * in-core inode, or ERR_PTR(-ENOMEM)/ERR_PTR(-ENOSPC) on failure.
 */
struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	struct inode *inode;
	sysv_ino_t ino;
	unsigned count;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	lock_super(sb);
	count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
	/* Cache empty (or its top slot unused)?  Rescan the inode table. */
	if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) {
		count = refill_free_cache(sb);
		if (count == 0) {
			iput(inode);
			unlock_super(sb);
			return ERR_PTR(-ENOSPC);
		}
	}
	/* Now count > 0. */
	ino = *sv_sb_fic_inode(sb,--count);
	*sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
	fs16_add(sbi, sbi->s_sb_total_free_inodes, -1);
	dirty_sb(sb);
	/* Inherit the group (and, for directories, the setgid bit) from a
	   setgid parent; otherwise use the caller's fsgid. */
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_uid = current_fsuid();
	inode->i_ino = fs16_to_cpu(sbi, ino);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blocks = 0;
	memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
	SYSV_I(inode)->i_dir_start_lookup = 0;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	inode->i_mode = mode;	/* for sysv_write_inode() */
	sysv_write_inode(inode, 0);	/* ensure inode not allocated again */
	mark_inode_dirty(inode);	/* cleared by sysv_write_inode() */
	/* That's it. */
	unlock_super(sb);
	return inode;
}
/*
 * Count free inodes by scanning the entire inode table and cross-check the
 * result against the superblock's cached total.  On mismatch the superblock
 * copy is corrected (when mounted read-write); on read errors the superblock
 * copy is trusted instead.  The `if (0)` below is a compile-time switch:
 * flip it to 1 to always trust the superblock and skip the expensive scan.
 */
unsigned long sysv_count_free_inodes(struct super_block * sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	int ino, count, sb_count;
	lock_super(sb);
	sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes);
	if (0)
		goto trust_sb;
	/* this causes a lot of disk traffic ... */
	count = 0;
	ino = SYSV_ROOT_INO+1;
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode)
		goto Eio;
	while (ino <= sbi->s_ninodes) {
		/* An inode is free iff i_mode == 0 && i_nlink == 0. */
		if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0)
			count++;
		/* Crossed a block boundary: read the next inode block. */
		if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
			brelse(bh);
			raw_inode = sysv_raw_inode(sb, ino, &bh);
			if (!raw_inode)
				goto Eio;
		} else
			raw_inode++;
	}
	brelse(bh);
	if (count != sb_count)
		goto Einval;
out:
	unlock_super(sb);
	return count;
Einval:
	/* Scan disagrees with the superblock: log and repair it if writable. */
	printk("sysv_count_free_inodes: "
		"free inode count was %d, correcting to %d\n",
		sb_count, count);
	if (!(sb->s_flags & MS_RDONLY)) {
		*sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count);
		dirty_sb(sb);
	}
	goto out;
Eio:
	printk("sysv_count_free_inodes: unable to read inode table\n");
trust_sb:
	count = sb_count;
	goto out;
}
| {
"pile_set_name": "Github"
} |
# Anonymous inner class WalletBrandUI$a$1: a DialogInterface.OnClickListener
# created inside WalletBrandUI$a.onSceneEnd().  When its dialog button is
# clicked it calls setResult(0) (Activity.RESULT_CANCELED) on the hosting
# WalletBrandUI activity and finishes it.
.class final Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a$1;
.super Ljava/lang/Object;
.source "SourceFile"
# interfaces
.implements Landroid/content/DialogInterface$OnClickListener;
# annotations
.annotation system Ldalvik/annotation/EnclosingMethod;
    value = Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;->onSceneEnd(IILjava/lang/String;Lcom/tencent/mm/t/j;)V
.end annotation
.annotation system Ldalvik/annotation/InnerClass;
    accessFlags = 0x0
    name = null
.end annotation
# instance fields
# Synthetic reference to the enclosing WalletBrandUI$a instance.
.field final synthetic iwg:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;
# direct methods
# Constructor: captures the enclosing WalletBrandUI$a in the synthetic field.
.method constructor <init>(Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;)V
    .locals 0
    .prologue
    .line 333
    iput-object p1, p0, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a$1;->iwg:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;
    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
    return-void
.end method
# virtual methods
.method public final onClick(Landroid/content/DialogInterface;I)V
    .locals 2
    .prologue
    # this.iwg.iwf.setResult(0): report the activity result as cancelled.
    .line 337
    iget-object v0, p0, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a$1;->iwg:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;
    iget-object v0, v0, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;->iwf:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI;
    const/4 v1, 0x0
    invoke-virtual {v0, v1}, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI;->setResult(I)V
    # this.iwg.iwf.finish(): close the activity.
    .line 338
    iget-object v0, p0, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a$1;->iwg:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;
    iget-object v0, v0, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI$a;->iwf:Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI;
    invoke-virtual {v0}, Lcom/tencent/mm/plugin/wallet_index/ui/WalletBrandUI;->finish()V
    .line 339
    return-void
.end method
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2019 Google LLC
* Copyright 2019 Mellanox Technologies Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// RV64B bit-manipulation instruction declarations, grouped by category.
// ARITHMETIC instructions
`DEFINE_B_INSTR(BMATOR, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(BMATXOR, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(BMATFLIP, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CRC32_D, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CRC32C_D, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(ADDIWU, I_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(ADDWU, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(SUBWU, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(ADDU_W, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(SUBU_W, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CLZW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CTZW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(PCNTW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CLMULW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CLMULRW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(CLMULHW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(SHFLW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(UNSHFLW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(BDEPW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(BEXTW, R_FORMAT, ARITHMETIC, RV64B)
`DEFINE_B_INSTR(BFPW, R_FORMAT, ARITHMETIC, RV64B)
// SHIFT instructions (immediate variants carry an unsigned immediate, UIMM)
`DEFINE_B_INSTR(SLLIU_W, I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(SLOW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SROW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(ROLW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(RORW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SBCLRW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SBSETW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SBINVW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SBEXTW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(GREVW, R_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(SLOIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(SROIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(RORIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(SBCLRIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(SBSETIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(SBINVIW , I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(GREVIW, I_FORMAT, SHIFT, RV64B, UIMM)
`DEFINE_B_INSTR(FSLW, R4_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(FSRW, R4_FORMAT, SHIFT, RV64B)
`DEFINE_B_INSTR(FSRIW, I_FORMAT, SHIFT, RV64B, UIMM)
// LOGICAL instructions
`DEFINE_B_INSTR(GORCW, R_FORMAT, LOGICAL, RV64B)
`DEFINE_B_INSTR(GORCIW, I_FORMAT, LOGICAL, RV64B, UIMM)
`DEFINE_B_INSTR(PACKW, R_FORMAT, LOGICAL, RV64B)
`DEFINE_B_INSTR(PACKUW, R_FORMAT, LOGICAL, RV64B)
| {
"pile_set_name": "Github"
} |
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdlib.h>
/* Initialize a pipe handle on the given loop.  `ipc` selects whether the
 * pipe will be used to pass handles between processes.  Always returns 0. */
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
  uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
  handle->shutdown_req = NULL;
  handle->connect_req = NULL;
  handle->pipe_fname = NULL;
  handle->ipc = ipc;
  return 0;
}
/* Bind the pipe to the unix domain socket path `name`.  A strdup'ed copy of
 * the name is kept in handle->pipe_fname so uv__pipe_close() can unlink it.
 * ENOENT from bind() is mapped to UV_EACCES for Windows compatibility. */
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
  struct sockaddr_un saddr;
  const char* pipe_fname;
  int sockfd;
  int err;
  pipe_fname = NULL;
  /* Already bound? */
  if (uv__stream_fd(handle) >= 0)
    return UV_EINVAL;
  /* Make a copy of the file name, it outlives this function's scope. */
  pipe_fname = uv__strdup(name);
  if (pipe_fname == NULL)
    return UV_ENOMEM;
  /* We've got a copy, don't touch the original any more. */
  name = NULL;
  err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
  if (err < 0)
    goto err_socket;
  sockfd = err;
  memset(&saddr, 0, sizeof saddr);
  uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
  saddr.sun_family = AF_UNIX;
  if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
    err = UV__ERR(errno);
    /* Convert ENOENT to EACCES for compatibility with Windows. */
    if (err == UV_ENOENT)
      err = UV_EACCES;
    uv__close(sockfd);
    goto err_socket;
  }
  /* Success. */
  handle->flags |= UV_HANDLE_BOUND;
  handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
  handle->io_watcher.fd = sockfd;
  return 0;
err_socket:
  uv__free((void*)pipe_fname);
  return err;
}
/* Start accepting connections on a bound, non-IPC pipe.  New connections
 * are reported through `cb` via the stream's server io watcher. */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
  if (uv__stream_fd(handle) == -1)
    return UV_EINVAL;
  if (handle->ipc)
    return UV_EINVAL;
#if defined(__MVS__) || defined(__PASE__)
  /* On zOS, backlog=0 has undefined behaviour */
  /* On IBMi PASE, backlog=0 leads to "Connection refused" error */
  if (backlog == 0)
    backlog = 1;
  else if (backlog < 0)
    backlog = SOMAXCONN;
#endif
  if (listen(uv__stream_fd(handle), backlog))
    return UV__ERR(errno);
  handle->connection_cb = cb;
  handle->io_watcher.cb = uv__server_io;
  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
  return 0;
}
/* Close the pipe: unlink the bound socket path (when this handle created
 * one via uv_pipe_bind()) and tear down the underlying stream. */
void uv__pipe_close(uv_pipe_t* handle) {
  if (handle->pipe_fname) {
    /*
     * Unlink the file system entity before closing the file descriptor.
     * Doing it the other way around introduces a race where our process
     * unlinks a socket with the same name that's just been created by
     * another thread or process.
     */
    unlink(handle->pipe_fname);
    uv__free((void*)handle->pipe_fname);
    handle->pipe_fname = NULL;
  }
  uv__stream_close((uv_stream_t*)handle);
}
/* Adopt an existing file descriptor (e.g. an inherited pipe end).  The fd
 * is switched to non-blocking mode and its O_ACCMODE bits decide whether
 * the handle becomes readable, writable or both.  Fails with UV_EEXIST if
 * the fd is already registered with the loop. */
int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
  int flags;
  int mode;
  int err;
  flags = 0;
  if (uv__fd_exists(handle->loop, fd))
    return UV_EEXIST;
  /* Retry fcntl() if interrupted by a signal. */
  do
    mode = fcntl(fd, F_GETFL);
  while (mode == -1 && errno == EINTR);
  if (mode == -1)
    return UV__ERR(errno); /* according to docs, must be EBADF */
  err = uv__nonblock(fd, 1);
  if (err)
    return err;
#if defined(__APPLE__)
  err = uv__stream_try_select((uv_stream_t*) handle, &fd);
  if (err)
    return err;
#endif /* defined(__APPLE__) */
  /* Map the fd's open mode onto handle readability/writability. */
  mode &= O_ACCMODE;
  if (mode != O_WRONLY)
    flags |= UV_HANDLE_READABLE;
  if (mode != O_RDONLY)
    flags |= UV_HANDLE_WRITABLE;
  return uv__stream_open((uv_stream_t*)handle, fd, flags);
}
/* Connect the pipe to the unix domain socket at `name`.  The result is
 * delivered asynchronously through `cb`; on immediate failure the error is
 * stashed in handle->delayed_error and the callback is still invoked on
 * the next loop iteration. */
void uv_pipe_connect(uv_connect_t* req,
                    uv_pipe_t* handle,
                    const char* name,
                    uv_connect_cb cb) {
  struct sockaddr_un saddr;
  int new_sock;
  int err;
  int r;
  /* Create the socket lazily; the handle may already own an fd. */
  new_sock = (uv__stream_fd(handle) == -1);
  if (new_sock) {
    err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
    if (err < 0)
      goto out;
    handle->io_watcher.fd = err;
  }
  memset(&saddr, 0, sizeof saddr);
  uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
  saddr.sun_family = AF_UNIX;
  /* Retry connect() if interrupted by a signal. */
  do {
    r = connect(uv__stream_fd(handle),
                (struct sockaddr*)&saddr, sizeof saddr);
  }
  while (r == -1 && errno == EINTR);
  /* EINPROGRESS means the connect continues asynchronously. */
  if (r == -1 && errno != EINPROGRESS) {
    err = UV__ERR(errno);
#if defined(__CYGWIN__) || defined(__MSYS__)
    /* EBADF is supposed to mean that the socket fd is bad, but
       Cygwin reports EBADF instead of ENOTSOCK when the file is
       not a socket.  We do not expect to see a bad fd here
       (e.g. due to new_sock), so translate the error. */
    if (err == UV_EBADF)
      err = UV_ENOTSOCK;
#endif
    goto out;
  }
  err = 0;
  if (new_sock) {
    err = uv__stream_open((uv_stream_t*)handle,
                          uv__stream_fd(handle),
                          UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  }
  if (err == 0)
    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
out:
  handle->delayed_error = err;
  handle->connect_req = req;
  uv__req_init(handle->loop, req, UV_CONNECT);
  req->handle = (uv_stream_t*)handle;
  req->cb = cb;
  QUEUE_INIT(&req->queue);
  /* Force callback to run on next tick in case of error. */
  if (err)
    uv__io_feed(handle->loop, &handle->io_watcher);
}
/* Shared implementation behind uv_pipe_getsockname()/uv_pipe_getpeername().
 * `func` is getsockname or getpeername.  On success the socket path is
 * copied into `buffer` and *size is set to its length.  If the buffer is
 * too small, *size is set to the required size (including the NUL) and
 * UV_ENOBUFS is returned.  Linux abstract-namespace names (leading '\0')
 * are returned without a terminating NUL. */
static int uv__pipe_getsockpeername(const uv_pipe_t* handle,
                                    uv__peersockfunc func,
                                    char* buffer,
                                    size_t* size) {
  struct sockaddr_un sa;
  socklen_t addrlen;
  int err;
  addrlen = sizeof(sa);
  memset(&sa, 0, addrlen);
  err = uv__getsockpeername((const uv_handle_t*) handle,
                            func,
                            (struct sockaddr*) &sa,
                            (int*) &addrlen);
  if (err < 0) {
    *size = 0;
    return err;
  }
#if defined(__linux__)
  if (sa.sun_path[0] == 0)
    /* Linux abstract namespace: length comes from addrlen, not strlen. */
    addrlen -= offsetof(struct sockaddr_un, sun_path);
  else
#endif
    addrlen = strlen(sa.sun_path);
  if ((size_t)addrlen >= *size) {
    *size = addrlen + 1;
    return UV_ENOBUFS;
  }
  memcpy(buffer, sa.sun_path, addrlen);
  *size = addrlen;
  /* only null-terminate if it's not an abstract socket */
  if (buffer[0] != '\0')
    buffer[addrlen] = '\0';
  return 0;
}
/* Fetch the path this pipe is bound to; see uv__pipe_getsockpeername(). */
int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
  return uv__pipe_getsockpeername(handle, getsockname, buffer, size);
}
/* Fetch the path of the peer this pipe is connected to; see
 * uv__pipe_getsockpeername(). */
int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
  return uv__pipe_getsockpeername(handle, getpeername, buffer, size);
}
/* Intentionally a no-op here: `count` is ignored.  Presumably kept for API
 * parity with another backend — confirm against the public uv.h contract. */
void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
}
/* Number of file descriptors waiting to be accepted on an IPC pipe:
 * zero when no fd has arrived, otherwise the accepted fd plus anything
 * still sitting in the queued-fds overflow list. */
int uv_pipe_pending_count(uv_pipe_t* handle) {
  uv__stream_queued_fds_t* q;
  /* Only IPC pipes carry descriptors, and only once one has arrived. */
  if (!handle->ipc || handle->accepted_fd == -1)
    return 0;
  q = handle->queued_fds;
  return q == NULL ? 1 : q->offset + 1;
}
/* Type of the next pending handle on an IPC pipe, derived from the
 * accepted fd; UV_UNKNOWN_HANDLE when nothing is pending. */
uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
  if (handle->ipc && handle->accepted_fd != -1)
    return uv__handle_type(handle->accepted_fd);
  return UV_UNKNOWN_HANDLE;
}
/* Change the filesystem permissions of a bound pipe.  `mode` is a mask of
 * UV_READABLE/UV_WRITABLE; the matching user/group/other bits are set on
 * the socket file.  fchmod() is not reliable on all platforms, so the
 * socket's path is looked up via uv_pipe_getsockname() and chmod() is used.
 *
 * Fix: the stat() failure path returned raw `-errno`, inconsistent with
 * the UV__ERR(errno) convention used on every other error path in this
 * file (see uv_pipe_bind/uv_pipe_open/uv_pipe_connect and the chmod()
 * failure below); now uses UV__ERR(errno). */
int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
  unsigned desired_mode;
  struct stat pipe_stat;
  char* name_buffer;
  size_t name_len;
  int r;
  if (handle == NULL || uv__stream_fd(handle) == -1)
    return UV_EBADF;
  if (mode != UV_READABLE &&
      mode != UV_WRITABLE &&
      mode != (UV_WRITABLE | UV_READABLE))
    return UV_EINVAL;
  /* Unfortunately fchmod does not work on all platforms, we will use chmod. */
  /* First call discovers the required buffer size via UV_ENOBUFS. */
  name_len = 0;
  r = uv_pipe_getsockname(handle, NULL, &name_len);
  if (r != UV_ENOBUFS)
    return r;
  name_buffer = uv__malloc(name_len);
  if (name_buffer == NULL)
    return UV_ENOMEM;
  r = uv_pipe_getsockname(handle, name_buffer, &name_len);
  if (r != 0) {
    uv__free(name_buffer);
    return r;
  }
  /* stat must be used as fstat has a bug on Darwin */
  if (stat(name_buffer, &pipe_stat) == -1) {
    uv__free(name_buffer);
    return UV__ERR(errno);
  }
  desired_mode = 0;
  if (mode & UV_READABLE)
    desired_mode |= S_IRUSR | S_IRGRP | S_IROTH;
  if (mode & UV_WRITABLE)
    desired_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
  /* Exit early if pipe already has desired mode. */
  if ((pipe_stat.st_mode & desired_mode) == desired_mode) {
    uv__free(name_buffer);
    return 0;
  }
  pipe_stat.st_mode |= desired_mode;
  r = chmod(name_buffer, pipe_stat.st_mode);
  uv__free(name_buffer);
  return r != -1 ? 0 : UV__ERR(errno);
}
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.